// nix-super/src/libstore/remote-store.cc

#include "serialise.hh"
#include "util.hh"
#include "path-with-outputs.hh"
#include "gc-store.hh"
#include "remote-fs-accessor.hh"
#include "build-result.hh"
#include "remote-store.hh"
#include "remote-store-connection.hh"
#include "worker-protocol.hh"
#include "worker-protocol-impl.hh"
#include "archive.hh"
#include "globals.hh"
#include "derivations.hh"
#include "pool.hh"
#include "finally.hh"
#include "git.hh"
#include "logging.hh"
#include "callback.hh"
#include "filetransfer.hh"
#include "signals.hh"
#include <nlohmann/json.hpp>
namespace nix {
/* TODO: Separate these store types into different files, give them better names */
RemoteStore::RemoteStore(const Params & params)
: RemoteStoreConfig(params)
, Store(params)
, connections(make_ref<Pool<Connection>>(
std::max(1, (int) maxConnections),
[this]() {
auto conn = openConnectionWrapper();
try {
initConnection(*conn);
} catch (...) {
failed = true;
throw;
}
return conn;
},
[this](const ref<Connection> & r) {
return
r->to.good()
&& r->from.good()
&& std::chrono::duration_cast<std::chrono::seconds>(
std::chrono::steady_clock::now() - r->startTime).count() < maxConnectionAge;
}
))
{
}
ref<RemoteStore::Connection> RemoteStore::openConnectionWrapper()
{
if (failed)
throw Error("opening a connection to remote store '%s' previously failed", getUri());
try {
return openConnection();
} catch (...) {
failed = true;
throw;
}
}
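/* Perform the initial handshake with the daemon: exchange the magic
numbers, negotiate the protocol version, send the obsolete CPU
affinity and reserve-space fields expected by older daemons, read the
daemon's version string and trust level where supported, and finally
push the client settings via setOptions(). */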
void RemoteStore::initConnection(Connection & conn)
{
/* Send the magic greeting, check for the reply. */
try {
conn.from.endOfFileError = "Nix daemon disconnected unexpectedly (maybe it crashed?)";
conn.to << WORKER_MAGIC_1;
conn.to.flush();
StringSink saved;
try {
TeeSource tee(conn.from, saved);
unsigned int magic = readInt(tee);
if (magic != WORKER_MAGIC_2)
throw Error("protocol mismatch");
} catch (SerialisationError & e) {
/* In case the other side is waiting for our input, close
it. */
conn.closeWrite();
auto msg = conn.from.drain();
throw Error("protocol mismatch, got '%s'", chomp(saved.s + msg));
}
conn.from >> conn.daemonVersion;
if (GET_PROTOCOL_MAJOR(conn.daemonVersion) != GET_PROTOCOL_MAJOR(PROTOCOL_VERSION))
throw Error("Nix daemon protocol version not supported");
if (GET_PROTOCOL_MINOR(conn.daemonVersion) < 10)
throw Error("the Nix daemon version is too old");
conn.to << PROTOCOL_VERSION;
if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 14) {
// Obsolete CPU affinity.
conn.to << 0;
}
if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 11)
conn.to << false; // obsolete reserveSpace
if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 33) {
conn.to.flush();
conn.daemonNixVersion = readString(conn.from);
}
if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 35) {
conn.remoteTrustsUs = WorkerProto::Serialise<std::optional<TrustedFlag>>::read(*this, conn);
} else {
// We don't know the answer; protocol too old.
conn.remoteTrustsUs = std::nullopt;
}
auto ex = conn.processStderr();
if (ex) std::rethrow_exception(ex);
}
catch (Error & e) {
throw Error("cannot open connection to remote store '%s': %s", getUri(), e.what());
}
setOptions(conn);
}
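/* Send the client's settings (verbosity, build limits, substituter
use and, for protocol >= 1.12, any overridden settings) to the daemon
via the SetOptions operation. */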
void RemoteStore::setOptions(Connection & conn)
{
conn.to << WorkerProto::Op::SetOptions
<< settings.keepFailed
<< settings.keepGoing
<< settings.tryFallback
<< verbosity
<< settings.maxBuildJobs
<< settings.maxSilentTime
<< true
<< (settings.verboseBuild ? lvlError : lvlVomit)
<< 0 // obsolete log type
<< 0 /* obsolete print build trace */
<< settings.buildCores
<< settings.useSubstitutes;
if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 12) {
std::map<std::string, Config::SettingInfo> overrides;
settings.getSettings(overrides, true); // libstore settings
fileTransferSettings.getSettings(overrides, true);
overrides.erase(settings.keepFailed.name);
overrides.erase(settings.keepGoing.name);
overrides.erase(settings.tryFallback.name);
overrides.erase(settings.maxBuildJobs.name);
overrides.erase(settings.maxSilentTime.name);
overrides.erase(settings.buildCores.name);
overrides.erase(settings.useSubstitutes.name);
overrides.erase(loggerSettings.showTrace.name);
overrides.erase(experimentalFeatureSettings.experimentalFeatures.name);
overrides.erase(settings.pluginFiles.name);
conn.to << overrides.size();
for (auto & i : overrides)
conn.to << i.first << i.second.value;
}
auto ex = conn.processStderr();
if (ex) std::rethrow_exception(ex);
}
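/* If the handle is destroyed while unwinding from an exception that
did not originate from the daemon, mark the connection as bad so the
pool discards it instead of reusing a possibly desynchronised
protocol stream. */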
RemoteStore::ConnectionHandle::~ConnectionHandle()
{
if (!daemonException && std::uncaught_exceptions()) {
handle.markBad();
debug("closing daemon connection because of an exception");
}
}
void RemoteStore::ConnectionHandle::processStderr(Sink * sink, Source * source, bool flush)
{
auto ex = handle->processStderr(sink, source, flush);
if (ex) {
daemonException = true;
try {
std::rethrow_exception(ex);
} catch (const Error & e) {
// Nix versions before #4628 did not have an adequate behavior for reporting that the derivation format was upgraded.
// To avoid having to add compatibility logic in many places, we expect to catch almost all occurrences of the
// old incomprehensible error here, so that we can explain to users what's going on when their daemon is
// older than #4628 (2023).
if (experimentalFeatureSettings.isEnabled(Xp::DynamicDerivations) &&
GET_PROTOCOL_MINOR(handle->daemonVersion) <= 35)
{
auto m = e.msg();
if (m.find("parsing derivation") != std::string::npos &&
m.find("expected string") != std::string::npos &&
m.find("Derive([") != std::string::npos)
throw Error("%s, this might be because the daemon is too old to understand dependencies on dynamic derivations. Check to see if the raw derivation is in the form '%s'", std::move(m), "DrvWithVersion(..)");
}
throw;
}
}
}
RemoteStore::ConnectionHandle RemoteStore::getConnection()
{
return ConnectionHandle(connections->get());
}
void RemoteStore::setOptions()
{
setOptions(*(getConnection().handle));
}
bool RemoteStore::isValidPathUncached(const StorePath & path)
{
auto conn(getConnection());
conn->to << WorkerProto::Op::IsValidPath << printStorePath(path);
conn.processStderr();
return readInt(conn->from);
}
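/* For daemons older than protocol 1.12 fall back to checking each
path individually; newer daemons support the batched QueryValidPaths
operation (with a substitute flag as of 1.27). */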
StorePathSet RemoteStore::queryValidPaths(const StorePathSet & paths, SubstituteFlag maybeSubstitute)
{
auto conn(getConnection());
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
StorePathSet res;
for (auto & i : paths)
if (isValidPath(i)) res.insert(i);
return res;
} else {
conn->to << WorkerProto::Op::QueryValidPaths;
WorkerProto::write(*this, *conn, paths);
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 27) {
conn->to << maybeSubstitute;
}
conn.processStderr();
return WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
}
}
StorePathSet RemoteStore::queryAllValidPaths()
{
auto conn(getConnection());
conn->to << WorkerProto::Op::QueryAllValidPaths;
conn.processStderr();
return WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
}
StorePathSet RemoteStore::querySubstitutablePaths(const StorePathSet & paths)
{
auto conn(getConnection());
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
StorePathSet res;
for (auto & i : paths) {
conn->to << WorkerProto::Op::HasSubstitutes << printStorePath(i);
conn.processStderr();
if (readInt(conn->from)) res.insert(i);
}
return res;
} else {
conn->to << WorkerProto::Op::QuerySubstitutablePaths;
WorkerProto::write(*this, *conn, paths);
conn.processStderr();
return WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
}
}
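/* Query substituter info for a set of paths. Protocol < 1.12 needs
one QuerySubstitutablePathInfo round trip per path; newer daemons
accept the whole set (including the optional content addresses as of
1.22) in a single QuerySubstitutablePathInfos call. */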
void RemoteStore::querySubstitutablePathInfos(const StorePathCAMap & pathsMap, SubstitutablePathInfos & infos)
{
if (pathsMap.empty()) return;
auto conn(getConnection());
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
for (auto & i : pathsMap) {
SubstitutablePathInfo info;
conn->to << WorkerProto::Op::QuerySubstitutablePathInfo << printStorePath(i.first);
conn.processStderr();
unsigned int reply = readInt(conn->from);
if (reply == 0) continue;
auto deriver = readString(conn->from);
if (deriver != "")
info.deriver = parseStorePath(deriver);
info.references = WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
info.downloadSize = readLongLong(conn->from);
info.narSize = readLongLong(conn->from);
infos.insert_or_assign(i.first, std::move(info));
}
} else {
conn->to << WorkerProto::Op::QuerySubstitutablePathInfos;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 22) {
StorePathSet paths;
for (auto & path : pathsMap)
paths.insert(path.first);
WorkerProto::write(*this, *conn, paths);
} else
WorkerProto::write(*this, *conn, pathsMap);
conn.processStderr();
size_t count = readNum<size_t>(conn->from);
for (size_t n = 0; n < count; n++) {
SubstitutablePathInfo & info(infos[parseStorePath(readString(conn->from))]);
auto deriver = readString(conn->from);
if (deriver != "")
info.deriver = parseStorePath(deriver);
info.references = WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
info.downloadSize = readLongLong(conn->from);
info.narSize = readLongLong(conn->from);
}
}
}
void RemoteStore::queryPathInfoUncached(const StorePath & path,
Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept
{
try {
std::shared_ptr<const ValidPathInfo> info;
{
auto conn(getConnection());
conn->to << WorkerProto::Op::QueryPathInfo << printStorePath(path);
try {
conn.processStderr();
} catch (Error & e) {
// Ugly backwards compatibility hack.
if (e.msg().find("is not valid") != std::string::npos)
throw InvalidPath(std::move(e.info()));
throw;
}
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 17) {
bool valid; conn->from >> valid;
if (!valid) throw InvalidPath("path '%s' is not valid", printStorePath(path));
}
info = std::make_shared<ValidPathInfo>(
StorePath{path},
WorkerProto::Serialise<UnkeyedValidPathInfo>::read(*this, *conn));
}
callback(std::move(info));
} catch (...) { callback.rethrow(); }
}
void RemoteStore::queryReferrers(const StorePath & path,
StorePathSet & referrers)
{
auto conn(getConnection());
conn->to << WorkerProto::Op::QueryReferrers << printStorePath(path);
conn.processStderr();
for (auto & i : WorkerProto::Serialise<StorePathSet>::read(*this, *conn))
referrers.insert(i);
}
StorePathSet RemoteStore::queryValidDerivers(const StorePath & path)
{
auto conn(getConnection());
conn->to << WorkerProto::Op::QueryValidDerivers << printStorePath(path);
conn.processStderr();
return WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
}
StorePathSet RemoteStore::queryDerivationOutputs(const StorePath & path)
{
if (GET_PROTOCOL_MINOR(getProtocol()) >= 0x16) {
return Store::queryDerivationOutputs(path);
}
auto conn(getConnection());
conn->to << WorkerProto::Op::QueryDerivationOutputs << printStorePath(path);
conn.processStderr();
return WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
}
std::map<std::string, std::optional<StorePath>> RemoteStore::queryPartialDerivationOutputMap(const StorePath & path, Store * evalStore_)
{
if (GET_PROTOCOL_MINOR(getProtocol()) >= 0x16) {
if (!evalStore_) {
auto conn(getConnection());
conn->to << WorkerProto::Op::QueryDerivationOutputMap << printStorePath(path);
conn.processStderr();
return WorkerProto::Serialise<std::map<std::string, std::optional<StorePath>>>::read(*this, *conn);
} else {
auto & evalStore = *evalStore_;
auto outputs = evalStore.queryStaticPartialDerivationOutputMap(path);
// union with the first branch overriding the statically-known ones
// when non-`std::nullopt`.
for (auto && [outputName, optPath] : queryPartialDerivationOutputMap(path, nullptr)) {
if (optPath)
outputs.insert_or_assign(std::move(outputName), std::move(optPath));
else
outputs.insert({std::move(outputName), std::nullopt});
}
return outputs;
}
} else {
auto & evalStore = evalStore_ ? *evalStore_ : *this;
// Fallback for old daemon versions.
// For floating-CA derivations (and their co-dependencies) this is an
// under-approximation as it only returns the paths that can be inferred
// from the derivation itself (and not the ones that are known because
// they have been built), but as old stores don't handle floating-CA
// derivations this shouldn't matter
return evalStore.queryStaticPartialDerivationOutputMap(path);
}
}
std::optional<StorePath> RemoteStore::queryPathFromHashPart(const std::string & hashPart)
{
auto conn(getConnection());
conn->to << WorkerProto::Op::QueryPathFromHashPart << hashPart;
conn.processStderr();
Path path = readString(conn->from);
if (path.empty()) return {};
return parseStorePath(path);
}
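/* Add content-addressed data to the store. Protocol >= 1.25 uses a
single AddToStore operation with a framed sink; older daemons are
served through the legacy AddTextToStore / AddToStore operations,
after which the path info is fetched with queryPathInfo(). */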
ref<const ValidPathInfo> RemoteStore::addCAToStore(
Source & dump,
std::string_view name,
ContentAddressMethod caMethod,
HashAlgorithm hashAlgo,
const StorePathSet & references,
RepairFlag repair)
{
std::optional<ConnectionHandle> conn_(getConnection());
auto & conn = *conn_;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 25) {
conn->to
<< WorkerProto::Op::AddToStore
<< name
<< caMethod.renderWithAlgo(hashAlgo);
WorkerProto::write(*this, *conn, references);
conn->to << repair;
// The dump source may invoke the store, so we need to make some room.
connections->incCapacity();
{
Finally cleanup([&]() { connections->decCapacity(); });
conn.withFramedSink([&](Sink & sink) {
dump.drainInto(sink);
});
}
return make_ref<ValidPathInfo>(
WorkerProto::Serialise<ValidPathInfo>::read(*this, *conn));
}
else {
if (repair) throw Error("repairing is not supported when building through the Nix daemon protocol < 1.25");
std::visit(overloaded {
[&](const TextIngestionMethod & thm) -> void {
if (hashAlgo != HashAlgorithm::SHA256)
throw UnimplementedError("When adding text-hashed data called '%s', only SHA-256 is supported but '%s' was given",
name, printHashAlgo(hashAlgo));
std::string s = dump.drain();
conn->to << WorkerProto::Op::AddTextToStore << name << s;
WorkerProto::write(*this, *conn, references);
conn.processStderr();
},
[&](const FileIngestionMethod & fim) -> void {
conn->to
<< WorkerProto::Op::AddToStore
<< name
<< ((hashAlgo == HashAlgorithm::SHA256 && fim == FileIngestionMethod::Recursive) ? 0 : 1) /* backwards compatibility hack */
<< (fim == FileIngestionMethod::Recursive ? 1 : 0)
<< printHashAlgo(hashAlgo);
try {
conn->to.written = 0;
connections->incCapacity();
{
Finally cleanup([&]() { connections->decCapacity(); });
if (fim == FileIngestionMethod::Recursive) {
dump.drainInto(conn->to);
} else {
std::string contents = dump.drain();
dumpString(contents, conn->to);
}
}
conn.processStderr();
} catch (SysError & e) {
/* Daemon closed while we were sending the path. Probably OOM
or I/O error. */
if (e.errNo == EPIPE)
try {
conn.processStderr();
} catch (EndOfFile & e) { }
throw;
}
}
}, caMethod.raw);
auto path = parseStorePath(readString(conn->from));
// Release our connection to prevent a deadlock in queryPathInfo().
conn_.reset();
return queryPathInfo(path);
}
}
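/* Map the requested ingestion method onto the matching file
serialisation method and delegate to addCAToStore(); combinations
where the dump serialisation does not match are rejected as
unsupported. */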
StorePath RemoteStore::addToStoreFromDump(
Source & dump,
std::string_view name,
FileSerialisationMethod dumpMethod,
ContentAddressMethod hashMethod,
HashAlgorithm hashAlgo,
const StorePathSet & references,
RepairFlag repair)
{
FileSerialisationMethod fsm;
switch (hashMethod.getFileIngestionMethod()) {
case FileIngestionMethod::Flat:
fsm = FileSerialisationMethod::Flat;
break;
case FileIngestionMethod::Recursive:
fsm = FileSerialisationMethod::Recursive;
break;
case FileIngestionMethod::Git:
// Use NAR; Git is not a serialization method
fsm = FileSerialisationMethod::Recursive;
break;
}
if (fsm != dumpMethod)
unsupported("RemoteStore::addToStoreFromDump doesn't support this `dumpMethod` `hashMethod` combination");
return addCAToStore(dump, name, hashMethod, hashAlgo, references, repair)->path;
}
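/* Copy a NAR plus its metadata to the daemon. Very old daemons
(protocol < 1.18) only support the ImportPaths operation; newer ones
use AddToStoreNar, with a framed sink as of 1.23. */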
void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
RepairFlag repair, CheckSigsFlag checkSigs)
{
auto conn(getConnection());
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 18) {
conn->to << WorkerProto::Op::ImportPaths;
auto source2 = sinkToSource([&](Sink & sink) {
sink << 1 // == path follows
;
copyNAR(source, sink);
sink
<< exportMagic
<< printStorePath(info.path);
WorkerProto::write(*this, *conn, info.references);
sink
<< (info.deriver ? printStorePath(*info.deriver) : "")
<< 0 // == no legacy signature
<< 0 // == no path follows
;
});
conn.processStderr(0, source2.get());
auto importedPaths = WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
assert(importedPaths.size() <= 1);
}
else {
conn->to << WorkerProto::Op::AddToStoreNar
<< printStorePath(info.path)
<< (info.deriver ? printStorePath(*info.deriver) : "")
<< info.narHash.to_string(HashFormat::Base16, false);
WorkerProto::write(*this, *conn, info.references);
conn->to << info.registrationTime << info.narSize
<< info.ultimate << info.sigs << renderContentAddress(info.ca)
<< repair << !checkSigs;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 23) {
conn.withFramedSink([&](Sink & sink) {
copyNAR(source, sink);
});
} else if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 21) {
conn.processStderr(0, &source);
} else {
copyNAR(source, conn->to);
conn.processStderr(0, nullptr);
}
}
}
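/* Serialise a batch of (path info, NAR source) pairs into a single
stream and hand it to the overload below, which sends it as one
AddMultipleToStore operation (protocol >= 1.32) or falls back to
copying the paths one at a time. */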
void RemoteStore::addMultipleToStore(
PathsSource & pathsToCopy,
Activity & act,
RepairFlag repair,
CheckSigsFlag checkSigs)
{
auto source = sinkToSource([&](Sink & sink) {
sink << pathsToCopy.size();
for (auto & [pathInfo, pathSource] : pathsToCopy) {
WorkerProto::Serialise<ValidPathInfo>::write(*this,
WorkerProto::WriteConn {
.to = sink,
.version = 16,
},
pathInfo);
pathSource->drainInto(sink);
}
});
addMultipleToStore(*source, repair, checkSigs);
}
void RemoteStore::addMultipleToStore(
Source & source,
RepairFlag repair,
CheckSigsFlag checkSigs)
{
if (GET_PROTOCOL_MINOR(getConnection()->daemonVersion) >= 32) {
auto conn(getConnection());
conn->to
<< WorkerProto::Op::AddMultipleToStore
<< repair
<< !checkSigs;
conn.withFramedSink([&](Sink & sink) {
source.drainInto(sink);
});
} else
Store::addMultipleToStore(source, repair, checkSigs);
}
void RemoteStore::registerDrvOutput(const Realisation & info)
{
auto conn(getConnection());
conn->to << WorkerProto::Op::RegisterDrvOutput;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 31) {
conn->to << info.id.to_string();
conn->to << std::string(info.outPath.to_string());
} else {
WorkerProto::write(*this, *conn, info);
}
conn.processStderr();
}
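/* Look up the realisation of a derivation output. Daemons older than
protocol 1.27 don't know about content-addressed derivations, so we
just warn and return nothing. */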
void RemoteStore::queryRealisationUncached(const DrvOutput & id,
Callback<std::shared_ptr<const Realisation>> callback) noexcept
{
try {
auto conn(getConnection());
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 27) {
warn("the daemon is too old to support content-addressed derivations, please upgrade it to 2.4");
return callback(nullptr);
}
conn->to << WorkerProto::Op::QueryRealisation;
conn->to << id.to_string();
conn.processStderr();
auto real = [&]() -> std::shared_ptr<const Realisation> {
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 31) {
auto outPaths = WorkerProto::Serialise<std::set<StorePath>>::read(
*this, *conn);
if (outPaths.empty())
return nullptr;
return std::make_shared<const Realisation>(Realisation { .id = id, .outPath = *outPaths.begin() });
} else {
auto realisations = WorkerProto::Serialise<std::set<Realisation>>::read(
*this, *conn);
if (realisations.empty())
return nullptr;
return std::make_shared<const Realisation>(*realisations.begin());
}
}();
callback(std::shared_ptr<const Realisation>(real));
} catch (...) { return callback.rethrow(); }
}
void RemoteStore::copyDrvsFromEvalStore(
const std::vector<DerivedPath> & paths,
std::shared_ptr<Store> evalStore)
{
if (evalStore && evalStore.get() != this) {
/* The remote doesn't have a way to access evalStore, so copy
the .drvs. */
RealisedPath::Set drvPaths2;
for (const auto & i : paths) {
std::visit(overloaded {
[&](const DerivedPath::Opaque & bp) {
// Do nothing, path is hopefully there already
},
[&](const DerivedPath::Built & bp) {
drvPaths2.insert(bp.drvPath->getBaseStorePath());
},
}, i.raw());
}
copyClosure(*evalStore, *this, drvPaths2);
}
}
void RemoteStore::buildPaths(const std::vector<DerivedPath> & drvPaths, BuildMode buildMode, std::shared_ptr<Store> evalStore)
{
copyDrvsFromEvalStore(drvPaths, evalStore);
auto conn(getConnection());
conn->to << WorkerProto::Op::BuildPaths;
assert(GET_PROTOCOL_MINOR(conn->daemonVersion) >= 13);
WorkerProto::write(*this, *conn, drvPaths);
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 15)
conn->to << buildMode;
else
/* Old daemons did not take a 'buildMode' parameter, so we
need to validate it here on the client side. */
if (buildMode != bmNormal)
throw Error("repairing or checking is not supported when building through the Nix daemon");
conn.processStderr();
readInt(conn->from);
}
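/* Build the given paths and return per-path results. Protocol >= 1.34
supports this natively via BuildPathsWithResults; for older daemons we
fall back to buildPaths() and reconstruct the results client-side. */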
std::vector<KeyedBuildResult> RemoteStore::buildPathsWithResults(
const std::vector<DerivedPath> & paths,
BuildMode buildMode,
std::shared_ptr<Store> evalStore)
{
copyDrvsFromEvalStore(paths, evalStore);
std::optional<ConnectionHandle> conn_(getConnection());
auto & conn = *conn_;
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 34) {
conn->to << WorkerProto::Op::BuildPathsWithResults;
WorkerProto::write(*this, *conn, paths);
conn->to << buildMode;
conn.processStderr();
return WorkerProto::Serialise<std::vector<KeyedBuildResult>>::read(*this, *conn);
} else {
// Avoid deadlock.
conn_.reset();
// Note: this throws an exception if a build/substitution
// fails, but meh.
buildPaths(paths, buildMode, evalStore);
std::vector<KeyedBuildResult> results;
for (auto & path : paths) {
std::visit(
overloaded {
[&](const DerivedPath::Opaque & bo) {
results.push_back(KeyedBuildResult {
{
.status = BuildResult::Substituted,
},
/* .path = */ bo,
});
},
[&](const DerivedPath::Built & bfd) {
KeyedBuildResult res {
{
.status = BuildResult::Built
},
/* .path = */ bfd,
};
OutputPathMap outputs;
auto drvPath = resolveDerivedPath(*evalStore, *bfd.drvPath);
auto drv = evalStore->readDerivation(drvPath);
const auto outputHashes = staticOutputHashes(*evalStore, drv); // FIXME: expensive
auto built = resolveDerivedPath(*this, bfd, &*evalStore);
for (auto & [output, outputPath] : built) {
auto outputHash = get(outputHashes, output);
if (!outputHash)
throw Error(
"the derivation '%s' doesn't have an output named '%s'",
printStorePath(drvPath), output);
auto outputId = DrvOutput{ *outputHash, output };
if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) {
auto realisation =
queryRealisation(outputId);
if (!realisation)
throw MissingRealisation(outputId);
res.builtOutputs.emplace(output, *realisation);
} else {
res.builtOutputs.emplace(
output,
Realisation {
.id = outputId,
.outPath = outputPath,
});
}
}
results.push_back(res);
}
},
path.raw());
}
return results;
}
}
BuildResult RemoteStore::buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
BuildMode buildMode)
{
auto conn(getConnection());
conn->to << WorkerProto::Op::BuildDerivation << printStorePath(drvPath);
writeDerivation(conn->to, *this, drv);
conn->to << buildMode;
conn.processStderr();
return WorkerProto::Serialise<BuildResult>::read(*this, *conn);
}
void RemoteStore::ensurePath(const StorePath & path)
{
auto conn(getConnection());
conn->to << WorkerProto::Op::EnsurePath << printStorePath(path);
conn.processStderr();
readInt(conn->from);
}
void RemoteStore::addTempRoot(const StorePath & path)
{
auto conn(getConnection());
conn->to << WorkerProto::Op::AddTempRoot << printStorePath(path);
conn.processStderr();
readInt(conn->from);
}
Roots RemoteStore::findRoots(bool censor)
{
auto conn(getConnection());
conn->to << WorkerProto::Op::FindRoots;
conn.processStderr();
size_t count = readNum<size_t>(conn->from);
Roots result;
while (count--) {
Path link = readString(conn->from);
auto target = parseStorePath(readString(conn->from));
result[std::move(target)].emplace(link);
}
return result;
}
void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
{
auto conn(getConnection());
conn->to
<< WorkerProto::Op::CollectGarbage << options.action;
WorkerProto::write(*this, *conn, options.pathsToDelete);
conn->to << options.ignoreLiveness
<< options.maxFreed
/* removed options */
<< 0 << 0 << 0;
conn.processStderr();
results.paths = readStrings<PathSet>(conn->from);
results.bytesFreed = readLongLong(conn->from);
readLongLong(conn->from); // obsolete
{
auto state_(Store::state.lock());
state_->pathInfoCache.clear();
}
}
void RemoteStore::optimiseStore()
{
auto conn(getConnection());
conn->to << WorkerProto::Op::OptimiseStore;
conn.processStderr();
readInt(conn->from);
}
bool RemoteStore::verifyStore(bool checkContents, RepairFlag repair)
{
auto conn(getConnection());
conn->to << WorkerProto::Op::VerifyStore << checkContents << repair;
conn.processStderr();
return readInt(conn->from);
}
void RemoteStore::addSignatures(const StorePath & storePath, const StringSet & sigs)
{
auto conn(getConnection());
conn->to << WorkerProto::Op::AddSignatures << printStorePath(storePath) << sigs;
conn.processStderr();
readInt(conn->from);
}
void RemoteStore::queryMissing(const std::vector<DerivedPath> & targets,
StorePathSet & willBuild, StorePathSet & willSubstitute, StorePathSet & unknown,
uint64_t & downloadSize, uint64_t & narSize)
{
{
auto conn(getConnection());
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 19)
// Don't hold the connection handle in the fallback case
// to prevent a deadlock.
goto fallback;
conn->to << WorkerProto::Op::QueryMissing;
WorkerProto::write(*this, *conn, targets);
conn.processStderr();
willBuild = WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
willSubstitute = WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
unknown = WorkerProto::Serialise<StorePathSet>::read(*this, *conn);
conn->from >> downloadSize >> narSize;
return;
}
fallback:
return Store::queryMissing(targets, willBuild, willSubstitute,
unknown, downloadSize, narSize);
}
void RemoteStore::addBuildLog(const StorePath & drvPath, std::string_view log)
{
auto conn(getConnection());
conn->to << WorkerProto::Op::AddBuildLog << drvPath.to_string();
StringSource source(log);
conn.withFramedSink([&](Sink & sink) {
source.drainInto(sink);
});
readInt(conn->from);
}
std::optional<std::string> RemoteStore::getVersion()
{
auto conn(getConnection());
return conn->daemonNixVersion;
}
void RemoteStore::connect()
{
auto conn(getConnection());
}
unsigned int RemoteStore::getProtocol()
{
auto conn(connections->get());
return conn->daemonVersion;
}
std::optional<TrustedFlag> RemoteStore::isTrustedClient()
{
auto conn(getConnection());
return conn->remoteTrustsUs;
}
void RemoteStore::flushBadConnections()
{
connections->flushBad();
}
RemoteStore::Connection::~Connection()
{
try {
to.flush();
} catch (...) {
ignoreException();
}
}
void RemoteStore::narFromPath(const StorePath & path, Sink & sink)
{
auto conn(connections->get());
conn->to << WorkerProto::Op::NarFromPath << printStorePath(path);
conn->processStderr();
copyNAR(conn->from, sink);
}
ref<SourceAccessor> RemoteStore::getFSAccessor(bool requireValidPath)
{
return make_ref<RemoteFSAccessor>(ref<Store>(shared_from_this()));
}
static Logger::Fields readFields(Source & from)
{
Logger::Fields fields;
size_t size = readInt(from);
for (size_t n = 0; n < size; n++) {
auto type = (decltype(Logger::Field::type)) readInt(from);
if (type == Logger::Field::tInt)
fields.push_back(readNum<uint64_t>(from));
else if (type == Logger::Field::tString)
fields.push_back(readString(from));
else
throw Error("got unsupported field type %x from Nix daemon", (int) type);
}
return fields;
}
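/* Pump the daemon's side channel: forward STDERR_WRITE data to the
given sink, satisfy STDERR_READ requests from the given source, relay
log messages and activities to the logger, and stop at STDERR_LAST.
Errors from the daemon are returned as an exception pointer rather
than thrown. */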
std::exception_ptr RemoteStore::Connection::processStderr(Sink * sink, Source * source, bool flush)
{
if (flush)
to.flush();
while (true) {
auto msg = readNum<uint64_t>(from);
if (msg == STDERR_WRITE) {
auto s = readString(from);
if (!sink) throw Error("no sink");
(*sink)(s);
}
else if (msg == STDERR_READ) {
if (!source) throw Error("no source");
size_t len = readNum<size_t>(from);
auto buf = std::make_unique<char[]>(len);
writeString({(const char *) buf.get(), source->read(buf.get(), len)}, to);
to.flush();
}
else if (msg == STDERR_ERROR) {
if (GET_PROTOCOL_MINOR(daemonVersion) >= 26) {
return std::make_exception_ptr(readError(from));
} else {
auto error = readString(from);
unsigned int status = readInt(from);
return std::make_exception_ptr(Error(status, error));
}
}
else if (msg == STDERR_NEXT)
printError(chomp(readString(from)));
else if (msg == STDERR_START_ACTIVITY) {
auto act = readNum<ActivityId>(from);
auto lvl = (Verbosity) readInt(from);
auto type = (ActivityType) readInt(from);
auto s = readString(from);
auto fields = readFields(from);
auto parent = readNum<ActivityId>(from);
logger->startActivity(act, lvl, type, s, fields, parent);
}
else if (msg == STDERR_STOP_ACTIVITY) {
auto act = readNum<ActivityId>(from);
logger->stopActivity(act);
}
else if (msg == STDERR_RESULT) {
auto act = readNum<ActivityId>(from);
auto type = (ResultType) readInt(from);
auto fields = readFields(from);
logger->result(act, type, fields);
}
else if (msg == STDERR_LAST)
break;
else
throw Error("got unknown message type %x from Nix daemon", msg);
}
return nullptr;
}
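/* Run 'fun' with a FramedSink that streams data to the daemon while a
separate thread processes the daemon's stderr, so that errors raised
during the transfer are detected and rethrown once the sink has been
flushed. */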
void RemoteStore::ConnectionHandle::withFramedSink(std::function<void(Sink & sink)> fun)
{
(*this)->to.flush();
std::exception_ptr ex;
/* Handle log messages / exceptions from the remote on a separate
thread. */
std::thread stderrThread([&]()
{
try {
ReceiveInterrupts receiveInterrupts;
processStderr(nullptr, nullptr, false);
} catch (...) {
ex = std::current_exception();
}
});
Finally joinStderrThread([&]()
{
if (stderrThread.joinable()) {
stderrThread.join();
if (ex) {
try {
std::rethrow_exception(ex);
} catch (...) {
ignoreException();
}
}
}
});
{
FramedSink sink((*this)->to, ex);
fun(sink);
sink.flush();
}
stderrThread.join();
if (ex)
std::rethrow_exception(ex);
}
}