#include "serialise.hh"
#include "util.hh"
#include "path-with-outputs.hh"
#include "gc-store.hh"
#include "remote-fs-accessor.hh"
#include "build-result.hh"
#include "remote-store.hh"
#include "worker-protocol.hh"
#include "archive.hh"
#include "globals.hh"
#include "derivations.hh"
#include "pool.hh"
#include "finally.hh"
#include "logging.hh"
#include "callback.hh"
#include "filetransfer.hh"
#include <nlohmann/json.hpp>

namespace nix {

/* TODO: Separate these store impls into different files, give them better names */

RemoteStore::RemoteStore(const Params & params)
    : RemoteStoreConfig(params)
    , Store(params)
    , connections(make_ref<Pool<Connection>>(
            std::max(1, (int) maxConnections),
            [this]() {
                auto conn = openConnectionWrapper();
                try {
                    initConnection(*conn);
                } catch (...) {
                    failed = true;
                    throw;
                }
                return conn;
            },
            [this](const ref<Connection> & r) {
                return
                    r->to.good()
                    && r->from.good()
                    && std::chrono::duration_cast<std::chrono::seconds>(
                        std::chrono::steady_clock::now() - r->startTime).count() < maxConnectionAge;
            }
            ))
{
}
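
/* Open a new connection to the daemon, remembering a failure so that
   later attempts to allocate a pooled connection fail fast instead of
   retrying a store that is known to be unreachable. */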
ref<RemoteStore::Connection> RemoteStore::openConnectionWrapper()
{
    if (failed)
        throw Error("opening a connection to remote store '%s' previously failed", getUri());
    try {
        return openConnection();
    } catch (...) {
        failed = true;
        throw;
    }
}
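
/* Perform the initial worker-protocol handshake on a freshly opened
   connection. The exchange implemented below is, in order:

     client: WORKER_MAGIC_1
     daemon: WORKER_MAGIC_2, daemon protocol version
     client: client protocol version (PROTOCOL_VERSION)
     client: obsolete CPU affinity / reserveSpace fields (depending on version)
     daemon: daemon Nix version string              (protocol >= 1.33)
     daemon: whether the daemon trusts this client  (protocol >= 1.35)

   followed by draining any pending stderr messages from the daemon. On
   a protocol mismatch the buffered greeting is included in the error to
   aid debugging. */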
void RemoteStore::initConnection(Connection & conn)
{
    /* Send the magic greeting, check for the reply. */
    try {
        conn.to << WORKER_MAGIC_1;
        conn.to.flush();

        StringSink saved;
        try {
            TeeSource tee(conn.from, saved);
            unsigned int magic = readInt(tee);
            if (magic != WORKER_MAGIC_2)
                throw Error("protocol mismatch");
        } catch (SerialisationError & e) {
            /* In case the other side is waiting for our input, close
               it. */
            conn.closeWrite();
            auto msg = conn.from.drain();
            throw Error("protocol mismatch, got '%s'", chomp(saved.s + msg));
        }

        conn.from >> conn.daemonVersion;
        if (GET_PROTOCOL_MAJOR(conn.daemonVersion) != GET_PROTOCOL_MAJOR(PROTOCOL_VERSION))
            throw Error("Nix daemon protocol version not supported");
        if (GET_PROTOCOL_MINOR(conn.daemonVersion) < 10)
            throw Error("the Nix daemon version is too old");
        conn.to << PROTOCOL_VERSION;

        if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 14) {
            // Obsolete CPU affinity.
            conn.to << 0;
        }

        if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 11)
            conn.to << false; // obsolete reserveSpace

        if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 33) {
            conn.to.flush();
            conn.daemonNixVersion = readString(conn.from);
        }

        if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 35) {
            conn.remoteTrustsUs = WorkerProto<std::optional<TrustedFlag>>::read(*this, conn.from);
        } else {
            // We don't know the answer; protocol too old.
            conn.remoteTrustsUs = std::nullopt;
        }

        auto ex = conn.processStderr();
        if (ex) std::rethrow_exception(ex);
    }
    catch (Error & e) {
        throw Error("cannot open connection to remote store '%s': %s", getUri(), e.what());
    }

    setOptions(conn);
}
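
/* Send the client's settings to the daemon: the well-known options are
   written as individual fields, and (for protocol >= 1.12) any other
   overridden settings are sent as name/value pairs, excluding those
   already sent explicitly and a few client-only settings. */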
void RemoteStore::setOptions(Connection & conn)
{
    conn.to << wopSetOptions
        << settings.keepFailed
        << settings.keepGoing
        << settings.tryFallback
        << verbosity
        << settings.maxBuildJobs
        << settings.maxSilentTime
        << true
        << (settings.verboseBuild ? lvlError : lvlVomit)
        << 0 // obsolete log type
        << 0 /* obsolete print build trace */
        << settings.buildCores
        << settings.useSubstitutes;

    if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 12) {
        std::map<std::string, Config::SettingInfo> overrides;
        settings.getSettings(overrides, true); // libstore settings
        fileTransferSettings.getSettings(overrides, true);
        overrides.erase(settings.keepFailed.name);
        overrides.erase(settings.keepGoing.name);
        overrides.erase(settings.tryFallback.name);
        overrides.erase(settings.maxBuildJobs.name);
        overrides.erase(settings.maxSilentTime.name);
        overrides.erase(settings.buildCores.name);
        overrides.erase(settings.useSubstitutes.name);
        overrides.erase(loggerSettings.showTrace.name);
        overrides.erase(experimentalFeatureSettings.experimentalFeatures.name);
        overrides.erase(settings.pluginFiles.name);
        conn.to << overrides.size();
        for (auto & i : overrides)
            conn.to << i.first << i.second.value;
    }

    auto ex = conn.processStderr();
    if (ex) std::rethrow_exception(ex);
}


/* A wrapper around Pool<RemoteStore::Connection>::Handle that marks
   the connection as bad (causing it to be closed) if a non-daemon
   exception is thrown before the handle is closed. Such an exception
   causes a deviation from the expected protocol and therefore a
   desynchronization between the client and daemon. */
struct ConnectionHandle
{
    Pool<RemoteStore::Connection>::Handle handle;
    bool daemonException = false;

    ConnectionHandle(Pool<RemoteStore::Connection>::Handle && handle)
        : handle(std::move(handle))
    { }

    ConnectionHandle(ConnectionHandle && h)
        : handle(std::move(h.handle))
    { }

    ~ConnectionHandle()
    {
        if (!daemonException && std::uncaught_exceptions()) {
            handle.markBad();
            debug("closing daemon connection because of an exception");
        }
    }

    RemoteStore::Connection * operator -> () { return &*handle; }

    void processStderr(Sink * sink = 0, Source * source = 0, bool flush = true)
    {
        auto ex = handle->processStderr(sink, source, flush);
        if (ex) {
            daemonException = true;
            std::rethrow_exception(ex);
        }
    }

    void withFramedSink(std::function<void(Sink & sink)> fun);
};
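
/* Acquire a connection from the pool, wrapped in a ConnectionHandle so
   that protocol desynchronisation marks it as bad. */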
ConnectionHandle RemoteStore::getConnection()
{
    return ConnectionHandle(connections->get());
}


void RemoteStore::setOptions()
{
    setOptions(*(getConnection().handle));
}


bool RemoteStore::isValidPathUncached(const StorePath & path)
{
    auto conn(getConnection());
    conn->to << wopIsValidPath << printStorePath(path);
    conn.processStderr();
    return readInt(conn->from);
}
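
/* Query which of the given paths are valid. For daemons older than
   protocol 1.12 this falls back to one wopIsValidPath round trip per
   path; newer daemons answer the whole set in a single request. */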
StorePathSet RemoteStore::queryValidPaths(const StorePathSet & paths, SubstituteFlag maybeSubstitute)
{
    auto conn(getConnection());
    if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
        StorePathSet res;
        for (auto & i : paths)
            if (isValidPath(i)) res.insert(i);
        return res;
    } else {
        conn->to << wopQueryValidPaths;
        workerProtoWrite(*this, conn->to, paths);
        if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 27) {
            conn->to << (settings.buildersUseSubstitutes ? 1 : 0);
        }
        conn.processStderr();
        return WorkerProto<StorePathSet>::read(*this, conn->from);
    }
}


StorePathSet RemoteStore::queryAllValidPaths()
{
    auto conn(getConnection());
    conn->to << wopQueryAllValidPaths;
    conn.processStderr();
    return WorkerProto<StorePathSet>::read(*this, conn->from);
}


StorePathSet RemoteStore::querySubstitutablePaths(const StorePathSet & paths)
{
    auto conn(getConnection());
    if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
        StorePathSet res;
        for (auto & i : paths) {
            conn->to << wopHasSubstitutes << printStorePath(i);
            conn.processStderr();
            if (readInt(conn->from)) res.insert(i);
        }
        return res;
    } else {
        conn->to << wopQuerySubstitutablePaths;
        workerProtoWrite(*this, conn->to, paths);
        conn.processStderr();
        return WorkerProto<StorePathSet>::read(*this, conn->from);
    }
}
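
/* Query substituter information for a set of paths in one round trip
   where possible. Daemons older than protocol 1.12 only support the
   per-path wopQuerySubstitutablePathInfo request; daemons older than
   1.22 accept plain store paths rather than a path/content-address map. */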
void RemoteStore::querySubstitutablePathInfos(const StorePathCAMap & pathsMap, SubstitutablePathInfos & infos)
{
    if (pathsMap.empty()) return;

    auto conn(getConnection());

    if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {

        for (auto & i : pathsMap) {
            SubstitutablePathInfo info;
            conn->to << wopQuerySubstitutablePathInfo << printStorePath(i.first);
            conn.processStderr();
            unsigned int reply = readInt(conn->from);
            if (reply == 0) continue;
            auto deriver = readString(conn->from);
            if (deriver != "")
                info.deriver = parseStorePath(deriver);
            info.references = WorkerProto<StorePathSet>::read(*this, conn->from);
            info.downloadSize = readLongLong(conn->from);
            info.narSize = readLongLong(conn->from);
            infos.insert_or_assign(i.first, std::move(info));
        }

    } else {

        conn->to << wopQuerySubstitutablePathInfos;
        if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 22) {
            StorePathSet paths;
            for (auto & path : pathsMap)
                paths.insert(path.first);
            workerProtoWrite(*this, conn->to, paths);
        } else
            workerProtoWrite(*this, conn->to, pathsMap);
        conn.processStderr();
        size_t count = readNum<size_t>(conn->from);
        for (size_t n = 0; n < count; n++) {
            SubstitutablePathInfo & info(infos[parseStorePath(readString(conn->from))]);
            auto deriver = readString(conn->from);
            if (deriver != "")
                info.deriver = parseStorePath(deriver);
            info.references = WorkerProto<StorePathSet>::read(*this, conn->from);
            info.downloadSize = readLongLong(conn->from);
            info.narSize = readLongLong(conn->from);
        }

    }
}
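
/* Look up path info on the daemon. Invalid paths are reported to the
   callback as an InvalidPath exception; for protocol >= 1.17 the daemon
   signals validity explicitly, otherwise it is inferred from the error
   message (the "ugly backwards compatibility hack" below). */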
void RemoteStore::queryPathInfoUncached(const StorePath & path,
    Callback<std::shared_ptr<const ValidPathInfo>> callback) noexcept
{
    try {
        std::shared_ptr<const ValidPathInfo> info;
        {
            auto conn(getConnection());
            conn->to << wopQueryPathInfo << printStorePath(path);
            try {
                conn.processStderr();
            } catch (Error & e) {
                // Ugly backwards compatibility hack.
                if (e.msg().find("is not valid") != std::string::npos)
                    throw InvalidPath(std::move(e.info()));
                throw;
            }
            if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 17) {
                bool valid; conn->from >> valid;
                if (!valid) throw InvalidPath("path '%s' is not valid", printStorePath(path));
            }
            info = std::make_shared<ValidPathInfo>(
                ValidPathInfo::read(conn->from, *this, GET_PROTOCOL_MINOR(conn->daemonVersion), StorePath{path}));
        }
        callback(std::move(info));
    } catch (...) { callback.rethrow(); }
}


void RemoteStore::queryReferrers(const StorePath & path,
    StorePathSet & referrers)
{
    auto conn(getConnection());
    conn->to << wopQueryReferrers << printStorePath(path);
    conn.processStderr();
    for (auto & i : WorkerProto<StorePathSet>::read(*this, conn->from))
        referrers.insert(i);
}


StorePathSet RemoteStore::queryValidDerivers(const StorePath & path)
{
    auto conn(getConnection());
    conn->to << wopQueryValidDerivers << printStorePath(path);
    conn.processStderr();
    return WorkerProto<StorePathSet>::read(*this, conn->from);
}


StorePathSet RemoteStore::queryDerivationOutputs(const StorePath & path)
{
    if (GET_PROTOCOL_MINOR(getProtocol()) >= 0x16) {
        return Store::queryDerivationOutputs(path);
    }
    auto conn(getConnection());
    conn->to << wopQueryDerivationOutputs << printStorePath(path);
    conn.processStderr();
    return WorkerProto<StorePathSet>::read(*this, conn->from);
}


std::map<std::string, std::optional<StorePath>> RemoteStore::queryPartialDerivationOutputMap(const StorePath & path)
{
    if (GET_PROTOCOL_MINOR(getProtocol()) >= 0x16) {
        auto conn(getConnection());
        conn->to << wopQueryDerivationOutputMap << printStorePath(path);
        conn.processStderr();
        return WorkerProto<std::map<std::string, std::optional<StorePath>>>::read(*this, conn->from);
    } else {
        // Fallback for old daemon versions.
        // For floating-CA derivations (and their co-dependencies) this is an
        // under-approximation as it only returns the paths that can be inferred
        // from the derivation itself (and not the ones that are known because
        // they have been built), but as old stores don't handle floating-CA
        // derivations this shouldn't matter.
        auto derivation = readDerivation(path);
        auto outputsWithOptPaths = derivation.outputsAndOptPaths(*this);
        std::map<std::string, std::optional<StorePath>> ret;
        for (auto & [outputName, outputAndPath] : outputsWithOptPaths) {
            ret.emplace(outputName, outputAndPath.second);
        }
        return ret;
    }
}


std::optional<StorePath> RemoteStore::queryPathFromHashPart(const std::string & hashPart)
{
    auto conn(getConnection());
    conn->to << wopQueryPathFromHashPart << hashPart;
    conn.processStderr();
    Path path = readString(conn->from);
    if (path.empty()) return {};
    return parseStorePath(path);
}
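
/* Add content-addressed data to the store. For protocol >= 1.25 a
   single wopAddToStore request covers all content-address methods and
   streams the dump through a framed sink; older daemons are driven via
   the legacy wopAddTextToStore / wopAddToStore requests, after which the
   resulting path info has to be fetched with a separate queryPathInfo()
   call (hence the explicit release of the connection). */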
ref<const ValidPathInfo> RemoteStore::addCAToStore(
    Source & dump,
    std::string_view name,
    ContentAddressMethod caMethod,
    HashType hashType,
    const StorePathSet & references,
    RepairFlag repair)
{
    std::optional<ConnectionHandle> conn_(getConnection());
    auto & conn = *conn_;

    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 25) {

        conn->to
            << wopAddToStore
            << name
            << caMethod.render(hashType);
        workerProtoWrite(*this, conn->to, references);
        conn->to << repair;

        // The dump source may invoke the store, so we need to make some room.
        connections->incCapacity();
        {
            Finally cleanup([&]() { connections->decCapacity(); });
            conn.withFramedSink([&](Sink & sink) {
                dump.drainInto(sink);
            });
        }

        return make_ref<ValidPathInfo>(
            ValidPathInfo::read(conn->from, *this, GET_PROTOCOL_MINOR(conn->daemonVersion)));
    }
    else {
        if (repair) throw Error("repairing is not supported when building through the Nix daemon protocol < 1.25");

        std::visit(overloaded {
            [&](const TextIngestionMethod & thm) -> void {
                if (hashType != htSHA256)
                    throw UnimplementedError("When adding text-hashed data called '%s', only SHA-256 is supported but '%s' was given",
                        name, printHashType(hashType));
                std::string s = dump.drain();
                conn->to << wopAddTextToStore << name << s;
                workerProtoWrite(*this, conn->to, references);
                conn.processStderr();
            },
            [&](const FileIngestionMethod & fim) -> void {
                conn->to
                    << wopAddToStore
                    << name
                    << ((hashType == htSHA256 && fim == FileIngestionMethod::Recursive) ? 0 : 1) /* backwards compatibility hack */
                    << (fim == FileIngestionMethod::Recursive ? 1 : 0)
                    << printHashType(hashType);

                try {
                    conn->to.written = 0;
                    connections->incCapacity();
                    {
                        Finally cleanup([&]() { connections->decCapacity(); });
                        if (fim == FileIngestionMethod::Recursive) {
                            dump.drainInto(conn->to);
                        } else {
                            std::string contents = dump.drain();
                            dumpString(contents, conn->to);
                        }
                    }
                    conn.processStderr();
                } catch (SysError & e) {
                    /* Daemon closed while we were sending the path. Probably OOM
                       or I/O error. */
                    if (e.errNo == EPIPE)
                        try {
                            conn.processStderr();
                        } catch (EndOfFile & e) { }
                    throw;
                }

            }
        }, caMethod.raw);
        auto path = parseStorePath(readString(conn->from));
        // Release our connection to prevent a deadlock in queryPathInfo().
        conn_.reset();
        return queryPathInfo(path);
    }
}


StorePath RemoteStore::addToStoreFromDump(Source & dump, std::string_view name,
    FileIngestionMethod method, HashType hashType, RepairFlag repair, const StorePathSet & references)
{
    return addCAToStore(dump, name, method, hashType, references, repair)->path;
}
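
/* Add a NAR with known path info to the store. Protocol >= 1.18 uses
   wopAddToStoreNar; very old daemons are fed a single-path export
   stream via wopImportPaths instead. How the NAR itself is transferred
   depends on the protocol version (framed sink, stderr source, or a
   plain copy into the connection). */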
void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
    RepairFlag repair, CheckSigsFlag checkSigs)
{
    auto conn(getConnection());

    if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 18) {
        conn->to << wopImportPaths;

        auto source2 = sinkToSource([&](Sink & sink) {
            sink << 1 // == path follows
                ;
            copyNAR(source, sink);
            sink
                << exportMagic
                << printStorePath(info.path);
            workerProtoWrite(*this, sink, info.references);
            sink
                << (info.deriver ? printStorePath(*info.deriver) : "")
                << 0 // == no legacy signature
                << 0 // == no path follows
                ;
        });

        conn.processStderr(0, source2.get());

        auto importedPaths = WorkerProto<StorePathSet>::read(*this, conn->from);
        assert(importedPaths.size() <= 1);
    }

    else {
        conn->to << wopAddToStoreNar
            << printStorePath(info.path)
            << (info.deriver ? printStorePath(*info.deriver) : "")
            << info.narHash.to_string(Base16, false);
        workerProtoWrite(*this, conn->to, info.references);
        conn->to << info.registrationTime << info.narSize
            << info.ultimate << info.sigs << renderContentAddress(info.ca)
            << repair << !checkSigs;

        if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 23) {
            conn.withFramedSink([&](Sink & sink) {
                copyNAR(source, sink);
            });
        } else if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 21) {
            conn.processStderr(0, &source);
        } else {
            copyNAR(source, conn->to);
            conn.processStderr(0, nullptr);
        }
    }
}
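
/* Copy several paths at once by serialising them into the stream format
   expected by wopAddMultipleToStore: a count followed by (path info,
   NAR) pairs. */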
void RemoteStore::addMultipleToStore(
    PathsSource & pathsToCopy,
    Activity & act,
    RepairFlag repair,
    CheckSigsFlag checkSigs)
{
    auto source = sinkToSource([&](Sink & sink) {
        sink << pathsToCopy.size();
        for (auto & [pathInfo, pathSource] : pathsToCopy) {
            pathInfo.write(sink, *this, 16);
            pathSource->drainInto(sink);
        }
    });

    addMultipleToStore(*source, repair, checkSigs);
}


void RemoteStore::addMultipleToStore(
    Source & source,
    RepairFlag repair,
    CheckSigsFlag checkSigs)
{
    if (GET_PROTOCOL_MINOR(getConnection()->daemonVersion) >= 32) {
        auto conn(getConnection());
        conn->to
            << wopAddMultipleToStore
            << repair
            << !checkSigs;
        conn.withFramedSink([&](Sink & sink) {
            source.drainInto(sink);
        });
    } else
        Store::addMultipleToStore(source, repair, checkSigs);
}


StorePath RemoteStore::addTextToStore(
    std::string_view name,
    std::string_view s,
    const StorePathSet & references,
    RepairFlag repair)
{
    StringSource source(s);
    return addCAToStore(source, name, TextIngestionMethod {}, htSHA256, references, repair)->path;
}


void RemoteStore::registerDrvOutput(const Realisation & info)
{
    auto conn(getConnection());
    conn->to << wopRegisterDrvOutput;
    if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 31) {
        conn->to << info.id.to_string();
        conn->to << std::string(info.outPath.to_string());
    } else {
        workerProtoWrite(*this, conn->to, info);
    }
    conn.processStderr();
}
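
/* Query the realisation of a content-addressed derivation output.
   Older daemons (protocol < 1.31) only return the output paths, from
   which a minimal Realisation is reconstructed; daemons before 1.27 do
   not support the request at all and yield a null result. */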
void RemoteStore::queryRealisationUncached(const DrvOutput & id,
    Callback<std::shared_ptr<const Realisation>> callback) noexcept
{
    try {
        auto conn(getConnection());

        if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 27) {
            warn("the daemon is too old to support content-addressed derivations, please upgrade it to 2.4");
            return callback(nullptr);
        }

        conn->to << wopQueryRealisation;
        conn->to << id.to_string();
        conn.processStderr();

        auto real = [&]() -> std::shared_ptr<const Realisation> {
            if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 31) {
                auto outPaths = WorkerProto<std::set<StorePath>>::read(
                    *this, conn->from);
                if (outPaths.empty())
                    return nullptr;
                return std::make_shared<const Realisation>(Realisation { .id = id, .outPath = *outPaths.begin() });
            } else {
                auto realisations = WorkerProto<std::set<Realisation>>::read(
                    *this, conn->from);
                if (realisations.empty())
                    return nullptr;
                return std::make_shared<const Realisation>(*realisations.begin());
            }
        }();

        callback(std::shared_ptr<const Realisation>(real));
    } catch (...) { return callback.rethrow(); }
}
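
/* Serialise a list of DerivedPaths for the daemon. Protocol >= 1.30
   understands DerivedPath directly; older daemons only accept the
   older StorePathWithOutputs string encoding, so requests for plain
   .drv files cannot be expressed and are rejected. */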
static void writeDerivedPaths(RemoteStore & store, ConnectionHandle & conn, const std::vector<DerivedPath> & reqs)
{
    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 30) {
        workerProtoWrite(store, conn->to, reqs);
    } else {
        Strings ss;
        for (auto & p : reqs) {
            auto sOrDrvPath = StorePathWithOutputs::tryFromDerivedPath(p);
            std::visit(overloaded {
                [&](const StorePathWithOutputs & s) {
                    ss.push_back(s.to_string(store));
                },
                [&](const StorePath & drvPath) {
                    throw Error("trying to request '%s', but daemon protocol %d.%d is too old (< 1.29) to request a derivation file",
                        store.printStorePath(drvPath),
                        GET_PROTOCOL_MAJOR(conn->daemonVersion),
                        GET_PROTOCOL_MINOR(conn->daemonVersion));
                },
            }, sOrDrvPath);
        }
        conn->to << ss;
    }
}
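
/* The daemon cannot read from the client's eval store, so any .drv
   files needed for the requested builds are copied to the daemon's
   store first. */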
void RemoteStore::copyDrvsFromEvalStore(
    const std::vector<DerivedPath> & paths,
    std::shared_ptr<Store> evalStore)
{
    if (evalStore && evalStore.get() != this) {
        /* The remote doesn't have a way to access evalStore, so copy
           the .drvs. */
        RealisedPath::Set drvPaths2;
        for (auto & i : paths)
            if (auto p = std::get_if<DerivedPath::Built>(&i))
                drvPaths2.insert(p->drvPath);
        copyClosure(*evalStore, *this, drvPaths2);
    }
}


void RemoteStore::buildPaths(const std::vector<DerivedPath> & drvPaths, BuildMode buildMode, std::shared_ptr<Store> evalStore)
{
    copyDrvsFromEvalStore(drvPaths, evalStore);

    auto conn(getConnection());
    conn->to << wopBuildPaths;
    assert(GET_PROTOCOL_MINOR(conn->daemonVersion) >= 13);
    writeDerivedPaths(*this, conn, drvPaths);
    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 15)
        conn->to << buildMode;
    else
        /* Old daemons did not take a 'buildMode' parameter, so we
           need to validate it here on the client side.  */
        if (buildMode != bmNormal)
            throw Error("repairing or checking is not supported when building through the Nix daemon");
    conn.processStderr();
    readInt(conn->from);
}
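
/* Build the given paths and report per-path results. Daemons with
   protocol >= 1.34 implement wopBuildPathsWithResults natively; for
   older daemons the result is emulated client-side by calling
   buildPaths() and then reconstructing the built outputs from the
   derivations. */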
std::vector<KeyedBuildResult> RemoteStore::buildPathsWithResults(
    const std::vector<DerivedPath> & paths,
    BuildMode buildMode,
    std::shared_ptr<Store> evalStore)
{
    copyDrvsFromEvalStore(paths, evalStore);

    std::optional<ConnectionHandle> conn_(getConnection());
    auto & conn = *conn_;

    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 34) {
        conn->to << wopBuildPathsWithResults;
        writeDerivedPaths(*this, conn, paths);
        conn->to << buildMode;
        conn.processStderr();
        return WorkerProto<std::vector<KeyedBuildResult>>::read(*this, conn->from);
    } else {
        // Avoid deadlock.
        conn_.reset();

        // Note: this throws an exception if a build/substitution
        // fails, but meh.
        buildPaths(paths, buildMode, evalStore);

        std::vector<KeyedBuildResult> results;

        for (auto & path : paths) {
            std::visit(
                overloaded {
                    [&](const DerivedPath::Opaque & bo) {
                        results.push_back(KeyedBuildResult {
                            {
                                .status = BuildResult::Substituted,
                            },
                            /* .path = */ bo,
                        });
                    },
                    [&](const DerivedPath::Built & bfd) {
                        KeyedBuildResult res {
                            {
                                .status = BuildResult::Built
                            },
                            /* .path = */ bfd,
                        };

                        OutputPathMap outputs;
                        auto drv = evalStore->readDerivation(bfd.drvPath);
                        const auto outputHashes = staticOutputHashes(*evalStore, drv); // FIXME: expensive
                        auto built = resolveDerivedPath(*this, bfd, &*evalStore);
                        for (auto & [output, outputPath] : built) {
                            auto outputHash = get(outputHashes, output);
                            if (!outputHash)
                                throw Error(
                                    "the derivation '%s' doesn't have an output named '%s'",
                                    printStorePath(bfd.drvPath), output);
                            auto outputId = DrvOutput{ *outputHash, output };
                            if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) {
                                auto realisation =
                                    queryRealisation(outputId);
                                if (!realisation)
                                    throw MissingRealisation(outputId);
                                res.builtOutputs.emplace(output, *realisation);
                            } else {
                                res.builtOutputs.emplace(
                                    output,
                                    Realisation {
                                        .id = outputId,
                                        .outPath = outputPath,
                                    });
                            }
                        }

                        results.push_back(res);
                    }
                },
                path.raw());
        }

        return results;
    }
}
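
/* Build a single derivation from an in-memory BasicDerivation. Newer
   protocol versions return progressively more detail (timing and
   non-determinism info from 1.29, built outputs from 1.28). */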
BuildResult RemoteStore::buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
    BuildMode buildMode)
{
    auto conn(getConnection());
    conn->to << wopBuildDerivation << printStorePath(drvPath);
    writeDerivation(conn->to, *this, drv);
    conn->to << buildMode;
    conn.processStderr();
    BuildResult res;
    res.status = (BuildResult::Status) readInt(conn->from);
    conn->from >> res.errorMsg;
    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 29) {
        conn->from >> res.timesBuilt >> res.isNonDeterministic >> res.startTime >> res.stopTime;
    }
    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 28) {
        auto builtOutputs = WorkerProto<DrvOutputs>::read(*this, conn->from);
        for (auto && [output, realisation] : builtOutputs)
            res.builtOutputs.insert_or_assign(
                std::move(output.outputName),
                std::move(realisation));
    }
    return res;

Allow remote builds without sending the derivation closure

Previously, to build a derivation remotely, we had to copy the entire
closure of the .drv file to the remote machine, even though we only
need the top-level derivation. This is very wasteful: the closure can
contain thousands of store paths, and in some Hydra use cases, include
source paths that are very large (e.g. Git/Mercurial checkouts).

So now there is a new operation, StoreAPI::buildDerivation(), that
performs a build from an in-memory representation of a derivation
(BasicDerivation) rather than from an on-disk .drv file. The only files
that need to be in the Nix store are the sources of the derivation
(drv.inputSrcs) and the needed output paths of the dependencies (as
described by drv.inputDrvs). "nix-store --serve" exposes this
interface.

Note that this is a privileged operation, because you can construct a
derivation that builds any store path whatsoever. Fixing this will
require changing the hashing scheme (i.e., the output paths should be
computed from the other fields in BasicDerivation, allowing them to be
verified without access to other derivations). However, this would be
quite nice because it would allow .drv-free building (e.g. "nix-env -i"
wouldn't have to write any .drv files to disk).

Fixes #173.

}
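
/* Client-side usage sketch (hypothetical, for illustration only): given a
   derivation that is already in memory, a remote build can be requested
   without the .drv closure ever existing on the remote side. The store
   URI, the variable names and the way `drv` is obtained are assumptions
   here, not part of this file.

       auto store = openStore("ssh-ng://builder");
       BasicDerivation drv = ...;           // parsed or constructed locally
       auto res = store->buildDerivation(drvPath, drv, bmNormal);
       if (!res.success())
           printError("remote build failed: %s", res.errorMsg);
*/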


void RemoteStore::ensurePath(const StorePath & path)
{
    auto conn(getConnection());
    conn->to << wopEnsurePath << printStorePath(path);
    conn.processStderr();
    readInt(conn->from);
}


void RemoteStore::addTempRoot(const StorePath & path)
{
    auto conn(getConnection());
    conn->to << wopAddTempRoot << printStorePath(path);
    conn.processStderr();
    readInt(conn->from);
}


void RemoteStore::addIndirectRoot(const Path & path)
{
    auto conn(getConnection());
    conn->to << wopAddIndirectRoot << path;
    conn.processStderr();
    readInt(conn->from);
}


Roots RemoteStore::findRoots(bool censor)
{
    auto conn(getConnection());
    conn->to << wopFindRoots;
    conn.processStderr();
    size_t count = readNum<size_t>(conn->from);
    Roots result;
    while (count--) {
        Path link = readString(conn->from);
        auto target = parseStorePath(readString(conn->from));
        result[std::move(target)].emplace(link);
    }
    return result;
}
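
/* Sketch (illustrative only): `Roots` maps each rooted store path to the
   set of links keeping it alive, so a caller can enumerate them roughly
   like this. The variable names are assumptions, not part of this file.

       Roots roots = store->findRoots(false);
       for (auto & [target, links] : roots)
           for (auto & link : links)
               std::cout << link << " -> " << store->printStorePath(target) << "\n";
*/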


void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
{
    auto conn(getConnection());

    conn->to
        << wopCollectGarbage << options.action;
    workerProtoWrite(*this, conn->to, options.pathsToDelete);
    conn->to << options.ignoreLiveness
        << options.maxFreed
        /* removed options */
        << 0 << 0 << 0;

    conn.processStderr();

    results.paths = readStrings<PathSet>(conn->from);
    results.bytesFreed = readLongLong(conn->from);
    readLongLong(conn->from); // obsolete

    {
        /* The daemon may have deleted paths we have cached info about,
           so flush the path info cache. */
        auto state_(Store::state.lock());
        state_->pathInfoCache.clear();
    }
}
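
/* Usage sketch (illustrative; `gcDeleteDead` and the size cap are
   assumptions about typical GCOptions settings): ask the daemon to delete
   dead paths until roughly 1 GiB has been freed.

       GCOptions options;
       options.action = GCOptions::gcDeleteDead;
       options.maxFreed = 1ULL << 30;
       GCResults results;
       store->collectGarbage(options, results);
       printInfo("deleted %d paths, freed %d bytes",
           results.paths.size(), results.bytesFreed);
*/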


void RemoteStore::optimiseStore()
{
    auto conn(getConnection());
    conn->to << wopOptimiseStore;
    conn.processStderr();
    readInt(conn->from);
}


bool RemoteStore::verifyStore(bool checkContents, RepairFlag repair)
{
    auto conn(getConnection());
    conn->to << wopVerifyStore << checkContents << repair;
    conn.processStderr();
    return readInt(conn->from);
}


void RemoteStore::addSignatures(const StorePath & storePath, const StringSet & sigs)
{
    auto conn(getConnection());
    conn->to << wopAddSignatures << printStorePath(storePath) << sigs;
    conn.processStderr();
    readInt(conn->from);
}


void RemoteStore::queryMissing(const std::vector<DerivedPath> & targets,
    StorePathSet & willBuild, StorePathSet & willSubstitute, StorePathSet & unknown,
    uint64_t & downloadSize, uint64_t & narSize)
{
    {
        auto conn(getConnection());
        if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 19)
            // Don't hold the connection handle in the fallback case
            // to prevent a deadlock.
            goto fallback;
        conn->to << wopQueryMissing;
        writeDerivedPaths(*this, conn, targets);
        conn.processStderr();
        willBuild = WorkerProto<StorePathSet>::read(*this, conn->from);
        willSubstitute = WorkerProto<StorePathSet>::read(*this, conn->from);
        unknown = WorkerProto<StorePathSet>::read(*this, conn->from);
        conn->from >> downloadSize >> narSize;
        return;
    }

 fallback:
    return Store::queryMissing(targets, willBuild, willSubstitute,
        unknown, downloadSize, narSize);
}
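
/* Usage sketch (illustrative only): a "dry run"-style query of what a set
   of build requests would entail. How `targets` is obtained is left out;
   the out-parameters are filled in by the call.

       std::vector<DerivedPath> targets = ...;
       StorePathSet willBuild, willSubstitute, unknown;
       uint64_t downloadSize = 0, narSize = 0;
       store->queryMissing(targets, willBuild, willSubstitute, unknown,
           downloadSize, narSize);
       printInfo("%d paths will be built, %d substituted",
           willBuild.size(), willSubstitute.size());
*/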


void RemoteStore::addBuildLog(const StorePath & drvPath, std::string_view log)
{
    auto conn(getConnection());
    conn->to << wopAddBuildLog << drvPath.to_string();
    StringSource source(log);
    conn.withFramedSink([&](Sink & sink) {
        source.drainInto(sink);
    });
    readInt(conn->from);
}


std::optional<std::string> RemoteStore::getVersion()
{
    auto conn(getConnection());
    return conn->daemonNixVersion;
}


void RemoteStore::connect()
{
    auto conn(getConnection());
}


unsigned int RemoteStore::getProtocol()
{
    auto conn(connections->get());
    return conn->daemonVersion;
}


std::optional<TrustedFlag> RemoteStore::isTrustedClient()
{
    auto conn(getConnection());
    return conn->remoteTrustsUs;
}


void RemoteStore::flushBadConnections()
{
    connections->flushBad();
}


RemoteStore::Connection::~Connection()
{
    try {
        to.flush();
    } catch (...) {
        ignoreException();
    }
}


void RemoteStore::narFromPath(const StorePath & path, Sink & sink)
{
    auto conn(connections->get());
    conn->to << wopNarFromPath << printStorePath(path);
    conn->processStderr();
    copyNAR(conn->from, sink);
}


ref<FSAccessor> RemoteStore::getFSAccessor()
{
    return make_ref<RemoteFSAccessor>(ref<Store>(shared_from_this()));
}


static Logger::Fields readFields(Source & from)
{
    Logger::Fields fields;
    size_t size = readInt(from);
    for (size_t n = 0; n < size; n++) {
        auto type = (decltype(Logger::Field::type)) readInt(from);
        if (type == Logger::Field::tInt)
            fields.push_back(readNum<uint64_t>(from));
        else if (type == Logger::Field::tString)
            fields.push_back(readString(from));
        else
            throw Error("got unsupported field type %x from Nix daemon", (int) type);
    }
    return fields;
}


std::exception_ptr RemoteStore::Connection::processStderr(Sink * sink, Source * source, bool flush)
{
    if (flush)
        to.flush();

    while (true) {

        auto msg = readNum<uint64_t>(from);

        if (msg == STDERR_WRITE) {
            auto s = readString(from);
            if (!sink) throw Error("no sink");
            (*sink)(s);
        }

        else if (msg == STDERR_READ) {
            if (!source) throw Error("no source");
            size_t len = readNum<size_t>(from);
            auto buf = std::make_unique<char[]>(len);
            writeString({(const char *) buf.get(), source->read(buf.get(), len)}, to);
            to.flush();
        }

        else if (msg == STDERR_ERROR) {
            if (GET_PROTOCOL_MINOR(daemonVersion) >= 26) {
                return std::make_exception_ptr(readError(from));
            } else {
                auto error = readString(from);
                unsigned int status = readInt(from);
                return std::make_exception_ptr(Error(status, error));
            }
        }

        else if (msg == STDERR_NEXT)
            printError(chomp(readString(from)));

        else if (msg == STDERR_START_ACTIVITY) {
            auto act = readNum<ActivityId>(from);
            auto lvl = (Verbosity) readInt(from);
            auto type = (ActivityType) readInt(from);
            auto s = readString(from);
            auto fields = readFields(from);
            auto parent = readNum<ActivityId>(from);
            logger->startActivity(act, lvl, type, s, fields, parent);
        }

        else if (msg == STDERR_STOP_ACTIVITY) {
            auto act = readNum<ActivityId>(from);
            logger->stopActivity(act);
        }

        else if (msg == STDERR_RESULT) {
            auto act = readNum<ActivityId>(from);
            auto type = (ResultType) readInt(from);
            auto fields = readFields(from);
            logger->result(act, type, fields);
        }

        else if (msg == STDERR_LAST)
            break;

        else
            throw Error("got unknown message type %x from Nix daemon", msg);
    }

    return nullptr;
}
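
/* Caller-side sketch (illustrative only): every worker-protocol round
   trip above has the same shape — write the opcode and its arguments,
   let processStderr() drain STDERR_* log/activity messages until
   STDERR_LAST (surfacing any STDERR_ERROR as an exception), then read
   the actual reply. Which opcode is used here is just an example.

       conn->to << wopIsValidPath << printStorePath(path);
       conn.processStderr();                 // pumps logs, may throw
       bool valid = readInt(conn->from);     // the reply proper
*/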


void ConnectionHandle::withFramedSink(std::function<void(Sink & sink)> fun)
{
    (*this)->to.flush();

    std::exception_ptr ex;

    /* Handle log messages / exceptions from the remote on a separate
       thread. */
    std::thread stderrThread([&]()
    {
        try {
            processStderr(nullptr, nullptr, false);
        } catch (...) {
            ex = std::current_exception();
        }
    });

    Finally joinStderrThread([&]()
    {
        if (stderrThread.joinable()) {
            stderrThread.join();
            if (ex) {
                try {
                    std::rethrow_exception(ex);
                } catch (...) {
                    ignoreException();
                }
            }
        }
    });

    {
        FramedSink sink((*this)->to, ex);
        fun(sink);
        sink.flush();
    }

    stderrThread.join();
    if (ex)
        std::rethrow_exception(ex);
}
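
/* Framing sketch (an assumption about FramedSink's wire format, for
   orientation only): each chunk written through the sink is preceded by
   its length, and the stream is terminated by a zero-length chunk,
   roughly

       <uint64 len> <len bytes> ... <uint64 len> <len bytes> <uint64 0>

   Running processStderr() on a separate thread while the frames are
   being written lets a daemon-side error surface (via `ex`) before the
   whole payload has been transferred. */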

}