Mirror of https://github.com/privatevoid-net/nix-super.git, synced 2024-11-13 09:46:16 +02:00

Commit f0ad29acc1: Merge remote-tracking branch 'upstream/master' into path-info

37 changed files with 3477 additions and 2969 deletions
@@ -2,11 +2,11 @@
     "nodes": {
         "nixpkgs": {
             "locked": {
-                "lastModified": 1602702596,
-                "narHash": "sha256-fqJ4UgOb4ZUnCDIapDb4gCrtAah5Rnr2/At3IzMitig=",
+                "lastModified": 1614309161,
+                "narHash": "sha256-93kRxDPyEW9QIpxU71kCaV1r+hgOgP6/aVgC7vvO8IU=",
                 "owner": "NixOS",
                 "repo": "nixpkgs",
-                "rev": "ad0d20345219790533ebe06571f82ed6b034db31",
+                "rev": "0e499fde7af3c28d63e9b13636716b86c3162b93",
                 "type": "github"
             },
             "original": {
@@ -251,7 +251,7 @@ connected:
     std::cerr << "# accept\n" << storeUri << "\n";

     auto inputs = readStrings<PathSet>(source);
-    auto outputs = readStrings<PathSet>(source);
+    auto wantedOutputs = readStrings<StringSet>(source);

     AutoCloseFD uploadLock = openLockFile(currentLoad + "/" + escapeUri(storeUri) + ".upload-lock", true);
@@ -276,6 +276,7 @@ connected:
     uploadLock = -1;

     auto drv = store->readDerivation(*drvPath);
+    auto outputHashes = staticOutputHashes(*store, drv);
     drv.inputSrcs = store->parseStorePathSet(inputs);

     auto result = sshStore->buildDerivation(*drvPath, drv);
@@ -283,16 +284,42 @@ connected:
     if (!result.success())
        throw Error("build of '%s' on '%s' failed: %s", store->printStorePath(*drvPath), storeUri, result.errorMsg);

-    StorePathSet missing;
-    for (auto & path : outputs)
-        if (!store->isValidPath(store->parseStorePath(path))) missing.insert(store->parseStorePath(path));
+    std::set<Realisation> missingRealisations;
+    StorePathSet missingPaths;
+    if (settings.isExperimentalFeatureEnabled("ca-derivations") && !derivationHasKnownOutputPaths(drv.type())) {
+        for (auto & outputName : wantedOutputs) {
+            auto thisOutputHash = outputHashes.at(outputName);
+            auto thisOutputId = DrvOutput{ thisOutputHash, outputName };
+            if (!store->queryRealisation(thisOutputId)) {
+                debug("missing output %s", outputName);
+                assert(result.builtOutputs.count(thisOutputId));
+                auto newRealisation = result.builtOutputs.at(thisOutputId);
+                missingRealisations.insert(newRealisation);
+                missingPaths.insert(newRealisation.outPath);
+            }
+        }
+    } else {
+        auto outputPaths = drv.outputsAndOptPaths(*store);
+        for (auto & [outputName, hopefullyOutputPath] : outputPaths) {
+            assert(hopefullyOutputPath.second);
+            if (!store->isValidPath(*hopefullyOutputPath.second))
+                missingPaths.insert(*hopefullyOutputPath.second);
+        }
+    }

-    if (!missing.empty()) {
+    if (!missingPaths.empty()) {
         Activity act(*logger, lvlTalkative, actUnknown, fmt("copying outputs from '%s'", storeUri));
         if (auto localStore = store.dynamic_pointer_cast<LocalStore>())
-            for (auto & i : missing)
-                localStore->locksHeld.insert(store->printStorePath(i)); /* FIXME: ugly */
-        copyPaths(ref<Store>(sshStore), store, missing, NoRepair, NoCheckSigs, NoSubstitute);
+            for (auto & path : missingPaths)
+                localStore->locksHeld.insert(store->printStorePath(path)); /* FIXME: ugly */
+        copyPaths(ref<Store>(sshStore), store, missingPaths, NoRepair, NoCheckSigs, NoSubstitute);
+    }
+
+    // XXX: Should be done as part of `copyPaths`
+    for (auto & realisation : missingRealisations) {
+        // Should hold, because if the feature isn't enabled the set
+        // of missing realisations should be empty
+        settings.requireExperimentalFeature("ca-derivations");
+        store->registerDrvOutput(realisation);
     }

     return 0;
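The hunk above follows a pattern worth noting: first collect whatever the local store does not yet know about (either realisations keyed by a `DrvOutput`, or plain store paths), then copy the paths, and only afterwards register the realisation metadata. Below is a minimal, self-contained sketch of that flow; `DrvOutput`, `Realisation` and `Store` here are simplified stand-ins, not the real Nix classes.

```cpp
// Toy illustration of the "collect missing, copy, then register" flow used
// in build-remote above. The types are hypothetical stand-ins.
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <tuple>

struct DrvOutput { std::string drvHash, outputName; };
bool operator<(const DrvOutput & a, const DrvOutput & b)
{ return std::tie(a.drvHash, a.outputName) < std::tie(b.drvHash, b.outputName); }

struct Realisation { DrvOutput id; std::string outPath; };
bool operator<(const Realisation & a, const Realisation & b) { return a.outPath < b.outPath; }

struct Store {
    std::set<std::string> validPaths;                 // paths we already have
    std::map<std::string, std::string> realisations;  // "drvHash!output" -> outPath

    bool queryRealisation(const DrvOutput & id) const
    { return realisations.count(id.drvHash + "!" + id.outputName) != 0; }

    void registerDrvOutput(const Realisation & r)
    { realisations[r.id.drvHash + "!" + r.id.outputName] = r.outPath; }
};

int main()
{
    Store local;
    std::map<std::string, Realisation> builtOutputs = {
        {"out", {{"sha256:abc", "out"}, "/nix/store/xxx-hello"}},
    };

    // 1. Collect what the local store is missing.
    std::set<Realisation> missingRealisations;
    std::set<std::string> missingPaths;
    for (auto & [name, r] : builtOutputs)
        if (!local.queryRealisation(r.id)) {
            missingRealisations.insert(r);
            missingPaths.insert(r.outPath);
        }

    // 2. "Copy" the paths (stand-in for copyPaths()).
    for (auto & p : missingPaths) local.validPaths.insert(p);

    // 3. Register the realisation metadata afterwards.
    for (auto & r : missingRealisations) local.registerDrvOutput(r);

    std::cout << local.realisations.size() << " realisation(s) registered\n";
}
```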
(File diff suppressed because it is too large.)
@@ -2,7 +2,8 @@

 #include "parsed-derivations.hh"
 #include "lock.hh"
-#include "local-store.hh"
+#include "store-api.hh"
+#include "pathlocks.hh"
 #include "goal.hh"

 namespace nix {
@@ -64,7 +65,7 @@ struct DerivationGoal : public Goal
     bool retrySubstitution;

     /* The derivation stored at drvPath. */
-    std::unique_ptr<BasicDerivation> drv;
+    std::unique_ptr<Derivation> drv;

     std::unique_ptr<ParsedDerivation> parsedDrv;

@@ -79,18 +80,6 @@ struct DerivationGoal : public Goal

     std::map<std::string, InitialOutput> initialOutputs;

-    /* User selected for running the builder. */
-    std::unique_ptr<UserLock> buildUser;
-
-    /* The process ID of the builder. */
-    Pid pid;
-
-    /* The temporary directory. */
-    Path tmpDir;
-
-    /* The path of the temporary directory in the sandbox. */
-    Path tmpDirInSandbox;
-
     /* File descriptor for the log file. */
     AutoCloseFD fdLogFile;
     std::shared_ptr<BufferedSink> logFileSink, logSink;
@@ -106,79 +95,15 @@ struct DerivationGoal : public Goal

     std::string currentHookLine;

-    /* Pipe for the builder's standard output/error. */
-    Pipe builderOut;
-
-    /* Pipe for synchronising updates to the builder namespaces. */
-    Pipe userNamespaceSync;
-
-    /* The mount namespace of the builder, used to add additional
-       paths to the sandbox as a result of recursive Nix calls. */
-    AutoCloseFD sandboxMountNamespace;
-
-    /* On Linux, whether we're doing the build in its own user
-       namespace. */
-    bool usingUserNamespace = true;
-
     /* The build hook. */
     std::unique_ptr<HookInstance> hook;

-    /* Whether we're currently doing a chroot build. */
-    bool useChroot = false;
-
-    Path chrootRootDir;
-
-    /* RAII object to delete the chroot directory. */
-    std::shared_ptr<AutoDelete> autoDelChroot;
-
     /* The sort of derivation we are building. */
     DerivationType derivationType;

-    /* Whether to run the build in a private network namespace. */
-    bool privateNetwork = false;
-
     typedef void (DerivationGoal::*GoalState)();
     GoalState state;

-    /* Stuff we need to pass to initChild(). */
-    struct ChrootPath {
-        Path source;
-        bool optional;
-        ChrootPath(Path source = "", bool optional = false)
-            : source(source), optional(optional)
-        { }
-    };
-    typedef map<Path, ChrootPath> DirsInChroot; // maps target path to source path
-    DirsInChroot dirsInChroot;
-
-    typedef map<string, string> Environment;
-    Environment env;
-
-#if __APPLE__
-    typedef string SandboxProfile;
-    SandboxProfile additionalSandboxProfile;
-#endif
-
-    /* Hash rewriting. */
-    StringMap inputRewrites, outputRewrites;
-    typedef map<StorePath, StorePath> RedirectedOutputs;
-    RedirectedOutputs redirectedOutputs;
-
-    /* The outputs paths used during the build.
-
-       - Input-addressed derivations or fixed content-addressed outputs are
-         sometimes built when some of their outputs already exist, and can not
-         be hidden via sandboxing. We use temporary locations instead and
-         rewrite after the build. Otherwise the regular predetermined paths are
-         put here.
-
-       - Floating content-addressed derivations do not know their final build
-         output paths until the outputs are hashed, so random locations are
-         used, and then renamed. The randomness helps guard against hidden
-         self-references.
-     */
-    OutputPathMap scratchOutputs;
-
     /* The final output paths of the build.

        - For input-addressed derivations, always the precomputed paths
@@ -191,11 +116,6 @@ struct DerivationGoal : public Goal

     BuildMode buildMode;

-    /* If we're repairing without a chroot, there may be outputs that
-       are valid but corrupt. So we redirect these outputs to
-       temporary paths. */
-    StorePathSet redirectedBadOutputs;
-
     BuildResult result;

     /* The current round, if we're building multiple times. */
@@ -203,17 +123,6 @@ struct DerivationGoal : public Goal

     size_t nrRounds;

-    /* Path registration info from the previous round, if we're
-       building multiple times. Since this contains the hash, it
-       allows us to compare whether two rounds produced the same
-       result. */
-    std::map<Path, ValidPathInfo> prevInfos;
-
-    uid_t sandboxUid() { return usingUserNamespace ? 1000 : buildUser->getUID(); }
-    gid_t sandboxGid() { return usingUserNamespace ? 100 : buildUser->getGID(); }
-
-    const static Path homeDir;
-
     std::unique_ptr<MaintainCount<uint64_t>> mcExpectedBuilds, mcRunningBuilds;

     std::unique_ptr<Activity> act;
@@ -226,39 +135,13 @@ struct DerivationGoal : public Goal
     /* The remote machine on which we're building. */
     std::string machineName;

-    /* The recursive Nix daemon socket. */
-    AutoCloseFD daemonSocket;
-
-    /* The daemon main thread. */
-    std::thread daemonThread;
-
-    /* The daemon worker threads. */
-    std::vector<std::thread> daemonWorkerThreads;
-
-    /* Paths that were added via recursive Nix calls. */
-    StorePathSet addedPaths;
-
-    /* Recursive Nix calls are only allowed to build or realize paths
-       in the original input closure or added via a recursive Nix call
-       (so e.g. you can't do 'nix-store -r /nix/store/<bla>' where
-       /nix/store/<bla> is some arbitrary path in a binary cache). */
-    bool isAllowed(const StorePath & path)
-    {
-        return inputPaths.count(path) || addedPaths.count(path);
-    }
-
-    friend struct RestrictedStore;
-
     DerivationGoal(const StorePath & drvPath,
         const StringSet & wantedOutputs, Worker & worker,
         BuildMode buildMode = bmNormal);
     DerivationGoal(const StorePath & drvPath, const BasicDerivation & drv,
         const StringSet & wantedOutputs, Worker & worker,
         BuildMode buildMode = bmNormal);
-    ~DerivationGoal();
+    virtual ~DerivationGoal();

-    /* Whether we need to perform hash rewriting if there are valid output paths. */
-    bool needsHashRewrite();
-
     void timedOut(Error && ex) override;

@@ -280,7 +163,7 @@ struct DerivationGoal : public Goal
     void closureRepaired();
     void inputsRealised();
     void tryToBuild();
-    void tryLocalBuild();
+    virtual void tryLocalBuild();
     void buildDone();

     void resolvedFinished();
@@ -288,40 +171,11 @@ struct DerivationGoal : public Goal
     /* Is the build hook willing to perform the build? */
     HookReply tryBuildHook();

-    /* Start building a derivation. */
-    void startBuilder();
-
-    /* Fill in the environment for the builder. */
-    void initEnv();
-
-    /* Setup tmp dir location. */
-    void initTmpDir();
-
-    /* Write a JSON file containing the derivation attributes. */
-    void writeStructuredAttrs();
-
-    void startDaemon();
-
-    void stopDaemon();
-
-    /* Add 'path' to the set of paths that may be referenced by the
-       outputs, and make it appear in the sandbox. */
-    void addDependency(const StorePath & path);
-
-    /* Make a file owned by the builder. */
-    void chownToBuilder(const Path & path);
-
-    /* Run the builder's process. */
-    void runChild();
+    virtual int getChildStatus();

     /* Check that the derivation outputs all exist and register them
        as valid. */
-    void registerOutputs();
-
-    /* Check that an output meets the requirements specified by the
-       'outputChecks' attribute (or the legacy
-       '{allowed,disallowed}{References,Requisites}' attributes). */
-    void checkOutputs(const std::map<std::string, ValidPathInfo> & outputs);
+    virtual void registerOutputs();

     /* Open a log file and a pipe to it. */
     Path openLogFile();
@@ -329,8 +183,18 @@ struct DerivationGoal : public Goal
     /* Close the log file. */
     void closeLogFile();

-    /* Delete the temporary directory, if we have one. */
-    void deleteTmpDir(bool force);
+    /* Close the read side of the logger pipe. */
+    virtual void closeReadPipes();
+
+    /* Cleanup hooks for buildDone() */
+    virtual void cleanupHookFinally();
+    virtual void cleanupPreChildKill();
+    virtual void cleanupPostChildKill();
+    virtual bool cleanupDecideWhetherDiskFull();
+    virtual void cleanupPostOutputsRegisteredModeCheck();
+    virtual void cleanupPostOutputsRegisteredModeNonCheck();
+
+    virtual bool isReadDesc(int fd);

     /* Callback used by the worker to write to the log. */
     void handleChildOutput(int fd, const string & data) override;
@@ -347,17 +211,7 @@ struct DerivationGoal : public Goal
     void checkPathValidity();

     /* Forcibly kill the child process, if any. */
-    void killChild();
+    virtual void killChild();

-    /* Create alternative path calculated from but distinct from the
-       input, so we can avoid overwriting outputs (or other store paths)
-       that already exist. */
-    StorePath makeFallbackPath(const StorePath & path);
-    /* Make a path to another based on the output name along with the
-       derivation hash. */
-    /* FIXME add option to randomize, so we can audit whether our
-       rewrites caught everything */
-    StorePath makeFallbackPath(std::string_view outputName);
-
     void repairClosure();

@@ -370,4 +224,6 @@ struct DerivationGoal : public Goal
     StorePathSet exportReferences(const StorePathSet & storePaths);
 };

+MakeError(NotDeterministic, BuildError);
+
 }
@@ -2,6 +2,7 @@
 #include "worker.hh"
 #include "substitution-goal.hh"
 #include "derivation-goal.hh"
+#include "local-store.hh"

 namespace nix {

@@ -58,6 +59,26 @@ BuildResult Store::buildDerivation(const StorePath & drvPath, const BasicDerivat
         result.status = BuildResult::MiscFailure;
         result.errorMsg = e.msg();
     }
+    // XXX: Should use `goal->queryPartialDerivationOutputMap()` once it's
+    // extended to return the full realisation for each output
+    auto staticDrvOutputs = drv.outputsAndOptPaths(*this);
+    auto outputHashes = staticOutputHashes(*this, drv);
+    for (auto & [outputName, staticOutput] : staticDrvOutputs) {
+        auto outputId = DrvOutput{outputHashes.at(outputName), outputName};
+        if (staticOutput.second)
+            result.builtOutputs.insert_or_assign(
+                outputId,
+                Realisation{ outputId, *staticOutput.second}
+            );
+        if (settings.isExperimentalFeatureEnabled("ca-derivations") && !derivationHasKnownOutputPaths(drv.type())) {
+            auto realisation = this->queryRealisation(outputId);
+            if (realisation)
+                result.builtOutputs.insert_or_assign(
+                    outputId,
+                    *realisation
+                );
+        }
+    }

     return result;
 }
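For context, the new code keys each built output by a `DrvOutput` (a derivation output hash plus an output name) and stores the corresponding realisation in a map on the build result. A stripped-down sketch of that shape, using placeholder string types instead of the real classes:

```cpp
#include <iostream>
#include <map>
#include <string>
#include <utility>

// Hypothetical stand-in: a DrvOutput is identified by a (drv hash, output name) pair.
using DrvOutputId = std::pair<std::string, std::string>;
using OutPath = std::string;

int main()
{
    std::map<DrvOutputId, OutPath> builtOutputs;

    // Statically known output path: record it unconditionally.
    builtOutputs.insert_or_assign({"sha256:abc", "out"}, "/nix/store/...-hello");

    // For floating content-addressed outputs the path is only known after the
    // build, so it would be looked up afterwards and overwrite the entry.
    builtOutputs.insert_or_assign({"sha256:abc", "out"}, "/nix/store/...-hello-ca");

    for (auto & [id, path] : builtOutputs)
        std::cout << id.first << "!" << id.second << " -> " << path << "\n";
}
```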
src/libstore/build/local-derivation-goal.cc (new file, 2823 lines; diff suppressed because it is too large)

src/libstore/build/local-derivation-goal.hh (new file, 199 lines)
@@ -0,0 +1,199 @@
+#pragma once
+
+#include "derivation-goal.hh"
+#include "local-store.hh"
+
+namespace nix {
+
+struct LocalDerivationGoal : public DerivationGoal
+{
+    LocalStore & getLocalStore();
+
+    /* User selected for running the builder. */
+    std::unique_ptr<UserLock> buildUser;
+
+    /* The process ID of the builder. */
+    Pid pid;
+
+    /* The temporary directory. */
+    Path tmpDir;
+
+    /* The path of the temporary directory in the sandbox. */
+    Path tmpDirInSandbox;
+
+    /* Pipe for the builder's standard output/error. */
+    Pipe builderOut;
+
+    /* Pipe for synchronising updates to the builder namespaces. */
+    Pipe userNamespaceSync;
+
+    /* The mount namespace of the builder, used to add additional
+       paths to the sandbox as a result of recursive Nix calls. */
+    AutoCloseFD sandboxMountNamespace;
+
+    /* On Linux, whether we're doing the build in its own user
+       namespace. */
+    bool usingUserNamespace = true;
+
+    /* Whether we're currently doing a chroot build. */
+    bool useChroot = false;
+
+    Path chrootRootDir;
+
+    /* RAII object to delete the chroot directory. */
+    std::shared_ptr<AutoDelete> autoDelChroot;
+
+    /* Whether to run the build in a private network namespace. */
+    bool privateNetwork = false;
+
+    /* Stuff we need to pass to initChild(). */
+    struct ChrootPath {
+        Path source;
+        bool optional;
+        ChrootPath(Path source = "", bool optional = false)
+            : source(source), optional(optional)
+        { }
+    };
+    typedef map<Path, ChrootPath> DirsInChroot; // maps target path to source path
+    DirsInChroot dirsInChroot;
+
+    typedef map<string, string> Environment;
+    Environment env;
+
+#if __APPLE__
+    typedef string SandboxProfile;
+    SandboxProfile additionalSandboxProfile;
+#endif
+
+    /* Hash rewriting. */
+    StringMap inputRewrites, outputRewrites;
+    typedef map<StorePath, StorePath> RedirectedOutputs;
+    RedirectedOutputs redirectedOutputs;
+
+    /* The outputs paths used during the build.
+
+       - Input-addressed derivations or fixed content-addressed outputs are
+         sometimes built when some of their outputs already exist, and can not
+         be hidden via sandboxing. We use temporary locations instead and
+         rewrite after the build. Otherwise the regular predetermined paths are
+         put here.
+
+       - Floating content-addressed derivations do not know their final build
+         output paths until the outputs are hashed, so random locations are
+         used, and then renamed. The randomness helps guard against hidden
+         self-references.
+     */
+    OutputPathMap scratchOutputs;
+
+    /* Path registration info from the previous round, if we're
+       building multiple times. Since this contains the hash, it
+       allows us to compare whether two rounds produced the same
+       result. */
+    std::map<Path, ValidPathInfo> prevInfos;
+
+    uid_t sandboxUid() { return usingUserNamespace ? 1000 : buildUser->getUID(); }
+    gid_t sandboxGid() { return usingUserNamespace ? 100 : buildUser->getGID(); }
+
+    const static Path homeDir;
+
+    /* The recursive Nix daemon socket. */
+    AutoCloseFD daemonSocket;
+
+    /* The daemon main thread. */
+    std::thread daemonThread;
+
+    /* The daemon worker threads. */
+    std::vector<std::thread> daemonWorkerThreads;
+
+    /* Paths that were added via recursive Nix calls. */
+    StorePathSet addedPaths;
+
+    /* Recursive Nix calls are only allowed to build or realize paths
+       in the original input closure or added via a recursive Nix call
+       (so e.g. you can't do 'nix-store -r /nix/store/<bla>' where
+       /nix/store/<bla> is some arbitrary path in a binary cache). */
+    bool isAllowed(const StorePath & path)
+    {
+        return inputPaths.count(path) || addedPaths.count(path);
+    }
+
+    friend struct RestrictedStore;
+
+    using DerivationGoal::DerivationGoal;
+
+    virtual ~LocalDerivationGoal() override;
+
+    /* Whether we need to perform hash rewriting if there are valid output paths. */
+    bool needsHashRewrite();
+
+    /* The additional states. */
+    void tryLocalBuild() override;
+
+    /* Start building a derivation. */
+    void startBuilder();
+
+    /* Fill in the environment for the builder. */
+    void initEnv();
+
+    /* Setup tmp dir location. */
+    void initTmpDir();
+
+    /* Write a JSON file containing the derivation attributes. */
+    void writeStructuredAttrs();
+
+    void startDaemon();
+
+    void stopDaemon();
+
+    /* Add 'path' to the set of paths that may be referenced by the
+       outputs, and make it appear in the sandbox. */
+    void addDependency(const StorePath & path);
+
+    /* Make a file owned by the builder. */
+    void chownToBuilder(const Path & path);
+
+    int getChildStatus() override;
+
+    /* Run the builder's process. */
+    void runChild();
+
+    /* Check that the derivation outputs all exist and register them
+       as valid. */
+    void registerOutputs() override;
+
+    /* Check that an output meets the requirements specified by the
+       'outputChecks' attribute (or the legacy
+       '{allowed,disallowed}{References,Requisites}' attributes). */
+    void checkOutputs(const std::map<std::string, ValidPathInfo> & outputs);
+
+    /* Close the read side of the logger pipe. */
+    void closeReadPipes() override;
+
+    /* Cleanup hooks for buildDone() */
+    void cleanupHookFinally() override;
+    void cleanupPreChildKill() override;
+    void cleanupPostChildKill() override;
+    bool cleanupDecideWhetherDiskFull() override;
+    void cleanupPostOutputsRegisteredModeCheck() override;
+    void cleanupPostOutputsRegisteredModeNonCheck() override;
+
+    bool isReadDesc(int fd) override;
+
+    /* Delete the temporary directory, if we have one. */
+    void deleteTmpDir(bool force);
+
+    /* Forcibly kill the child process, if any. */
+    void killChild() override;
+
+    /* Create alternative path calculated from but distinct from the
+       input, so we can avoid overwriting outputs (or other store paths)
+       that already exist. */
+    StorePath makeFallbackPath(const StorePath & path);
+    /* Make a path to another based on the output name along with the
+       derivation hash. */
+    /* FIXME add option to randomize, so we can audit whether our
+       rewrites caught everything */
+    StorePath makeFallbackPath(std::string_view outputName);
+};
+
+}
@@ -1,7 +1,7 @@
 #include "machines.hh"
 #include "worker.hh"
 #include "substitution-goal.hh"
-#include "derivation-goal.hh"
+#include "local-derivation-goal.hh"
 #include "hook-instance.hh"

 #include <poll.h>
@@ -59,8 +59,10 @@ std::shared_ptr<DerivationGoal> Worker::makeDerivationGoalCommon(
 std::shared_ptr<DerivationGoal> Worker::makeDerivationGoal(const StorePath & drvPath,
     const StringSet & wantedOutputs, BuildMode buildMode)
 {
-    return makeDerivationGoalCommon(drvPath, wantedOutputs, [&]() {
-        return std::make_shared<DerivationGoal>(drvPath, wantedOutputs, *this, buildMode);
+    return makeDerivationGoalCommon(drvPath, wantedOutputs, [&]() -> std::shared_ptr<DerivationGoal> {
+        return !dynamic_cast<LocalStore *>(&store)
+            ? std::make_shared</* */DerivationGoal>(drvPath, wantedOutputs, *this, buildMode)
+            : std::make_shared<LocalDerivationGoal>(drvPath, wantedOutputs, *this, buildMode);
     });
 }

@@ -68,8 +70,10 @@ std::shared_ptr<DerivationGoal> Worker::makeDerivationGoal(const StorePath & drv
 std::shared_ptr<DerivationGoal> Worker::makeBasicDerivationGoal(const StorePath & drvPath,
     const BasicDerivation & drv, const StringSet & wantedOutputs, BuildMode buildMode)
 {
-    return makeDerivationGoalCommon(drvPath, wantedOutputs, [&]() {
-        return std::make_shared<DerivationGoal>(drvPath, drv, wantedOutputs, *this, buildMode);
+    return makeDerivationGoalCommon(drvPath, wantedOutputs, [&]() -> std::shared_ptr<DerivationGoal> {
+        return !dynamic_cast<LocalStore *>(&store)
+            ? std::make_shared</* */DerivationGoal>(drvPath, drv, wantedOutputs, *this, buildMode)
+            : std::make_shared<LocalDerivationGoal>(drvPath, drv, wantedOutputs, *this, buildMode);
     });
 }

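The factory change above picks the goal subclass based on whether the wrapped store is a `LocalStore`. The same dispatch pattern in a self-contained form; the class names here are illustrative, not the real Nix hierarchy:

```cpp
#include <iostream>
#include <memory>

struct Store { virtual ~Store() = default; };
struct LocalStore : Store { };   // builds happen on this machine
struct RemoteStore : Store { };  // builds are delegated elsewhere

struct Goal { virtual ~Goal() = default; virtual const char * kind() const { return "generic"; } };
struct LocalGoal : Goal { const char * kind() const override { return "local"; } };

// Mirrors the lambda in makeDerivationGoal(): only hand out the
// local-build-capable subclass when the store really is local.
std::shared_ptr<Goal> makeGoal(Store & store)
{
    return !dynamic_cast<LocalStore *>(&store)
        ? std::make_shared<Goal>()
        : std::make_shared<LocalGoal>();
}

int main()
{
    LocalStore l; RemoteStore r;
    std::cout << makeGoal(l)->kind() << "\n";  // local
    std::cout << makeGoal(r)->kind() << "\n";  // generic
}
```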
@@ -575,6 +575,9 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
         auto res = store->buildDerivation(drvPath, drv, buildMode);
         logger->stopWork();
         to << res.status << res.errorMsg;
+        if (GET_PROTOCOL_MINOR(clientVersion) >= 0xc) {
+            worker_proto::write(*store, to, res.builtOutputs);
+        }
         break;
     }

@@ -57,6 +57,17 @@ bool derivationIsFixed(DerivationType dt) {
     assert(false);
 }

+bool derivationHasKnownOutputPaths(DerivationType dt) {
+    switch (dt) {
+    case DerivationType::InputAddressed: return true;
+    case DerivationType::CAFixed: return true;
+    case DerivationType::CAFloating: return false;
+    case DerivationType::DeferredInputAddressed: return false;
+    };
+    assert(false);
+}
+
+
 bool derivationIsImpure(DerivationType dt) {
     switch (dt) {
     case DerivationType::InputAddressed: return false;
@@ -94,6 +94,11 @@ bool derivationIsFixed(DerivationType);
    derivation is controlled separately. Never true for non-CA derivations. */
 bool derivationIsImpure(DerivationType);

+/* Does the derivation knows its own output paths?
+ * Only true when there's no floating-ca derivation involved in the closure.
+ */
+bool derivationHasKnownOutputPaths(DerivationType);
+
 struct BasicDerivation
 {
     DerivationOutputs outputs; /* keyed on symbolic IDs */
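The new helper is just an exhaustive switch over the derivation type. A standalone sketch of the same shape, with the enum reduced to what the diff shows and a defensive fallthrough added for illustration:

```cpp
#include <cassert>
#include <iostream>

enum class DerivationType { InputAddressed, CAFixed, CAFloating, DeferredInputAddressed };

// True when the output paths can be computed without building,
// mirroring derivationHasKnownOutputPaths() above.
bool hasKnownOutputPaths(DerivationType dt)
{
    switch (dt) {
    case DerivationType::InputAddressed:         return true;
    case DerivationType::CAFixed:                return true;
    case DerivationType::CAFloating:             return false;
    case DerivationType::DeferredInputAddressed: return false;
    }
    assert(false && "unhandled DerivationType");
    return false; // not reached while the switch stays exhaustive
}

int main()
{
    std::cout << hasKnownOutputPaths(DerivationType::CAFloating) << "\n"; // 0
}
```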
@@ -165,10 +165,15 @@ bool Settings::isExperimentalFeatureEnabled(const std::string & name)
     return std::find(f.begin(), f.end(), name) != f.end();
 }

+MissingExperimentalFeature::MissingExperimentalFeature(std::string feature)
+    : Error("experimental Nix feature '%1%' is disabled; use '--experimental-features %1%' to override", feature)
+    , missingFeature(feature)
+{}
+
 void Settings::requireExperimentalFeature(const std::string & name)
 {
     if (!isExperimentalFeatureEnabled(name))
-        throw Error("experimental Nix feature '%1%' is disabled; use '--experimental-features %1%' to override", name);
+        throw MissingExperimentalFeature(name);
 }

 bool Settings::isWSL1()

@@ -45,6 +45,15 @@ struct PluginFilesSetting : public BaseSetting<Paths>
     void set(const std::string & str, bool append = false) override;
 };

+class MissingExperimentalFeature: public Error
+{
+public:
+    std::string missingFeature;
+
+    MissingExperimentalFeature(std::string feature);
+    virtual const char* sname() const override { return "MissingExperimentalFeature"; }
+};
+
 class Settings : public Config {

     unsigned int getDefaultCores();
@@ -632,7 +641,7 @@ public:
          is `root`.

          > **Warning**
          >
          > Adding a user to `trusted-users` is essentially equivalent to
          > giving that user root access to the system. For example, the user
          > can set `sandbox-paths` and thereby obtain read access to
@@ -722,13 +731,13 @@ public:
          The program executes with no arguments. The program's environment
          contains the following environment variables:

          - `DRV_PATH`
            The derivation for the built paths.

            Example:
            `/nix/store/5nihn1a7pa8b25l9zafqaqibznlvvp3f-bash-4.4-p23.drv`

          - `OUT_PATHS`
            Output paths of the built derivation, separated by a space
            character.

@@ -759,7 +768,7 @@ public:
          documentation](https://ec.haxx.se/usingcurl-netrc.html).

          > **Note**
          >
          > This must be an absolute path, and `~` is not resolved. For
          > example, `~/.netrc` won't resolve to your home directory's
          > `.netrc`.
@@ -257,7 +257,9 @@ public:

         if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 3)
             conn->from >> status.timesBuilt >> status.isNonDeterministic >> status.startTime >> status.stopTime;
+        if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 6) {
+            status.builtOutputs = worker_proto::read(*this, conn->from, Phantom<DrvOutputs> {});
+        }
         return status;
     }

@@ -655,6 +655,7 @@ void LocalStore::checkDerivationOutputs(const StorePath & drvPath, const Derivat

 void LocalStore::registerDrvOutput(const Realisation & info)
 {
+    settings.requireExperimentalFeature("ca-derivations");
     auto state(_state.lock());
     retrySQLite<void>([&]() {
         state->stmts->RegisterRealisedOutput.use()
@@ -280,7 +280,7 @@ private:

     void createUser(const std::string & userName, uid_t userId) override;

-    friend struct DerivationGoal;
+    friend struct LocalDerivationGoal;
     friend struct SubstitutionGoal;
 };

@@ -36,6 +36,8 @@ struct Realisation {
     GENERATE_CMP(Realisation, me->id, me->outPath);
 };

+typedef std::map<DrvOutput, Realisation> DrvOutputs;
+
 struct OpaquePath {
     StorePath path;

@@ -12,6 +12,7 @@
 #include "logging.hh"
 #include "callback.hh"
 #include "filetransfer.hh"
+#include <nlohmann/json.hpp>

 namespace nix {

@@ -49,6 +50,21 @@ void write(const Store & store, Sink & out, const ContentAddress & ca)
     out << renderContentAddress(ca);
 }

+Realisation read(const Store & store, Source & from, Phantom<Realisation> _)
+{
+    std::string rawInput = readString(from);
+    return Realisation::fromJSON(
+        nlohmann::json::parse(rawInput),
+        "remote-protocol"
+    );
+}
+void write(const Store & store, Sink & out, const Realisation & realisation)
+{ out << realisation.toJSON().dump(); }
+
+DrvOutput read(const Store & store, Source & from, Phantom<DrvOutput> _)
+{ return DrvOutput::parse(readString(from)); }
+void write(const Store & store, Sink & out, const DrvOutput & drvOutput)
+{ out << drvOutput.to_string(); }
+
 std::optional<StorePath> read(const Store & store, Source & from, Phantom<std::optional<StorePath>> _)
 {
@@ -665,6 +681,10 @@ BuildResult RemoteStore::buildDerivation(const StorePath & drvPath, const BasicD
     unsigned int status;
     conn->from >> status >> res.errorMsg;
     res.status = (BuildResult::Status) status;
+    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 0xc) {
+        auto builtOutputs = worker_proto::read(*this, conn->from, Phantom<DrvOutputs> {});
+        res.builtOutputs = builtOutputs;
+    }
     return res;
 }

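Both ends gate the new `builtOutputs` field on the negotiated protocol minor version (0x1c here), so older daemons and clients keep interoperating. A self-contained sketch of that version-gating idiom; the stream encoding below is made up purely for illustration:

```cpp
#include <iostream>
#include <sstream>
#include <string>

#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)

// Writer side: only append the new trailing field for peers that understand it.
void writeResult(std::ostream & out, unsigned peerVersion, int status, const std::string & builtOutputs)
{
    out << status << '\n';
    if (GET_PROTOCOL_MINOR(peerVersion) >= 0xc)
        out << builtOutputs << '\n';
}

// Reader side: mirror the same check so the stream stays in sync.
void readResult(std::istream & in, unsigned peerVersion)
{
    int status; in >> status;
    std::string builtOutputs;
    if (GET_PROTOCOL_MINOR(peerVersion) >= 0xc)
        in >> builtOutputs;
    std::cout << "status=" << status << " builtOutputs=" << builtOutputs << "\n";
}

int main()
{
    unsigned oldPeer = 0x11b, newPeer = 0x11c;
    std::stringstream s1, s2;
    writeResult(s1, oldPeer, 0, "ignored"); readResult(s1, oldPeer);
    writeResult(s2, newPeer, 0, "out!abc"); readResult(s2, newPeer);
}
```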
@@ -5,7 +5,7 @@ namespace nix {
 #define SERVE_MAGIC_1 0x390c9deb
 #define SERVE_MAGIC_2 0x5452eecb

-#define SERVE_PROTOCOL_VERSION 0x205
+#define SERVE_PROTOCOL_VERSION 0x206
 #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
 #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)

@@ -800,6 +800,36 @@ void copyStorePath(ref<Store> srcStore, ref<Store> dstStore,
 }


+std::map<StorePath, StorePath> copyPaths(ref<Store> srcStore, ref<Store> dstStore, const RealisedPath::Set & paths,
+    RepairFlag repair, CheckSigsFlag checkSigs, SubstituteFlag substitute)
+{
+    StorePathSet storePaths;
+    std::set<Realisation> realisations;
+    for (auto & path : paths) {
+        storePaths.insert(path.path());
+        if (auto realisation = std::get_if<Realisation>(&path.raw)) {
+            settings.requireExperimentalFeature("ca-derivations");
+            realisations.insert(*realisation);
+        }
+    }
+    auto pathsMap = copyPaths(srcStore, dstStore, storePaths, repair, checkSigs, substitute);
+    try {
+        for (auto & realisation : realisations) {
+            dstStore->registerDrvOutput(realisation);
+        }
+    } catch (MissingExperimentalFeature & e) {
+        // Don't fail if the remote doesn't support CA derivations is it might
+        // not be within our control to change that, and we might still want
+        // to at least copy the output paths.
+        if (e.missingFeature == "ca-derivations")
+            ignoreException();
+        else
+            throw;
+    }
+
+    return pathsMap;
+}
+
 std::map<StorePath, StorePath> copyPaths(ref<Store> srcStore, ref<Store> dstStore, const StorePathSet & storePaths,
     RepairFlag repair, CheckSigsFlag checkSigs, SubstituteFlag substitute)
 {
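The new overload registers realisations on the destination but tolerates a destination without the `ca-derivations` feature, by catching the new typed error and checking which feature was missing. A reduced sketch of that pattern, with a simplified error class rather than the real `nix::Error` hierarchy:

```cpp
#include <iostream>
#include <stdexcept>
#include <string>
#include <utility>

struct MissingExperimentalFeature : std::runtime_error {
    std::string missingFeature;
    explicit MissingExperimentalFeature(std::string feature)
        : std::runtime_error("experimental feature '" + feature + "' is disabled")
        , missingFeature(std::move(feature)) { }
};

// Stand-in for dstStore->registerDrvOutput() on a store without CA support.
void registerDrvOutput() { throw MissingExperimentalFeature("ca-derivations"); }

int main()
{
    try {
        registerDrvOutput();
    } catch (MissingExperimentalFeature & e) {
        // Ignore only the feature we know how to degrade gracefully for;
        // anything else stays a hard error.
        if (e.missingFeature == "ca-derivations")
            std::cerr << "warning: skipping realisation registration: " << e.what() << "\n";
        else
            throw;
    }
}
```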
@@ -813,7 +843,6 @@ std::map<StorePath, StorePath> copyPaths(ref<Store> srcStore, ref<Store> dstStor
     for (auto & path : storePaths)
         pathsMap.insert_or_assign(path, path);

-    if (missing.empty()) return pathsMap;

     Activity act(*logger, lvlInfo, actCopyPaths, fmt("copying %d paths", missing.size()));

@@ -890,21 +919,9 @@ std::map<StorePath, StorePath> copyPaths(ref<Store> srcStore, ref<Store> dstStor
         nrDone++;
         showProgress();
     });

     return pathsMap;
 }

-
-void copyClosure(ref<Store> srcStore, ref<Store> dstStore,
-    const StorePathSet & storePaths, RepairFlag repair, CheckSigsFlag checkSigs,
-    SubstituteFlag substitute)
-{
-    StorePathSet closure;
-    srcStore->computeFSClosure(storePaths, closure);
-    copyPaths(srcStore, dstStore, closure, repair, checkSigs, substitute);
-}
-

 std::optional<ValidPathInfo> decodeValidPathInfo(const Store & store, std::istream & str, std::optional<HashResult> hashGiven)
 {
     std::string path;
@@ -162,6 +162,8 @@ struct BuildResult
        non-determinism.) */
     bool isNonDeterministic = false;

+    DrvOutputs builtOutputs;
+
     /* The start/stop times of the build (or one of the rounds, if it
        was repeated). */
     time_t startTime = 0, stopTime = 0;
@@ -748,15 +750,12 @@ void copyStorePath(ref<Store> srcStore, ref<Store> dstStore,
    that. Returns a map of what each path was copied to the dstStore
    as. */
 std::map<StorePath, StorePath> copyPaths(ref<Store> srcStore, ref<Store> dstStore,
-    const StorePathSet & storePaths,
+    const RealisedPath::Set &,
     RepairFlag repair = NoRepair,
     CheckSigsFlag checkSigs = CheckSigs,
     SubstituteFlag substitute = NoSubstitute);
+std::map<StorePath, StorePath> copyPaths(ref<Store> srcStore, ref<Store> dstStore,
+    const StorePathSet& paths,
-
-/* Copy the closure of the specified paths from one store to another. */
-void copyClosure(ref<Store> srcStore, ref<Store> dstStore,
-    const StorePathSet & storePaths,
     RepairFlag repair = NoRepair,
     CheckSigsFlag checkSigs = CheckSigs,
     SubstituteFlag substitute = NoSubstitute);

@@ -9,7 +9,7 @@ namespace nix {
 #define WORKER_MAGIC_1 0x6e697863
 #define WORKER_MAGIC_2 0x6478696f

-#define PROTOCOL_VERSION 0x11b
+#define PROTOCOL_VERSION 0x11c
 #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
 #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)

@@ -86,6 +86,8 @@ namespace worker_proto {
 MAKE_WORKER_PROTO(, std::string);
 MAKE_WORKER_PROTO(, StorePath);
 MAKE_WORKER_PROTO(, ContentAddress);
+MAKE_WORKER_PROTO(, Realisation);
+MAKE_WORKER_PROTO(, DrvOutput);

 MAKE_WORKER_PROTO(template<typename T>, std::set<T>);

@@ -19,6 +19,14 @@ void Args::addFlag(Flag && flag_)
     if (flag->shortName) shortFlags[flag->shortName] = flag;
 }

+void Args::removeFlag(const std::string & longName)
+{
+    auto flag = longFlags.find(longName);
+    assert(flag != longFlags.end());
+    if (flag->second->shortName) shortFlags.erase(flag->second->shortName);
+    longFlags.erase(flag);
+}
+
 void Completions::add(std::string completion, std::string description)
 {
     assert(description.find('\n') == std::string::npos);
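`Args::removeFlag` lets a subcommand drop a flag it inherited, which `CmdFlakeUpdate` uses further down to strip `--recreate-lock-file` and friends. The underlying idea is just erasing the entry from both the long-name and short-name registries; here is a toy version with plain maps rather than the real `Args` class:

```cpp
#include <cassert>
#include <iostream>
#include <map>
#include <string>

struct Flag { std::string longName; char shortName = 0; };

struct Args {
    std::map<std::string, Flag> longFlags;
    std::map<char, Flag> shortFlags;

    void addFlag(Flag f)
    {
        if (f.shortName) shortFlags[f.shortName] = f;
        longFlags[f.longName] = f;
    }

    // Same shape as the new Args::removeFlag(): the flag must exist,
    // and both indexes have to stay consistent.
    void removeFlag(const std::string & longName)
    {
        auto it = longFlags.find(longName);
        assert(it != longFlags.end());
        if (it->second.shortName) shortFlags.erase(it->second.shortName);
        longFlags.erase(it);
    }
};

int main()
{
    Args args;
    args.addFlag({"recreate-lock-file", 0});
    args.addFlag({"update-input", 'u'});
    args.removeFlag("update-input");
    std::cout << args.longFlags.size() << " long, " << args.shortFlags.size() << " short\n"; // 1 long, 0 short
}
```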
@@ -140,6 +140,8 @@ public:

     void addFlag(Flag && flag);

+    void removeFlag(const std::string & longName);
+
     void expectArgs(ExpectedArg && arg)
     {
         expectedArgs.emplace_back(std::move(arg));
@@ -946,7 +946,7 @@ void killUser(uid_t uid)
 #else
             if (kill(-1, SIGKILL) == 0) break;
 #endif
-            if (errno == ESRCH) break; /* no more processes */
+            if (errno == ESRCH || errno == EPERM) break; /* no more processes */
             if (errno != EINTR)
                 throw SysError("cannot kill processes for uid '%1%'", uid);
         }

@@ -50,12 +50,12 @@ static int main_nix_copy_closure(int argc, char ** argv)
         auto to = toMode ? openStore(remoteUri) : openStore();
         auto from = toMode ? openStore() : openStore(remoteUri);

-        StorePathSet storePaths2;
+        RealisedPath::Set storePaths2;
         for (auto & path : storePaths)
             storePaths2.insert(from->followLinksToStorePath(path));

-        StorePathSet closure;
-        from->computeFSClosure(storePaths2, closure, false, includeOutputs);
+        RealisedPath::Set closure;
+        RealisedPath::closure(*from, storePaths2, closure);

         copyPaths(from, to, closure, NoRepair, NoCheckSigs, useSubstitutes);

@@ -911,6 +911,10 @@ static void opServe(Strings opFlags, Strings opArgs)

             if (GET_PROTOCOL_MINOR(clientVersion) >= 3)
                 out << status.timesBuilt << status.isNonDeterministic << status.startTime << status.stopTime;
+            if (GET_PROTOCOL_MINOR(clientVersion >= 5)) {
+                worker_proto::write(*store, out, status.builtOutputs);
+            }
+

             break;
         }
@@ -8,7 +8,7 @@

 using namespace nix;

-struct CmdCopy : StorePathsCommand
+struct CmdCopy : RealisedPathsCommand
 {
     std::string srcUri, dstUri;

@@ -16,10 +16,10 @@ struct CmdCopy : StorePathsCommand

     SubstituteFlag substitute = NoSubstitute;

-    using StorePathsCommand::run;
+    using RealisedPathsCommand::run;

     CmdCopy()
-        : StorePathsCommand(true)
+        : RealisedPathsCommand(true)
     {
         addFlag({
             .longName = "from",
@@ -75,14 +75,15 @@ struct CmdCopy : StorePathsCommand
         if (srcUri.empty() && dstUri.empty())
             throw UsageError("you must pass '--from' and/or '--to'");

-        StorePathsCommand::run(store);
+        RealisedPathsCommand::run(store);
     }

-    void run(ref<Store> srcStore, StorePaths storePaths) override
+    void run(ref<Store> srcStore, std::vector<RealisedPath> paths) override
     {
         ref<Store> dstStore = dstUri.empty() ? openStore() : openStore(dstUri);

-        copyPaths(srcStore, dstStore, StorePathSet(storePaths.begin(), storePaths.end()),
+        copyPaths(
+            srcStore, dstStore, RealisedPath::Set(paths.begin(), paths.end()),
             NoRepair, checkSigs, substitute);
     }
 };

src/nix/flake-lock.md (new file, 38 lines)
@@ -0,0 +1,38 @@
+R""(
+
+# Examples
+
+* Update the `nixpkgs` and `nix` inputs of the flake in the current
+  directory:
+
+  ```console
+  # nix flake lock --update-input nixpkgs --update-input nix
+  * Updated 'nix': 'github:NixOS/nix/9fab14adbc3810d5cc1f88672fde1eee4358405c' -> 'github:NixOS/nix/8927cba62f5afb33b01016d5c4f7f8b7d0adde3c'
+  * Updated 'nixpkgs': 'github:NixOS/nixpkgs/3d2d8f281a27d466fa54b469b5993f7dde198375' -> 'github:NixOS/nixpkgs/a3a3dda3bacf61e8a39258a0ed9c924eeca8e293'
+  ```
+
+# Description
+
+This command updates the lock file of a flake (`flake.lock`) so that
+it contains a lock for every flake input specified in
+`flake.nix`. Existing lock file entries are not updated unless
+required by a flag such as `--update-input`.
+
+Note that every command that operates on a flake will also update the
+lock file if needed, and supports the same flags. Therefore,
+
+```console
+# nix flake lock --update-input nixpkgs
+# nix build
+```
+
+is equivalent to:
+
+```console
+# nix build --update-input nixpkgs
+```
+
+Thus, this command is only useful if you want to update the lock file
+separately from any other action such as building.
+
+)""
@@ -2,52 +2,33 @@ R""(

 # Examples

-* Update the `nixpkgs` and `nix` inputs of the flake in the current
-  directory:
-
-  ```console
-  # nix flake update --update-input nixpkgs --update-input nix
-  * Updated 'nix': 'github:NixOS/nix/9fab14adbc3810d5cc1f88672fde1eee4358405c' -> 'github:NixOS/nix/8927cba62f5afb33b01016d5c4f7f8b7d0adde3c'
-  * Updated 'nixpkgs': 'github:NixOS/nixpkgs/3d2d8f281a27d466fa54b469b5993f7dde198375' -> 'github:NixOS/nixpkgs/a3a3dda3bacf61e8a39258a0ed9c924eeca8e293'
-  ```
-
 * Recreate the lock file (i.e. update all inputs) and commit the new
   lock file:

   ```console
-  # nix flake update --recreate-lock-file --commit-lock-file
+  # nix flake update
+  * Updated 'nix': 'github:NixOS/nix/9fab14adbc3810d5cc1f88672fde1eee4358405c' -> 'github:NixOS/nix/8927cba62f5afb33b01016d5c4f7f8b7d0adde3c'
+  * Updated 'nixpkgs': 'github:NixOS/nixpkgs/3d2d8f281a27d466fa54b469b5993f7dde198375' -> 'github:NixOS/nixpkgs/a3a3dda3bacf61e8a39258a0ed9c924eeca8e293'
   …
   warning: committed new revision '158bcbd9d6cc08ab859c0810186c1beebc982aad'
   ```

 # Description

-This command updates the lock file of a flake (`flake.lock`) so that
-it contains a lock for every flake input specified in
-`flake.nix`. Note that every command that operates on a flake will
-also update the lock file if needed, and supports the same
-flags. Therefore,
+This command recreates the lock file of a flake (`flake.lock`), thus
+updating the lock for every mutable input (like `nixpkgs`) to its
+current version. This is equivalent to passing `--recreate-lock-file`
+to any command that operates on a flake. That is,

 ```console
-# nix flake update --update-input nixpkgs
+# nix flake update
 # nix build
 ```

 is equivalent to:

 ```console
-# nix build --update-input nixpkgs
+# nix build --recreate-lock-file
 ```

-Thus, this command is only useful if you want to update the lock file
-separately from any other action such as building.
-
-> **Note**
->
-> This command does *not* update locks that are already present unless
-> you explicitly ask for it using `--update-input` or
-> `--recreate-lock-file`. Thus, if the lock file already has locks for
-> every input, then `nix flake update` (without arguments) does
-> nothing.
-
 )""
@@ -104,6 +104,14 @@ struct CmdFlakeUpdate : FlakeCommand
         return "update flake lock file";
     }

+    CmdFlakeUpdate()
+    {
+        /* Remove flags that don't make sense. */
+        removeFlag("recreate-lock-file");
+        removeFlag("update-input");
+        removeFlag("no-update-lock-file");
+    }
+
     std::string doc() override
     {
         return
@@ -113,7 +121,30 @@ struct CmdFlakeUpdate : FlakeCommand

     void run(nix::ref<nix::Store> store) override
     {
-        /* Use --refresh by default for 'nix flake update'. */
+        settings.tarballTtl = 0;
+
+        lockFlags.recreateLockFile = true;
+
+        lockFlake();
+    }
+};
+
+struct CmdFlakeLock : FlakeCommand
+{
+    std::string description() override
+    {
+        return "create missing lock file entries";
+    }
+
+    std::string doc() override
+    {
+        return
+          #include "flake-lock.md"
+          ;
+    }
+
+    void run(nix::ref<nix::Store> store) override
+    {
         settings.tarballTtl = 0;

         lockFlake();
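Both `run()` bodies set `settings.tarballTtl = 0`, which is the effect the removed comment attributes to `--refresh`: cached tarballs and registry lookups are treated as stale, so inputs are re-fetched. Other flake commands presumably only get this behaviour when `--refresh` is passed explicitly; a hedged sketch with an illustrative flake reference:

```console
# nix build --refresh github:NixOS/nixpkgs#hello
```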
@@ -1006,6 +1037,7 @@ struct CmdFlake : NixMultiCommand
     CmdFlake()
         : MultiCommand({
                 {"update", []() { return make_ref<CmdFlakeUpdate>(); }},
+                {"lock", []() { return make_ref<CmdFlakeLock>(); }},
                 {"info", []() { return make_ref<CmdFlakeInfo>(); }},
                 {"list-inputs", []() { return make_ref<CmdFlakeListInputs>(); }},
                 {"check", []() { return make_ref<CmdFlakeCheck>(); }},
@@ -11,6 +11,7 @@ let
       args = ["sh" "-e" args.builder or (builtins.toFile "builder-${args.name}.sh" "if [ -e .attrs.sh ]; then source .attrs.sh; fi; eval \"$buildCommand\"")];
       outputHashMode = "recursive";
       outputHashAlgo = "sha256";
+      __contentAddressed = true;
     } // removeAttrs args ["builder" "meta"])
     // { meta = args.meta or {}; };

@@ -19,7 +20,6 @@ let
     name = "build-remote-input-1";
     buildCommand = "echo FOO > $out";
     requiredSystemFeatures = ["foo"];
-    outputHash = "sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc=";
   };

   input2 = mkDerivation {
@@ -27,7 +27,16 @@ let
     name = "build-remote-input-2";
     buildCommand = "echo BAR > $out";
     requiredSystemFeatures = ["bar"];
-    outputHash = "sha256-XArauVH91AVwP9hBBQNlkX9ccuPpSYx9o0zeIHb6e+Q=";
+  };
+
+  input3 = mkDerivation {
+    shell = busybox;
+    name = "build-remote-input-3";
+    buildCommand = ''
+      read x < ${input2}
+      echo $x BAZ > $out
+    '';
+    requiredSystemFeatures = ["baz"];
   };

 in
@@ -38,8 +47,7 @@ in
   buildCommand =
     ''
       read x < ${input1}
-      read y < ${input2}
+      read y < ${input3}
       echo "$x $y" > $out
     '';
-  outputHash = "sha256-3YGhlOfbGUm9hiPn2teXXTT8M1NEpDFvfXkxMaJRld0=";
 }
@@ -1,5 +0,0 @@
-source common.sh
-
-file=build-hook-ca.nix
-
-source build-remote.sh

tests/build-remote-content-addressed-floating.sh (new file, 7 lines)
@@ -0,0 +1,7 @@
+source common.sh
+
+file=build-hook-ca.nix
+
+sed -i 's/experimental-features .*/& ca-derivations/' "$NIX_CONF_DIR"/nix.conf
+
+source build-remote.sh
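The `sed` call appends `ca-derivations` to whatever `experimental-features` line the test harness already wrote into the test's `nix.conf`. A hedged before/after sketch; the pre-existing value shown is illustrative, not taken from this diff:

```console
$ grep experimental-features "$NIX_CONF_DIR"/nix.conf
experimental-features = nix-command flakes
$ sed -i 's/experimental-features .*/& ca-derivations/' "$NIX_CONF_DIR"/nix.conf
$ grep experimental-features "$NIX_CONF_DIR"/nix.conf
experimental-features = nix-command flakes ca-derivations
```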
@@ -232,7 +232,7 @@ nix build -o $TEST_ROOT/result --flake-registry file:///no-registry.json $flake2
 nix build -o $TEST_ROOT/result --no-registries $flake2Dir#bar --refresh

 # Updating the flake should not change the lockfile.
-nix flake update $flake2Dir
+nix flake lock $flake2Dir
 [[ -z $(git -C $flake2Dir diff master) ]]

 # Now we should be able to build the flake in pure mode.
@@ -354,10 +354,10 @@ nix build -o $TEST_ROOT/result flake3#xyzzy flake3#fnord
 nix build -o $TEST_ROOT/result flake4#xyzzy

 # Test 'nix flake update' and --override-flake.
-nix flake update $flake3Dir
+nix flake lock $flake3Dir
 [[ -z $(git -C $flake3Dir diff master) ]]

-nix flake update $flake3Dir --recreate-lock-file --override-flake flake2 nixpkgs
+nix flake update $flake3Dir --override-flake flake2 nixpkgs
 [[ ! -z $(git -C $flake3Dir diff master) ]]

 # Make branch "removeXyzzy" where flake3 doesn't have xyzzy anymore
@@ -389,7 +389,7 @@ cat > $flake3Dir/flake.nix <<EOF
   };
 }
 EOF
-nix flake update $flake3Dir
+nix flake lock $flake3Dir
 git -C $flake3Dir add flake.nix flake.lock
 git -C $flake3Dir commit -m 'Remove packages.xyzzy'
 git -C $flake3Dir checkout master
@@ -547,7 +547,7 @@ cat > $flake3Dir/flake.nix <<EOF
 }
 EOF

-nix flake update $flake3Dir
+nix flake lock $flake3Dir
 [[ $(jq -c .nodes.root.inputs.bar $flake3Dir/flake.lock) = '["foo"]' ]]

 cat > $flake3Dir/flake.nix <<EOF
@@ -559,7 +559,7 @@ cat > $flake3Dir/flake.nix <<EOF
 }
 EOF

-nix flake update $flake3Dir
+nix flake lock $flake3Dir
 [[ $(jq -c .nodes.root.inputs.bar $flake3Dir/flake.lock) = '["flake2","flake1"]' ]]

 cat > $flake3Dir/flake.nix <<EOF
@@ -571,7 +571,7 @@ cat > $flake3Dir/flake.nix <<EOF
 }
 EOF

-nix flake update $flake3Dir
+nix flake lock $flake3Dir
 [[ $(jq -c .nodes.root.inputs.bar $flake3Dir/flake.lock) = '["flake2"]' ]]

 # Test overriding inputs of inputs.
@@ -587,7 +587,7 @@ cat > $flake3Dir/flake.nix <<EOF
 }
 EOF

-nix flake update $flake3Dir
+nix flake lock $flake3Dir
 [[ $(jq .nodes.flake1.locked.url $flake3Dir/flake.lock) =~ flake7 ]]

 cat > $flake3Dir/flake.nix <<EOF
@@ -600,7 +600,7 @@ cat > $flake3Dir/flake.nix <<EOF
 }
 EOF

-nix flake update $flake3Dir --recreate-lock-file
+nix flake update $flake3Dir
 [[ $(jq -c .nodes.flake2.inputs.flake1 $flake3Dir/flake.lock) =~ '["foo"]' ]]
 [[ $(jq .nodes.foo.locked.url $flake3Dir/flake.lock) =~ flake7 ]]

@@ -658,20 +658,20 @@ nix build -o $TEST_ROOT/result "file://$TEST_ROOT/flake.tar.gz?narHash=sha256-qQ

 # Test --override-input.
 git -C $flake3Dir reset --hard
-nix flake update $flake3Dir --override-input flake2/flake1 flake5 -vvvvv
+nix flake lock $flake3Dir --override-input flake2/flake1 flake5 -vvvvv
 [[ $(jq .nodes.flake1_2.locked.url $flake3Dir/flake.lock) =~ flake5 ]]

-nix flake update $flake3Dir --override-input flake2/flake1 flake1
+nix flake lock $flake3Dir --override-input flake2/flake1 flake1
 [[ $(jq -r .nodes.flake1_2.locked.rev $flake3Dir/flake.lock) =~ $hash2 ]]

-nix flake update $flake3Dir --override-input flake2/flake1 flake1/master/$hash1
+nix flake lock $flake3Dir --override-input flake2/flake1 flake1/master/$hash1
 [[ $(jq -r .nodes.flake1_2.locked.rev $flake3Dir/flake.lock) =~ $hash1 ]]

 # Test --update-input.
-nix flake update $flake3Dir
+nix flake lock $flake3Dir
 [[ $(jq -r .nodes.flake1_2.locked.rev $flake3Dir/flake.lock) = $hash1 ]]

-nix flake update $flake3Dir --update-input flake2/flake1
+nix flake lock $flake3Dir --update-input flake2/flake1
 [[ $(jq -r .nodes.flake1_2.locked.rev $flake3Dir/flake.lock) =~ $hash2 ]]

 # Test 'nix flake list-inputs'.
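These assertions work because `flake.lock` is plain JSON, so `jq` can read back exactly which revision an input was pinned to after a `lock` or `update`. A hedged version of the same check outside the test harness (flake directory, input name, and output are placeholders):

```console
$ nix flake lock ./my-flake --update-input nixpkgs
$ jq -r .nodes.nixpkgs.locked.rev ./my-flake/flake.lock
<rev that nixpkgs was just locked to>
```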
@@ -17,6 +17,7 @@ nix_tests = \
   linux-sandbox.sh \
   build-dry.sh \
   build-remote-input-addressed.sh \
+  build-remote-content-addressed-floating.sh \
   ssh-relay.sh \
   nar-access.sh \
   structured-attrs.sh \
@@ -38,10 +39,10 @@ nix_tests = \
   describe-stores.sh \
   flakes.sh \
   content-addressed.sh \
+  nix-copy-content-addressed.sh \
   build.sh \
   compute-levels.sh
   # parallel.sh
-  # build-remote-content-addressed-fixed.sh \

 install-tests += $(foreach x, $(nix_tests), tests/$(x))

tests/nix-copy-content-addressed.sh (new executable file, 34 lines)
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+
+source common.sh
+
+# Globally enable the ca derivations experimental flag
+sed -i 's/experimental-features = .*/& ca-derivations ca-references/' "$NIX_CONF_DIR/nix.conf"
+
+export REMOTE_STORE_DIR="$TEST_ROOT/remote_store"
+export REMOTE_STORE="file://$REMOTE_STORE_DIR"
+
+ensureCorrectlyCopied () {
+    attrPath="$1"
+    nix build --store "$REMOTE_STORE" --file ./content-addressed.nix "$attrPath"
+}
+
+testOneCopy () {
+    clearStore
+    rm -rf "$REMOTE_STORE_DIR"
+
+    attrPath="$1"
+    nix copy --to $REMOTE_STORE "$attrPath" --file ./content-addressed.nix
+
+    ensureCorrectlyCopied "$attrPath"
+
+    # Ensure that we can copy back what we put in the store
+    clearStore
+    nix copy --from $REMOTE_STORE \
+        --file ./content-addressed.nix "$attrPath" \
+        --no-check-sigs
+}
+
+for attrPath in rootCA dependentCA transitivelyDependentCA dependentNonCA dependentFixedOutput; do
+    testOneCopy "$attrPath"
+done