Merge pull request #10088 from edolstra/flake-source-path

Use SourcePath for reading flake.{nix,lock}

Commit eaa6c26189
6 changed files with 77 additions and 63 deletions
@@ -21,6 +21,7 @@
 #include "url.hh"
 #include "registry.hh"
 #include "build-result.hh"
+#include "fs-input-accessor.hh"

 #include <regex>
 #include <queue>

@@ -146,7 +147,7 @@ MixFlakeOptions::MixFlakeOptions()
         .category = category,
         .labels = {"flake-lock-path"},
         .handler = {[&](std::string lockFilePath) {
-            lockFlags.referenceLockFilePath = lockFilePath;
+            lockFlags.referenceLockFilePath = getUnfilteredRootPath(CanonPath(absPath(lockFilePath)));
         }},
         .completer = completePath
     });
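A sketch of what the new handler above does with the user-supplied path. The wrapper function below is illustrative only, not part of the change; getUnfilteredRootPath comes from the fs-input-accessor.hh include added in the first hunk, and the other helpers are assumed to behave as they are used in this diff.

    // Illustrative sketch only, not code from this commit.
    void setReferenceLockFile(flake::LockFlags & lockFlags, std::string lockFilePath)
    {
        // absPath() resolves the CLI argument against the current directory,
        // CanonPath normalises it, and getUnfilteredRootPath() wraps the result
        // in a SourcePath on the plain filesystem, matching the new type of
        // LockFlags::referenceLockFilePath (std::optional<SourcePath>, see the
        // flake.hh hunk further down).
        lockFlags.referenceLockFilePath = getUnfilteredRootPath(CanonPath(absPath(lockFilePath)));
    }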
@@ -139,7 +139,7 @@ static FlakeInput parseFlakeInput(EvalState & state,
                 attrs.emplace(state.symbols[attr.name], Explicit<bool> { attr.value->boolean });
                 break;
             case nInt:
-                attrs.emplace(state.symbols[attr.name], (long unsigned int)attr.value->integer);
+                attrs.emplace(state.symbols[attr.name], (long unsigned int) attr.value->integer);
                 break;
             default:
                 if (attr.name == state.symbols.create("publicKeys")) {
@@ -202,43 +202,28 @@ static std::map<FlakeId, FlakeInput> parseFlakeInputs(
     return inputs;
 }

-static Flake getFlake(
+static Flake readFlake(
     EvalState & state,
     const FlakeRef & originalRef,
-    bool allowLookup,
-    FlakeCache & flakeCache,
-    InputPath lockRootPath)
+    const FlakeRef & resolvedRef,
+    const FlakeRef & lockedRef,
+    const SourcePath & rootDir,
+    const InputPath & lockRootPath)
 {
-    auto [storePath, resolvedRef, lockedRef] = fetchOrSubstituteTree(
-        state, originalRef, allowLookup, flakeCache);
+    auto flakePath = rootDir / CanonPath(resolvedRef.subdir) / "flake.nix";

-    // We need to guard against symlink attacks, but before we start doing
-    // filesystem operations we should make sure there's a flake.nix in the
-    // first place.
-    auto unsafeFlakeDir = state.store->toRealPath(storePath) + "/" + lockedRef.subdir;
-    auto unsafeFlakeFile = unsafeFlakeDir + "/flake.nix";
-    if (!pathExists(unsafeFlakeFile))
-        throw Error("source tree referenced by '%s' does not contain a '%s/flake.nix' file", lockedRef, lockedRef.subdir);
+    Value vInfo;
+    state.evalFile(flakePath, vInfo, true);

-    // Guard against symlink attacks.
-    auto flakeDir = canonPath(unsafeFlakeDir, true);
-    auto flakeFile = canonPath(flakeDir + "/flake.nix", true);
-    if (!isInDir(flakeFile, state.store->toRealPath(storePath)))
-        throw Error("'flake.nix' file of flake '%s' escapes from '%s'",
-            lockedRef, state.store->printStorePath(storePath));
+    expectType(state, nAttrs, vInfo, state.positions.add(Pos::Origin(rootDir), 1, 1));

     Flake flake {
         .originalRef = originalRef,
         .resolvedRef = resolvedRef,
         .lockedRef = lockedRef,
-        .storePath = storePath,
+        .path = flakePath,
     };

-    Value vInfo;
-    state.evalFile(state.rootPath(CanonPath(flakeFile)), vInfo, true); // FIXME: symlink attack
-
-    expectType(state, nAttrs, vInfo, state.positions.add({state.rootPath(CanonPath(flakeFile))}, 1, 1));
-
     if (auto description = vInfo.attrs->get(state.sDescription)) {
         expectType(state, nString, *description->value, description->pos);
         flake.description = description->value->c_str();
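For orientation, a hedged sketch of the path handling that replaces the old pathExists/canonPath/isInDir sequence. The subdir value is invented; the exact accessor semantics are whatever SourcePath provides.

    // rootDir is the SourcePath of the fetched tree (see the new getFlake() below).
    // operator/ composes paths inside the accessor, so no real-path
    // canonicalisation happens at this point.
    auto flakePath = rootDir / CanonPath(resolvedRef.subdir) / "flake.nix";
    // e.g. a subdir of "modules/foo" (invented) gives <rootDir>/modules/foo/flake.nix.
    // evalFile() then reads flake.nix through that same accessor, and error
    // positions are anchored at Pos::Origin(rootDir), line 1, column 1.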
@@ -247,7 +232,7 @@ static Flake getFlake(
     auto sInputs = state.symbols.create("inputs");

     if (auto inputs = vInfo.attrs->get(sInputs))
-        flake.inputs = parseFlakeInputs(state, inputs->value, inputs->pos, flakeDir, lockRootPath);
+        flake.inputs = parseFlakeInputs(state, inputs->value, inputs->pos, flakePath.parent().path.abs(), lockRootPath); // FIXME

     auto sOutputs = state.symbols.create("outputs");

@@ -264,7 +249,7 @@ static Flake getFlake(
         }

     } else
-        throw Error("flake '%s' lacks attribute 'outputs'", lockedRef);
+        throw Error("flake '%s' lacks attribute 'outputs'", resolvedRef);

     auto sNixConfig = state.symbols.create("nixConfig");

@@ -281,7 +266,7 @@ static Flake getFlake(
                 NixStringContext emptyContext = {};
                 flake.config.settings.emplace(
                     state.symbols[setting.name],
-                    state.coerceToString(setting.pos, *setting.value, emptyContext, "", false, true, true) .toOwned());
+                    state.coerceToString(setting.pos, *setting.value, emptyContext, "", false, true, true).toOwned());
             }
             else if (setting.value->type() == nInt)
                 flake.config.settings.emplace(
@@ -313,12 +298,25 @@ static Flake getFlake(
             attr.name != sOutputs &&
             attr.name != sNixConfig)
             throw Error("flake '%s' has an unsupported attribute '%s', at %s",
-                lockedRef, state.symbols[attr.name], state.positions[attr.pos]);
+                resolvedRef, state.symbols[attr.name], state.positions[attr.pos]);
     }

     return flake;
 }

+static Flake getFlake(
+    EvalState & state,
+    const FlakeRef & originalRef,
+    bool allowLookup,
+    FlakeCache & flakeCache,
+    InputPath lockRootPath)
+{
+    auto [storePath, resolvedRef, lockedRef] = fetchOrSubstituteTree(
+        state, originalRef, allowLookup, flakeCache);
+
+    return readFlake(state, originalRef, resolvedRef, lockedRef, state.rootPath(state.store->toRealPath(storePath)), lockRootPath);
+}
+
 Flake getFlake(EvalState & state, const FlakeRef & originalRef, bool allowLookup, FlakeCache & flakeCache)
 {
     return getFlake(state, originalRef, allowLookup, flakeCache, {});
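The net effect of the two functions above is a fetch/parse split: fetchOrSubstituteTree() realises the flake source, state.rootPath() turns the realised path into a SourcePath, and readFlake() does all parsing against that SourcePath. A hypothetical helper inside flake.cc could therefore reuse readFlake() for a tree that is already on disk; the sketch below is written under that assumption and is not an API added by this commit.

    // Hypothetical: parse a flake from an existing directory, skipping the fetch step.
    static Flake readFlakeFromDir(EvalState & state, const FlakeRef & ref, const Path & dir)
    {
        // state.rootPath(dir) yields a SourcePath for the directory, exactly as
        // getFlake() does above for state.store->toRealPath(storePath).
        return readFlake(state, ref, ref, ref, state.rootPath(dir), {});
    }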
@@ -330,6 +328,13 @@ Flake getFlake(EvalState & state, const FlakeRef & originalRef, bool allowLookup
     return getFlake(state, originalRef, allowLookup, flakeCache);
 }

+static LockFile readLockFile(const SourcePath & lockFilePath)
+{
+    return lockFilePath.pathExists()
+        ? LockFile(lockFilePath.readFile(), fmt("%s", lockFilePath))
+        : LockFile();
+}
+
 /* Compute an in-memory lock file for the specified top-level flake,
    and optionally write it to file, if the flake is writable. */
 LockedFlake lockFlake(
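A short note on the helper just added, restated from its definition; the call shown below anticipates the lockFlake() hunk that follows.

    // readLockFile() goes through the SourcePath accessor for both the existence
    // check and the read; the fmt("%s", lockFilePath) string is only a name for
    // diagnostics, e.g. the unsupported-version error in the lockfile.cc hunk
    // further down. A missing flake.lock yields a default-constructed, empty LockFile.
    auto oldLockFile = readLockFile(flake.lockFilePath());   // flake.lock next to flake.nix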
@@ -355,17 +360,16 @@ LockedFlake lockFlake(
            throw Error("reference lock file was provided, but the `allow-dirty` setting is set to false");
        }

-    auto oldLockFile = LockFile::read(
-        // FIXME: symlink attack
+    auto oldLockFile = readLockFile(
         lockFlags.referenceLockFilePath.value_or(
-            state.store->toRealPath(flake.storePath) + "/" + flake.lockedRef.subdir + "/flake.lock"));
+            flake.lockFilePath()));

     debug("old lock file: %s", oldLockFile);

     std::map<InputPath, FlakeInput> overrides;
     std::set<InputPath> explicitCliOverrides;
     std::set<InputPath> overridesUsed, updatesUsed;
-    std::map<ref<Node>, StorePath> nodePaths;
+    std::map<ref<Node>, SourcePath> nodePaths;

     for (auto & i : lockFlags.inputOverrides) {
         overrides.insert_or_assign(i.first, FlakeInput { .ref = i.second });

@@ -538,7 +542,7 @@ LockedFlake lockFlake(

                 if (mustRefetch) {
                     auto inputFlake = getFlake(state, oldLock->lockedRef, false, flakeCache, inputPath);
-                    nodePaths.emplace(childNode, inputFlake.storePath);
+                    nodePaths.emplace(childNode, inputFlake.path.parent());
                     computeLocks(inputFlake.inputs, childNode, inputPath, oldLock, lockRootPath, parentPath, false);
                 } else {
                     computeLocks(fakeInputs, childNode, inputPath, oldLock, lockRootPath, parentPath, true);
@@ -587,13 +591,12 @@ LockedFlake lockFlake(
                        flake. Also, unless we already have this flake
                        in the top-level lock file, use this flake's
                        own lock file. */
-                    nodePaths.emplace(childNode, inputFlake.storePath);
+                    nodePaths.emplace(childNode, inputFlake.path.parent());
                     computeLocks(
                         inputFlake.inputs, childNode, inputPath,
                         oldLock
                         ? std::dynamic_pointer_cast<const Node>(oldLock)
-                        : LockFile::read(
-                            state.store->toRealPath(inputFlake.storePath) + "/" + inputFlake.lockedRef.subdir + "/flake.lock").root.get_ptr(),
+                        : readLockFile(inputFlake.lockFilePath()).root.get_ptr(),
                         oldLock ? lockRootPath : inputPath,
                         localPath,
                         false);

@@ -605,7 +608,7 @@ LockedFlake lockFlake(

                     auto childNode = make_ref<LockedNode>(lockedRef, ref, false);

-                    nodePaths.emplace(childNode, storePath);
+                    nodePaths.emplace(childNode, state.rootPath(state.store->toRealPath(storePath)));

                     node->inputs.insert_or_assign(id, childNode);
                 }

@@ -619,9 +622,9 @@ LockedFlake lockFlake(
         };

         // Bring in the current ref for relative path resolution if we have it
-        auto parentPath = canonPath(state.store->toRealPath(flake.storePath) + "/" + flake.lockedRef.subdir, true);
+        auto parentPath = flake.path.parent().path.abs();

-        nodePaths.emplace(newLockFile.root, flake.storePath);
+        nodePaths.emplace(newLockFile.root, flake.path.parent());

         computeLocks(
             flake.inputs,
@@ -746,13 +749,15 @@ void callFlake(EvalState & state,

     auto overrides = state.buildBindings(lockedFlake.nodePaths.size());

-    for (auto & [node, storePath] : lockedFlake.nodePaths) {
+    for (auto & [node, sourcePath] : lockedFlake.nodePaths) {
         auto override = state.buildBindings(2);

         auto & vSourceInfo = override.alloc(state.symbols.create("sourceInfo"));

         auto lockedNode = node.dynamic_pointer_cast<const LockedNode>();

+        auto [storePath, subdir] = state.store->toStorePath(sourcePath.path.abs());
+
         emitTreeAttrs(
             state,
             storePath,

@@ -766,7 +771,7 @@ void callFlake(EvalState & state,

         override
             .alloc(state.symbols.create("dir"))
-            .mkString(lockedNode ? lockedNode->lockedRef.subdir : lockedFlake.flake.lockedRef.subdir);
+            .mkString(CanonPath(subdir).rel());

         overrides.alloc(state.symbols.create(key->second)).mkAttrs(override);
     }
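Why the two callFlake() changes above fit together, as a sketch; the example values are invented and the exact shape of toStorePath()'s second element is not asserted here.

    // Every SourcePath in nodePaths currently points into the Nix store, so
    // Store::toStorePath() can split the absolute path into the store path and
    // the remainder inside it, e.g. (invented values):
    //   sourcePath.path.abs()   "/nix/store/<hash>-source/sub"
    //   toStorePath(...)        -> the store path plus the part after it ("sub")
    // CanonPath(subdir).rel() normalises that remainder and renders it without a
    // leading slash, which is the form the "dir" attribute expects; previously
    // the value came from lockedRef.subdir instead.
    auto [storePath, subdir] = state.store->toStorePath(sourcePath.path.abs());
    override.alloc(state.symbols.create("dir")).mkString(CanonPath(subdir).rel());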
@@ -928,7 +933,7 @@ Fingerprint LockedFlake::getFingerprint() const
     // flake.sourceInfo.storePath for the fingerprint.
     return hashString(HashAlgorithm::SHA256,
         fmt("%s;%s;%d;%d;%s",
-            flake.storePath.to_string(),
+            flake.path.to_string(),
             flake.lockedRef.subdir,
             flake.lockedRef.input.getRevCount().value_or(0),
             flake.lockedRef.input.getLastModified().value_or(0),
@@ -77,18 +77,27 @@ struct Flake
      * the specific local store result of invoking the fetcher
      */
     FlakeRef lockedRef;
+    /**
+     * The path of `flake.nix`.
+     */
+    SourcePath path;
     /**
      * pretend that 'lockedRef' is dirty
      */
     bool forceDirty = false;
     std::optional<std::string> description;
-    StorePath storePath;
     FlakeInputs inputs;
     /**
      * 'nixConfig' attribute
      */
     ConfigFile config;

     ~Flake();
+
+    SourcePath lockFilePath()
+    {
+        return path.parent() / "flake.lock";
+    }
 };
+
 Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool allowLookup);
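The new lockFilePath() accessor above is the glue used by readLockFile() and lockFlake() in flake.cc. A sketch of what it yields; the path shapes shown are schematic.

    // Given flake.path == <root>/<subdir>/flake.nix (as set in readFlake()):
    SourcePath lock = flake.lockFilePath();   // == flake.path.parent() / "flake.lock"
                                              // i.e. <root>/<subdir>/flake.lock
    bool hasLock = lock.pathExists();         // resolved through the same accessor as flake.nix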
@@ -104,11 +113,11 @@ struct LockedFlake
     LockFile lockFile;

     /**
-     * Store paths of nodes that have been fetched in
+     * Source tree accessors for nodes that have been fetched in
      * lockFlake(); in particular, the root node and the overriden
      * inputs.
      */
-    std::map<ref<Node>, StorePath> nodePaths;
+    std::map<ref<Node>, SourcePath> nodePaths;

     Fingerprint getFingerprint() const;
 };

@@ -165,7 +174,7 @@ struct LockFlags
     /**
      * The path to a lock file to read instead of the `flake.lock` file in the top-level flake
      */
-    std::optional<std::string> referenceLockFilePath;
+    std::optional<SourcePath> referenceLockFilePath;

     /**
      * The path to a lock file to write to instead of the `flake.lock` file in the top-level flake
@@ -84,8 +84,10 @@ std::shared_ptr<Node> LockFile::findInput(const InputPath & path)
     return doFind(root, path, visited);
 }

-LockFile::LockFile(const nlohmann::json & json, const Path & path)
+LockFile::LockFile(std::string_view contents, std::string_view path)
 {
+    auto json = nlohmann::json::parse(contents);
+
     auto version = json.value("version", 0);
     if (version < 5 || version > 7)
         throw Error("lock file '%s' has unsupported version %d", path, version);
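The constructor now receives the raw contents plus a name that is only used in error messages. A standalone illustration of that contract follows; it is not Nix code, the function name is invented, and only the version bounds are copied from the hunk above.

    #include <nlohmann/json.hpp>
    #include <stdexcept>
    #include <string>
    #include <string_view>

    // Parse lock-file contents handed over by the caller; 'name' is only for diagnostics.
    static nlohmann::json parseLockFileContents(std::string_view contents, std::string_view name)
    {
        auto json = nlohmann::json::parse(contents);
        auto version = json.value("version", 0);
        if (version < 5 || version > 7)
            throw std::runtime_error("lock file '" + std::string(name) +
                "' has unsupported version " + std::to_string(version));
        return json;
    }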
@@ -203,12 +205,6 @@ std::pair<std::string, LockFile::KeyMap> LockFile::to_string() const
     return {json.dump(2), std::move(nodeKeys)};
 }

-LockFile LockFile::read(const Path & path)
-{
-    if (!pathExists(path)) return LockFile();
-    return LockFile(nlohmann::json::parse(readFile(path)), path);
-}
-
 std::ostream & operator <<(std::ostream & stream, const LockFile & lockFile)
 {
     stream << lockFile.toJSON().first.dump(2);

@@ -55,7 +55,7 @@ struct LockFile
     ref<Node> root = make_ref<Node>();

     LockFile() {};
-    LockFile(const nlohmann::json & json, const Path & path);
+    LockFile(std::string_view contents, std::string_view path);

     typedef std::map<ref<const Node>, std::string> KeyMap;

@@ -63,8 +63,6 @@ struct LockFile

     std::pair<std::string, KeyMap> to_string() const;

-    static LockFile read(const Path & path);
-
     /**
      * Check whether this lock file has any unlocked inputs. If so,
      * return one.
@@ -207,6 +207,9 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON
         auto lockedFlake = lockFlake();
         auto & flake = lockedFlake.flake;

+        // Currently, all flakes are in the Nix store via the rootFS accessor.
+        auto storePath = store->printStorePath(store->toStorePath(flake.path.path.abs()).first);
+
         if (json) {
             nlohmann::json j;
             if (flake.description)
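An annotated restatement of the recovery step added above; the intermediate names are for illustration only.

    // flake.path is the SourcePath of flake.nix; per the comment above it
    // currently always lives in the Nix store via the rootFS accessor.
    auto abs = flake.path.path.abs();            // absolute path as a string
    auto [sp, rest] = store->toStorePath(abs);   // split off the store path; the part
                                                 // inside it is discarded, so "path"
                                                 // still reports the whole source tree,
                                                 // as it did with flake.storePath before.
    auto storePath = store->printStorePath(sp);  // rendered for j["path"] / "Path:" below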
@@ -225,7 +228,7 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON
                 j["revCount"] = *revCount;
             if (auto lastModified = flake.lockedRef.input.getLastModified())
                 j["lastModified"] = *lastModified;
-            j["path"] = store->printStorePath(flake.storePath);
+            j["path"] = storePath;
             j["locks"] = lockedFlake.lockFile.toJSON().first;
             logger->cout("%s", j.dump());
         } else {

@@ -241,7 +244,7 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON
                     *flake.description);
             logger->cout(
                 ANSI_BOLD "Path:" ANSI_NORMAL " %s",
-                store->printStorePath(flake.storePath));
+                storePath);
             if (auto rev = flake.lockedRef.input.getRev())
                 logger->cout(
                     ANSI_BOLD "Revision:" ANSI_NORMAL " %s",

@@ -1033,7 +1036,9 @@ struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun

         StorePathSet sources;

-        sources.insert(flake.flake.storePath);
+        auto storePath = store->toStorePath(flake.flake.path.path.abs()).first;
+
+        sources.insert(storePath);

         // FIXME: use graph output, handle cycles.
         std::function<nlohmann::json(const Node & node)> traverse;
@@ -1062,7 +1067,7 @@ struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun

         if (json) {
             nlohmann::json jsonRoot = {
-                {"path", store->printStorePath(flake.flake.storePath)},
+                {"path", store->printStorePath(storePath)},
                 {"inputs", traverse(*flake.lockFile.root)},
             };
             logger->cout("%s", jsonRoot);