#include "flake.hh"
#include "lockfile.hh"
#include "primops.hh"
#include "eval-inline.hh"
#include "primops/fetchGit.hh"
#include "download.hh"
#include "args.hh"

#include <iostream>
#include <queue>
#include <regex>
#include <ctime>
#include <iomanip>
#include <nlohmann/json.hpp>

namespace nix {

using namespace flake;

namespace flake {

/* Read a registry. */
std::shared_ptr<FlakeRegistry> readRegistry(const Path & path)
{
    auto registry = std::make_shared<FlakeRegistry>();

    if (!pathExists(path))
        return std::make_shared<FlakeRegistry>();

    auto json = nlohmann::json::parse(readFile(path));

    auto version = json.value("version", 0);
    if (version != 1)
        throw Error("flake registry '%s' has unsupported version %d", path, version);

    auto flakes = json["flakes"];
    for (auto i = flakes.begin(); i != flakes.end(); ++i)
        registry->entries.emplace(i.key(), FlakeRef(i->value("uri", "")));

    return registry;
}

/* Write a registry to a file. */
void writeRegistry(const FlakeRegistry & registry, const Path & path)
{
    nlohmann::json json;
    json["version"] = 1;
    for (auto elem : registry.entries)
        json["flakes"][elem.first.to_string()] = { {"uri", elem.second.to_string()} };
    createDirs(dirOf(path));
    writeFile(path, json.dump(4)); // The '4' is the number of spaces used for indentation in the JSON file.
}
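
/* For illustration only (the format is defined entirely by the two
   functions above): a version-1 registry file maps flake identifiers
   to URIs, e.g.

       {
         "version": 1,
         "flakes": {
           "nixpkgs": { "uri": "github:NixOS/nixpkgs" }
         }
       }

   where the "nixpkgs" entry is a hypothetical example. */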

Path getUserRegistryPath()
{
    return getHome() + "/.config/nix/registry.json";
}

std::shared_ptr<FlakeRegistry> getUserRegistry()
{
    return readRegistry(getUserRegistryPath());
}

std::shared_ptr<FlakeRegistry> getFlagRegistry(RegistryOverrides registryOverrides)
{
    auto flagRegistry = std::make_shared<FlakeRegistry>();
    for (auto const & x : registryOverrides) {
        flagRegistry->entries.insert_or_assign(FlakeRef(x.first), FlakeRef(x.second));
    }
    return flagRegistry;
}

static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, const Registries & registries,
    std::vector<FlakeRef> pastSearches = {});

FlakeRef updateFlakeRef(EvalState & state, const FlakeRef & newRef, const Registries & registries, std::vector<FlakeRef> pastSearches)
{
    std::string errorMsg = "found cycle in flake registries: ";
    for (FlakeRef oldRef : pastSearches) {
        errorMsg += oldRef.to_string();
        if (oldRef == newRef)
            throw Error(errorMsg);
        errorMsg += " - ";
    }
    pastSearches.push_back(newRef);
    return lookupFlake(state, newRef, registries, pastSearches);
}

static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, const Registries & registries,
    std::vector<FlakeRef> pastSearches)
{
    for (std::shared_ptr<FlakeRegistry> registry : registries) {
        auto i = registry->entries.find(flakeRef);
        if (i != registry->entries.end()) {
            auto newRef = i->second;
            return updateFlakeRef(state, newRef, registries, pastSearches);
        }

        auto j = registry->entries.find(flakeRef.baseRef());
        if (j != registry->entries.end()) {
            auto newRef = j->second;
            newRef.ref = flakeRef.ref;
            newRef.rev = flakeRef.rev;
            newRef.subdir = flakeRef.subdir;
            return updateFlakeRef(state, newRef, registries, pastSearches);
        }
    }

    if (!flakeRef.isDirect())
        throw Error("could not resolve flake reference '%s'", flakeRef);

    return flakeRef;
}
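
/* Illustrative note (the example names are hypothetical): lookupFlake
   repeatedly substitutes registry entries, so an indirect reference such
   as 'nixpkgs' may resolve to another registry entry before it finally
   resolves to a direct reference (e.g. a github: URI). updateFlakeRef
   records every intermediate reference in 'pastSearches' and throws as
   soon as one repeats, which breaks cycles between registries. */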

FlakeRef maybeLookupFlake(
    EvalState & state,
    const FlakeRef & flakeRef,
    bool allowLookup)
{
    if (!flakeRef.isDirect()) {
        if (allowLookup)
            return lookupFlake(state, flakeRef, state.getFlakeRegistries());
        else
            throw Error("'%s' is an indirect flake reference, but registry lookups are not allowed", flakeRef);
    } else
        return flakeRef;
}


static SourceInfo fetchFlake(EvalState & state, const FlakeRef & resolvedRef)
{
    assert(resolvedRef.isDirect());

    auto doGit = [&](const GitInfo & gitInfo) {
        FlakeRef ref(resolvedRef.baseRef());
        ref.ref = gitInfo.ref;
        ref.rev = gitInfo.rev;
        SourceInfo info(ref);
        info.storePath = gitInfo.storePath;
        info.revCount = gitInfo.revCount;
        info.narHash = state.store->queryPathInfo(info.storePath)->narHash;
        info.lastModified = gitInfo.lastModified;
        return info;
    };

    // This only downloads one revision of the repo, not the entire history.
    if (auto refData = std::get_if<FlakeRef::IsGitHub>(&resolvedRef.data)) {

        // FIXME: use regular /archive URLs instead? api.github.com
        // might have stricter rate limits.

        auto url = fmt("https://api.github.com/repos/%s/%s/tarball/%s",
            refData->owner, refData->repo,
            resolvedRef.rev ? resolvedRef.rev->to_string(Base16, false)
                : resolvedRef.ref ? *resolvedRef.ref : "master");

        std::string accessToken = settings.githubAccessToken.get();
        if (accessToken != "")
            url += "?access_token=" + accessToken;

        CachedDownloadRequest request(url);
        request.unpack = true;
        request.name = "source";
        request.ttl = resolvedRef.rev ? 1000000000 : settings.tarballTtl;
        request.getLastModified = true;
        auto result = getDownloader()->downloadCached(state.store, request);

        if (!result.etag)
            throw Error("did not receive an ETag header from '%s'", url);
        if (result.etag->size() != 42 || (*result.etag)[0] != '"' || (*result.etag)[41] != '"')
            throw Error("ETag header '%s' from '%s' is not a Git revision", *result.etag, url);

        FlakeRef ref(resolvedRef.baseRef());
        ref.rev = Hash(std::string(*result.etag, 1, result.etag->size() - 2), htSHA1);
        SourceInfo info(ref);
        info.storePath = result.storePath;
        info.narHash = state.store->queryPathInfo(info.storePath)->narHash;
        info.lastModified = result.lastModified;

        return info;
    }

    // This downloads the entire Git history.
    else if (auto refData = std::get_if<FlakeRef::IsGit>(&resolvedRef.data)) {
        return doGit(exportGit(state.store, refData->uri, resolvedRef.ref, resolvedRef.rev, "source"));
    }

    else if (auto refData = std::get_if<FlakeRef::IsPath>(&resolvedRef.data)) {
        if (!pathExists(refData->path + "/.git"))
            throw Error("flake '%s' does not reference a Git repository", refData->path);
        return doGit(exportGit(state.store, refData->path, {}, {}, "source"));
    }

    else abort();
}

Flake getFlake(EvalState & state, const FlakeRef & flakeRef)
{
    SourceInfo sourceInfo = fetchFlake(state, flakeRef);
    debug("got flake source '%s' with flakeref %s", sourceInfo.storePath, sourceInfo.resolvedRef.to_string());

    FlakeRef resolvedRef = sourceInfo.resolvedRef;

    state.store->assertStorePath(sourceInfo.storePath);

    if (state.allowedPaths)
        state.allowedPaths->insert(state.store->toRealPath(sourceInfo.storePath));

    // Guard against symlink attacks.
    Path flakeFile = canonPath(sourceInfo.storePath + "/" + resolvedRef.subdir + "/flake.nix");
    Path realFlakeFile = state.store->toRealPath(flakeFile);
    if (!isInDir(realFlakeFile, state.store->toRealPath(sourceInfo.storePath)))
        throw Error("'flake.nix' file of flake '%s' escapes from '%s'", resolvedRef, sourceInfo.storePath);

    Flake flake(flakeRef, sourceInfo);

    if (!pathExists(realFlakeFile))
        throw Error("source tree referenced by '%s' does not contain a '%s/flake.nix' file", resolvedRef, resolvedRef.subdir);

    Value vInfo;
    state.evalFile(realFlakeFile, vInfo); // FIXME: symlink attack

    state.forceAttrs(vInfo);

    auto sEpoch = state.symbols.create("epoch");

    if (auto epoch = vInfo.attrs->get(sEpoch)) {
        flake.epoch = state.forceInt(*(**epoch).value, *(**epoch).pos);
        if (flake.epoch < 201906)
            throw Error("flake '%s' has illegal epoch %d", flakeRef, flake.epoch);
        if (flake.epoch > 201906)
            throw Error("flake '%s' requires unsupported epoch %d; please upgrade Nix", flakeRef, flake.epoch);
    } else
        throw Error("flake '%s' lacks attribute 'epoch'", flakeRef);

    if (auto name = vInfo.attrs->get(state.sName))
        flake.id = state.forceStringNoCtx(*(**name).value, *(**name).pos);
    else
        throw Error("flake '%s' lacks attribute 'name'", flakeRef);

    if (auto description = vInfo.attrs->get(state.sDescription))
        flake.description = state.forceStringNoCtx(*(**description).value, *(**description).pos);

    auto sInputs = state.symbols.create("inputs");

    if (auto inputs = vInfo.attrs->get(sInputs)) {
        state.forceList(*(**inputs).value, *(**inputs).pos);
        for (unsigned int n = 0; n < (**inputs).value->listSize(); ++n)
            flake.inputs.push_back(FlakeRef(state.forceStringNoCtx(
                *(**inputs).value->listElems()[n], *(**inputs).pos)));
    }

    auto sNonFlakeInputs = state.symbols.create("nonFlakeInputs");

    if (std::optional<Attr *> nonFlakeInputs = vInfo.attrs->get(sNonFlakeInputs)) {
        state.forceAttrs(*(**nonFlakeInputs).value, *(**nonFlakeInputs).pos);
        for (Attr attr : *(*(**nonFlakeInputs).value).attrs) {
            std::string myNonFlakeUri = state.forceStringNoCtx(*attr.value, *attr.pos);
            FlakeRef nonFlakeRef = FlakeRef(myNonFlakeUri);
            flake.nonFlakeInputs.insert_or_assign(attr.name, nonFlakeRef);
        }
    }

    auto sOutputs = state.symbols.create("outputs");

    if (auto outputs = vInfo.attrs->get(sOutputs)) {
        state.forceFunction(*(**outputs).value, *(**outputs).pos);
        flake.vOutputs = (**outputs).value;
    } else
        throw Error("flake '%s' lacks attribute 'outputs'", flakeRef);

    for (auto & attr : *vInfo.attrs) {
        if (attr.name != sEpoch &&
            attr.name != state.sName &&
            attr.name != state.sDescription &&
            attr.name != sInputs &&
            attr.name != sNonFlakeInputs &&
            attr.name != sOutputs)
            throw Error("flake '%s' has an unsupported attribute '%s', at %s",
                flakeRef, attr.name, *attr.pos);
    }

    return flake;
}
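
/* Illustrative sketch (values are hypothetical) of a flake.nix that
   satisfies the checks in getFlake above: 'epoch' (currently 201906),
   'name' and 'outputs' are required; 'description', 'inputs' (a list of
   flake reference strings) and 'nonFlakeInputs' (an attrset mapping
   names to flake reference strings) are optional:

       {
         epoch = 201906;
         name = "hello";
         description = "A flake";
         inputs = [ "nixpkgs" ];
         nonFlakeInputs = { someRepo = "github:some-owner/some-repo"; };
         outputs = inputs: { };
       }
*/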

NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef)
{
    auto sourceInfo = fetchFlake(state, flakeRef);
    debug("got non-flake source '%s' with flakeref %s", sourceInfo.storePath, sourceInfo.resolvedRef.to_string());

    FlakeRef resolvedRef = sourceInfo.resolvedRef;

    NonFlake nonFlake(flakeRef, sourceInfo);

    state.store->assertStorePath(nonFlake.sourceInfo.storePath);

    if (state.allowedPaths)
        state.allowedPaths->insert(nonFlake.sourceInfo.storePath);

    return nonFlake;
}

bool allowedToWrite(HandleLockFile handle)
{
    return handle == UpdateLockFile || handle == RecreateLockFile;
}

bool recreateLockFile(HandleLockFile handle)
{
    return handle == RecreateLockFile || handle == UseNewLockFile;
}

bool allowedToUseRegistries(HandleLockFile handle, bool isTopRef)
{
    if (handle == AllPure) return false;
    else if (handle == TopRefUsesRegistries) return isTopRef;
    else if (handle == UpdateLockFile) return true;
    else if (handle == UseUpdatedLockFile) return true;
    else if (handle == RecreateLockFile) return true;
    else if (handle == UseNewLockFile) return true;
    else assert(false);
}
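
/* For reference, the HandleLockFile modes as implemented by the three
   predicates above:
     AllPure              - no registry lookups, never write the lockfile
     TopRefUsesRegistries - registries only for the top-level flake reference
     UpdateLockFile       - use registries and write an updated lockfile
     UseUpdatedLockFile   - use registries but don't write the lockfile
     RecreateLockFile     - recreate the lockfile from scratch and write it
     UseNewLockFile       - recreate the lockfile but don't write it */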

/* Given a flakeref and its subtree of the lockfile, return an updated
   subtree of the lockfile. That is, if the 'flake.nix' of the
   referenced flake has inputs that don't have a corresponding entry
   in the lockfile, they're added to the lockfile; conversely, any
   lockfile entries that don't have a corresponding entry in flake.nix
   are removed.

   Note that this is lazy: we only recursively fetch inputs that are
   not in the lockfile yet. */
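
/* (As used below, a FlakeInput lock entry records the flake's id, its
   resolved flake reference and its NAR hash, plus two maps, 'flakeInputs'
   and 'nonFlakeInputs', holding the lock entries of its own inputs; the
   lockfile is thus a tree that updateLocks rebuilds one level at a time.) */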
static std::pair<Flake, FlakeInput> updateLocks(
    EvalState & state,
    const Flake & flake,
    HandleLockFile handleLockFile,
    const FlakeInputs & oldEntry,
    bool topRef)
{
    FlakeInput newEntry(
        flake.id,
        flake.sourceInfo.resolvedRef,
        flake.sourceInfo.narHash);

    for (auto & input : flake.nonFlakeInputs) {
        auto & id = input.first;
        auto & ref = input.second;
        auto i = oldEntry.nonFlakeInputs.find(id);
        if (i != oldEntry.nonFlakeInputs.end()) {
            newEntry.nonFlakeInputs.insert_or_assign(i->first, i->second);
        } else {
            if (handleLockFile == AllPure || handleLockFile == TopRefUsesRegistries)
                throw Error("cannot update non-flake dependency '%s' in pure mode", id);
            auto nonFlake = getNonFlake(state, maybeLookupFlake(state, ref, allowedToUseRegistries(handleLockFile, false)));
            newEntry.nonFlakeInputs.insert_or_assign(id,
                NonFlakeInput(
                    nonFlake.sourceInfo.resolvedRef,
                    nonFlake.sourceInfo.narHash));
        }
    }

    for (auto & inputRef : flake.inputs) {
        auto i = oldEntry.flakeInputs.find(inputRef);
        if (i != oldEntry.flakeInputs.end()) {
            newEntry.flakeInputs.insert_or_assign(inputRef, i->second);
        } else {
            if (handleLockFile == AllPure || handleLockFile == TopRefUsesRegistries)
                throw Error("cannot update flake dependency '%s' in pure mode", inputRef);
            newEntry.flakeInputs.insert_or_assign(inputRef,
                updateLocks(state,
                    getFlake(state, maybeLookupFlake(state, inputRef, allowedToUseRegistries(handleLockFile, false))),
                    handleLockFile, {}, false).second);
        }
    }

    return {flake, newEntry};
}

/* Compute an in-memory lockfile for the specified top-level flake,
   and optionally write it to file, if the flake is writable. */
ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, HandleLockFile handleLockFile)
{
    auto flake = getFlake(state, maybeLookupFlake(state, topRef, allowedToUseRegistries(handleLockFile, true)));

    LockFile oldLockFile;

    if (!recreateLockFile(handleLockFile)) {
        // If recreateLockFile, start with an empty lockfile
        // FIXME: symlink attack
        oldLockFile = LockFile::read(
            state.store->toRealPath(flake.sourceInfo.storePath)
            + "/" + flake.sourceInfo.resolvedRef.subdir + "/flake.lock");
    }

    LockFile lockFile(updateLocks(
            state, flake, handleLockFile, oldLockFile, true).second);

    if (!(lockFile == oldLockFile)) {
        if (allowedToWrite(handleLockFile)) {
            if (auto refData = std::get_if<FlakeRef::IsPath>(&topRef.data)) {
                lockFile.write(refData->path + (topRef.subdir == "" ? "" : "/" + topRef.subdir) + "/flake.lock");

                // Hack: Make sure that flake.lock is visible to Git, so it ends up in the Nix store.
                runProgram("git", true, { "-C", refData->path, "add",
                    (topRef.subdir == "" ? "" : topRef.subdir + "/") + "flake.lock" });
            } else
                warn("cannot write lockfile of remote flake '%s'", topRef);
        } else if (handleLockFile != AllPure && handleLockFile != TopRefUsesRegistries)
            warn("using updated lockfile without writing it to file");
    }

    return ResolvedFlake(std::move(flake), std::move(lockFile));
}

void updateLockFile(EvalState & state, const FlakeRef & flakeRef, bool recreateLockFile)
{
    resolveFlake(state, flakeRef, recreateLockFile ? RecreateLockFile : UpdateLockFile);
}
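
/* Add the metadata attributes derived from a SourceInfo ('outPath' and,
   when available, 'rev', 'shortRev', 'revCount' and 'lastModified') to
   the attrset 'vAttrs'. */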
static void emitSourceInfoAttrs(EvalState & state, const SourceInfo & sourceInfo, Value & vAttrs)
{
    auto & path = sourceInfo.storePath;
    assert(state.store->isValidPath(path));
    mkString(*state.allocAttr(vAttrs, state.sOutPath), path, {path});

    if (sourceInfo.resolvedRef.rev) {
        mkString(*state.allocAttr(vAttrs, state.symbols.create("rev")),
            sourceInfo.resolvedRef.rev->gitRev());
        mkString(*state.allocAttr(vAttrs, state.symbols.create("shortRev")),
            sourceInfo.resolvedRef.rev->gitShortRev());
    }

    if (sourceInfo.revCount)
        mkInt(*state.allocAttr(vAttrs, state.symbols.create("revCount")), *sourceInfo.revCount);

    if (sourceInfo.lastModified)
        mkString(*state.allocAttr(vAttrs, state.symbols.create("lastModified")),
            fmt("%s",
                std::put_time(std::gmtime(&*sourceInfo.lastModified), "%Y%m%d%H%M%S")));
}

/* Helper primop to make callFlake (below) fetch/call its inputs
   lazily. Note that this primop cannot be called by user code since
   it doesn't appear in 'builtins'. */
static void prim_callFlake(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    auto lazyFlake = (FlakeInput *) args[0]->attrs;

    assert(lazyFlake->ref.isImmutable());

    auto flake = getFlake(state, lazyFlake->ref);

    if (flake.sourceInfo.narHash != lazyFlake->narHash)
        throw Error("the content hash of flake '%s' doesn't match the hash recorded in the referring lockfile", flake.sourceInfo.resolvedRef);

    callFlake(state, flake, *lazyFlake, v);
}

static void prim_callNonFlake(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    auto lazyNonFlake = (NonFlakeInput *) args[0]->attrs;

    assert(lazyNonFlake->ref.isImmutable());

    auto nonFlake = getNonFlake(state, lazyNonFlake->ref);

    if (nonFlake.sourceInfo.narHash != lazyNonFlake->narHash)
        throw Error("the content hash of repository '%s' doesn't match the hash recorded in the referring lockfile", nonFlake.sourceInfo.resolvedRef);

    state.mkAttrs(v, 8);

    assert(state.store->isValidPath(nonFlake.sourceInfo.storePath));

    mkString(*state.allocAttr(v, state.sOutPath),
        nonFlake.sourceInfo.storePath, {nonFlake.sourceInfo.storePath});

    emitSourceInfoAttrs(state, nonFlake.sourceInfo, v);
}

void callFlake(EvalState & state,
    const Flake & flake,
    const FlakeInputs & inputs,
    Value & vRes)
{
    // Construct the resulting attrset '{outputs, ...}'. This attrset
    // is passed lazily as an argument to the 'outputs' function.

    auto & v = *state.allocValue();

    state.mkAttrs(v,
        inputs.flakeInputs.size() +
        inputs.nonFlakeInputs.size() + 8);

    for (auto & dep : inputs.flakeInputs) {
        auto vFlake = state.allocAttr(v, dep.second.id);
        auto vPrimOp = state.allocValue();
        static auto primOp = new PrimOp(prim_callFlake, 1, state.symbols.create("callFlake"));
        vPrimOp->type = tPrimOp;
        vPrimOp->primOp = primOp;
        auto vArg = state.allocValue();
        vArg->type = tNull;
        // FIXME: leak
        vArg->attrs = (Bindings *) new FlakeInput(dep.second); // evil! also inefficient
        mkApp(*vFlake, *vPrimOp, *vArg);
    }

    for (auto & dep : inputs.nonFlakeInputs) {
        auto vNonFlake = state.allocAttr(v, dep.first);
        auto vPrimOp = state.allocValue();
        static auto primOp = new PrimOp(prim_callNonFlake, 1, state.symbols.create("callNonFlake"));
        vPrimOp->type = tPrimOp;
        vPrimOp->primOp = primOp;
        auto vArg = state.allocValue();
        vArg->type = tNull;
        // FIXME: leak
        vArg->attrs = (Bindings *) new NonFlakeInput(dep.second); // evil! also inefficient
        mkApp(*vNonFlake, *vPrimOp, *vArg);
    }

    mkString(*state.allocAttr(v, state.sDescription), flake.description);

    emitSourceInfoAttrs(state, flake.sourceInfo, v);

    auto vOutputs = state.allocAttr(v, state.symbols.create("outputs"));
    mkApp(*vOutputs, *flake.vOutputs, v);

    v.attrs->push_back(Attr(state.symbols.create("self"), &v));

    v.attrs->sort();

    /* For convenience, put the outputs directly in the result, so you
       can refer to an output of an input as 'inputs.foo.bar' rather
       than 'inputs.foo.outputs.bar'. */
    auto v2 = *state.allocValue();
    state.eval(state.parseExprFromString("res: res.outputs // res", "/"), v2);

    state.callFunction(v2, v, vRes, noPos);
}

void callFlake(EvalState & state,
    const ResolvedFlake & resFlake,
    Value & v)
{
    callFlake(state, resFlake.flake, resFlake.lockFile, v);
}

// This function is exposed to be used from Nix expressions.
static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    callFlake(state, resolveFlake(state, state.forceStringNoCtx(*args[0], pos),
        evalSettings.pureEval ? AllPure : UseUpdatedLockFile), v);
}

static RegisterPrimOp r2("getFlake", 1, prim_getFlake);
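
/* Illustrative usage from a Nix expression (the flake reference
   "nixpkgs" is just a hypothetical example):

       builtins.getFlake "nixpkgs"

   The argument is a flake reference string. The result is the flake's
   outputs merged with its metadata attributes (see callFlake above). In
   pure evaluation mode the lockfile has to be complete already (AllPure);
   otherwise an updated lockfile is used in memory without being written
   (UseUpdatedLockFile). */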

void gitCloneFlake(FlakeRef flakeRef, EvalState & state, Registries registries, const Path & destDir)
{
    flakeRef = lookupFlake(state, flakeRef, registries);

    std::string uri;

    Strings args = {"clone"};

    if (auto refData = std::get_if<FlakeRef::IsGitHub>(&flakeRef.data)) {
        uri = "git@github.com:" + refData->owner + "/" + refData->repo + ".git";
        args.push_back(uri);
        if (flakeRef.ref) {
            args.push_back("--branch");
            args.push_back(*flakeRef.ref);
        }
    } else if (auto refData = std::get_if<FlakeRef::IsGit>(&flakeRef.data)) {
        args.push_back(refData->uri);
        if (flakeRef.ref) {
            args.push_back("--branch");
            args.push_back(*flakeRef.ref);
        }
    }

    if (destDir != "")
        args.push_back(destDir);

    runProgram("git", true, args);
}

}

std::shared_ptr<flake::FlakeRegistry> EvalState::getGlobalFlakeRegistry()
{
    std::call_once(_globalFlakeRegistryInit, [&]() {
        auto path = evalSettings.flakeRegistry;

        if (!hasPrefix(path, "/")) {
            CachedDownloadRequest request(evalSettings.flakeRegistry);
            request.name = "flake-registry.json";
            request.gcRoot = true;
            path = getDownloader()->downloadCached(store, request).path;
        }

        _globalFlakeRegistry = readRegistry(path);
    });

    return _globalFlakeRegistry;
}

// This always returns a vector of three registries: the flag registry, the
// user registry and the global registry, in that order. If one of them
// doesn't exist on disk, an empty registry is returned in its place.
const Registries EvalState::getFlakeRegistries()
{
    Registries registries;
    registries.push_back(getFlagRegistry(registryOverrides));
    registries.push_back(getUserRegistry());
    registries.push_back(getGlobalFlakeRegistry());
    return registries;
}

Fingerprint ResolvedFlake::getFingerprint() const
{
    // FIXME: as an optimization, if the flake contains a lockfile and
    // we haven't changed it, then it's sufficient to use
    // flake.sourceInfo.storePath for the fingerprint.
    return hashString(htSHA256,
        fmt("%s;%s", flake.sourceInfo.storePath, lockFile));
}

}