2019-02-12 19:23:11 +02:00
|
|
|
#include "flake.hh"
|
2018-11-29 20:18:36 +02:00
|
|
|
#include "primops.hh"
|
|
|
|
#include "eval-inline.hh"
|
|
|
|
#include "fetchGit.hh"
|
|
|
|
#include "download.hh"
|
2019-02-21 07:53:01 +02:00
|
|
|
#include "args.hh"
|
2018-11-29 20:18:36 +02:00
|
|
|
|
2019-02-21 07:53:01 +02:00
|
|
|
#include <iostream>
|
2018-11-29 20:18:36 +02:00
|
|
|
#include <queue>
|
2018-11-30 17:11:15 +02:00
|
|
|
#include <regex>
|
2019-05-28 21:34:02 +03:00
|
|
|
#include <ctime>
|
|
|
|
#include <iomanip>
|
2018-11-29 20:18:36 +02:00
|
|
|
#include <nlohmann/json.hpp>
|
|
|
|
|
|
|
|
namespace nix {
|
|
|
|
|
2019-05-29 16:31:07 +03:00
|
|
|
using namespace flake;
|
|
|
|
|
|
|
|
namespace flake {
|
|
|
|
|
2019-04-15 15:08:18 +03:00
|
|
|
/* Read a registry. */
|
2019-03-21 10:30:16 +02:00
|
|
|
std::shared_ptr<FlakeRegistry> readRegistry(const Path & path)
|
2019-02-12 23:43:22 +02:00
|
|
|
{
|
2019-03-21 10:30:16 +02:00
|
|
|
auto registry = std::make_shared<FlakeRegistry>();
|
2019-02-12 23:43:22 +02:00
|
|
|
|
2019-03-21 10:30:16 +02:00
|
|
|
if (!pathExists(path))
|
|
|
|
return std::make_shared<FlakeRegistry>();
|
2019-03-26 13:48:57 +02:00
|
|
|
|
2019-03-21 10:30:16 +02:00
|
|
|
auto json = nlohmann::json::parse(readFile(path));
|
2019-03-26 13:48:57 +02:00
|
|
|
|
2019-03-21 10:30:16 +02:00
|
|
|
auto version = json.value("version", 0);
|
|
|
|
if (version != 1)
|
|
|
|
throw Error("flake registry '%s' has unsupported version %d", path, version);
|
|
|
|
|
|
|
|
auto flakes = json["flakes"];
|
2019-04-08 20:03:00 +03:00
|
|
|
for (auto i = flakes.begin(); i != flakes.end(); ++i)
|
|
|
|
registry->entries.emplace(i.key(), FlakeRef(i->value("uri", "")));
|
2019-02-12 23:43:22 +02:00
|
|
|
|
|
|
|
return registry;
|
|
|
|
}
|
|
|
|
|
2019-04-15 15:08:18 +03:00
|
|
|
/* Write a registry to a file. */
|
2019-04-16 15:27:54 +03:00
|
|
|
void writeRegistry(const FlakeRegistry & registry, const Path & path)
|
2019-02-25 14:46:37 +02:00
|
|
|
{
|
2019-03-29 17:18:25 +02:00
|
|
|
nlohmann::json json;
|
2019-02-21 07:53:01 +02:00
|
|
|
json["version"] = 1;
|
2019-04-08 20:03:00 +03:00
|
|
|
for (auto elem : registry.entries)
|
|
|
|
json["flakes"][elem.first.to_string()] = { {"uri", elem.second.to_string()} };
|
2019-03-26 13:48:57 +02:00
|
|
|
createDirs(dirOf(path));
|
2019-02-25 14:46:37 +02:00
|
|
|
writeFile(path, json.dump(4)); // The '4' is the number of spaces used in the indentation in the json file.
|
|
|
|
}
|
|
|
|
|
2019-03-29 17:18:25 +02:00
|
|
|
/* Reconstruct a LockFile::FlakeEntry from its JSON representation,
   recursing into "requires" for transitive flake dependencies. Every
   ref appearing in a lock file must be immutable. */
LockFile::FlakeEntry readFlakeEntry(nlohmann::json json)
{
    FlakeRef flakeRef(json["uri"]);
    if (!flakeRef.isImmutable())
        throw Error("cannot use mutable flake '%s' in pure mode", flakeRef);

    LockFile::FlakeEntry entry(flakeRef, Hash((std::string) json["contentHash"]));

    // Non-flake dependencies: plain sources pinned by uri + content hash.
    auto nonFlakeRequires = json["nonFlakeRequires"];

    for (auto i = nonFlakeRequires.begin(); i != nonFlakeRequires.end(); ++i) {
        FlakeRef flakeRef(i->value("uri", ""));
        if (!flakeRef.isImmutable())
            throw Error("requested to fetch FlakeRef '%s' purely, which is mutable", flakeRef);
        LockFile::NonFlakeEntry nonEntry(flakeRef, Hash(i->value("contentHash", "")));
        entry.nonFlakeEntries.insert_or_assign(i.key(), nonEntry);
    }

    // Flake dependencies: recurse to read their own sub-entries.
    auto requires = json["requires"];

    for (auto i = requires.begin(); i != requires.end(); ++i)
        entry.flakeEntries.insert_or_assign(i.key(), readFlakeEntry(*i));

    return entry;
}
|
|
|
|
|
|
|
|
/* Read a lock file from 'path'. A missing file yields an empty lock
   file. Throws on an unsupported schema version or on any mutable ref
   (a lock file must pin everything). */
LockFile readLockFile(const Path & path)
{
    LockFile lockFile;

    if (!pathExists(path))
        return lockFile;

    auto json = nlohmann::json::parse(readFile(path));

    auto version = json.value("version", 0);
    if (version != 1)
        throw Error("lock file '%s' has unsupported version %d", path, version);

    // Top-level non-flake dependencies, pinned by uri + content hash.
    auto nonFlakeRequires = json["nonFlakeRequires"];

    for (auto i = nonFlakeRequires.begin(); i != nonFlakeRequires.end(); ++i) {
        FlakeRef flakeRef(i->value("uri", ""));
        LockFile::NonFlakeEntry nonEntry(flakeRef, Hash(i->value("contentHash", "")));
        if (!flakeRef.isImmutable())
            throw Error("found mutable FlakeRef '%s' in lockfile at path %s", flakeRef, path);
        lockFile.nonFlakeEntries.insert_or_assign(i.key(), nonEntry);
    }

    // Top-level flake dependencies; readFlakeEntry recurses for their deps.
    auto requires = json["requires"];

    for (auto i = requires.begin(); i != requires.end(); ++i)
        lockFile.flakeEntries.insert_or_assign(i.key(), readFlakeEntry(*i));

    return lockFile;
}
|
|
|
|
|
2019-04-16 15:27:54 +03:00
|
|
|
/* Serialise a single lock-file entry (and, recursively, its
   dependencies) to JSON. Inverse of readFlakeEntry(). */
nlohmann::json flakeEntryToJson(const LockFile::FlakeEntry & entry)
{
    nlohmann::json json;
    json["uri"] = entry.ref.to_string();
    json["contentHash"] = entry.narHash.to_string(SRI);

    for (const auto & [alias, nonEntry] : entry.nonFlakeEntries) {
        auto & slot = json["nonFlakeRequires"][alias];
        slot["uri"] = nonEntry.ref.to_string();
        slot["contentHash"] = nonEntry.narHash.to_string(SRI);
    }

    for (const auto & [subRef, subEntry] : entry.flakeEntries)
        json["requires"][subRef.to_string()] = flakeEntryToJson(subEntry);

    return json;
}
|
|
|
|
|
2019-04-16 15:27:54 +03:00
|
|
|
void writeLockFile(const LockFile & lockFile, const Path & path)
|
2019-03-29 17:18:25 +02:00
|
|
|
{
|
|
|
|
nlohmann::json json;
|
|
|
|
json["version"] = 1;
|
2019-04-16 15:23:10 +03:00
|
|
|
json["nonFlakeRequires"] = nlohmann::json::object();
|
2019-05-01 18:01:03 +03:00
|
|
|
for (auto & x : lockFile.nonFlakeEntries) {
|
|
|
|
json["nonFlakeRequires"][x.first]["uri"] = x.second.ref.to_string();
|
2019-05-28 14:08:40 +03:00
|
|
|
json["nonFlakeRequires"][x.first]["contentHash"] = x.second.narHash.to_string(SRI);
|
2019-05-01 18:01:03 +03:00
|
|
|
}
|
2019-04-16 15:23:10 +03:00
|
|
|
json["requires"] = nlohmann::json::object();
|
2019-03-29 17:18:25 +02:00
|
|
|
for (auto & x : lockFile.flakeEntries)
|
2019-04-16 17:18:47 +03:00
|
|
|
json["requires"][x.first.to_string()] = flakeEntryToJson(x.second);
|
2019-03-29 17:18:25 +02:00
|
|
|
createDirs(dirOf(path));
|
2019-05-08 19:20:35 +03:00
|
|
|
writeFile(path, json.dump(4) + "\n"); // '4' = indentation in json file
|
2019-03-29 17:18:25 +02:00
|
|
|
}
|
|
|
|
|
2019-04-08 20:03:00 +03:00
|
|
|
/* Location of the per-user flake registry file. */
Path getUserRegistryPath()
{
    Path home = getHome();
    return home + "/.config/nix/registry.json";
}
|
|
|
|
|
2019-04-08 20:03:00 +03:00
|
|
|
std::shared_ptr<FlakeRegistry> getUserRegistry()
|
2019-03-21 10:30:16 +02:00
|
|
|
{
|
2019-04-08 20:03:00 +03:00
|
|
|
return readRegistry(getUserRegistryPath());
|
2019-03-21 10:30:16 +02:00
|
|
|
}
|
|
|
|
|
2019-03-21 10:30:16 +02:00
|
|
|
/* Build a registry out of the command-line '--override-flake'
   mappings (from-ref -> to-ref). */
std::shared_ptr<FlakeRegistry> getFlagRegistry(RegistryOverrides registryOverrides)
{
    auto flagRegistry = std::make_shared<FlakeRegistry>();
    for (const auto & [fromRef, toRef] : registryOverrides)
        flagRegistry->entries.insert_or_assign(FlakeRef(fromRef), FlakeRef(toRef));
    return flagRegistry;
}
|
2018-11-29 20:18:36 +02:00
|
|
|
|
2019-03-21 10:30:16 +02:00
|
|
|
static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, const Registries & registries,
|
2019-04-30 13:47:15 +03:00
|
|
|
std::vector<FlakeRef> pastSearches = {});
|
|
|
|
|
|
|
|
/* Follow one registry redirection to 'newRef', detecting cycles: if
   'newRef' was already visited, throw an error describing the chain
   of refs seen so far. */
FlakeRef updateFlakeRef(EvalState & state, const FlakeRef & newRef, const Registries & registries, std::vector<FlakeRef> pastSearches)
{
    // Build the diagnostic incrementally so that, on a cycle, it lists
    // every ref visited up to and including the repeated one.
    std::string chain = "found cycle in flake registries: ";
    for (const FlakeRef & visited : pastSearches) {
        chain += visited.to_string();
        if (visited == newRef)
            throw Error(chain);
        chain += " - ";
    }
    pastSearches.push_back(newRef);
    return lookupFlake(state, newRef, registries, pastSearches);
}
|
|
|
|
|
|
|
|
/* Resolve an indirect flake reference (e.g. an alias like 'nixpkgs')
   to a direct one by looking it up in the given registries, following
   chains of redirections. 'pastSearches' records the refs already
   visited so updateFlakeRef() can detect cycles. */
static FlakeRef lookupFlake(EvalState & state, const FlakeRef & flakeRef, const Registries & registries,
    std::vector<FlakeRef> pastSearches)
{
    if (registries.empty() && !flakeRef.isDirect())
        throw Error("indirect flake reference '%s' is not allowed", flakeRef);

    for (std::shared_ptr<FlakeRegistry> registry : registries) {
        // First try an exact match on the full flake reference.
        auto i = registry->entries.find(flakeRef);
        if (i != registry->entries.end()) {
            auto newRef = i->second;
            return updateFlakeRef(state, newRef, registries, pastSearches);
        }

        // Otherwise match on the base reference (without ref/rev) and
        // carry the requested ref/rev over onto the redirection target.
        auto j = registry->entries.find(flakeRef.baseRef());
        if (j != registry->entries.end()) {
            auto newRef = j->second;
            newRef.ref = flakeRef.ref;
            newRef.rev = flakeRef.rev;
            return updateFlakeRef(state, newRef, registries, pastSearches);
        }
    }

    // No registry matched: only direct refs can stand on their own.
    if (!flakeRef.isDirect())
        throw Error("could not resolve flake reference '%s'", flakeRef);

    return flakeRef;
}
|
|
|
|
|
2019-05-01 12:38:48 +03:00
|
|
|
// Lookups happen here too
/* Fetch the source tree denoted by 'flakeRef' into the store, after
   resolving it through the registries (only when 'impureIsAllowed').
   Returns a SourceInfo carrying the store path, the fully-resolved
   (pinned) ref, and source metadata. */
static SourceInfo fetchFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowed = false)
{
    FlakeRef resolvedRef = lookupFlake(state, flakeRef,
        impureIsAllowed ? state.getFlakeRegistries() : std::vector<std::shared_ptr<FlakeRegistry>>());

    if (evalSettings.pureEval && !impureIsAllowed && !resolvedRef.isImmutable())
        throw Error("requested to fetch mutable flake '%s' in pure mode", resolvedRef);

    // Shared post-processing for Git-based fetches: pin the base ref to
    // the fetched ref/rev and collect metadata from the fetched tree.
    auto doGit = [&](const GitInfo & gitInfo) {
        FlakeRef ref(resolvedRef.baseRef());
        ref.ref = gitInfo.ref;
        ref.rev = gitInfo.rev;
        SourceInfo info(ref);
        info.storePath = gitInfo.storePath;
        info.revCount = gitInfo.revCount;
        info.narHash = state.store->queryPathInfo(info.storePath)->narHash;
        info.lastModified = gitInfo.lastModified;
        return info;
    };

    // This only downloads only one revision of the repo, not the entire history.
    if (auto refData = std::get_if<FlakeRef::IsGitHub>(&resolvedRef.data)) {

        // FIXME: use regular /archive URLs instead? api.github.com
        // might have stricter rate limits.

        auto url = fmt("https://api.github.com/repos/%s/%s/tarball/%s",
            refData->owner, refData->repo,
            resolvedRef.rev ? resolvedRef.rev->to_string(Base16, false)
                : resolvedRef.ref ? *resolvedRef.ref : "master");

        // NOTE(review): sending the token as a query parameter can leak
        // it via logs/proxies; GitHub recommends the Authorization
        // header instead — confirm before changing, as it alters the URL
        // used as the download-cache key.
        std::string accessToken = settings.githubAccessToken.get();
        if (accessToken != "")
            url += "?access_token=" + accessToken;

        CachedDownloadRequest request(url);
        request.unpack = true;
        request.name = "source";
        // A pinned revision never changes, so cache it (nearly) forever;
        // otherwise use the regular tarball TTL.
        request.ttl = resolvedRef.rev ? 1000000000 : settings.tarballTtl;
        request.getLastModified = true;
        auto result = getDownloader()->downloadCached(state.store, request);

        if (!result.etag)
            throw Error("did not receive an ETag header from '%s'", url);

        // GitHub's ETag for a tarball is the quoted 40-char commit hash.
        if (result.etag->size() != 42 || (*result.etag)[0] != '"' || (*result.etag)[41] != '"')
            throw Error("ETag header '%s' from '%s' is not a Git revision", *result.etag, url);

        FlakeRef ref(resolvedRef.baseRef());
        // Strip the surrounding quotes to recover the revision.
        ref.rev = Hash(std::string(*result.etag, 1, result.etag->size() - 2), htSHA1);
        SourceInfo info(ref);
        info.storePath = result.storePath;
        info.narHash = state.store->queryPathInfo(info.storePath)->narHash;
        info.lastModified = result.lastModified;

        return info;
    }

    // This downloads the entire git history
    else if (auto refData = std::get_if<FlakeRef::IsGit>(&resolvedRef.data)) {
        return doGit(exportGit(state.store, refData->uri, resolvedRef.ref, resolvedRef.rev, "source"));
    }

    // Local path: must be a Git checkout; fetch its work tree/HEAD.
    else if (auto refData = std::get_if<FlakeRef::IsPath>(&resolvedRef.data)) {
        if (!pathExists(refData->path + "/.git"))
            throw Error("flake '%s' does not reference a Git repository", refData->path);
        return doGit(exportGit(state.store, refData->path, {}, {}, "source"));
    }

    // All FlakeRef variants are handled above.
    else abort();
}
|
|
|
|
|
2019-05-01 12:38:48 +03:00
|
|
|
// This will return the flake which corresponds to a given FlakeRef. The lookupFlake is done within `fetchFlake`, which is used here.
/* Fetch the flake's source, locate and evaluate its flake.nix, and
   populate a Flake object (id, description, requires,
   nonFlakeRequires, provides). Throws if mandatory attributes are
   missing or unknown attributes are present. */
Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool impureIsAllowed = false)
{
    SourceInfo sourceInfo = fetchFlake(state, flakeRef, impureIsAllowed);
    debug("got flake source '%s' with flakeref %s", sourceInfo.storePath, sourceInfo.resolvedRef.to_string());

    FlakeRef resolvedRef = sourceInfo.resolvedRef;

    state.store->assertStorePath(sourceInfo.storePath);

    // Allow restricted/pure evaluation to read the fetched tree.
    if (state.allowedPaths)
        state.allowedPaths->insert(state.store->toRealPath(sourceInfo.storePath));

    // Guard against symlink attacks.
    Path flakeFile = canonPath(sourceInfo.storePath + "/" + resolvedRef.subdir + "/flake.nix");
    Path realFlakeFile = state.store->toRealPath(flakeFile);
    if (!isInDir(realFlakeFile, state.store->toRealPath(sourceInfo.storePath)))
        throw Error("'flake.nix' file of flake '%s' escapes from '%s'", resolvedRef, sourceInfo.storePath);

    Flake flake(flakeRef, sourceInfo);

    if (!pathExists(realFlakeFile))
        throw Error("source tree referenced by '%s' does not contain a '%s/flake.nix' file", resolvedRef, resolvedRef.subdir);

    Value vInfo;
    state.evalFile(realFlakeFile, vInfo); // FIXME: symlink attack

    state.forceAttrs(vInfo);

    // 'epoch' is mandatory and bounds the flake format version we understand.
    auto sEpoch = state.symbols.create("epoch");

    if (auto epoch = vInfo.attrs->get(sEpoch)) {
        flake.epoch = state.forceInt(*(**epoch).value, *(**epoch).pos);
        if (flake.epoch > 2019)
            throw Error("flake '%s' requires unsupported epoch %d; please upgrade Nix", flakeRef, flake.epoch);
    } else
        throw Error("flake '%s' lacks attribute 'epoch'", flakeRef);

    // 'name' is mandatory and becomes the flake id.
    if (auto name = vInfo.attrs->get(state.sName))
        flake.id = state.forceStringNoCtx(*(**name).value, *(**name).pos);
    else
        throw Error("flake '%s' lacks attribute 'name'", flakeRef);

    // 'description' is optional.
    if (auto description = vInfo.attrs->get(state.sDescription))
        flake.description = state.forceStringNoCtx(*(**description).value, *(**description).pos);

    // 'requires': a list of flake reference strings.
    auto sRequires = state.symbols.create("requires");

    if (auto requires = vInfo.attrs->get(sRequires)) {
        state.forceList(*(**requires).value, *(**requires).pos);
        for (unsigned int n = 0; n < (**requires).value->listSize(); ++n)
            flake.requires.push_back(FlakeRef(state.forceStringNoCtx(
                *(**requires).value->listElems()[n], *(**requires).pos)));
    }

    // 'nonFlakeRequires': an attrset mapping aliases to source URIs.
    auto sNonFlakeRequires = state.symbols.create("nonFlakeRequires");

    if (std::optional<Attr *> nonFlakeRequires = vInfo.attrs->get(sNonFlakeRequires)) {
        state.forceAttrs(*(**nonFlakeRequires).value, *(**nonFlakeRequires).pos);
        for (Attr attr : *(*(**nonFlakeRequires).value).attrs) {
            std::string myNonFlakeUri = state.forceStringNoCtx(*attr.value, *attr.pos);
            FlakeRef nonFlakeRef = FlakeRef(myNonFlakeUri);
            flake.nonFlakeRequires.insert_or_assign(attr.name, nonFlakeRef);
        }
    }

    // 'provides': a mandatory function, kept unapplied for laziness.
    auto sProvides = state.symbols.create("provides");

    if (auto provides = vInfo.attrs->get(sProvides)) {
        state.forceFunction(*(**provides).value, *(**provides).pos);
        flake.vProvides = (**provides).value;
    } else
        throw Error("flake '%s' lacks attribute 'provides'", flakeRef);

    // Reject any attribute we don't know about, to catch typos early.
    for (auto & attr : *vInfo.attrs) {
        if (attr.name != sEpoch &&
            attr.name != state.sName &&
            attr.name != state.sDescription &&
            attr.name != sRequires &&
            attr.name != sNonFlakeRequires &&
            attr.name != sProvides)
            throw Error("flake '%s' has an unsupported attribute '%s', at %s",
                flakeRef, attr.name, *attr.pos);
    }

    return flake;
}
|
|
|
|
|
2019-03-21 10:30:16 +02:00
|
|
|
// Get the `NonFlake` corresponding to a `FlakeRef`.
|
2019-05-28 11:51:45 +03:00
|
|
|
NonFlake getNonFlake(EvalState & state, const FlakeRef & flakeRef, FlakeAlias alias, bool impureIsAllowed = false)
|
2019-03-21 10:30:16 +02:00
|
|
|
{
|
2019-05-28 13:58:28 +03:00
|
|
|
auto sourceInfo = fetchFlake(state, flakeRef, impureIsAllowed);
|
2019-05-01 12:38:48 +03:00
|
|
|
debug("got non-flake source '%s' with flakeref %s", sourceInfo.storePath, sourceInfo.resolvedRef.to_string());
|
2019-03-21 10:30:16 +02:00
|
|
|
|
2019-05-01 12:38:48 +03:00
|
|
|
FlakeRef resolvedRef = sourceInfo.resolvedRef;
|
2019-03-21 10:30:16 +02:00
|
|
|
|
2019-05-01 12:38:48 +03:00
|
|
|
NonFlake nonFlake(flakeRef, sourceInfo);
|
2019-03-21 10:30:16 +02:00
|
|
|
|
2019-05-28 13:58:28 +03:00
|
|
|
state.store->assertStorePath(nonFlake.sourceInfo.storePath);
|
2019-03-21 10:30:16 +02:00
|
|
|
|
2019-05-01 12:38:48 +03:00
|
|
|
if (state.allowedPaths)
|
2019-05-28 13:58:28 +03:00
|
|
|
state.allowedPaths->insert(nonFlake.sourceInfo.storePath);
|
2019-03-21 10:30:16 +02:00
|
|
|
|
2019-03-21 10:30:16 +02:00
|
|
|
nonFlake.alias = alias;
|
2019-03-21 10:30:16 +02:00
|
|
|
|
|
|
|
return nonFlake;
|
|
|
|
}
|
|
|
|
|
2019-05-01 12:38:48 +03:00
|
|
|
/* Turn a flake entry into a LockFile describing just that entry's
   dependencies (used to propagate locks down the dependency tree). */
LockFile entryToLockFile(const LockFile::FlakeEntry & entry)
{
    LockFile result;
    result.nonFlakeEntries = entry.nonFlakeEntries;
    result.flakeEntries = entry.flakeEntries;
    return result;
}
|
2019-04-16 17:18:47 +03:00
|
|
|
|
2019-05-14 12:34:45 +03:00
|
|
|
/* Convert a resolved flake (and, recursively, its dependency tree)
   into the lock-file entry that pins it. */
LockFile::FlakeEntry dependenciesToFlakeEntry(const ResolvedFlake & resolvedFlake)
{
    const auto & srcInfo = resolvedFlake.flake.sourceInfo;
    LockFile::FlakeEntry entry(srcInfo.resolvedRef, srcInfo.narHash);

    for (const auto & [depRef, depFlake] : resolvedFlake.flakeDeps)
        entry.flakeEntries.insert_or_assign(depRef.to_string(), dependenciesToFlakeEntry(depFlake));

    for (const auto & nonFlake : resolvedFlake.nonFlakeDeps) {
        LockFile::NonFlakeEntry nonEntry(
            nonFlake.sourceInfo.resolvedRef,
            nonFlake.sourceInfo.narHash);
        entry.nonFlakeEntries.insert_or_assign(nonFlake.alias, nonEntry);
    }

    return entry;
}
|
|
|
|
|
2019-05-21 16:03:54 +03:00
|
|
|
/* Whether this lock-file handling mode permits writing the lock file
   back to disk. */
bool allowedToWrite(HandleLockFile handle)
{
    switch (handle) {
        case UpdateLockFile:
        case RecreateLockFile:
            return true;
        default:
            return false;
    }
}
|
|
|
|
|
2019-05-21 16:03:54 +03:00
|
|
|
/* Whether this mode ignores the existing lock file and rebuilds it
   from scratch. */
bool recreateLockFile(HandleLockFile handle)
{
    switch (handle) {
        case RecreateLockFile:
        case UseNewLockFile:
            return true;
        default:
            return false;
    }
}
|
|
|
|
|
2019-05-21 16:03:54 +03:00
|
|
|
/* Whether registry lookups are permitted under this lock-file
   handling mode. 'isTopRef' distinguishes the root flake, which
   TopRefUsesRegistries allows to use the registries while its
   dependencies may not. */
bool allowedToUseRegistries(HandleLockFile handle, bool isTopRef)
{
    switch (handle) {
        case AllPure: return false;
        case TopRefUsesRegistries: return isTopRef;
        case UpdateLockFile: return true;
        case UseUpdatedLockFile: return true;
        case RecreateLockFile: return true;
        case UseNewLockFile: return true;
        // The old 'else assert(false);' vanished under NDEBUG, letting
        // control fall off the end of the function (undefined behavior).
        default: abort();
    }
}
|
|
|
|
|
2019-05-01 12:38:48 +03:00
|
|
|
/* Resolve 'flakeRef' and its dependency tree, reusing entries from
   'lockFile' where available (verifying content hashes) and fetching
   fresh otherwise — the latter only when 'handleLockFile' permits
   updates. 'topRef' is true only for the root flake. */
ResolvedFlake resolveFlakeFromLockFile(EvalState & state, const FlakeRef & flakeRef,
    HandleLockFile handleLockFile, LockFile lockFile = {}, bool topRef = false)
{
    Flake flake = getFlake(state, flakeRef, allowedToUseRegistries(handleLockFile, topRef));

    ResolvedFlake deps(flake);

    for (auto & nonFlakeInfo : flake.nonFlakeRequires) {
        // NOTE(review): 'ref' appears unused below — confirm and remove.
        FlakeRef ref = nonFlakeInfo.second;
        auto i = lockFile.nonFlakeEntries.find(nonFlakeInfo.first);
        if (i != lockFile.nonFlakeEntries.end()) {
            // Locked: refetch the pinned ref and verify its content hash.
            NonFlake nonFlake = getNonFlake(state, i->second.ref, nonFlakeInfo.first);
            if (nonFlake.sourceInfo.narHash != i->second.narHash)
                throw Error("the content hash of flakeref '%s' doesn't match", i->second.ref.to_string());
            deps.nonFlakeDeps.push_back(nonFlake);
        } else {
            // Not locked: fetching anew is an update, forbidden in pure modes.
            if (handleLockFile == AllPure || handleLockFile == TopRefUsesRegistries)
                throw Error("cannot update non-flake dependency '%s' in pure mode", nonFlakeInfo.first);
            deps.nonFlakeDeps.push_back(getNonFlake(state, nonFlakeInfo.second, nonFlakeInfo.first, allowedToUseRegistries(handleLockFile, false)));
        }
    }

    for (auto newFlakeRef : flake.requires) {
        auto i = lockFile.flakeEntries.find(newFlakeRef);
        if (i != lockFile.flakeEntries.end()) { // Propagate lockFile downwards if possible
            ResolvedFlake newResFlake = resolveFlakeFromLockFile(state, i->second.ref, handleLockFile, entryToLockFile(i->second));
            if (newResFlake.flake.sourceInfo.narHash != i->second.narHash)
                throw Error("the content hash of flakeref '%s' doesn't match", i->second.ref.to_string());
            deps.flakeDeps.insert_or_assign(newFlakeRef, newResFlake);
        } else {
            if (handleLockFile == AllPure || handleLockFile == TopRefUsesRegistries)
                throw Error("cannot update flake dependency '%s' in pure mode", newFlakeRef.to_string());
            deps.flakeDeps.insert_or_assign(newFlakeRef, resolveFlakeFromLockFile(state, newFlakeRef, handleLockFile));
        }
    }

    return deps;
}
|
2019-02-12 22:55:43 +02:00
|
|
|
|
2019-05-01 12:38:48 +03:00
|
|
|
/* Given a flake reference, recursively fetch it and its dependencies.
   FIXME: this should return a graph of flakes.
*/
ResolvedFlake resolveFlake(EvalState & state, const FlakeRef & topRef, HandleLockFile handleLockFile)
{
    Flake flake = getFlake(state, topRef, allowedToUseRegistries(handleLockFile, true));
    LockFile oldLockFile;

    if (!recreateLockFile (handleLockFile)) {
        // If recreateLockFile, start with an empty lockfile
        oldLockFile = readLockFile(flake.sourceInfo.storePath + "/flake.lock"); // FIXME: symlink attack
    }

    LockFile lockFile(oldLockFile);

    // Resolve the whole tree, then rebuild the lock file from the result.
    ResolvedFlake resFlake = resolveFlakeFromLockFile(state, topRef, handleLockFile, lockFile, true);
    lockFile = entryToLockFile(dependenciesToFlakeEntry(resFlake));

    // If resolution changed the lock file, write it back where allowed,
    // otherwise warn (silently ignored in the fully-pure modes).
    if (!(lockFile == oldLockFile)) {
        if (allowedToWrite(handleLockFile)) {
            if (auto refData = std::get_if<FlakeRef::IsPath>(&topRef.data)) {
                writeLockFile(lockFile, refData->path + (topRef.subdir == "" ? "" : "/" + topRef.subdir) + "/flake.lock");

                // Hack: Make sure that flake.lock is visible to Git, so it ends up in the Nix store.
                runProgram("git", true, { "-C", refData->path, "add",
                    (topRef.subdir == "" ? "" : topRef.subdir + "/") + "flake.lock" });
            } else
                warn("cannot write lockfile of remote flake '%s'", topRef);
        } else if (handleLockFile != AllPure && handleLockFile != TopRefUsesRegistries)
            warn("using updated lockfile without writing it to file");
    }

    return resFlake;
}
|
|
|
|
|
2019-05-16 23:48:16 +03:00
|
|
|
/* Update (or, if requested, recreate from scratch) the lock file of
   the flake denoted by 'flakeRef'. */
void updateLockFile(EvalState & state, const FlakeRef & flakeRef, bool recreateLockFile)
{
    auto mode = recreateLockFile ? RecreateLockFile : UpdateLockFile;
    resolveFlake(state, flakeRef, mode);
}
|
|
|
|
|
2019-05-28 15:01:08 +03:00
|
|
|
/* Add the source metadata of 'sourceInfo' (outPath, rev, shortRev,
   revCount, lastModified — the latter ones only when present) as
   attributes of 'vAttrs'. */
static void emitSourceInfoAttrs(EvalState & state, const SourceInfo & sourceInfo, Value & vAttrs)
{
    auto & path = sourceInfo.storePath;
    // NOTE(review): the boolean result of isValidPath() is discarded,
    // so nothing is enforced here — confirm whether this is meant as a
    // side effect or should throw when the path is invalid.
    state.store->isValidPath(path);
    mkString(*state.allocAttr(vAttrs, state.sOutPath), path, {path});

    if (sourceInfo.resolvedRef.rev) {
        mkString(*state.allocAttr(vAttrs, state.symbols.create("rev")),
            sourceInfo.resolvedRef.rev->gitRev());
        mkString(*state.allocAttr(vAttrs, state.symbols.create("shortRev")),
            sourceInfo.resolvedRef.rev->gitShortRev());
    }

    if (sourceInfo.revCount)
        mkInt(*state.allocAttr(vAttrs, state.symbols.create("revCount")), *sourceInfo.revCount);

    // Format the commit time as a UTC YYYYMMDDHHMMSS string.
    if (sourceInfo.lastModified)
        mkString(*state.allocAttr(vAttrs, state.symbols.create("lastModified")),
            fmt("%s",
                std::put_time(std::gmtime(&*sourceInfo.lastModified), "%Y%m%d%H%M%S")));
}
|
|
|
|
|
2019-04-19 15:23:35 +03:00
|
|
|
/* Build the attrset '{description, provides, <deps>, <metadata>,
   self, ...}' for a resolved flake in 'v'. The attrset is passed
   lazily as the argument to the flake's 'provides' function. */
void callFlake(EvalState & state, const ResolvedFlake & resFlake, Value & v)
{
    // Construct the resulting attrset '{description, provides,
    // ...}'. This attrset is passed lazily as an argument to 'provides'.

    state.mkAttrs(v, resFlake.flakeDeps.size() + resFlake.nonFlakeDeps.size() + 8);

    // Iterate by reference: ResolvedFlake is a recursive structure and
    // the old by-value loop deep-copied it on every iteration.
    for (const auto & dep : resFlake.flakeDeps) {
        const ResolvedFlake & newResFlake = dep.second;
        auto vFlake = state.allocAttr(v, newResFlake.flake.id);
        callFlake(state, newResFlake, *vFlake);
    }

    for (const NonFlake & nonFlake : resFlake.nonFlakeDeps) {
        auto vNonFlake = state.allocAttr(v, nonFlake.alias);
        state.mkAttrs(*vNonFlake, 8);

        state.store->isValidPath(nonFlake.sourceInfo.storePath);
        mkString(*state.allocAttr(*vNonFlake, state.sOutPath),
            nonFlake.sourceInfo.storePath, {nonFlake.sourceInfo.storePath});

        emitSourceInfoAttrs(state, nonFlake.sourceInfo, *vNonFlake);
    }

    mkString(*state.allocAttr(v, state.sDescription), resFlake.flake.description);

    emitSourceInfoAttrs(state, resFlake.flake.sourceInfo, v);

    // 'provides' is applied lazily to the attrset under construction.
    auto vProvides = state.allocAttr(v, state.symbols.create("provides"));
    mkApp(*vProvides, *resFlake.flake.vProvides, v);

    // Make the flake available to itself as 'self'.
    v.attrs->push_back(Attr(state.symbols.create("self"), &v));

    v.attrs->sort();
}
|
2019-02-12 22:55:43 +02:00
|
|
|
|
2019-03-29 17:18:25 +02:00
|
|
|
// This function is exposed to be used in nix files.
|
2019-02-12 22:55:43 +02:00
|
|
|
static void prim_getFlake(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
|
|
|
{
|
2019-05-29 16:44:48 +03:00
|
|
|
callFlake(state, resolveFlake(state, state.forceStringNoCtx(*args[0], pos),
|
|
|
|
evalSettings.pureEval ? AllPure : UseUpdatedLockFile), v);
|
2018-11-29 20:18:36 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static RegisterPrimOp r2("getFlake", 1, prim_getFlake);
|
|
|
|
|
2019-05-16 23:48:16 +03:00
|
|
|
/* Clone the Git repository behind 'flakeRef' into 'destDir' (or the
   default clone directory when 'destDir' is empty), after resolving
   the ref through the registries. */
void gitCloneFlake(FlakeRef flakeRef, EvalState & state, Registries registries, const Path & destDir)
{
    flakeRef = lookupFlake(state, flakeRef, registries);

    Strings args = {"clone"};

    // Shared between the GitHub and plain-Git cases below; the old code
    // duplicated this branch-selection logic in both.
    auto appendBranchArgs = [&]() {
        if (flakeRef.ref) {
            args.push_back("--branch");
            args.push_back(*flakeRef.ref);
        }
    };

    if (auto refData = std::get_if<FlakeRef::IsGitHub>(&flakeRef.data)) {
        args.push_back("git@github.com:" + refData->owner + "/" + refData->repo + ".git");
        appendBranchArgs();
    } else if (auto refData = std::get_if<FlakeRef::IsGit>(&flakeRef.data)) {
        args.push_back(refData->uri);
        appendBranchArgs();
    }

    if (destDir != "")
        args.push_back(destDir);

    runProgram("git", true, args);
}
|
|
|
|
|
2018-11-29 20:18:36 +02:00
|
|
|
}
|
2019-05-29 16:31:07 +03:00
|
|
|
|
|
|
|
/* Return the global flake registry, reading (and, for remote
   settings, downloading) it at most once per EvalState. */
std::shared_ptr<flake::FlakeRegistry> EvalState::getGlobalFlakeRegistry()
{
    std::call_once(_globalFlakeRegistryInit, [&]() {
        auto path = evalSettings.flakeRegistry;

        // A non-absolute setting is treated as a URL: download it (with
        // caching) and use the resulting store path.
        if (!hasPrefix(path, "/")) {
            CachedDownloadRequest request(evalSettings.flakeRegistry);
            request.name = "flake-registry.json";
            request.gcRoot = true; // keep the download alive across GC
            path = getDownloader()->downloadCached(store, request).path;
        }

        _globalFlakeRegistry = readRegistry(path);
    });

    return _globalFlakeRegistry;
}
|
|
|
|
|
|
|
|
// This always returns a vector with flakeReg, userReg, globalReg.
|
|
|
|
// If one of them doesn't exist, the registry is left empty but does exist.
|
|
|
|
const Registries EvalState::getFlakeRegistries()
|
|
|
|
{
|
|
|
|
Registries registries;
|
|
|
|
registries.push_back(getFlagRegistry(registryOverrides));
|
|
|
|
registries.push_back(getUserRegistry());
|
|
|
|
registries.push_back(getGlobalFlakeRegistry());
|
|
|
|
return registries;
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|