#include "lockfile.hh"
#include "store-api.hh"

#include <nlohmann/json.hpp>

namespace nix::flake {

FlakeRef flakeRefFromJson(const nlohmann::json & json)
{
    return FlakeRef::fromAttrs(jsonToAttrs(json));
}
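
/* Resolve a flake reference from a lock file entry. Newer lock files
   store the reference as an attribute set under `version4Attr` (parsed
   by flakeRefFromJson above); older ones store it as a plain URL string
   under `version3Attr1` or `version3Attr2`, which is still accepted
   below. For example, LockedNode's constructor calls
   getFlakeRef(json, "url", "uri", "locked"). */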

FlakeRef getFlakeRef(
    const nlohmann::json & json,
    const char * version3Attr1,
    const char * version3Attr2,
    const char * version4Attr)
{
    auto i = json.find(version4Attr);
    if (i != json.end())
        return flakeRefFromJson(*i);

    // FIXME: remove these.
    i = json.find(version3Attr1);
    if (i != json.end())
        return parseFlakeRef(*i);

    i = json.find(version3Attr2);
    if (i != json.end())
        return parseFlakeRef(*i);

    throw Error("attribute '%s' missing in lock file", version4Attr);
}

static TreeInfo parseTreeInfo(const nlohmann::json & json)
{
    TreeInfo info;

    auto i = json.find("info");
    if (i != json.end()) {
        const nlohmann::json & i2(*i);

        auto j = i2.find("narHash");
        if (j != i2.end())
            info.narHash = Hash((std::string) *j);
        else
            throw Error("attribute 'narHash' missing in lock file");

        j = i2.find("revCount");
        if (j != i2.end())
            info.revCount = *j;

        j = i2.find("lastModified");
        if (j != i2.end())
            info.lastModified = *j;

        return info;
    }

    i = json.find("narHash");
    if (i != json.end()) {
        info.narHash = Hash((std::string) *i);
        return info;
    }

    throw Error("attribute 'info' missing in lock file");
}

LockedNode::LockedNode(const nlohmann::json & json)
    : lockedRef(getFlakeRef(json, "url", "uri", "locked"))
    , originalRef(getFlakeRef(json, "originalUrl", "originalUri", "original"))
    , info(parseTreeInfo(json))
    , isFlake(json.find("flake") != json.end() ? (bool) json["flake"] : true)
{
    if (!lockedRef.input->isImmutable())
        throw Error("lockfile contains mutable flakeref '%s'", lockedRef);
}

StorePath LockedNode::computeStorePath(Store & store) const
{
    return info.computeStorePath(store);
}
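
/* Follow an input path such as {"nixops", "nixpkgs"} from this node,
   returning the node for the 'nixpkgs' input of the 'nixops' input, or
   a null pointer if any element of the path is missing. (The names here
   are only an example.) */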

std::shared_ptr<Node> Node::findInput(const InputPath & path)
{
    auto pos = shared_from_this();

    for (auto & elem : path) {
        auto i = pos->inputs.find(elem);
        if (i == pos->inputs.end())
            return {};
        pos = i->second;
    }

    return pos;
}
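
/* Parse a lock file. Versions 3 and 4 store the inputs as a tree: every
   entry under "inputs" is a complete node with its own nested "inputs".
   Version 5 stores a flat "nodes" table keyed by strings plus a "root"
   key, so a node shared by several inputs appears only once. A minimal
   version-5 layout, as also produced by toJson() below, looks roughly
   like this (the input name and the contents of the attribute sets are
   made up for illustration):

   {
     "version": 5,
     "root": "root",
     "nodes": {
       "root": { "inputs": { "nixpkgs": "nixpkgs" } },
       "nixpkgs": {
         "original": { ...attributes of the unlocked flake ref... },
         "locked":   { ...attributes of the locked flake ref... },
         "info":     { "narHash": "sha256-..." }
       }
     }
   }
*/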

LockFile::LockFile(const nlohmann::json & json, const Path & path)
{
    auto version = json.value("version", 0);
    if (version < 3 || version > 5)
        throw Error("lock file '%s' has unsupported version %d", path, version);

    if (version < 5) {
        std::function<void(Node & node, const nlohmann::json & json)> getInputs;

        getInputs = [&](Node & node, const nlohmann::json & json)
        {
            for (auto & i : json["inputs"].items()) {
                auto input = std::make_shared<LockedNode>(i.value());
                getInputs(*input, i.value());
                node.inputs.insert_or_assign(i.key(), input);
            }
        };

        getInputs(*root, json);
    }

    else {
        std::unordered_map<std::string, std::shared_ptr<Node>> nodeMap;

        std::function<void(Node & node, const nlohmann::json & jsonNode)> getInputs;

        getInputs = [&](Node & node, const nlohmann::json & jsonNode)
        {
            if (jsonNode.find("inputs") == jsonNode.end()) return;
            for (auto & i : jsonNode["inputs"].items()) {
                std::string inputKey = i.value();
                auto k = nodeMap.find(inputKey);
                if (k == nodeMap.end()) {
                    auto jsonNode2 = json["nodes"][inputKey];
                    auto input = std::make_shared<LockedNode>(jsonNode2);
                    k = nodeMap.insert_or_assign(inputKey, input).first;
                    getInputs(*input, jsonNode2);
                }
                node.inputs.insert_or_assign(i.key(), k->second);
            }
        };

        std::string rootKey = json["root"];
        nodeMap.insert_or_assign(rootKey, root);
        getInputs(*root, json["nodes"][rootKey]);
    }
}
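
/* Serialise to the version-5 format sketched above. Each node is
   emitted exactly once: dumpNode() remembers the key chosen for a node
   and reuses it when the node is reached again, and if a preferred key
   is already taken a numeric suffix ("_2", "_3", ...) is appended until
   a free key is found. */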

nlohmann::json LockFile::toJson() const
{
    nlohmann::json nodes;
    std::unordered_map<std::shared_ptr<const Node>, std::string> nodeKeys;
    std::unordered_set<std::string> keys;

    std::function<std::string(const std::string & key, std::shared_ptr<const Node> node)> dumpNode;

    dumpNode = [&](std::string key, std::shared_ptr<const Node> node) -> std::string
    {
        auto k = nodeKeys.find(node);
        if (k != nodeKeys.end())
            return k->second;

        if (!keys.insert(key).second) {
            for (int n = 2; ; ++n) {
                auto k = fmt("%s_%d", key, n);
                if (keys.insert(k).second) {
                    key = k;
                    break;
                }
            }
        }

        nodeKeys.insert_or_assign(node, key);

        auto n = nlohmann::json::object();

        if (!node->inputs.empty()) {
            auto inputs = nlohmann::json::object();
            for (auto & i : node->inputs)
                inputs[i.first] = dumpNode(i.first, i.second);
            n["inputs"] = std::move(inputs);
        }

        if (auto lockedNode = std::dynamic_pointer_cast<const LockedNode>(node)) {
            n["original"] = fetchers::attrsToJson(lockedNode->originalRef.toAttrs());
            n["locked"] = fetchers::attrsToJson(lockedNode->lockedRef.toAttrs());
            n["info"] = lockedNode->info.toJson();
            if (!lockedNode->isFlake) n["flake"] = false;
        }

        nodes[key] = std::move(n);

        return key;
    };

    nlohmann::json json;
    json["version"] = 5;
    json["root"] = dumpNode("root", root);
    json["nodes"] = std::move(nodes);

    return json;
}

std::string LockFile::to_string() const
{
    return toJson().dump(2);
}

LockFile LockFile::read(const Path & path)
{
    if (!pathExists(path)) return LockFile();
    return LockFile(nlohmann::json::parse(readFile(path)), path);
}

std::ostream & operator <<(std::ostream & stream, const LockFile & lockFile)
{
    stream << lockFile.toJson().dump(2);
    return stream;
}

void LockFile::write(const Path & path) const
{
    createDirs(dirOf(path));
    writeFile(path, fmt("%s\n", *this));
}

bool LockFile::isImmutable() const
{
    std::unordered_set<std::shared_ptr<const Node>> nodes;

    std::function<void(std::shared_ptr<const Node> node)> visit;

    visit = [&](std::shared_ptr<const Node> node)
    {
        if (!nodes.insert(node).second) return;
        for (auto & i : node->inputs) visit(i.second);
    };

    visit(root);

    for (auto & i : nodes) {
        if (i == root) continue;
        auto lockedNode = std::dynamic_pointer_cast<const LockedNode>(i);
        if (lockedNode && !lockedNode->lockedRef.input->isImmutable()) return false;
    }

    return true;
}

bool LockFile::operator ==(const LockFile & other) const
{
    // FIXME: slow
    return toJson() == other.toJson();
}
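
/* Split a path like "nixops/nixpkgs" into its elements, e.g.
   {"nixops", "nixpkgs"}; each element must be a valid flake identifier.
   (The names are only an example.) */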

InputPath parseInputPath(std::string_view s)
{
    InputPath path;

    for (auto & elem : tokenizeString<std::vector<std::string>>(s, "/")) {
        if (!std::regex_match(elem, flakeIdRegex))
            throw Error("invalid flake input path element '%s'", elem);
        path.push_back(elem);
    }

    return path;
}

static void flattenLockFile(
    std::shared_ptr<const Node> node,
    const InputPath & prefix,
    std::unordered_set<std::shared_ptr<const Node>> & done,
    std::map<InputPath, std::shared_ptr<const LockedNode>> & res)
{
    if (!done.insert(node).second) return;

    for (auto & [id, input] : node->inputs) {
        auto inputPath(prefix);
        inputPath.push_back(id);
        if (auto lockedInput = std::dynamic_pointer_cast<const LockedNode>(input))
            res.emplace(inputPath, lockedInput);
        flattenLockFile(input, inputPath, done, res);
    }
}
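
/* Render a human-readable diff between two lock files. For hypothetical
   inputs the result consists of lines such as:

     * Added 'nixpkgs': 'github:NixOS/nixpkgs/<rev>'
     * Updated 'nixops/nixpkgs': '<old ref>' -> '<new ref>'
     * Removed 'nixops'

   where the input names and references shown here are made up. */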

std::string diffLockFiles(const LockFile & oldLocks, const LockFile & newLocks)
{
    std::unordered_set<std::shared_ptr<const Node>> done;
    std::map<InputPath, std::shared_ptr<const LockedNode>> oldFlat, newFlat;
    flattenLockFile(oldLocks.root, {}, done, oldFlat);
    done.clear();
    flattenLockFile(newLocks.root, {}, done, newFlat);

    auto i = oldFlat.begin();
    auto j = newFlat.begin();
    std::string res;

    while (i != oldFlat.end() || j != newFlat.end()) {
        if (j != newFlat.end() && (i == oldFlat.end() || i->first > j->first)) {
            res += fmt("* Added '%s': '%s'\n", concatStringsSep("/", j->first), j->second->lockedRef);
            ++j;
        } else if (i != oldFlat.end() && (j == newFlat.end() || i->first < j->first)) {
            res += fmt("* Removed '%s'\n", concatStringsSep("/", i->first));
            ++i;
        } else {
            if (!(i->second->lockedRef == j->second->lockedRef)) {
                assert(i->second->lockedRef.to_string() != j->second->lockedRef.to_string());
                res += fmt("* Updated '%s': '%s' -> '%s'\n",
                    concatStringsSep("/", i->first),
                    i->second->lockedRef,
                    j->second->lockedRef);
            }
            ++i;
            ++j;
        }
    }

    return res;
}

}