#include "command.hh"
#include "installable-flake.hh"
#include "common-args.hh"
#include "shared.hh"
#include "eval.hh"
#include "eval-inline.hh"
#include "eval-settings.hh"
#include "flake/flake.hh"
#include "get-drvs.hh"
#include "store-api.hh"
#include "derivations.hh"
#include "outputs-spec.hh"
#include "attr-path.hh"
#include "fetchers.hh"
#include "registry.hh"
#include "eval-cache.hh"
#include "markdown.hh"
#include "users.hh"

#include <nlohmann/json.hpp>

#include <queue>
#include <iomanip>

using namespace nix;
using namespace nix::flake;
using json = nlohmann::json;

struct CmdFlakeUpdate;

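/* Common base for 'nix flake' subcommands: accepts an optional flake URL
   argument (defaulting to '.', i.e. the flake in the current directory)
   and provides helpers for parsing, completing and locking that flake. */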
class FlakeCommand : virtual Args, public MixFlakeOptions
{
protected:
    std::string flakeUrl = ".";

public:

    FlakeCommand()
    {
        expectArgs({
            .label = "flake-url",
            .optional = true,
            .handler = {&flakeUrl},
            .completer = {[&](AddCompletions & completions, size_t, std::string_view prefix) {
                completeFlakeRef(completions, getStore(), prefix);
            }}
        });
    }

    FlakeRef getFlakeRef()
    {
        return parseFlakeRef(flakeUrl, absPath(".")); //FIXME
    }

    LockedFlake lockFlake()
    {
        return flake::lockFlake(*getEvalState(), getFlakeRef(), lockFlags);
    }

    std::vector<FlakeRef> getFlakeRefsForCompletion() override
    {
        return {
            // Like getFlakeRef, but with expandTilde called first
            parseFlakeRef(expandTilde(flakeUrl), absPath("."))
        };
    }
};

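/* 'nix flake update': update entries in the flake's lock file. With no
   input names it recreates the whole lock file; with one or more input
   names (or '--flake' to select another flake) it updates only those
   inputs. */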
struct CmdFlakeUpdate : FlakeCommand
{
public:

    std::string description() override
    {
        return "update flake lock file";
    }

    CmdFlakeUpdate()
    {
        expectedArgs.clear();
        addFlag({
            .longName = "flake",
            .description = "The flake to operate on. Default is the current directory.",
            .labels = {"flake-url"},
            .handler = {&flakeUrl},
            .completer = {[&](AddCompletions & completions, size_t, std::string_view prefix) {
                completeFlakeRef(completions, getStore(), prefix);
            }}
        });
        expectArgs({
            .label = "inputs",
            .optional = true,
            .handler = {[&](std::string inputToUpdate){
                InputPath inputPath;
                try {
                    inputPath = flake::parseInputPath(inputToUpdate);
                } catch (Error & e) {
                    warn("Invalid flake input '%s'. To update a specific flake, use 'nix flake update --flake %s' instead.", inputToUpdate, inputToUpdate);
                    throw e;
                }
                if (lockFlags.inputUpdates.contains(inputPath))
                    warn("Input '%s' was specified multiple times. You may have done this by accident.", inputToUpdate);
                lockFlags.inputUpdates.insert(inputPath);
            }},
            .completer = {[&](AddCompletions & completions, size_t, std::string_view prefix) {
                completeFlakeInputPath(completions, getEvalState(), getFlakeRefsForCompletion(), prefix);
            }}
        });

        /* Remove flags that don't make sense. */
        removeFlag("no-update-lock-file");
        removeFlag("no-write-lock-file");
    }

    std::string doc() override
    {
        return
          #include "flake-update.md"
          ;
    }

    void run(nix::ref<nix::Store> store) override
    {
        settings.tarballTtl = 0;
        auto updateAll = lockFlags.inputUpdates.empty();

        lockFlags.recreateLockFile = updateAll;
        lockFlags.writeLockFile = true;
        lockFlags.applyNixConfig = true;

        lockFlake();
    }
};

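/* 'nix flake lock': write a lock file, adding entries for inputs that are
   missing while leaving existing entries untouched. */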
struct CmdFlakeLock : FlakeCommand
{
    std::string description() override
    {
        return "create missing lock file entries";
    }

    CmdFlakeLock()
    {
        /* Remove flags that don't make sense. */
        removeFlag("no-write-lock-file");
    }

    std::string doc() override
    {
        return
          #include "flake-lock.md"
          ;
    }

    void run(nix::ref<nix::Store> store) override
    {
        settings.tarballTtl = 0;

        lockFlags.writeLockFile = true;
        lockFlags.applyNixConfig = true;

        lockFlake();
    }
};

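/* Call 'callback' for every attribute of the flake's 'outputs',
   evaluating 'hydraJobs' first so that import-from-derivation can be
   disabled for it and re-enabled for the other outputs. */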
static void enumerateOutputs(EvalState & state, Value & vFlake,
    std::function<void(const std::string & name, Value & vProvide, const PosIdx pos)> callback)
{
    auto pos = vFlake.determinePos(noPos);
    state.forceAttrs(vFlake, pos, "while evaluating a flake to get its outputs");

    auto aOutputs = vFlake.attrs->get(state.symbols.create("outputs"));
    assert(aOutputs);

    state.forceAttrs(*aOutputs->value, pos, "while evaluating the outputs of a flake");

    auto sHydraJobs = state.symbols.create("hydraJobs");

    /* Hack: ensure that hydraJobs is evaluated before anything
       else. This way we can disable IFD for hydraJobs and then enable
       it for other outputs. */
    if (auto attr = aOutputs->value->attrs->get(sHydraJobs))
        callback(state.symbols[attr->name], *attr->value, attr->pos);

    for (auto & attr : *aOutputs->value->attrs) {
        if (attr.name != sHydraJobs)
            callback(state.symbols[attr.name], *attr.value, attr.pos);
    }
}

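/* 'nix flake metadata': print the resolved and locked flake references,
   description, store path and related metadata, plus the input tree from
   the lock file, either as JSON or as human-readable text. */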
struct CmdFlakeMetadata : FlakeCommand, MixJSON
{
    std::string description() override
    {
        return "show flake metadata";
    }

    std::string doc() override
    {
        return
          #include "flake-metadata.md"
          ;
    }

    void run(nix::ref<nix::Store> store) override
    {
        auto lockedFlake = lockFlake();
        auto & flake = lockedFlake.flake;

        if (json) {
            nlohmann::json j;
            if (flake.description)
                j["description"] = *flake.description;
            j["originalUrl"] = flake.originalRef.to_string();
            j["original"] = fetchers::attrsToJSON(flake.originalRef.toAttrs());
            j["resolvedUrl"] = flake.resolvedRef.to_string();
            j["resolved"] = fetchers::attrsToJSON(flake.resolvedRef.toAttrs());
            j["url"] = flake.lockedRef.to_string(); // FIXME: rename to lockedUrl
            j["locked"] = fetchers::attrsToJSON(flake.lockedRef.toAttrs());
            if (auto rev = flake.lockedRef.input.getRev())
                j["revision"] = rev->to_string(HashFormat::Base16, false);
            if (auto dirtyRev = fetchers::maybeGetStrAttr(flake.lockedRef.toAttrs(), "dirtyRev"))
                j["dirtyRevision"] = *dirtyRev;
            if (auto revCount = flake.lockedRef.input.getRevCount())
                j["revCount"] = *revCount;
            if (auto lastModified = flake.lockedRef.input.getLastModified())
                j["lastModified"] = *lastModified;
            j["path"] = store->printStorePath(flake.storePath);
            j["locks"] = lockedFlake.lockFile.toJSON();
            logger->cout("%s", j.dump());
        } else {
            logger->cout(
                ANSI_BOLD "Resolved URL:" ANSI_NORMAL " %s",
                flake.resolvedRef.to_string());
            logger->cout(
                ANSI_BOLD "Locked URL:" ANSI_NORMAL " %s",
                flake.lockedRef.to_string());
            if (flake.description)
                logger->cout(
                    ANSI_BOLD "Description:" ANSI_NORMAL " %s",
                    *flake.description);
            logger->cout(
                ANSI_BOLD "Path:" ANSI_NORMAL " %s",
                store->printStorePath(flake.storePath));
            if (auto rev = flake.lockedRef.input.getRev())
                logger->cout(
                    ANSI_BOLD "Revision:" ANSI_NORMAL " %s",
                    rev->to_string(HashFormat::Base16, false));
            if (auto dirtyRev = fetchers::maybeGetStrAttr(flake.lockedRef.toAttrs(), "dirtyRev"))
                logger->cout(
                    ANSI_BOLD "Revision:" ANSI_NORMAL " %s",
                    *dirtyRev);
            if (auto revCount = flake.lockedRef.input.getRevCount())
                logger->cout(
                    ANSI_BOLD "Revisions:" ANSI_NORMAL " %s",
                    *revCount);
            if (auto lastModified = flake.lockedRef.input.getLastModified())
                logger->cout(
                    ANSI_BOLD "Last modified:" ANSI_NORMAL " %s",
                    std::put_time(std::localtime(&*lastModified), "%F %T"));

            if (!lockedFlake.lockFile.root->inputs.empty())
                logger->cout(ANSI_BOLD "Inputs:" ANSI_NORMAL);

            std::set<ref<Node>> visited;

            std::function<void(const Node & node, const std::string & prefix)> recurse;

            recurse = [&](const Node & node, const std::string & prefix)
            {
                for (const auto & [i, input] : enumerate(node.inputs)) {
                    bool last = i + 1 == node.inputs.size();

                    if (auto lockedNode = std::get_if<0>(&input.second)) {
                        std::string lastModifiedStr = "";
                        if (auto lastModified = (*lockedNode)->lockedRef.input.getLastModified())
                            lastModifiedStr = fmt(" (%s)", std::put_time(std::gmtime(&*lastModified), "%F %T"));
                        logger->cout("%s" ANSI_BOLD "%s" ANSI_NORMAL ": %s%s",
                            prefix + (last ? treeLast : treeConn), input.first,
                            (*lockedNode)->lockedRef,
                            lastModifiedStr);

                        bool firstVisit = visited.insert(*lockedNode).second;

                        if (firstVisit) recurse(**lockedNode, prefix + (last ? treeNull : treeLine));
                    } else if (auto follows = std::get_if<1>(&input.second)) {
                        logger->cout("%s" ANSI_BOLD "%s" ANSI_NORMAL " follows input '%s'",
                            prefix + (last ? treeLast : treeConn), input.first,
                            printInputPath(*follows));
                    }
                }
            };

            visited.insert(lockedFlake.lockFile.root);
            recurse(*lockedFlake.lockFile.root, "");
        }
    }
};

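/* 'nix flake info': deprecated alias for 'nix flake metadata'. */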
struct CmdFlakeInfo : CmdFlakeMetadata
{
    void run(nix::ref<nix::Store> store) override
    {
        warn("'nix flake info' is a deprecated alias for 'nix flake metadata'");
        CmdFlakeMetadata::run(store);
    }
};

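/* 'nix flake check': evaluate the flake's outputs and, unless --no-build
   is given, build its checks. By default only outputs for the local
   system are examined; --all-systems covers the rest. */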
struct CmdFlakeCheck : FlakeCommand
{
    bool build = true;
    bool checkAllSystems = false;

    CmdFlakeCheck()
    {
        addFlag({
            .longName = "no-build",
            .description = "Do not build checks.",
            .handler = {&build, false}
        });
        addFlag({
            .longName = "all-systems",
            .description = "Check the outputs for all systems.",
            .handler = {&checkAllSystems, true}
        });
    }

    std::string description() override
    {
        return "check whether the flake evaluates and run its tests";
    }

    std::string doc() override
    {
        return
          #include "flake-check.md"
          ;
    }

    void run(nix::ref<nix::Store> store) override
    {
        if (!build) {
            settings.readOnlyMode = true;
            evalSettings.enableImportFromDerivation.setDefault(false);
        }

        auto state = getEvalState();

        lockFlags.applyNixConfig = true;
        auto flake = lockFlake();
        auto localSystem = std::string(settings.thisSystem.get());

        bool hasErrors = false;
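        /* Report an error, but when --keep-going is set, log it and keep
           checking the remaining outputs instead of aborting. */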
        auto reportError = [&](const Error & e) {
            try {
                throw e;
            } catch (Error & e) {
                if (settings.keepGoing) {
                    ignoreException();
                    hasErrors = true;
                }
                else
                    throw;
            }
        };

        std::set<std::string> omittedSystems;

        // FIXME: rewrite to use EvalCache.

        auto resolve = [&] (PosIdx p) {
            return state->positions[p];
        };

        auto argHasName = [&] (Symbol arg, std::string_view expected) {
            std::string_view name = state->symbols[arg];
            return
                name == expected
                || name == "_"
                || (hasPrefix(name, "_") && name.substr(1) == expected);
        };

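        /* Sanity-check a '<system>' attribute name and decide whether it
           should be checked at all: unless --all-systems is given, outputs
           for systems other than the local one are recorded as omitted and
           skipped. */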
        auto checkSystemName = [&](const std::string & system, const PosIdx pos) {
            // FIXME: what's the format of "system"?
            if (system.find('-') == std::string::npos)
                reportError(Error("'%s' is not a valid system type, at %s", system, resolve(pos)));
        };

        auto checkSystemType = [&](const std::string & system, const PosIdx pos) {
            if (!checkAllSystems && system != localSystem) {
                omittedSystems.insert(system);
                return false;
            } else {
                return true;
            }
        };

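        /* Check that an attribute evaluates to a derivation and return its
           .drv path; evaluation errors are routed through reportError. */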
        auto checkDerivation = [&](const std::string & attrPath, Value & v, const PosIdx pos) -> std::optional<StorePath> {
            try {
                auto drvInfo = getDerivation(*state, v, false);
                if (!drvInfo)
                    throw Error("flake attribute '%s' is not a derivation", attrPath);
                // FIXME: check meta attributes
                return drvInfo->queryDrvPath();
            } catch (Error & e) {
                e.addTrace(resolve(pos), hintfmt("while checking the derivation '%s'", attrPath));
                reportError(e);
            }
            return std::nullopt;
        };

        std::vector<DerivedPath> drvPaths;

        auto checkApp = [&](const std::string & attrPath, Value & v, const PosIdx pos) {
            try {
                #if 0
                // FIXME
                auto app = App(*state, v);
                for (auto & i : app.context) {
                    auto [drvPathS, outputName] = NixStringContextElem::parse(i);
                    store->parseStorePath(drvPathS);
                }
                #endif
            } catch (Error & e) {
                e.addTrace(resolve(pos), hintfmt("while checking the app definition '%s'", attrPath));
                reportError(e);
            }
        };

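        /* An overlay must be a function of the form 'final: prev: { ... }';
           only the argument names are checked here, the overlay itself is
           not applied. */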
        auto checkOverlay = [&](const std::string & attrPath, Value & v, const PosIdx pos) {
            try {
                state->forceValue(v, pos);
                if (!v.isLambda()) {
                    throw Error("overlay is not a function, but %s instead", showType(v));
                }
                if (v.lambda.fun->hasFormals()
                    || !argHasName(v.lambda.fun->arg, "final"))
                    throw Error("overlay does not take an argument named 'final'");
                auto body = dynamic_cast<ExprLambda *>(v.lambda.fun->body);
                if (!body
                    || body->hasFormals()
                    || !argHasName(body->arg, "prev"))
                    throw Error("overlay does not take an argument named 'prev'");
                // FIXME: if we have a 'nixpkgs' input, use it to
                // evaluate the overlay.
            } catch (Error & e) {
                e.addTrace(resolve(pos), hintfmt("while checking the overlay '%s'", attrPath));
                reportError(e);
            }
        };

        auto checkModule = [&](const std::string & attrPath, Value & v, const PosIdx pos) {
            try {
                state->forceValue(v, pos);
            } catch (Error & e) {
                e.addTrace(resolve(pos), hintfmt("while checking the NixOS module '%s'", attrPath));
                reportError(e);
            }
        };

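        /* Recursively walk a 'hydraJobs' attribute set, checking every
           derivation it contains. Declared as a std::function so the
           lambda can call itself. */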
        std::function<void(const std::string & attrPath, Value & v, const PosIdx pos)> checkHydraJobs;

        checkHydraJobs = [&](const std::string & attrPath, Value & v, const PosIdx pos) {
            try {
                state->forceAttrs(v, pos, "");

                if (state->isDerivation(v))
                    throw Error("jobset should not be a derivation at top-level");

                for (auto & attr : *v.attrs) {
                    state->forceAttrs(*attr.value, attr.pos, "");
                    auto attrPath2 = concatStrings(attrPath, ".", state->symbols[attr.name]);
                    if (state->isDerivation(*attr.value)) {
                        Activity act(*logger, lvlChatty, actUnknown,
                            fmt("checking Hydra job '%s'", attrPath2));
                        checkDerivation(attrPath2, *attr.value, attr.pos);
                    } else
                        checkHydraJobs(attrPath2, *attr.value, attr.pos);
                }

            } catch (Error & e) {
                e.addTrace(resolve(pos), hintfmt("while checking the Hydra jobset '%s'", attrPath));
                reportError(e);
            }
        };

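        /* A NixOS configuration is considered valid if its
           'config.system.build.toplevel' attribute evaluates to a
           derivation. */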
        auto checkNixOSConfiguration = [&](const std::string & attrPath, Value & v, const PosIdx pos) {
            try {
                Activity act(*logger, lvlChatty, actUnknown,
                    fmt("checking NixOS configuration '%s'", attrPath));
                Bindings & bindings(*state->allocBindings(0));
                auto vToplevel = findAlongAttrPath(*state, "config.system.build.toplevel", bindings, v).first;
                state->forceValue(*vToplevel, pos);
                if (!state->isDerivation(*vToplevel))
                    throw Error("attribute 'config.system.build.toplevel' is not a derivation");
            } catch (Error & e) {
                e.addTrace(resolve(pos), hintfmt("while checking the NixOS configuration '%s'", attrPath));
                reportError(e);
            }
        };

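        /* A template must have a 'path' attribute pointing to an existing
           path and a 'description' string; any other attribute except
           'welcomeText' is rejected. */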
        auto checkTemplate = [&](const std::string & attrPath, Value & v, const PosIdx pos) {
            try {
                Activity act(*logger, lvlChatty, actUnknown,
                    fmt("checking template '%s'", attrPath));

                state->forceAttrs(v, pos, "");

                if (auto attr = v.attrs->get(state->symbols.create("path"))) {
                    if (attr->name == state->symbols.create("path")) {
                        NixStringContext context;
                        auto path = state->coerceToPath(attr->pos, *attr->value, context, "");
                        if (!path.pathExists())
                            throw Error("template '%s' refers to a non-existent path '%s'", attrPath, path);
                        // TODO: recursively check the flake in 'path'.
                    }
                } else
                    throw Error("template '%s' lacks attribute 'path'", attrPath);

                if (auto attr = v.attrs->get(state->symbols.create("description")))
                    state->forceStringNoCtx(*attr->value, attr->pos, "");
                else
                    throw Error("template '%s' lacks attribute 'description'", attrPath);

                for (auto & attr : *v.attrs) {
                    std::string_view name(state->symbols[attr.name]);
                    if (name != "path" && name != "description" && name != "welcomeText")
                        throw Error("template '%s' has unsupported attribute '%s'", attrPath, name);
                }
            } catch (Error & e) {
                e.addTrace(resolve(pos), hintfmt("while checking the template '%s'", attrPath));
                reportError(e);
            }
        };

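        /* A bundler is only checked to be a function; its argument and
           result types are not validated yet. */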
        auto checkBundler = [&](const std::string & attrPath, Value & v, const PosIdx pos) {
            try {
                state->forceValue(v, pos);
                if (!v.isLambda())
                    throw Error("bundler must be a function");
                // TODO: check types of inputs/outputs?
            } catch (Error & e) {
                e.addTrace(resolve(pos), hintfmt("while checking the bundler '%s'", attrPath));
                reportError(e);
            }
        };

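        /* Evaluate the flake and dispatch each top-level output to the
           appropriate checker above, warning about deprecated output names
           along the way. */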
{
|
|
|
|
Activity act(*logger, lvlInfo, actUnknown, "evaluating flake");
|
|
|
|
|
|
|
|
auto vFlake = state->allocValue();
|
|
|
|
flake::callFlake(*state, flake, *vFlake);
|
|
|
|
|
2019-05-30 00:09:23 +03:00
|
|
|
enumerateOutputs(*state,
|
2019-05-29 18:25:41 +03:00
|
|
|
*vFlake,
|
2022-03-04 20:31:59 +02:00
|
|
|
[&](const std::string & name, Value & vOutput, const PosIdx pos) {
|
2019-05-29 18:25:41 +03:00
|
|
|
Activity act(*logger, lvlChatty, actUnknown,
|
|
|
|
fmt("checking flake output '%s'", name));
|
|
|
|
|
|
|
|
try {
|
2021-09-22 18:15:07 +03:00
|
|
|
evalSettings.enableImportFromDerivation.setDefault(name != "hydraJobs");
|
|
|
|
|
2019-09-10 18:39:55 +03:00
|
|
|
state->forceValue(vOutput, pos);
|
2019-05-29 18:25:41 +03:00
|
|
|
|
2022-02-22 15:19:39 +02:00
|
|
|
std::string_view replacement =
|
|
|
|
name == "defaultPackage" ? "packages.<system>.default" :
|
2022-05-21 15:41:24 +03:00
|
|
|
name == "defaultApp" ? "apps.<system>.default" :
|
2022-02-22 15:19:39 +02:00
|
|
|
name == "defaultTemplate" ? "templates.default" :
|
|
|
|
name == "defaultBundler" ? "bundlers.<system>.default" :
|
|
|
|
name == "overlay" ? "overlays.default" :
|
|
|
|
name == "devShell" ? "devShells.<system>.default" :
|
2022-04-06 19:20:39 +03:00
|
|
|
name == "nixosModule" ? "nixosModules.default" :
|
2022-02-22 15:19:39 +02:00
|
|
|
"";
|
|
|
|
if (replacement != "")
|
|
|
|
warn("flake output attribute '%s' is deprecated; use '%s' instead", name, replacement);
|
|
|
|
|
2019-05-29 18:25:41 +03:00
|
|
|
if (name == "checks") {
|
2023-01-19 14:23:04 +02:00
|
|
|
state->forceAttrs(vOutput, pos, "");
|
Support non-x86_64-linux system types in flakes
A command like
$ nix run nixpkgs#hello
will now build the attribute 'packages.${system}.hello' rather than
'packages.hello'. Note that this does mean that the flake needs to
export an attribute for every system type it supports, and you can't
build on unsupported systems. So 'packages' typically looks like this:
packages = nixpkgs.lib.genAttrs ["x86_64-linux" "i686-linux"] (system: {
hello = ...;
});
The 'checks', 'defaultPackage', 'devShell', 'apps' and 'defaultApp'
outputs similarly are now attrsets that map system types to
derivations/apps. 'nix flake check' checks that the derivations for
all platforms evaluate correctly, but only builds the derivations in
'checks.${system}'.
Fixes #2861. (That issue also talks about access to ~/.config/nixpkgs
and --arg, but I think it's reasonable to say that flakes shouldn't
support those.)
The alternative to attribute selection is to pass the system type as
an argument to the flake's 'outputs' function, e.g. 'outputs = { self,
nixpkgs, system }: ...'. However, that approach would be at odds with
hermetic evaluation and make it impossible to enumerate the packages
provided by a flake.
2019-10-15 18:52:10 +03:00
|
|
|
for (auto & attr : *vOutput.attrs) {
|
2022-03-05 15:40:24 +02:00
|
|
|
const auto & attr_name = state->symbols[attr.name];
|
|
|
|
checkSystemName(attr_name, attr.pos);
|
2023-05-23 07:59:44 +03:00
|
|
|
if (checkSystemType(attr_name, attr.pos)) {
|
|
|
|
state->forceAttrs(*attr.value, attr.pos, "");
|
|
|
|
for (auto & attr2 : *attr.value->attrs) {
|
|
|
|
auto drvPath = checkDerivation(
|
|
|
|
fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]),
|
|
|
|
*attr2.value, attr2.pos);
|
|
|
|
if (drvPath && attr_name == settings.thisSystem.get()) {
|
|
|
|
drvPaths.push_back(DerivedPath::Built {
|
Make the Derived Path family of types inductive for dynamic derivations
We want to be able to write down `foo.drv^bar.drv^baz`:
`foo.drv^bar.drv` is the dynamic derivation (since it is itself a
derivation output, `bar.drv` from `foo.drv`).
To that end, we create `Single{Derivation,BuiltPath}` types, that are
very similar except instead of having multiple outputs (in a set or
map), they have a single one. This is for everything to the left of the
rightmost `^`.
`NixStringContextElem` has an analogous change, and now can reuse
`SingleDerivedPath` at the top level. In fact, if we ever get rid of
`DrvDeep`, `NixStringContextElem` could be replaced with
`SingleDerivedPath` entirely!
Important note: some JSON formats have changed.
We already can *produce* dynamic derivations, but we can't refer to them
directly. Today, we can merely express building or example at the top
imperatively over time by building `foo.drv^bar.drv`, and then with a
second nix invocation doing `<result-from-first>^baz`, but this is not
declarative. The ethos of Nix of being able to write down the full plan
everything you want to do, and then execute than plan with a single
command, and for that we need the new inductive form of these types.
Co-authored-by: Robert Hensing <roberth@users.noreply.github.com>
Co-authored-by: Valentin Gagarin <valentin.gagarin@tweag.io>
2023-01-16 00:39:04 +02:00
|
|
|
.drvPath = makeConstantStorePathRef(*drvPath),
|
|
|
|
.outputs = OutputsSpec::All { },
|
|
|
|
});
|
2023-05-23 07:59:44 +03:00
|
|
|
}
|
2023-01-11 23:32:30 +02:00
|
|
|
}
|
Support non-x86_64-linux system types in flakes
A command like
$ nix run nixpkgs#hello
will now build the attribute 'packages.${system}.hello' rather than
'packages.hello'. Note that this does mean that the flake needs to
export an attribute for every system type it supports, and you can't
build on unsupported systems. So 'packages' typically looks like this:
packages = nixpkgs.lib.genAttrs ["x86_64-linux" "i686-linux"] (system: {
hello = ...;
});
The 'checks', 'defaultPackage', 'devShell', 'apps' and 'defaultApp'
outputs similarly are now attrsets that map system types to
derivations/apps. 'nix flake check' checks that the derivations for
all platforms evaluate correctly, but only builds the derivations in
'checks.${system}'.
Fixes #2861. (That issue also talks about access to ~/.config/nixpkgs
and --arg, but I think it's reasonable to say that flakes shouldn't
support those.)
The alternative to attribute selection is to pass the system type as
an argument to the flake's 'outputs' function, e.g. 'outputs = { self,
nixpkgs, system }: ...'. However, that approach would be at odds with
hermetic evaluation and make it impossible to enumerate the packages
provided by a flake.
2019-10-15 18:52:10 +03:00
|
|
|
}
|
|
|
|
}
|
2019-05-29 21:57:08 +03:00
|
|
|
}
|
|
|
|
|
2022-03-11 15:57:28 +02:00
|
|
|
else if (name == "formatter") {
|
2023-01-19 14:23:04 +02:00
|
|
|
state->forceAttrs(vOutput, pos, "");
|
2022-03-11 15:57:28 +02:00
|
|
|
for (auto & attr : *vOutput.attrs) {
|
2022-03-05 15:40:24 +02:00
|
|
|
const auto & attr_name = state->symbols[attr.name];
|
|
|
|
checkSystemName(attr_name, attr.pos);
|
2023-05-23 07:59:44 +03:00
|
|
|
if (checkSystemType(attr_name, attr.pos)) {
|
|
|
|
checkApp(
|
|
|
|
fmt("%s.%s", name, attr_name),
|
|
|
|
*attr.value, attr.pos);
|
|
|
|
};
|
2022-03-11 15:57:28 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-07-13 18:23:29 +03:00
|
|
|
else if (name == "packages" || name == "devShells") {
|
2023-01-19 14:23:04 +02:00
|
|
|
state->forceAttrs(vOutput, pos, "");
|
Support non-x86_64-linux system types in flakes
A command like
$ nix run nixpkgs#hello
will now build the attribute 'packages.${system}.hello' rather than
'packages.hello'. Note that this does mean that the flake needs to
export an attribute for every system type it supports, and you can't
build on unsupported systems. So 'packages' typically looks like this:
packages = nixpkgs.lib.genAttrs ["x86_64-linux" "i686-linux"] (system: {
hello = ...;
});
The 'checks', 'defaultPackage', 'devShell', 'apps' and 'defaultApp'
outputs similarly are now attrsets that map system types to
derivations/apps. 'nix flake check' checks that the derivations for
all platforms evaluate correctly, but only builds the derivations in
'checks.${system}'.
Fixes #2861. (That issue also talks about access to ~/.config/nixpkgs
and --arg, but I think it's reasonable to say that flakes shouldn't
support those.)
The alternative to attribute selection is to pass the system type as
an argument to the flake's 'outputs' function, e.g. 'outputs = { self,
nixpkgs, system }: ...'. However, that approach would be at odds with
hermetic evaluation and make it impossible to enumerate the packages
provided by a flake.
2019-10-15 18:52:10 +03:00
|
|
|
for (auto & attr : *vOutput.attrs) {
|
2022-03-05 15:40:24 +02:00
|
|
|
const auto & attr_name = state->symbols[attr.name];
|
|
|
|
checkSystemName(attr_name, attr.pos);
|
2023-05-23 07:59:44 +03:00
|
|
|
if (checkSystemType(attr_name, attr.pos)) {
|
|
|
|
state->forceAttrs(*attr.value, attr.pos, "");
|
|
|
|
for (auto & attr2 : *attr.value->attrs)
|
|
|
|
checkDerivation(
|
|
|
|
fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]),
|
|
|
|
*attr2.value, attr2.pos);
|
|
|
|
};
|
Support non-x86_64-linux system types in flakes
A command like
$ nix run nixpkgs#hello
will now build the attribute 'packages.${system}.hello' rather than
'packages.hello'. Note that this does mean that the flake needs to
export an attribute for every system type it supports, and you can't
build on unsupported systems. So 'packages' typically looks like this:
packages = nixpkgs.lib.genAttrs ["x86_64-linux" "i686-linux"] (system: {
hello = ...;
});
The 'checks', 'defaultPackage', 'devShell', 'apps' and 'defaultApp'
outputs similarly are now attrsets that map system types to
derivations/apps. 'nix flake check' checks that the derivations for
all platforms evaluate correctly, but only builds the derivations in
'checks.${system}'.
Fixes #2861. (That issue also talks about access to ~/.config/nixpkgs
and --arg, but I think it's reasonable to say that flakes shouldn't
support those.)
The alternative to attribute selection is to pass the system type as
an argument to the flake's 'outputs' function, e.g. 'outputs = { self,
nixpkgs, system }: ...'. However, that approach would be at odds with
hermetic evaluation and make it impossible to enumerate the packages
provided by a flake.
2019-10-15 18:52:10 +03:00
|
|
|
}
|
2019-05-29 18:25:41 +03:00
|
|
|
}
|
|
|
|
|
2019-06-17 18:59:57 +03:00
|
|
|
else if (name == "apps") {
|
2023-01-19 14:23:04 +02:00
|
|
|
state->forceAttrs(vOutput, pos, "");
|
Support non-x86_64-linux system types in flakes
A command like
$ nix run nixpkgs#hello
will now build the attribute 'packages.${system}.hello' rather than
'packages.hello'. Note that this does mean that the flake needs to
export an attribute for every system type it supports, and you can't
build on unsupported systems. So 'packages' typically looks like this:
packages = nixpkgs.lib.genAttrs ["x86_64-linux" "i686-linux"] (system: {
hello = ...;
});
The 'checks', 'defaultPackage', 'devShell', 'apps' and 'defaultApp'
outputs similarly are now attrsets that map system types to
derivations/apps. 'nix flake check' checks that the derivations for
all platforms evaluate correctly, but only builds the derivations in
'checks.${system}'.
Fixes #2861. (That issue also talks about access to ~/.config/nixpkgs
and --arg, but I think it's reasonable to say that flakes shouldn't
support those.)
The alternative to attribute selection is to pass the system type as
an argument to the flake's 'outputs' function, e.g. 'outputs = { self,
nixpkgs, system }: ...'. However, that approach would be at odds with
hermetic evaluation and make it impossible to enumerate the packages
provided by a flake.
2019-10-15 18:52:10 +03:00
|
|
|
for (auto & attr : *vOutput.attrs) {
|
2022-03-05 15:40:24 +02:00
|
|
|
const auto & attr_name = state->symbols[attr.name];
|
|
|
|
checkSystemName(attr_name, attr.pos);
|
2023-05-23 07:59:44 +03:00
|
|
|
if (checkSystemType(attr_name, attr.pos)) {
|
|
|
|
state->forceAttrs(*attr.value, attr.pos, "");
|
|
|
|
for (auto & attr2 : *attr.value->attrs)
|
|
|
|
checkApp(
|
|
|
|
fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]),
|
|
|
|
*attr2.value, attr2.pos);
|
|
|
|
};
|
Support non-x86_64-linux system types in flakes
A command like
$ nix run nixpkgs#hello
will now build the attribute 'packages.${system}.hello' rather than
'packages.hello'. Note that this does mean that the flake needs to
export an attribute for every system type it supports, and you can't
build on unsupported systems. So 'packages' typically looks like this:
packages = nixpkgs.lib.genAttrs ["x86_64-linux" "i686-linux"] (system: {
hello = ...;
});
The 'checks', 'defaultPackage', 'devShell', 'apps' and 'defaultApp'
outputs similarly are now attrsets that map system types to
derivations/apps. 'nix flake check' checks that the derivations for
all platforms evaluate correctly, but only builds the derivations in
'checks.${system}'.
Fixes #2861. (That issue also talks about access to ~/.config/nixpkgs
and --arg, but I think it's reasonable to say that flakes shouldn't
support those.)
The alternative to attribute selection is to pass the system type as
an argument to the flake's 'outputs' function, e.g. 'outputs = { self,
nixpkgs, system }: ...'. However, that approach would be at odds with
hermetic evaluation and make it impossible to enumerate the packages
provided by a flake.
2019-10-15 18:52:10 +03:00
|
|
|
}
|
2019-06-17 18:59:57 +03:00
|
|
|
}
|
|
|
|
|
Support non-x86_64-linux system types in flakes
A command like
$ nix run nixpkgs#hello
will now build the attribute 'packages.${system}.hello' rather than
'packages.hello'. Note that this does mean that the flake needs to
export an attribute for every system type it supports, and you can't
build on unsupported systems. So 'packages' typically looks like this:
packages = nixpkgs.lib.genAttrs ["x86_64-linux" "i686-linux"] (system: {
hello = ...;
});
The 'checks', 'defaultPackage', 'devShell', 'apps' and 'defaultApp'
outputs similarly are now attrsets that map system types to
derivations/apps. 'nix flake check' checks that the derivations for
all platforms evaluate correctly, but only builds the derivations in
'checks.${system}'.
Fixes #2861. (That issue also talks about access to ~/.config/nixpkgs
and --arg, but I think it's reasonable to say that flakes shouldn't
support those.)
The alternative to attribute selection is to pass the system type as
an argument to the flake's 'outputs' function, e.g. 'outputs = { self,
nixpkgs, system }: ...'. However, that approach would be at odds with
hermetic evaluation and make it impossible to enumerate the packages
provided by a flake.
2019-10-15 18:52:10 +03:00
|
|
|
else if (name == "defaultPackage" || name == "devShell") {
|
2023-01-19 14:23:04 +02:00
|
|
|
state->forceAttrs(vOutput, pos, "");
|
2019-10-15 18:52:10 +03:00
|
|
|
for (auto & attr : *vOutput.attrs) {
|
2022-03-05 15:40:24 +02:00
|
|
|
const auto & attr_name = state->symbols[attr.name];
|
|
|
|
checkSystemName(attr_name, attr.pos);
|
2023-05-23 07:59:44 +03:00
|
|
|
if (checkSystemType(attr_name, attr.pos)) {
|
|
|
|
checkDerivation(
|
|
|
|
fmt("%s.%s", name, attr_name),
|
|
|
|
*attr.value, attr.pos);
|
|
|
|
}
|
2019-10-15 18:52:10 +03:00
|
|
|
}
|
|
|
|
}
|
2019-05-29 21:57:08 +03:00
|
|
|
|
2019-10-15 18:52:10 +03:00
|
|
|
else if (name == "defaultApp") {
|
2023-01-19 14:23:04 +02:00
|
|
|
state->forceAttrs(vOutput, pos, "");
|
2019-10-15 18:52:10 +03:00
|
|
|
for (auto & attr : *vOutput.attrs) {
|
2022-03-05 15:40:24 +02:00
|
|
|
const auto & attr_name = state->symbols[attr.name];
|
|
|
|
checkSystemName(attr_name, attr.pos);
|
2023-05-23 07:59:44 +03:00
|
|
|
if (checkSystemType(attr_name, attr.pos)) {
|
|
|
|
checkApp(
|
|
|
|
fmt("%s.%s", name, attr_name),
|
|
|
|
*attr.value, attr.pos);
|
|
|
|
}
|
2019-10-15 18:52:10 +03:00
|
|
|
}
|
|
|
|
}
|
2019-06-17 18:59:57 +03:00
|
|
|
|
2019-10-15 18:52:10 +03:00
|
|
|
else if (name == "legacyPackages") {
|
2023-01-19 14:23:04 +02:00
|
|
|
state->forceAttrs(vOutput, pos, "");
|
2019-10-15 18:52:10 +03:00
|
|
|
for (auto & attr : *vOutput.attrs) {
|
2022-03-05 15:40:24 +02:00
|
|
|
checkSystemName(state->symbols[attr.name], attr.pos);
|
2023-05-23 07:59:44 +03:00
|
|
|
checkSystemType(state->symbols[attr.name], attr.pos);
|
2019-10-15 18:52:10 +03:00
|
|
|
// FIXME: do getDerivations?
|
|
|
|
}
|
|
|
|
}
|
2019-06-18 10:45:14 +03:00
|
|
|
|
2019-09-10 15:52:22 +03:00
|
|
|
else if (name == "overlay")
|
2019-09-10 16:25:10 +03:00
|
|
|
checkOverlay(name, vOutput, pos);
|
|
|
|
|
|
|
|
else if (name == "overlays") {
|
2023-01-19 14:23:04 +02:00
|
|
|
state->forceAttrs(vOutput, pos, "");
|
2019-09-10 16:25:10 +03:00
|
|
|
for (auto & attr : *vOutput.attrs)
|
2022-03-05 15:40:24 +02:00
|
|
|
checkOverlay(fmt("%s.%s", name, state->symbols[attr.name]),
|
2022-03-04 20:31:59 +02:00
|
|
|
*attr.value, attr.pos);
|
2019-09-10 16:25:10 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
else if (name == "nixosModule")
|
|
|
|
checkModule(name, vOutput, pos);
|
|
|
|
|
|
|
|
else if (name == "nixosModules") {
|
2023-01-19 14:23:04 +02:00
|
|
|
state->forceAttrs(vOutput, pos, "");
|
2019-09-10 16:25:10 +03:00
|
|
|
for (auto & attr : *vOutput.attrs)
|
2022-03-05 15:40:24 +02:00
|
|
|
checkModule(fmt("%s.%s", name, state->symbols[attr.name]),
|
2022-03-04 20:31:59 +02:00
|
|
|
*attr.value, attr.pos);
|
2019-09-10 16:25:10 +03:00
|
|
|
}
|
2019-09-10 15:52:22 +03:00
|
|
|
|
2019-09-19 21:15:42 +03:00
|
|
|
else if (name == "nixosConfigurations") {
|
2023-01-19 14:23:04 +02:00
|
|
|
state->forceAttrs(vOutput, pos, "");
|
2019-09-19 21:15:42 +03:00
|
|
|
for (auto & attr : *vOutput.attrs)
|
2022-03-05 15:40:24 +02:00
|
|
|
checkNixOSConfiguration(fmt("%s.%s", name, state->symbols[attr.name]),
|
2022-03-04 20:31:59 +02:00
|
|
|
*attr.value, attr.pos);
|
2019-09-19 21:15:42 +03:00
|
|
|
}
|
|
|
|
|
2019-09-10 18:39:55 +03:00
|
|
|
else if (name == "hydraJobs")
|
|
|
|
checkHydraJobs(name, vOutput, pos);
|
|
|
|
|
2020-06-04 21:02:50 +03:00
|
|
|
else if (name == "defaultTemplate")
|
|
|
|
checkTemplate(name, vOutput, pos);
|
|
|
|
|
|
|
|
else if (name == "templates") {
|
2023-01-19 14:23:04 +02:00
|
|
|
state->forceAttrs(vOutput, pos, "");
|
2020-06-04 21:02:50 +03:00
|
|
|
for (auto & attr : *vOutput.attrs)
|
2022-03-05 15:40:24 +02:00
|
|
|
checkTemplate(fmt("%s.%s", name, state->symbols[attr.name]),
|
2022-03-04 20:31:59 +02:00
|
|
|
*attr.value, attr.pos);
|
2020-06-04 21:02:50 +03:00
|
|
|
}
|
|
|
|
|
2022-01-21 18:43:11 +02:00
|
|
|
else if (name == "defaultBundler") {
|
2023-01-19 14:23:04 +02:00
|
|
|
state->forceAttrs(vOutput, pos, "");
|
2022-01-21 18:43:11 +02:00
|
|
|
for (auto & attr : *vOutput.attrs) {
|
2022-03-05 15:40:24 +02:00
|
|
|
const auto & attr_name = state->symbols[attr.name];
|
|
|
|
checkSystemName(attr_name, attr.pos);
|
2023-05-23 07:59:44 +03:00
|
|
|
if (checkSystemType(attr_name, attr.pos)) {
|
|
|
|
checkBundler(
|
|
|
|
fmt("%s.%s", name, attr_name),
|
|
|
|
*attr.value, attr.pos);
|
|
|
|
}
|
2022-01-21 18:43:11 +02:00
|
|
|
}
|
|
|
|
}
|
2020-07-30 19:33:22 +03:00
|
|
|
|
2020-07-30 23:03:57 +03:00
|
|
|
else if (name == "bundlers") {
|
2023-01-19 14:23:04 +02:00
|
|
|
state->forceAttrs(vOutput, pos, "");
|
2022-01-21 18:43:11 +02:00
|
|
|
for (auto & attr : *vOutput.attrs) {
|
2022-03-05 15:40:24 +02:00
|
|
|
const auto & attr_name = state->symbols[attr.name];
|
|
|
|
checkSystemName(attr_name, attr.pos);
|
2023-05-23 07:59:44 +03:00
|
|
|
if (checkSystemType(attr_name, attr.pos)) {
|
|
|
|
state->forceAttrs(*attr.value, attr.pos, "");
|
|
|
|
for (auto & attr2 : *attr.value->attrs) {
|
|
|
|
checkBundler(
|
|
|
|
fmt("%s.%s.%s", name, attr_name, state->symbols[attr2.name]),
|
|
|
|
*attr2.value, attr2.pos);
|
|
|
|
}
|
|
|
|
}
|
2022-01-21 18:43:11 +02:00
|
|
|
}
|
2020-07-30 19:33:22 +03:00
|
|
|
}
|
|
|
|
|
2023-01-16 21:16:45 +02:00
|
|
|
else if (
|
|
|
|
name == "lib"
|
|
|
|
|| name == "darwinConfigurations"
|
|
|
|
|| name == "darwinModules"
|
|
|
|
|| name == "flakeModule"
|
|
|
|
|| name == "flakeModules"
|
|
|
|
|| name == "herculesCI"
|
|
|
|
|| name == "homeConfigurations"
|
|
|
|
|| name == "nixopsConfigurations"
|
|
|
|
)
|
|
|
|
// Known but unchecked community attribute
|
|
|
|
;
|
|
|
|
|
2019-06-17 19:05:32 +03:00
|
|
|
else
|
|
|
|
warn("unknown flake output '%s'", name);
|
|
|
|
|
2019-05-29 18:25:41 +03:00
|
|
|
} catch (Error & e) {
|
2022-03-04 20:31:59 +02:00
|
|
|
e.addTrace(resolve(pos), hintfmt("while checking flake output '%s'", name));
|
2021-06-02 12:24:31 +03:00
|
|
|
reportError(e);
|
2019-05-29 18:25:41 +03:00
|
|
|
}
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
2019-06-17 18:59:57 +03:00
|
|
|
if (build && !drvPaths.empty()) {
|
2019-05-29 18:25:41 +03:00
|
|
|
Activity act(*logger, lvlInfo, actUnknown, "running flake checks");
|
|
|
|
store->buildPaths(drvPaths);
|
|
|
|
}
|
2021-06-02 11:36:33 +03:00
|
|
|
if (hasErrors)
|
2021-09-22 18:15:07 +03:00
|
|
|
throw Error("some errors were encountered during the evaluation");
|
2023-05-23 07:59:44 +03:00
|
|
|
|
|
|
|
if (!omittedSystems.empty()) {
|
|
|
|
warn(
|
|
|
|
"The check omitted these incompatible systems: %s\n"
|
|
|
|
"Use '--all-systems' to check all.",
|
|
|
|
concatStringsSep(", ", omittedSystems)
|
|
|
|
);
|
|
|
|
}
|
|
|
|
}
|
2019-05-29 18:25:41 +03:00
|
|
|
};
|
|
|
|
|
2022-02-14 21:39:44 +02:00
|
|
|
static Strings defaultTemplateAttrPathsPrefixes{"templates."};
|
2022-02-11 19:11:08 +02:00
|
|
|
static Strings defaultTemplateAttrPaths = {"templates.default", "defaultTemplate"};
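// These attribute paths are shared by the 'init'/'new' commands below and by
// shell completion: 'templates.default' is tried first, with the older
// 'defaultTemplate' output kept as a fallback, while the prefix is used to
// complete 'templates.<name>' fragments.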
|
2022-02-14 21:39:44 +02:00
|
|
|
|
2020-06-04 21:02:50 +03:00
|
|
|
struct CmdFlakeInitCommon : virtual Args, EvalCommand
|
2019-04-09 00:36:12 +03:00
|
|
|
{
|
2020-06-04 21:02:50 +03:00
|
|
|
std::string templateUrl = "templates";
|
|
|
|
Path destDir;
|
|
|
|
|
2020-06-05 15:09:12 +03:00
|
|
|
const LockFlags lockFlags{ .writeLockFile = false };
|
|
|
|
|
2020-06-04 21:02:50 +03:00
|
|
|
CmdFlakeInitCommon()
|
2019-04-09 00:36:12 +03:00
|
|
|
{
|
2020-06-04 21:02:50 +03:00
|
|
|
addFlag({
|
|
|
|
.longName = "template",
|
|
|
|
.shortName = 't',
|
2021-01-13 15:18:04 +02:00
|
|
|
.description = "The template to use.",
|
2020-06-04 21:02:50 +03:00
|
|
|
.labels = {"template"},
|
|
|
|
.handler = {&templateUrl},
|
2023-10-23 16:03:11 +03:00
|
|
|
.completer = {[&](AddCompletions & completions, size_t, std::string_view prefix) {
|
2020-06-05 15:09:12 +03:00
|
|
|
completeFlakeRefWithFragment(
|
2023-10-23 16:03:11 +03:00
|
|
|
completions,
|
2020-06-05 15:09:12 +03:00
|
|
|
getEvalState(),
|
|
|
|
lockFlags,
|
2022-02-14 21:39:44 +02:00
|
|
|
defaultTemplateAttrPathsPrefixes,
|
|
|
|
defaultTemplateAttrPaths,
|
2020-06-05 15:09:12 +03:00
|
|
|
prefix);
|
2020-06-04 21:02:50 +03:00
|
|
|
}}
|
|
|
|
});
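// Example (illustrative template name): 'nix flake init -t templates#rust'
// instantiates the 'rust' template of the registry's 'templates' flake;
// with no '-t', the default template of that flake is used.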
|
2019-04-09 00:36:12 +03:00
|
|
|
}
|
|
|
|
|
2020-06-04 21:02:50 +03:00
|
|
|
void run(nix::ref<nix::Store> store) override
|
|
|
|
{
|
|
|
|
auto flakeDir = absPath(destDir);
|
|
|
|
|
|
|
|
auto evalState = getEvalState();
|
|
|
|
|
|
|
|
auto [templateFlakeRef, templateName] = parseFlakeRefWithFragment(templateUrl, absPath("."));
|
|
|
|
|
2021-02-17 18:32:10 +02:00
|
|
|
auto installable = InstallableFlake(nullptr,
|
2023-08-16 19:29:23 +03:00
|
|
|
evalState, std::move(templateFlakeRef), templateName, ExtendedOutputsSpec::Default(),
|
2022-02-14 21:39:44 +02:00
|
|
|
defaultTemplateAttrPaths,
|
|
|
|
defaultTemplateAttrPathsPrefixes,
|
|
|
|
lockFlags);
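// The template is resolved as a flake installable: a fragment such as
// 'templates#name' is looked up under 'templates.<name>', while an empty
// fragment falls back to 'templates.default' (or the legacy 'defaultTemplate').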
|
2020-06-04 21:02:50 +03:00
|
|
|
|
2022-04-14 15:04:19 +03:00
|
|
|
auto cursor = installable.getCursor(*evalState);
|
2020-06-04 21:02:50 +03:00
|
|
|
|
2022-03-07 18:57:52 +02:00
|
|
|
auto templateDirAttr = cursor->getAttr("path");
|
|
|
|
auto templateDir = templateDirAttr->getString();
|
|
|
|
|
2022-03-07 21:07:43 +02:00
|
|
|
if (!store->isInStore(templateDir))
|
2022-03-07 18:57:52 +02:00
|
|
|
throw TypeError(
|
2022-03-07 21:07:43 +02:00
|
|
|
"'%s' was not found in the Nix store\n"
|
2022-03-07 18:57:52 +02:00
|
|
|
"If you've set '%s' to a string, try using a path instead.",
|
|
|
|
templateDir, templateDirAttr->getAttrPathStr());
|
2020-06-04 21:02:50 +03:00
|
|
|
|
2022-06-21 19:26:32 +03:00
|
|
|
std::vector<Path> changedFiles;
|
|
|
|
std::vector<Path> conflictedFiles;
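// copyDir (below) walks the template recursively: regular files and symlinks
// are copied into the destination, existing files with different contents are
// left alone and recorded in 'conflictedFiles', and everything newly written
// is recorded in 'changedFiles'.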
|
2020-06-04 21:02:50 +03:00
|
|
|
|
|
|
|
std::function<void(const Path & from, const Path & to)> copyDir;
|
|
|
|
copyDir = [&](const Path & from, const Path & to)
|
|
|
|
{
|
|
|
|
createDirs(to);
|
|
|
|
|
|
|
|
for (auto & entry : readDirectory(from)) {
|
|
|
|
auto from2 = from + "/" + entry.name;
|
|
|
|
auto to2 = to + "/" + entry.name;
|
|
|
|
auto st = lstat(from2);
|
|
|
|
if (S_ISDIR(st.st_mode))
|
|
|
|
copyDir(from2, to2);
|
|
|
|
else if (S_ISREG(st.st_mode)) {
|
|
|
|
auto contents = readFile(from2);
|
|
|
|
if (pathExists(to2)) {
|
|
|
|
auto contents2 = readFile(to2);
|
2022-06-21 19:26:32 +03:00
|
|
|
if (contents != contents2) {
|
2022-06-26 20:00:34 +03:00
|
|
|
printError("refusing to overwrite existing file '%s'\n please merge it manually with '%s'", to2, from2);
|
2022-06-21 19:26:32 +03:00
|
|
|
conflictedFiles.push_back(to2);
|
|
|
|
} else {
|
|
|
|
notice("skipping identical file: %s", from2);
|
|
|
|
}
|
|
|
|
continue;
|
2020-06-04 21:02:50 +03:00
|
|
|
} else
|
|
|
|
writeFile(to2, contents);
|
|
|
|
}
|
|
|
|
else if (S_ISLNK(st.st_mode)) {
|
|
|
|
auto target = readLink(from2);
|
|
|
|
if (pathExists(to2)) {
|
2022-06-21 19:26:32 +03:00
|
|
|
if (readLink(to2) != target) {
|
2022-06-26 20:00:57 +03:00
|
|
|
printError("refusing to overwrite existing file '%s'\n please merge it manually with '%s'", to2, from2);
|
2022-06-21 19:26:32 +03:00
|
|
|
conflictedFiles.push_back(to2);
|
|
|
|
} else {
|
|
|
|
notice("skipping identical file: %s", from2);
|
|
|
|
}
|
2022-06-26 23:29:45 +03:00
|
|
|
continue;
|
2020-06-04 21:02:50 +03:00
|
|
|
} else
|
|
|
|
createSymlink(target, to2);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
throw Error("file '%s' has unsupported type", from2);
|
2022-06-21 19:26:32 +03:00
|
|
|
changedFiles.push_back(to2);
|
2022-02-15 18:50:14 +02:00
|
|
|
notice("wrote: %s", to2);
|
2020-06-04 21:02:50 +03:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
copyDir(templateDir, flakeDir);
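// If the destination is a git repository, register the new files with
// 'git add --intent-to-add' so that a flake in that repository can see them
// (git-tracked flakes only expose files known to git).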
|
|
|
|
|
2022-06-26 23:29:45 +03:00
|
|
|
if (!changedFiles.empty() && pathExists(flakeDir + "/.git")) {
|
2020-06-23 17:25:32 +03:00
|
|
|
Strings args = { "-C", flakeDir, "add", "--intent-to-add", "--force", "--" };
|
2022-06-21 19:26:32 +03:00
|
|
|
for (auto & s : changedFiles) args.push_back(s);
|
2020-06-04 21:02:50 +03:00
|
|
|
runProgram("git", true, args);
|
|
|
|
}
|
2022-02-15 18:50:14 +02:00
|
|
|
auto welcomeText = cursor->maybeGetAttr("welcomeText");
|
|
|
|
if (welcomeText) {
|
2022-02-17 20:59:32 +02:00
|
|
|
notice("\n");
|
|
|
|
notice(renderMarkdownToTerminal(welcomeText->getString()));
|
2022-02-15 18:50:14 +02:00
|
|
|
}
|
2022-06-21 19:26:32 +03:00
|
|
|
|
2022-06-26 20:12:30 +03:00
|
|
|
if (!conflictedFiles.empty())
|
|
|
|
throw Error("Encountered %d conflicts - see above", conflictedFiles.size());
|
2020-06-04 21:02:50 +03:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
struct CmdFlakeInit : CmdFlakeInitCommon
|
|
|
|
{
|
|
|
|
std::string description() override
|
2019-04-09 00:36:12 +03:00
|
|
|
{
|
2020-06-04 21:02:50 +03:00
|
|
|
return "create a flake in the current directory from a template";
|
|
|
|
}
|
2019-04-09 00:36:12 +03:00
|
|
|
|
2020-12-23 14:19:53 +02:00
|
|
|
std::string doc() override
|
|
|
|
{
|
|
|
|
return
|
|
|
|
#include "flake-init.md"
|
|
|
|
;
|
2020-06-04 21:02:50 +03:00
|
|
|
}
|
2019-04-09 00:36:12 +03:00
|
|
|
|
2020-06-04 21:02:50 +03:00
|
|
|
CmdFlakeInit()
|
|
|
|
{
|
|
|
|
destDir = ".";
|
|
|
|
}
|
|
|
|
};
|
2019-04-09 00:36:12 +03:00
|
|
|
|
2020-06-04 21:02:50 +03:00
|
|
|
struct CmdFlakeNew : CmdFlakeInitCommon
|
|
|
|
{
|
|
|
|
std::string description() override
|
|
|
|
{
|
|
|
|
return "create a flake in the specified directory from a template";
|
|
|
|
}
|
2020-04-27 23:53:11 +03:00
|
|
|
|
2020-12-23 14:19:53 +02:00
|
|
|
std::string doc() override
|
|
|
|
{
|
|
|
|
return
|
|
|
|
#include "flake-new.md"
|
|
|
|
;
|
|
|
|
}
|
|
|
|
|
2020-06-04 21:02:50 +03:00
|
|
|
CmdFlakeNew()
|
|
|
|
{
|
|
|
|
expectArgs({
|
|
|
|
.label = "dest-dir",
|
|
|
|
.handler = {&destDir},
|
|
|
|
.completer = completePath
|
|
|
|
});
|
2019-04-09 00:36:12 +03:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2019-05-16 23:48:16 +03:00
|
|
|
struct CmdFlakeClone : FlakeCommand
|
2019-03-21 10:30:16 +02:00
|
|
|
{
|
2019-05-22 14:46:07 +03:00
|
|
|
Path destDir;
|
2019-03-21 10:30:16 +02:00
|
|
|
|
|
|
|
std::string description() override
|
|
|
|
{
|
|
|
|
return "clone flake repository";
|
|
|
|
}
|
|
|
|
|
2020-12-23 14:19:53 +02:00
|
|
|
std::string doc() override
|
|
|
|
{
|
|
|
|
return
|
|
|
|
#include "flake-clone.md"
|
|
|
|
;
|
|
|
|
}
|
|
|
|
|
2019-03-21 10:30:16 +02:00
|
|
|
CmdFlakeClone()
|
|
|
|
{
|
2020-05-05 19:59:33 +03:00
|
|
|
addFlag({
|
|
|
|
.longName = "dest",
|
|
|
|
.shortName = 'f',
|
2021-01-13 15:18:04 +02:00
|
|
|
.description = "Clone the flake to path *dest*.",
|
2020-05-05 19:59:33 +03:00
|
|
|
.labels = {"path"},
|
|
|
|
.handler = {&destDir}
|
|
|
|
});
|
2019-03-21 10:30:16 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void run(nix::ref<nix::Store> store) override
|
|
|
|
{
|
2020-01-21 17:27:53 +02:00
|
|
|
if (destDir.empty())
|
|
|
|
throw Error("missing flag '--dest'");
|
2019-03-21 10:30:16 +02:00
|
|
|
|
2020-05-30 01:44:11 +03:00
|
|
|
getFlakeRef().resolve(store).input.clone(destDir);
|
2019-03-21 10:30:16 +02:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2020-01-30 01:58:55 +02:00
|
|
|
struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun
|
|
|
|
{
|
|
|
|
std::string dstUri;
|
|
|
|
|
|
|
|
CmdFlakeArchive()
|
|
|
|
{
|
2020-05-05 19:59:33 +03:00
|
|
|
addFlag({
|
|
|
|
.longName = "to",
|
|
|
|
.description = "URI of the destination Nix store",
|
|
|
|
.labels = {"store-uri"},
|
|
|
|
.handler = {&dstUri}
|
|
|
|
});
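// Typical use (illustrative store URI): 'nix flake archive --to ssh://server'
// copies the flake's own source and all of its locked inputs to that store.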
|
2020-01-30 01:58:55 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
std::string description() override
|
|
|
|
{
|
|
|
|
return "copy a flake and all its inputs to a store";
|
|
|
|
}
|
|
|
|
|
2020-12-23 14:19:53 +02:00
|
|
|
std::string doc() override
|
|
|
|
{
|
|
|
|
return
|
|
|
|
#include "flake-archive.md"
|
|
|
|
;
|
2020-01-30 01:58:55 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void run(nix::ref<nix::Store> store) override
|
|
|
|
{
|
|
|
|
auto flake = lockFlake();
|
|
|
|
|
|
|
|
StorePathSet sources;
|
|
|
|
|
2023-10-20 20:50:21 +03:00
|
|
|
sources.insert(flake.flake.storePath);
|
2020-01-30 01:58:55 +02:00
|
|
|
|
2020-03-12 23:06:57 +02:00
|
|
|
// FIXME: use graph output, handle cycles.
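// 'traverse' walks the lock file tree, collecting the store path of every
// input into 'sources' (computing the path without fetching when --dry-run
// is given) and, with --json, mirroring the input tree as a JSON object.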
|
2022-11-16 17:49:49 +02:00
|
|
|
std::function<nlohmann::json(const Node & node)> traverse;
|
|
|
|
traverse = [&](const Node & node)
|
2020-01-30 01:58:55 +02:00
|
|
|
{
|
2022-11-16 17:49:49 +02:00
|
|
|
nlohmann::json jsonObj2 = json ? json::object() : nlohmann::json(nullptr);
|
2020-06-11 15:40:21 +03:00
|
|
|
for (auto & [inputName, input] : node.inputs) {
|
|
|
|
if (auto inputNode = std::get_if<0>(&input)) {
|
|
|
|
auto storePath =
|
|
|
|
dryRun
|
|
|
|
? (*inputNode)->lockedRef.input.computeStorePath(*store)
|
2023-10-20 20:50:21 +03:00
|
|
|
: (*inputNode)->lockedRef.input.fetch(store).first;
|
2022-11-16 17:49:49 +02:00
|
|
|
if (json) {
|
|
|
|
auto & jsonObj3 = jsonObj2[inputName];
|
|
|
|
jsonObj3["path"] = store->printStorePath(storePath);
|
|
|
|
sources.insert(std::move(storePath));
|
|
|
|
jsonObj3["inputs"] = traverse(**inputNode);
|
|
|
|
} else {
|
|
|
|
sources.insert(std::move(storePath));
|
|
|
|
traverse(**inputNode);
|
|
|
|
}
|
2020-06-11 15:40:21 +03:00
|
|
|
}
|
2020-01-30 01:58:55 +02:00
|
|
|
}
|
2022-11-16 17:49:49 +02:00
|
|
|
return jsonObj2;
|
2020-01-30 01:58:55 +02:00
|
|
|
};
|
|
|
|
|
2022-11-16 17:49:49 +02:00
|
|
|
if (json) {
|
|
|
|
nlohmann::json jsonRoot = {
|
2023-10-20 20:50:21 +03:00
|
|
|
{"path", store->printStorePath(flake.flake.storePath)},
|
2022-11-16 17:49:49 +02:00
|
|
|
{"inputs", traverse(*flake.lockFile.root)},
|
|
|
|
};
|
2023-03-02 16:02:24 +02:00
|
|
|
logger->cout("%s", jsonRoot);
|
2022-11-16 17:49:49 +02:00
|
|
|
} else {
|
|
|
|
traverse(*flake.lockFile.root);
|
|
|
|
}
|
2020-01-30 01:58:55 +02:00
|
|
|
|
|
|
|
if (!dryRun && !dstUri.empty()) {
|
|
|
|
ref<Store> dstStore = openStore(dstUri);
|
2021-07-19 13:01:06 +03:00
|
|
|
copyPaths(*store, *dstStore, sources);
|
2020-01-30 01:58:55 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2021-08-18 06:04:48 +03:00
|
|
|
struct CmdFlakeShow : FlakeCommand, MixJSON
|
2020-04-16 16:36:15 +03:00
|
|
|
{
|
|
|
|
bool showLegacy = false;
|
2022-09-07 21:28:30 +03:00
|
|
|
bool showAllSystems = false;
|
2020-04-16 16:36:15 +03:00
|
|
|
|
|
|
|
CmdFlakeShow()
|
|
|
|
{
|
2020-05-05 19:59:33 +03:00
|
|
|
addFlag({
|
|
|
|
.longName = "legacy",
|
2021-01-13 15:18:04 +02:00
|
|
|
.description = "Show the contents of the `legacyPackages` output.",
|
2020-05-05 19:59:33 +03:00
|
|
|
.handler = {&showLegacy, true}
|
|
|
|
});
|
2022-09-01 23:02:38 +03:00
|
|
|
addFlag({
|
2022-09-07 21:28:30 +03:00
|
|
|
.longName = "all-systems",
|
|
|
|
.description = "Show the contents of outputs for all systems.",
|
|
|
|
.handler = {&showAllSystems, true}
|
2022-09-01 23:02:38 +03:00
|
|
|
});
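// By default only outputs for the local system are shown; '--legacy' reveals
// 'legacyPackages' and '--all-systems' includes outputs for other systems.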
|
2020-04-16 16:36:15 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
std::string description() override
|
|
|
|
{
|
|
|
|
return "show the outputs provided by a flake";
|
|
|
|
}
|
|
|
|
|
2020-12-23 14:19:53 +02:00
|
|
|
std::string doc() override
|
|
|
|
{
|
|
|
|
return
|
|
|
|
#include "flake-show.md"
|
|
|
|
;
|
|
|
|
}
|
|
|
|
|
2020-04-16 16:36:15 +03:00
|
|
|
void run(nix::ref<nix::Store> store) override
|
|
|
|
{
|
2021-09-22 18:15:07 +03:00
|
|
|
evalSettings.enableImportFromDerivation.setDefault(false);
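// Disable import-from-derivation by default so that merely listing the
// outputs does not trigger builds during evaluation.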
|
|
|
|
|
2020-04-16 16:36:15 +03:00
|
|
|
auto state = getEvalState();
|
2020-04-20 16:27:09 +03:00
|
|
|
auto flake = std::make_shared<LockedFlake>(lockFlake());
|
2022-09-01 23:02:38 +03:00
|
|
|
auto localSystem = std::string(settings.thisSystem.get());
|
2020-04-16 16:36:15 +03:00
|
|
|
|
2023-01-31 19:16:31 +02:00
|
|
|
std::function<bool(
|
|
|
|
eval_cache::AttrCursor & visitor,
|
|
|
|
const std::vector<Symbol> &attrPath,
|
|
|
|
const Symbol &attr)> hasContent;
|
|
|
|
|
|
|
|
// For frameworks it's important that structures are as lazy as possible
|
|
|
|
// to prevent infinite recursions, performance issues and errors that
|
|
|
|
// aren't related to the thing to evaluate. As a consequence, they have
|
|
|
|
// to emit more attributes than strictly necessary.
|
|
|
|
// However, these attributes with empty values are not useful to the user
|
|
|
|
// so we omit them.
|
|
|
|
hasContent = [&](
|
|
|
|
eval_cache::AttrCursor & visitor,
|
|
|
|
const std::vector<Symbol> &attrPath,
|
|
|
|
const Symbol &attr) -> bool
|
|
|
|
{
|
|
|
|
auto attrPath2(attrPath);
|
|
|
|
attrPath2.push_back(attr);
|
|
|
|
auto attrPathS = state->symbols.resolve(attrPath2);
|
|
|
|
const auto & attrName = state->symbols[attr];
|
|
|
|
|
|
|
|
auto visitor2 = visitor.getAttr(attrName);
|
|
|
|
|
2023-03-26 17:39:24 +03:00
|
|
|
try {
|
|
|
|
if ((attrPathS[0] == "apps"
|
|
|
|
|| attrPathS[0] == "checks"
|
|
|
|
|| attrPathS[0] == "devShells"
|
|
|
|
|| attrPathS[0] == "legacyPackages"
|
|
|
|
|| attrPathS[0] == "packages")
|
|
|
|
&& (attrPathS.size() == 1 || attrPathS.size() == 2)) {
|
|
|
|
for (const auto &subAttr : visitor2->getAttrs()) {
|
|
|
|
if (hasContent(*visitor2, attrPath2, subAttr)) {
|
|
|
|
return true;
|
|
|
|
}
|
2023-01-31 19:16:31 +02:00
|
|
|
}
|
2023-03-26 17:39:24 +03:00
|
|
|
return false;
|
2023-01-31 19:16:31 +02:00
|
|
|
}
|
|
|
|
|
2023-03-26 17:39:24 +03:00
|
|
|
if ((attrPathS.size() == 1)
|
|
|
|
&& (attrPathS[0] == "formatter"
|
|
|
|
|| attrPathS[0] == "nixosConfigurations"
|
|
|
|
|| attrPathS[0] == "nixosModules"
|
|
|
|
|| attrPathS[0] == "overlays"
|
|
|
|
)) {
|
|
|
|
for (const auto &subAttr : visitor2->getAttrs()) {
|
|
|
|
if (hasContent(*visitor2, attrPath2, subAttr)) {
|
|
|
|
return true;
|
|
|
|
}
|
2023-01-31 19:16:31 +02:00
|
|
|
}
|
2023-03-26 17:39:24 +03:00
|
|
|
return false;
|
2023-01-31 19:16:31 +02:00
|
|
|
}
|
|
|
|
|
2023-03-26 17:39:24 +03:00
|
|
|
// If we don't recognize it, it's probably content
|
|
|
|
return true;
|
|
|
|
} catch (EvalError & e) {
|
|
|
|
// Some attrs may contain errors, e.g. legacyPackages of
|
|
|
|
// nixpkgs. We still want to recurse into such attrs rather than
|
|
|
|
// skip them entirely.
|
|
|
|
return true;
|
|
|
|
}
|
2023-01-31 19:16:31 +02:00
|
|
|
};
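// 'visit' renders one attribute: in text mode it prints a tree line per
// attribute, in JSON mode it returns a nested object; it recurses only into
// the recognised output categories and skips other-system outputs unless
// '--all-systems' is set.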
|
|
|
|
|
2021-09-14 18:18:29 +03:00
|
|
|
std::function<nlohmann::json(
|
|
|
|
eval_cache::AttrCursor & visitor,
|
|
|
|
const std::vector<Symbol> & attrPath,
|
|
|
|
const std::string & headerPrefix,
|
|
|
|
const std::string & nextPrefix)> visit;
|
|
|
|
|
|
|
|
visit = [&](
|
|
|
|
eval_cache::AttrCursor & visitor,
|
|
|
|
const std::vector<Symbol> & attrPath,
|
|
|
|
const std::string & headerPrefix,
|
|
|
|
const std::string & nextPrefix)
|
|
|
|
-> nlohmann::json
|
2021-08-18 06:04:48 +03:00
|
|
|
{
|
2021-09-14 18:18:29 +03:00
|
|
|
auto j = nlohmann::json::object();
|
2021-08-18 06:04:48 +03:00
|
|
|
|
2022-04-22 22:45:39 +03:00
|
|
|
auto attrPathS = state->symbols.resolve(attrPath);
|
|
|
|
|
2020-04-16 16:36:15 +03:00
|
|
|
Activity act(*logger, lvlInfo, actUnknown,
|
2022-04-22 22:45:39 +03:00
|
|
|
fmt("evaluating '%s'", concatStringsSep(".", attrPathS)));
|
|
|
|
|
2020-04-16 16:36:15 +03:00
|
|
|
try {
|
|
|
|
auto recurse = [&]()
|
|
|
|
{
|
2021-09-14 18:18:29 +03:00
|
|
|
if (!json)
|
2021-08-18 06:04:48 +03:00
|
|
|
logger->cout("%s", headerPrefix);
|
2023-01-31 19:16:31 +02:00
|
|
|
std::vector<Symbol> attrs;
|
|
|
|
for (const auto &attr : visitor.getAttrs()) {
|
|
|
|
if (hasContent(visitor, attrPath, attr))
|
|
|
|
attrs.push_back(attr);
|
|
|
|
}
|
|
|
|
|
2020-04-16 16:36:15 +03:00
|
|
|
for (const auto & [i, attr] : enumerate(attrs)) {
|
2022-04-22 22:45:39 +03:00
|
|
|
const auto & attrName = state->symbols[attr];
|
2020-04-16 16:36:15 +03:00
|
|
|
bool last = i + 1 == attrs.size();
|
2022-04-22 22:45:39 +03:00
|
|
|
auto visitor2 = visitor.getAttr(attrName);
|
2020-04-16 16:36:15 +03:00
|
|
|
auto attrPath2(attrPath);
|
|
|
|
attrPath2.push_back(attr);
|
2021-09-14 18:18:29 +03:00
|
|
|
auto j2 = visit(*visitor2, attrPath2,
|
2022-04-22 22:45:39 +03:00
|
|
|
fmt(ANSI_GREEN "%s%s" ANSI_NORMAL ANSI_BOLD "%s" ANSI_NORMAL, nextPrefix, last ? treeLast : treeConn, attrName),
|
2020-04-16 16:36:15 +03:00
|
|
|
nextPrefix + (last ? treeNull : treeLine));
|
2022-04-22 22:45:39 +03:00
|
|
|
if (json) j.emplace(attrName, std::move(j2));
|
2020-04-16 16:36:15 +03:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
auto showDerivation = [&]()
|
|
|
|
{
|
2022-04-26 15:01:21 +03:00
|
|
|
auto name = visitor.getAttr(state->sName)->getString();
|
2021-09-14 18:18:29 +03:00
|
|
|
if (json) {
|
|
|
|
std::optional<std::string> description;
|
2022-04-20 17:39:47 +03:00
|
|
|
if (auto aMeta = visitor.maybeGetAttr(state->sMeta)) {
|
|
|
|
if (auto aDescription = aMeta->maybeGetAttr(state->sDescription))
|
2021-09-14 18:18:29 +03:00
|
|
|
description = aDescription->getString();
|
|
|
|
}
|
|
|
|
j.emplace("type", "derivation");
|
|
|
|
j.emplace("name", name);
|
|
|
|
if (description)
|
|
|
|
j.emplace("description", *description);
|
2021-08-18 06:04:48 +03:00
|
|
|
} else {
|
|
|
|
logger->cout("%s: %s '%s'",
|
|
|
|
headerPrefix,
|
2022-04-22 22:45:39 +03:00
|
|
|
attrPath.size() == 2 && attrPathS[0] == "devShell" ? "development environment" :
|
|
|
|
attrPath.size() >= 2 && attrPathS[0] == "devShells" ? "development environment" :
|
|
|
|
attrPath.size() == 3 && attrPathS[0] == "checks" ? "derivation" :
|
|
|
|
attrPath.size() >= 1 && attrPathS[0] == "hydraJobs" ? "derivation" :
|
2021-08-18 06:04:48 +03:00
|
|
|
"package",
|
|
|
|
name);
|
|
|
|
}
|
2020-04-16 16:36:15 +03:00
|
|
|
};
|
|
|
|
|
|
|
|
if (attrPath.size() == 0
|
|
|
|
|| (attrPath.size() == 1 && (
|
2022-04-22 22:45:39 +03:00
|
|
|
attrPathS[0] == "defaultPackage"
|
|
|
|
|| attrPathS[0] == "devShell"
|
|
|
|
|| attrPathS[0] == "formatter"
|
|
|
|
|| attrPathS[0] == "nixosConfigurations"
|
|
|
|
|| attrPathS[0] == "nixosModules"
|
|
|
|
|| attrPathS[0] == "defaultApp"
|
|
|
|
|| attrPathS[0] == "templates"
|
|
|
|
|| attrPathS[0] == "overlays"))
|
2020-04-16 16:36:15 +03:00
|
|
|
|| ((attrPath.size() == 1 || attrPath.size() == 2)
|
2022-04-22 22:45:39 +03:00
|
|
|
&& (attrPathS[0] == "checks"
|
|
|
|
|| attrPathS[0] == "packages"
|
|
|
|
|| attrPathS[0] == "devShells"
|
|
|
|
|| attrPathS[0] == "apps"))
|
2020-04-16 16:36:15 +03:00
|
|
|
)
|
|
|
|
{
|
|
|
|
recurse();
|
|
|
|
}
|
|
|
|
|
|
|
|
else if (
|
2022-04-22 22:45:39 +03:00
|
|
|
(attrPath.size() == 2 && (attrPathS[0] == "defaultPackage" || attrPathS[0] == "devShell" || attrPathS[0] == "formatter"))
|
|
|
|
|| (attrPath.size() == 3 && (attrPathS[0] == "checks" || attrPathS[0] == "packages" || attrPathS[0] == "devShells"))
|
2020-04-16 16:36:15 +03:00
|
|
|
)
|
|
|
|
{
|
2022-09-07 21:28:30 +03:00
|
|
|
if (!showAllSystems && std::string(attrPathS[1]) != localSystem) {
|
2022-09-01 23:02:38 +03:00
|
|
|
if (!json)
|
2022-09-07 21:28:30 +03:00
|
|
|
logger->cout(fmt("%s " ANSI_WARNING "omitted" ANSI_NORMAL " (use '--all-systems' to show)", headerPrefix));
|
2022-09-01 23:02:38 +03:00
|
|
|
else {
|
2022-09-07 21:28:30 +03:00
|
|
|
logger->warn(fmt("%s omitted (use '--all-systems' to show)", concatStringsSep(".", attrPathS)));
|
2022-09-01 23:02:38 +03:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (visitor.isDerivation())
|
|
|
|
showDerivation();
|
|
|
|
else
|
|
|
|
throw Error("expected a derivation");
|
|
|
|
}
|
2020-04-16 16:36:15 +03:00
|
|
|
}
|
|
|
|
|
2022-04-22 22:45:39 +03:00
|
|
|
else if (attrPath.size() > 0 && attrPathS[0] == "hydraJobs") {
|
2020-04-16 16:36:15 +03:00
|
|
|
if (visitor.isDerivation())
|
|
|
|
showDerivation();
|
|
|
|
else
|
|
|
|
recurse();
|
|
|
|
}
|
|
|
|
|
2022-04-22 22:45:39 +03:00
|
|
|
else if (attrPath.size() > 0 && attrPathS[0] == "legacyPackages") {
|
2020-04-16 16:36:15 +03:00
|
|
|
if (attrPath.size() == 1)
|
|
|
|
recurse();
|
2022-05-13 18:12:11 +03:00
|
|
|
else if (!showLegacy) {
|
|
|
|
if (!json)
|
|
|
|
logger->cout(fmt("%s " ANSI_WARNING "omitted" ANSI_NORMAL " (use '--legacy' to show)", headerPrefix));
|
|
|
|
else {
|
|
|
|
logger->warn(fmt("%s omitted (use '--legacy' to show)", concatStringsSep(".", attrPathS)));
|
|
|
|
}
|
2023-01-28 00:59:48 +02:00
|
|
|
} else if (!showAllSystems && std::string(attrPathS[1]) != localSystem) {
|
|
|
|
if (!json)
|
|
|
|
logger->cout(fmt("%s " ANSI_WARNING "omitted" ANSI_NORMAL " (use '--all-systems' to show)", headerPrefix));
|
|
|
|
else {
|
|
|
|
logger->warn(fmt("%s omitted (use '--all-systems' to show)", concatStringsSep(".", attrPathS)));
|
|
|
|
}
|
2022-05-13 18:12:11 +03:00
|
|
|
} else {
|
2020-04-16 16:36:15 +03:00
|
|
|
if (visitor.isDerivation())
|
|
|
|
showDerivation();
|
|
|
|
else if (attrPath.size() <= 2)
|
|
|
|
// FIXME: handle recurseIntoAttrs
|
|
|
|
recurse();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-17 02:21:24 +03:00
|
|
|
else if (
|
2022-04-22 22:45:39 +03:00
|
|
|
(attrPath.size() == 2 && attrPathS[0] == "defaultApp") ||
|
|
|
|
(attrPath.size() == 3 && attrPathS[0] == "apps"))
|
2020-04-17 02:21:24 +03:00
|
|
|
{
|
|
|
|
auto aType = visitor.maybeGetAttr("type");
|
|
|
|
if (!aType || aType->getString() != "app")
|
|
|
|
throw EvalError("not an app definition");
|
2021-09-14 18:18:29 +03:00
|
|
|
if (json) {
|
|
|
|
j.emplace("type", "app");
|
2021-08-18 06:04:48 +03:00
|
|
|
} else {
|
|
|
|
logger->cout("%s: app", headerPrefix);
|
|
|
|
}
|
2020-04-17 02:21:24 +03:00
|
|
|
}
|
|
|
|
|
2020-06-04 21:02:50 +03:00
|
|
|
else if (
|
2022-04-22 22:45:39 +03:00
|
|
|
(attrPath.size() == 1 && attrPathS[0] == "defaultTemplate") ||
|
|
|
|
(attrPath.size() == 2 && attrPathS[0] == "templates"))
|
2020-06-04 21:02:50 +03:00
|
|
|
{
|
|
|
|
auto description = visitor.getAttr("description")->getString();
|
2021-09-14 18:18:29 +03:00
|
|
|
if (json) {
|
|
|
|
j.emplace("type", "template");
|
|
|
|
j.emplace("description", description);
|
2021-08-18 06:04:48 +03:00
|
|
|
} else {
|
|
|
|
logger->cout("%s: template: " ANSI_BOLD "%s" ANSI_NORMAL, headerPrefix, description);
|
|
|
|
}
|
2020-06-04 21:02:50 +03:00
|
|
|
}
|
|
|
|
|
2020-04-16 16:36:15 +03:00
|
|
|
else {
|
2021-09-14 18:18:29 +03:00
|
|
|
auto [type, description] =
|
2022-04-22 22:45:39 +03:00
|
|
|
(attrPath.size() == 1 && attrPathS[0] == "overlay")
|
|
|
|
|| (attrPath.size() == 2 && attrPathS[0] == "overlays") ? std::make_pair("nixpkgs-overlay", "Nixpkgs overlay") :
|
|
|
|
attrPath.size() == 2 && attrPathS[0] == "nixosConfigurations" ? std::make_pair("nixos-configuration", "NixOS configuration") :
|
|
|
|
(attrPath.size() == 1 && attrPathS[0] == "nixosModule")
|
|
|
|
|| (attrPath.size() == 2 && attrPathS[0] == "nixosModules") ? std::make_pair("nixos-module", "NixOS module") :
|
2021-09-14 18:18:29 +03:00
|
|
|
std::make_pair("unknown", "unknown");
|
|
|
|
if (json) {
|
|
|
|
j.emplace("type", type);
|
2021-08-18 06:04:48 +03:00
|
|
|
} else {
|
2021-09-14 17:58:35 +03:00
|
|
|
logger->cout("%s: " ANSI_WARNING "%s" ANSI_NORMAL, headerPrefix, description);
|
2021-08-18 06:04:48 +03:00
|
|
|
}
|
2020-04-16 16:36:15 +03:00
|
|
|
}
|
|
|
|
} catch (EvalError & e) {
|
2022-04-22 22:45:39 +03:00
|
|
|
if (!(attrPath.size() > 0 && attrPathS[0] == "legacyPackages"))
|
2020-04-20 00:07:06 +03:00
|
|
|
throw;
|
2020-04-16 16:36:15 +03:00
|
|
|
}
|
2021-09-14 18:18:29 +03:00
|
|
|
|
|
|
|
return j;
|
2020-04-16 16:36:15 +03:00
|
|
|
};
|
|
|
|
|
2020-08-07 15:13:24 +03:00
|
|
|
auto cache = openEvalCache(*state, flake);
|
2020-04-16 16:36:15 +03:00
|
|
|
|
2021-09-14 18:18:29 +03:00
|
|
|
auto j = visit(*cache->getRoot(), {}, fmt(ANSI_BOLD "%s" ANSI_NORMAL, flake->flake.lockedRef), "");
|
|
|
|
if (json)
|
2021-08-18 06:04:48 +03:00
|
|
|
logger->cout("%s", j.dump());
|
2020-04-16 16:36:15 +03:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2021-01-11 13:36:39 +02:00
|
|
|
struct CmdFlakePrefetch : FlakeCommand, MixJSON
|
|
|
|
{
|
|
|
|
CmdFlakePrefetch()
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
std::string description() override
|
|
|
|
{
|
|
|
|
return "download the source tree denoted by a flake reference into the Nix store";
|
|
|
|
}
|
|
|
|
|
|
|
|
std::string doc() override
|
|
|
|
{
|
|
|
|
return
|
|
|
|
#include "flake-prefetch.md"
|
|
|
|
;
|
|
|
|
}
|
|
|
|
|
|
|
|
void run(ref<Store> store) override
|
|
|
|
{
|
|
|
|
auto originalRef = getFlakeRef();
|
|
|
|
auto resolvedRef = originalRef.resolve(store);
|
2023-10-20 20:50:21 +03:00
|
|
|
auto [storePath, lockedRef] = resolvedRef.fetchTree(store);
|
|
|
|
auto hash = store->queryPathInfo(storePath)->narHash;
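// Report the locked flake reference, the resulting store path and the NAR
// hash (printed in SRI form), either as JSON or as a human-readable notice.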
|
2021-01-11 13:36:39 +02:00
|
|
|
|
|
|
|
if (json) {
|
|
|
|
auto res = nlohmann::json::object();
|
2023-10-20 20:50:21 +03:00
|
|
|
res["storePath"] = store->printStorePath(storePath);
|
2023-10-13 04:48:15 +03:00
|
|
|
res["hash"] = hash.to_string(HashFormat::SRI, true);
|
2023-09-18 11:57:18 +03:00
|
|
|
res["original"] = fetchers::attrsToJSON(resolvedRef.toAttrs());
|
|
|
|
res["locked"] = fetchers::attrsToJSON(lockedRef.toAttrs());
|
2021-01-11 13:36:39 +02:00
|
|
|
logger->cout(res.dump());
|
|
|
|
} else {
|
|
|
|
notice("Downloaded '%s' to '%s' (hash '%s').",
|
|
|
|
lockedRef.to_string(),
|
2023-10-20 20:50:21 +03:00
|
|
|
store->printStorePath(storePath),
|
2023-10-13 04:48:15 +03:00
|
|
|
hash.to_string(HashFormat::SRI, true));
|
2021-01-11 13:36:39 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2020-08-17 18:44:52 +03:00
|
|
|
struct CmdFlake : NixMultiCommand
|
2018-11-29 20:18:36 +02:00
|
|
|
{
|
|
|
|
CmdFlake()
|
2019-06-18 17:01:35 +03:00
|
|
|
: MultiCommand({
|
|
|
|
{"update", []() { return make_ref<CmdFlakeUpdate>(); }},
|
2021-02-26 15:55:54 +02:00
|
|
|
{"lock", []() { return make_ref<CmdFlakeLock>(); }},
|
2021-03-16 18:19:04 +02:00
|
|
|
{"metadata", []() { return make_ref<CmdFlakeMetadata>(); }},
|
2021-03-19 18:21:37 +02:00
|
|
|
{"info", []() { return make_ref<CmdFlakeInfo>(); }},
|
2019-06-18 17:01:35 +03:00
|
|
|
{"check", []() { return make_ref<CmdFlakeCheck>(); }},
|
|
|
|
{"init", []() { return make_ref<CmdFlakeInit>(); }},
|
2020-06-04 21:02:50 +03:00
|
|
|
{"new", []() { return make_ref<CmdFlakeNew>(); }},
|
2019-06-18 17:01:35 +03:00
|
|
|
{"clone", []() { return make_ref<CmdFlakeClone>(); }},
|
2020-01-30 01:58:55 +02:00
|
|
|
{"archive", []() { return make_ref<CmdFlakeArchive>(); }},
|
2020-04-16 16:36:15 +03:00
|
|
|
{"show", []() { return make_ref<CmdFlakeShow>(); }},
|
2021-01-11 13:36:39 +02:00
|
|
|
{"prefetch", []() { return make_ref<CmdFlakePrefetch>(); }},
|
2019-06-18 17:01:35 +03:00
|
|
|
})
|
2018-11-29 20:18:36 +02:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
std::string description() override
|
|
|
|
{
|
|
|
|
return "manage Nix flakes";
|
|
|
|
}
|
|
|
|
|
2020-12-23 14:19:53 +02:00
|
|
|
std::string doc() override
|
|
|
|
{
|
|
|
|
return
|
|
|
|
#include "flake.md"
|
|
|
|
;
|
|
|
|
}
|
|
|
|
|
2018-11-29 20:18:36 +02:00
|
|
|
void run() override
|
|
|
|
{
|
|
|
|
if (!command)
|
|
|
|
throw UsageError("'nix flake' requires a sub-command.");
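// Every 'nix flake' subcommand requires the experimental 'flakes' feature.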
|
2023-03-17 16:33:48 +02:00
|
|
|
experimentalFeatureSettings.require(Xp::Flakes);
|
2020-05-05 19:59:33 +03:00
|
|
|
command->second->run();
|
2018-11-29 20:18:36 +02:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2020-10-06 14:36:55 +03:00
|
|
|
static auto rCmdFlake = registerCommand<CmdFlake>("flake");
|