diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 000000000..d58577551
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,15 @@
+# Pull requests concerning the listed files will automatically invite the respective maintainers as reviewers.
+# This file is not used for denoting any kind of ownership, but is merely a tool for handling notifications.
+#
+# Merge permissions are required for maintaining an entry in this file.
+# For documentation on this mechanism, see https://help.github.com/articles/about-codeowners/
+
+# Default reviewers if nothing else matches
+* @edolstra @thufschmitt
+
+# This file
+.github/CODEOWNERS @edolstra
+
+# Public documentation
+/doc @fricklerhandwerk
+*.md @fricklerhandwerk
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
index e6d346bc1..984f9a9ea 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -30,3 +30,7 @@ A clear and concise description of what you expected to happen.
**Additional context**
Add any other context about the problem here.
+
+**Priorities**
+
+Add :+1: to [issues you find important](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc).
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
index 4fe86d5ec..42c658b52 100644
--- a/.github/ISSUE_TEMPLATE/feature_request.md
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -18,3 +18,7 @@ A clear and concise description of any alternative solutions or features you've
**Additional context**
Add any other context or screenshots about the feature request here.
+
+**Priorities**
+
+Add :+1: to [issues you find important](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc).
diff --git a/.github/ISSUE_TEMPLATE/installer.md b/.github/ISSUE_TEMPLATE/installer.md
new file mode 100644
index 000000000..3768a49c9
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/installer.md
@@ -0,0 +1,36 @@
+---
+name: Installer issue
+about: Report problems with installation
+title: ''
+labels: installer
+assignees: ''
+
+---
+
+## Platform
+
+
+
+- [ ] Linux:
+- [ ] macOS
+- [ ] WSL
+
+## Additional information
+
+
+
+## Output
+
+<details><summary>Output</summary>
+
+```log
+
+
+
+```
+
+</details>
+
+
+
+## Priorities
+
+Add :+1: to [issues you find important](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc).
diff --git a/.github/ISSUE_TEMPLATE/missing_documentation.md b/.github/ISSUE_TEMPLATE/missing_documentation.md
index fbabd868e..942d7a971 100644
--- a/.github/ISSUE_TEMPLATE/missing_documentation.md
+++ b/.github/ISSUE_TEMPLATE/missing_documentation.md
@@ -26,3 +26,6 @@ assignees: ''
+## Priorities
+
+Add :+1: to [issues you find important](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc).
diff --git a/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md b/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md
index 537aa0909..5311be01f 100644
--- a/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md
+++ b/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md
@@ -5,3 +5,7 @@ Please include relevant [release notes](https://github.com/NixOS/nix/blob/master
**Testing**
If this issue is a regression or something that should block release, please consider including a test either in the [testsuite](https://github.com/NixOS/nix/tree/master/tests) or as a [hydraJob]( https://github.com/NixOS/nix/blob/master/flake.nix#L396) so that it can be part of the [automatic checks](https://hydra.nixos.org/jobset/nix/master).
+
+**Priorities**
+
+Add :+1: to [pull requests you find important](https://github.com/NixOS/nix/pulls?q=is%3Aopen+sort%3Areactions-%2B1-desc).
diff --git a/.github/assign-by-files.yml b/.github/assign-by-files.yml
deleted file mode 100644
index f13b71776..000000000
--- a/.github/assign-by-files.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-# This files is used by https://github.com/marketplace/actions/auto-assign-reviewer-by-files
-# to assign maintainers
-"doc/**/*":
- - fricklerhandwerk
diff --git a/.github/workflows/assign-reviewer.yml b/.github/workflows/assign-reviewer.yml
deleted file mode 100644
index 4371cbff4..000000000
--- a/.github/workflows/assign-reviewer.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-name: "Auto Assign"
-on:
- - pull_request
-
-jobs:
- assign_reviewer:
- runs-on: ubuntu-latest
- steps:
- - uses: shufo/auto-assign-reviewer-by-files@v1.1.4
- with:
- config: ".github/assign-by-files.yml"
- token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.version b/.version
index 3ca2c9b2c..fb2c0766b 100644
--- a/.version
+++ b/.version
@@ -1 +1 @@
-2.12.0
\ No newline at end of file
+2.13.0
diff --git a/configure.ac b/configure.ac
index 64fa12fc7..c0e989d85 100644
--- a/configure.ac
+++ b/configure.ac
@@ -177,7 +177,7 @@ fi
PKG_CHECK_MODULES([OPENSSL], [libcrypto], [CXXFLAGS="$OPENSSL_CFLAGS $CXXFLAGS"])
-# Checks for libarchive
+# Look for libarchive.
PKG_CHECK_MODULES([LIBARCHIVE], [libarchive >= 3.1.2], [CXXFLAGS="$LIBARCHIVE_CFLAGS $CXXFLAGS"])
# Workaround until https://github.com/libarchive/libarchive/issues/1446 is fixed
if test "$shared" != yes; then
diff --git a/doc/manual/generate-options.nix b/doc/manual/generate-options.nix
index 680b709c8..a4ec36477 100644
--- a/doc/manual/generate-options.nix
+++ b/doc/manual/generate-options.nix
@@ -1,29 +1,41 @@
-with builtins;
-with import ./utils.nix;
+let
+ inherit (builtins) attrNames concatStringsSep isAttrs isBool;
+ inherit (import ./utils.nix) concatStrings squash splitLines;
+in
-options:
+optionsInfo:
+let
+ showOption = name:
+ let
+ inherit (optionsInfo.${name}) description documentDefault defaultValue aliases;
+ result = squash ''
+ - [`${name}`](#conf-${name})
-concatStrings (map
- (name:
- let option = options.${name}; in
- " - [`${name}`](#conf-${name})"
- + "
\n\n"
- + concatStrings (map (s: " ${s}\n") (splitLines option.description)) + "\n\n"
- + (if option.documentDefault
- then " **Default:** " + (
- if option.defaultValue == "" || option.defaultValue == []
- then "*empty*"
- else if isBool option.defaultValue
- then (if option.defaultValue then "`true`" else "`false`")
- else
- # n.b. a StringMap value type is specified as a string, but
- # this shows the value type. The empty stringmap is "null" in
- # JSON, but that converts to "{ }" here.
- (if isAttrs option.defaultValue then "`\"\"`"
- else "`" + toString option.defaultValue + "`")) + "\n\n"
- else " **Default:** *machine-specific*\n")
- + (if option.aliases != []
- then " **Deprecated alias:** " + (concatStringsSep ", " (map (s: "`${s}`") option.aliases)) + "\n\n"
- else "")
- )
- (attrNames options))
+ ${indent " " body}
+ '';
+ # separate body to cleanly handle indentation
+ body = ''
+ ${description}
+
+ **Default:** ${showDefault documentDefault defaultValue}
+
+ ${showAliases aliases}
+ '';
+ showDefault = documentDefault: defaultValue:
+ if documentDefault then
+ # a StringMap value type is specified as a string, but
+ # this shows the value type. The empty stringmap is `null` in
+ # JSON, but that converts to `{ }` here.
+ if defaultValue == "" || defaultValue == [] || isAttrs defaultValue
+ then "*empty*"
+ else if isBool defaultValue then
+ if defaultValue then "`true`" else "`false`"
+ else "`${toString defaultValue}`"
+ else "*machine-specific*";
+ showAliases = aliases:
+ if aliases == [] then "" else
+ "**Deprecated alias:** ${(concatStringsSep ", " (map (s: "`${s}`") aliases))}";
+ indent = prefix: s:
+ concatStringsSep "\n" (map (x: if x == "" then x else "${prefix}${x}") (splitLines s));
+ in result;
+in concatStrings (map showOption (attrNames optionsInfo))
diff --git a/doc/manual/local.mk b/doc/manual/local.mk
index 486dbd7a2..c0f69e00f 100644
--- a/doc/manual/local.mk
+++ b/doc/manual/local.mk
@@ -29,19 +29,19 @@ nix-eval = $(dummy-env) $(bindir)/nix eval --experimental-features nix-command -
$(d)/%.1: $(d)/src/command-ref/%.md
@printf "Title: %s\n\n" "$$(basename $@ .1)" > $^.tmp
@cat $^ >> $^.tmp
- $(trace-gen) lowdown -sT man -M section=1 $^.tmp -o $@
+ $(trace-gen) lowdown -sT man --nroff-nolinks -M section=1 $^.tmp -o $@
@rm $^.tmp
$(d)/%.8: $(d)/src/command-ref/%.md
@printf "Title: %s\n\n" "$$(basename $@ .8)" > $^.tmp
@cat $^ >> $^.tmp
- $(trace-gen) lowdown -sT man -M section=8 $^.tmp -o $@
+ $(trace-gen) lowdown -sT man --nroff-nolinks -M section=8 $^.tmp -o $@
@rm $^.tmp
$(d)/nix.conf.5: $(d)/src/command-ref/conf-file.md
@printf "Title: %s\n\n" "$$(basename $@ .5)" > $^.tmp
@cat $^ >> $^.tmp
- $(trace-gen) lowdown -sT man -M section=5 $^.tmp -o $@
+ $(trace-gen) lowdown -sT man --nroff-nolinks -M section=5 $^.tmp -o $@
@rm $^.tmp
$(d)/src/SUMMARY.md: $(d)/src/SUMMARY.md.in $(d)/src/command-ref/new-cli
diff --git a/doc/manual/redirects.js b/doc/manual/redirects.js
index 2e77edd0f..69f75d3a0 100644
--- a/doc/manual/redirects.js
+++ b/doc/manual/redirects.js
@@ -35,7 +35,6 @@ const redirects = {
"conf-build-max-jobs": "command-ref/conf-file.html#conf-build-max-jobs",
"conf-build-max-log-size": "command-ref/conf-file.html#conf-build-max-log-size",
"conf-build-max-silent-time": "command-ref/conf-file.html#conf-build-max-silent-time",
- "conf-build-repeat": "command-ref/conf-file.html#conf-build-repeat",
"conf-build-timeout": "command-ref/conf-file.html#conf-build-timeout",
"conf-build-use-chroot": "command-ref/conf-file.html#conf-build-use-chroot",
"conf-build-use-sandbox": "command-ref/conf-file.html#conf-build-use-sandbox",
@@ -47,7 +46,6 @@ const redirects = {
"conf-connect-timeout": "command-ref/conf-file.html#conf-connect-timeout",
"conf-cores": "command-ref/conf-file.html#conf-cores",
"conf-diff-hook": "command-ref/conf-file.html#conf-diff-hook",
- "conf-enforce-determinism": "command-ref/conf-file.html#conf-enforce-determinism",
"conf-env-keep-derivations": "command-ref/conf-file.html#conf-env-keep-derivations",
"conf-extra-binary-caches": "command-ref/conf-file.html#conf-extra-binary-caches",
"conf-extra-platforms": "command-ref/conf-file.html#conf-extra-platforms",
@@ -74,7 +72,6 @@ const redirects = {
"conf-plugin-files": "command-ref/conf-file.html#conf-plugin-files",
"conf-post-build-hook": "command-ref/conf-file.html#conf-post-build-hook",
"conf-pre-build-hook": "command-ref/conf-file.html#conf-pre-build-hook",
- "conf-repeat": "command-ref/conf-file.html#conf-repeat",
"conf-require-sigs": "command-ref/conf-file.html#conf-require-sigs",
"conf-restrict-eval": "command-ref/conf-file.html#conf-restrict-eval",
"conf-run-diff-hook": "command-ref/conf-file.html#conf-run-diff-hook",
diff --git a/doc/manual/src/SUMMARY.md.in b/doc/manual/src/SUMMARY.md.in
index 908e7e3d9..6a514fa2c 100644
--- a/doc/manual/src/SUMMARY.md.in
+++ b/doc/manual/src/SUMMARY.md.in
@@ -65,6 +65,7 @@
- [CLI guideline](contributing/cli-guideline.md)
- [Release Notes](release-notes/release-notes.md)
- [Release X.Y (202?-??-??)](release-notes/rl-next.md)
+ - [Release 2.12 (2022-12-06)](release-notes/rl-2.12.md)
- [Release 2.11 (2022-08-25)](release-notes/rl-2.11.md)
- [Release 2.10 (2022-07-11)](release-notes/rl-2.10.md)
- [Release 2.9 (2022-05-30)](release-notes/rl-2.9.md)
diff --git a/doc/manual/src/advanced-topics/diff-hook.md b/doc/manual/src/advanced-topics/diff-hook.md
index 161e64b2a..4a742c160 100644
--- a/doc/manual/src/advanced-topics/diff-hook.md
+++ b/doc/manual/src/advanced-topics/diff-hook.md
@@ -121,37 +121,3 @@ error:
are not valid, so checking is not possible
Run the build without `--check`, and then try with `--check` again.
-
-# Automatic and Optionally Enforced Determinism Verification
-
-Automatically verify every build at build time by executing the build
-multiple times.
-
-Setting `repeat` and `enforce-determinism` in your `nix.conf` permits
-the automated verification of every build Nix performs.
-
-The following configuration will run each build three times, and will
-require the build to be deterministic:
-
- enforce-determinism = true
- repeat = 2
-
-Setting `enforce-determinism` to false as in the following
-configuration will run the build multiple times, execute the build
-hook, but will allow the build to succeed even if it does not build
-reproducibly:
-
- enforce-determinism = false
- repeat = 1
-
-An example output of this configuration:
-
-```console
-$ nix-build ./test.nix -A unstable
-this derivation will be built:
- /nix/store/ch6llwpr2h8c3jmnf3f2ghkhx59aa97f-unstable.drv
-building '/nix/store/ch6llwpr2h8c3jmnf3f2ghkhx59aa97f-unstable.drv' (round 1/2)...
-building '/nix/store/ch6llwpr2h8c3jmnf3f2ghkhx59aa97f-unstable.drv' (round 2/2)...
-output '/nix/store/6xg356v9gl03hpbbg8gws77n19qanh02-unstable' of '/nix/store/ch6llwpr2h8c3jmnf3f2ghkhx59aa97f-unstable.drv' differs from '/nix/store/6xg356v9gl03hpbbg8gws77n19qanh02-unstable.check' from previous round
-/nix/store/6xg356v9gl03hpbbg8gws77n19qanh02-unstable
-```
diff --git a/doc/manual/src/advanced-topics/post-build-hook.md b/doc/manual/src/advanced-topics/post-build-hook.md
index fcb52d878..1479cc3a4 100644
--- a/doc/manual/src/advanced-topics/post-build-hook.md
+++ b/doc/manual/src/advanced-topics/post-build-hook.md
@@ -33,12 +33,17 @@ distribute the public key for verifying the authenticity of the paths.
example-nix-cache-1:1/cKDz3QCCOmwcztD2eV6Coggp6rqc9DGjWv7C0G+rM=
```
-Then, add the public key and the cache URL to your `nix.conf`'s
-`trusted-public-keys` and `substituters` options:
+Then update [`nix.conf`](../command-ref/conf-file.md) on any machine that will access the cache.
+Add the cache URL to [`substituters`](../command-ref/conf-file.md#conf-substituters) and the public key to [`trusted-public-keys`](../command-ref/conf-file.md#conf-trusted-public-keys):
substituters = https://cache.nixos.org/ s3://example-nix-cache
trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= example-nix-cache-1:1/cKDz3QCCOmwcztD2eV6Coggp6rqc9DGjWv7C0G+rM=
+Machines that build for the cache must sign derivations using the private key.
+On those machines, add the path to the key file to the [`secret-key-files`](../command-ref/conf-file.md#conf-secret-key-files) field in their [`nix.conf`](../command-ref/conf-file.md):
+
+ secret-key-files = /etc/nix/key.private
+
We will restart the Nix daemon in a later step.
# Implementing the build hook
@@ -52,14 +57,12 @@ set -eu
set -f # disable globbing
export IFS=' '
-echo "Signing paths" $OUT_PATHS
-nix store sign --key-file /etc/nix/key.private $OUT_PATHS
echo "Uploading paths" $OUT_PATHS
-exec nix copy --to 's3://example-nix-cache' $OUT_PATHS
+exec nix copy --to "s3://example-nix-cache" $OUT_PATHS
```
> **Note**
->
+>
> The `$OUT_PATHS` variable is a space-separated list of Nix store
> paths. In this case, we expect and want the shell to perform word
> splitting to make each output path its own argument to `nix
diff --git a/doc/manual/src/command-ref/env-common.md b/doc/manual/src/command-ref/env-common.md
index 3f3eb6915..5845bdc43 100644
--- a/doc/manual/src/command-ref/env-common.md
+++ b/doc/manual/src/command-ref/env-common.md
@@ -7,42 +7,11 @@ Most Nix commands interpret the following environment variables:
`nix-shell`. It can have the values `pure` or `impure`.
- [`NIX_PATH`]{#env-NIX_PATH}\
- A colon-separated list of directories used to look up Nix
- expressions enclosed in angle brackets (i.e., `<path>`). For
- instance, the value
-
- /home/eelco/Dev:/etc/nixos
-
- will cause Nix to look for paths relative to `/home/eelco/Dev` and
- `/etc/nixos`, in this order. It is also possible to match paths
- against a prefix. For example, the value
-
- nixpkgs=/home/eelco/Dev/nixpkgs-branch:/etc/nixos
-
- will cause Nix to search for `<nixpkgs/path>` in
- `/home/eelco/Dev/nixpkgs-branch/path` and `/etc/nixos/nixpkgs/path`.
-
- If a path in the Nix search path starts with `http://` or
- `https://`, it is interpreted as the URL of a tarball that will be
- downloaded and unpacked to a temporary location. The tarball must
- consist of a single top-level directory. For example, setting
- `NIX_PATH` to
-
- nixpkgs=https://github.com/NixOS/nixpkgs/archive/master.tar.gz
-
- tells Nix to download and use the current contents of the
- `master` branch in the `nixpkgs` repository.
-
- The URLs of the tarballs from the official nixos.org channels (see
- [the manual for `nix-channel`](nix-channel.md)) can be abbreviated
- as `channel:<channel-name>`. For instance, the following two
- values of `NIX_PATH` are equivalent:
-
- nixpkgs=channel:nixos-21.05
- nixpkgs=https://nixos.org/channels/nixos-21.05/nixexprs.tar.xz
-
- The Nix search path can also be extended using the `-I` option to
- many Nix commands, which takes precedence over `NIX_PATH`.
+ A colon-separated list of directories used to look up the location of Nix
+ expressions using [paths](../language/values.md#type-path)
+ enclosed in angle brackets (i.e., `<path>`),
+ e.g. `/home/eelco/Dev:/etc/nixos`. It can be extended using the
+ [`-I` option](./opt-common#opt-I).
- [`NIX_IGNORE_SYMLINK_STORE`]{#env-NIX_IGNORE_SYMLINK_STORE}\
Normally, the Nix store directory (typically `/nix/store`) is not
diff --git a/doc/manual/src/command-ref/nix-build.md b/doc/manual/src/command-ref/nix-build.md
index 49c6f3f55..3a47feaae 100644
--- a/doc/manual/src/command-ref/nix-build.md
+++ b/doc/manual/src/command-ref/nix-build.md
@@ -53,16 +53,18 @@ All options not listed here are passed to `nix-store
--realise`, except for `--arg` and `--attr` / `-A` which are passed to
`nix-instantiate`.
- - [`--no-out-link`]{#opt-no-out-link}\
+ - [`--no-out-link`](#opt-no-out-link)
+
Do not create a symlink to the output path. Note that as a result
the output does not become a root of the garbage collector, and so
- might be deleted by `nix-store
- --gc`.
+ might be deleted by `nix-store --gc`.
+
+ - [`--dry-run`](#opt-dry-run)
- - [`--dry-run`]{#opt-dry-run}\
Show what store paths would be built or downloaded.
- - [`--out-link`]{#opt-out-link} / `-o` *outlink*\
+ - [`--out-link`](#opt-out-link) / `-o` *outlink*
+
Change the name of the symlink to the output path created from
`result` to *outlink*.
diff --git a/doc/manual/src/command-ref/nix-store.md b/doc/manual/src/command-ref/nix-store.md
index 1251888e9..b712a7463 100644
--- a/doc/manual/src/command-ref/nix-store.md
+++ b/doc/manual/src/command-ref/nix-store.md
@@ -22,7 +22,8 @@ This section lists the options that are common to all operations. These
options are allowed for every subcommand, though they may not always
have an effect.
- - [`--add-root`]{#opt-add-root} *path*\
+ - [`--add-root`](#opt-add-root) *path*
+
Causes the result of a realisation (`--realise` and
`--force-realise`) to be registered as a root of the garbage
collector. *path* will be created as a symlink to the resulting
@@ -104,10 +105,6 @@ The following flags are available:
previous build, the new output path is left in
`/nix/store/name.check.`
- See also the `build-repeat` configuration option, which repeats a
- derivation a number of times and prevents its outputs from being
- registered as “valid” in the Nix store unless they are identical.
-
Special exit codes:
- `100`\
diff --git a/doc/manual/src/installation/installing-binary.md b/doc/manual/src/installation/installing-binary.md
index 31faeadc2..a9378681d 100644
--- a/doc/manual/src/installation/installing-binary.md
+++ b/doc/manual/src/installation/installing-binary.md
@@ -88,6 +88,29 @@ extension. The installer will also create `/etc/profile.d/nix.sh`.
### Linux
+If you are on Linux with systemd:
+
+1. Remove the Nix daemon service:
+
+ ```console
+ sudo systemctl stop nix-daemon.service
+ sudo systemctl disable nix-daemon.socket nix-daemon.service
+ sudo systemctl daemon-reload
+ ```
+
+1. Remove systemd service files:
+
+ ```console
+ sudo rm /etc/systemd/system/nix-daemon.service /etc/systemd/system/nix-daemon.socket
+ ```
+
+1. The installer script uses systemd-tmpfiles to create the socket directory.
+ You may also want to remove the configuration for that:
+
+ ```console
+ sudo rm /etc/tmpfiles.d/nix-daemon.conf
+ ```
+
Remove files created by Nix:
```console
@@ -103,16 +126,6 @@ done
sudo groupdel 30000
```
-If you are on Linux with systemd, remove the Nix daemon service:
-
-```console
-sudo systemctl stop nix-daemon.socket
-sudo systemctl stop nix-daemon.service
-sudo systemctl disable nix-daemon.socket
-sudo systemctl disable nix-daemon.service
-sudo systemctl daemon-reload
-```
-
There may also be references to Nix in
- `/etc/profile`
diff --git a/doc/manual/src/release-notes/rl-2.12.md b/doc/manual/src/release-notes/rl-2.12.md
new file mode 100644
index 000000000..e2045d7bf
--- /dev/null
+++ b/doc/manual/src/release-notes/rl-2.12.md
@@ -0,0 +1,43 @@
+# Release 2.12 (2022-12-06)
+
+* On Linux, Nix can now run builds in a user namespace where they run
+ as root (UID 0) and have 65,536 UIDs available.
+
+ This is primarily useful for running containers such as `systemd-nspawn`
+ inside a Nix build. For an example, see [`tests/systemd-nspawn/nix`][nspawn].
+
+ [nspawn]: https://github.com/NixOS/nix/blob/67bcb99700a0da1395fa063d7c6586740b304598/tests/systemd-nspawn.nix
+
+ A build can enable this by setting the derivation attribute:
+
+ ```
+ requiredSystemFeatures = [ "uid-range" ];
+ ```
+
+ The `uid-range` [system feature] requires the [`auto-allocate-uids`]
+ setting to be enabled.
+
+ [system feature]: ../command-ref/conf-file.md#conf-system-features
+
+* Nix can now automatically pick UIDs for builds, removing the need to
+ create `nixbld*` user accounts. See [`auto-allocate-uids`].
+
+ [`auto-allocate-uids`]: ../command-ref/conf-file.md#conf-auto-allocate-uids
+
+* On Linux, Nix has experimental support for running builds inside a
+ cgroup. See
+ [`use-cgroups`](../command-ref/conf-file.md#conf-use-cgroups).
+
+* `<nix/fetchurl.nix>` now accepts an additional argument `impure` which
+ defaults to `false`. If it is set to `true`, the `hash` and `sha256`
+ arguments will be ignored and the resulting derivation will have
+ `__impure` set to `true`, making it an impure derivation.
+
+* If `builtins.readFile` is called on a file with context, then only
+ the parts of the context that appear in the content of the file are
+ retained. This avoids a lot of spurious errors where strings end up
+ having a context just because they are read from a store path
+ ([#7260](https://github.com/NixOS/nix/pull/7260)).
+
+* `nix build --json` now prints some statistics about top-level
+ derivations, such as CPU statistics when cgroups are enabled.
diff --git a/doc/manual/src/release-notes/rl-next.md b/doc/manual/src/release-notes/rl-next.md
index 8b314b5f6..6c169bd09 100644
--- a/doc/manual/src/release-notes/rl-next.md
+++ b/doc/manual/src/release-notes/rl-next.md
@@ -1,54 +1,27 @@
# Release X.Y (202?-??-??)
-* `<nix/fetchurl.nix>` now accepts an additional argument `impure` which
- defaults to `false`. If it is set to `true`, the `hash` and `sha256`
- arguments will be ignored and the resulting derivation will have
- `__impure` set to `true`, making it an impure derivation.
+* The `repeat` and `enforce-determinism` options have been removed
+ since they had been broken under many circumstances for a long time.
-* If `builtins.readFile` is called on a file with context, then only the parts
- of that context that appear in the content of the file are retained.
- This avoids a lot of spurious errors where some benign strings end-up having
- a context just because they are read from a store path
- ([#7260](https://github.com/NixOS/nix/pull/7260)).
+* You can now use [flake references] in the [old command line interface], e.g.
-* Nix can now automatically pick UIDs for builds, removing the need to
- create `nixbld*` user accounts. These UIDs are allocated starting at
- 872415232 (0x34000000) on Linux and 56930 on macOS.
-
- This is an experimental feature. To enable it, add the following to
- `nix.conf`:
+ [flake references]: ../command-ref/new-cli/nix3-flake.md#flake-references
+ [old command line interface]: ../command-ref/main-commands.md
```
- extra-experimental-features = auto-allocate-uids
- auto-allocate-uids = true
+ # nix-build flake:nixpkgs -A hello
+ # nix-build -I nixpkgs=flake:github:NixOS/nixpkgs/nixos-22.05 \
+ '<nixpkgs>' -A hello
+ # NIX_PATH=nixpkgs=flake:nixpkgs nix-build '<nixpkgs>' -A hello
```
-* On Linux, Nix can now run builds in a user namespace where the build
- runs as root (UID 0) and has 65,536 UIDs available. This is
- primarily useful for running containers such as `systemd-nspawn`
- inside a Nix build. For an example, see
- https://github.com/NixOS/nix/blob/67bcb99700a0da1395fa063d7c6586740b304598/tests/systemd-nspawn.nix.
-
- A build can enable this by requiring the `uid-range` system feature,
- i.e. by setting the derivation attribute
-
+* Allow explicitly selecting outputs in a store derivation installable, just like we can do with other sorts of installables.
+ For example,
+ ```shell-session
+ $ nix-build /nix/store/gzaflydcr6sb3567hap9q6srzx8ggdgg-glibc-2.33-78.drv^dev
```
- requiredSystemFeatures = [ "uid-range" ];
+ now works just as
+ ```shell-session
+ $ nix-build glibc^dev
```
-
- The `uid-range` system feature requires the `auto-allocate-uids`
- setting to be enabled (see above).
-
-* On Linux, Nix has experimental support for running builds inside a
- cgroup. It can be enabled by adding
-
- ```
- extra-experimental-features = cgroups
- use-cgroups = true
- ```
-
- to `nix.conf`. Cgroups are required for derivations that require the
- `uid-range` system feature.
-
-* `nix build --json` now prints some statistics about top-level
- derivations, such as CPU statistics when cgroups are enabled.
+ does already.
diff --git a/flake.nix b/flake.nix
index 2ca8cfadb..86f182d55 100644
--- a/flake.nix
+++ b/flake.nix
@@ -9,14 +9,14 @@
let
- version = builtins.readFile ./.version + versionSuffix;
+ officialRelease = false;
+
+ version = nixpkgs.lib.fileContents ./.version + versionSuffix;
versionSuffix =
if officialRelease
then ""
else "pre${builtins.substring 0 8 (self.lastModifiedDate or self.lastModified or "19700101")}_${self.shortRev or "dirty"}";
- officialRelease = false;
-
linux64BitSystems = [ "x86_64-linux" "aarch64-linux" ];
linuxSystems = linux64BitSystems ++ [ "i686-linux" ];
systems = linuxSystems ++ [ "x86_64-darwin" "aarch64-darwin" ];
@@ -420,6 +420,8 @@
buildCross = nixpkgs.lib.genAttrs crossSystems (crossSystem:
nixpkgs.lib.genAttrs ["x86_64-linux"] (system: self.packages.${system}."nix-super-${crossSystem}"));
+ buildNoGc = nixpkgs.lib.genAttrs systems (system: self.packages.${system}.nix.overrideAttrs (a: { configureFlags = (a.configureFlags or []) ++ ["--enable-gc=no"];}));
+
# Perl bindings for various platforms.
perlBindings = nixpkgs.lib.genAttrs systems (system: self.packages.${system}.nix.perl-bindings);
diff --git a/maintainers/README.md b/maintainers/README.md
new file mode 100644
index 000000000..60768db0a
--- /dev/null
+++ b/maintainers/README.md
@@ -0,0 +1,79 @@
+# Nix maintainers team
+
+## Motivation
+
+The goal of the team is to help other people to contribute to Nix.
+
+## Members
+
+- Eelco Dolstra (@edolstra) – Team lead
+- Théophane Hufschmitt (@thufschmitt)
+- Valentin Gagarin (@fricklerhandwerk)
+- Thomas Bereknyei (@tomberek)
+- Robert Hensing (@roberth)
+
+## Meeting protocol
+
+The team meets twice a week:
+
+- Discussion meeting: [Fridays 13:00-14:00 CET](https://calendar.google.com/calendar/event?eid=MHNtOGVuNWtrZXNpZHR2bW1sM3QyN2ZjaGNfMjAyMjExMjVUMTIwMDAwWiBiOW81MmZvYnFqYWs4b3E4bGZraGczdDBxZ0Bn)
+
+ 1. Triage issues and pull requests from the _No Status_ column (30 min)
+ 2. Discuss issues and pull requests from the _To discuss_ column (30 min)
+
+- Work meeting: [Mondays 13:00-15:00 CET](https://calendar.google.com/calendar/event?eid=NTM1MG1wNGJnOGpmOTZhYms3bTB1bnY5cWxfMjAyMjExMjFUMTIwMDAwWiBiOW81MmZvYnFqYWs4b3E4bGZraGczdDBxZ0Bn)
+
+ 1. Code review on pull requests from _In review_.
+ 2. Other chores and tasks.
+
+Meeting notes are collected on a [collaborative scratchpad](https://pad.lassul.us/Cv7FpYx-Ri-4VjUykQOLAw), and published on Discourse under the [Nix category](https://discourse.nixos.org/c/dev/nix/50).
+
+## Project board protocol
+
+The team uses a [GitHub project board](https://github.com/orgs/NixOS/projects/19/views/1) for tracking its work.
+
+Issues on the board progress through the following states:
+
+- No Status
+
+ Team members can add pull requests or issues to discuss or review together.
+
+ During the discussion meeting, the team triages new items.
+ If there is disagreement on the general idea behind an issue or pull request, it is moved to _To discuss_, otherwise to _In review_.
+
+- To discuss
+
+ Pull requests and issues that are important and controversial are discussed by the team during discussion meetings.
+
+ This may be where the merit of the change itself or the implementation strategy is contested by a team member.
+
+- In review
+
+ Pull requests in this column are reviewed together during work meetings.
+ This is both for spreading implementation knowledge and for establishing common values in code reviews.
+
+ When the overall direction is agreed upon, even when further changes are required, the pull request is assigned to one team member.
+
+- Assigned for merging
+
+ One team member is assigned to each of these pull requests.
+ They will communicate with the authors, and make the final approval once all remaining issues are addressed.
+
+ If more substantive issues arise, the assignee can move the pull request back to _To discuss_ to involve the team again.
+
+The process is illustrated in the following diagram:
+
+```mermaid
+flowchart TD
+ discuss[To discuss]
+
+ review[To review]
+
+ New --> |Disagreement on idea| discuss
+ New & discuss --> |Consensus on idea| review
+
+ review --> |Consensus on implementation| Assigned
+
+ Assigned --> |Implementation issues arise| review
+ Assigned --> |Remaining issues fixed| Merged
+```
diff --git a/misc/launchd/org.nixos.nix-daemon.plist.in b/misc/launchd/org.nixos.nix-daemon.plist.in
index da1970f69..5fa489b20 100644
--- a/misc/launchd/org.nixos.nix-daemon.plist.in
+++ b/misc/launchd/org.nixos.nix-daemon.plist.in
@@ -28,7 +28,7 @@
<key>SoftResourceLimitsNumberOfFiles</key>
- <integer>4096</integer>
+ <integer>1048576</integer>
diff --git a/misc/systemd/nix-daemon.service.in b/misc/systemd/nix-daemon.service.in
index e3ac42beb..f46413630 100644
--- a/misc/systemd/nix-daemon.service.in
+++ b/misc/systemd/nix-daemon.service.in
@@ -9,7 +9,7 @@ ConditionPathIsReadWrite=@localstatedir@/nix/daemon-socket
[Service]
ExecStart=@@bindir@/nix-daemon nix-daemon --daemon
KillMode=process
-LimitNOFILE=4096
+LimitNOFILE=1048576
[Install]
WantedBy=multi-user.target
diff --git a/scripts/install-multi-user.sh b/scripts/install-multi-user.sh
index 96c0f302b..ec82e0560 100644
--- a/scripts/install-multi-user.sh
+++ b/scripts/install-multi-user.sh
@@ -97,13 +97,10 @@ is_os_darwin() {
}
contact_us() {
- echo "You can open an issue at https://github.com/nixos/nix/issues"
+ echo "You can open an issue at"
+ echo "https://github.com/NixOS/nix/issues/new?labels=installer&template=installer.md"
echo ""
- echo "Or feel free to contact the team:"
- echo " - Matrix: #nix:nixos.org"
- echo " - IRC: in #nixos on irc.libera.chat"
- echo " - twitter: @nixos_org"
- echo " - forum: https://discourse.nixos.org"
+ echo "Or get in touch with the community: https://nixos.org/community"
}
get_help() {
echo "We'd love to help if you need it."
diff --git a/src/libcmd/common-eval-args.cc b/src/libcmd/common-eval-args.cc
index 140ed3b88..0e321e5e4 100644
--- a/src/libcmd/common-eval-args.cc
+++ b/src/libcmd/common-eval-args.cc
@@ -32,7 +32,77 @@ MixEvalArgs::MixEvalArgs()
addFlag({
.longName = "include",
.shortName = 'I',
- .description = "Add *path* to the list of locations used to look up `<...>` file names.",
+ .description = R"(
+ Add *path* to the Nix search path. The Nix search path is
+ initialized from the colon-separated [`NIX_PATH`](./env-common.md#env-NIX_PATH) environment
+ variable, and is used to look up the location of Nix expressions using [paths](../language/values.md#type-path) enclosed in angle
+ brackets (i.e., `<nixpkgs>`).
+
+ For instance, passing
+
+ ```
+ -I /home/eelco/Dev
+ -I /etc/nixos
+ ```
+
+ will cause Nix to look for paths relative to `/home/eelco/Dev` and
+ `/etc/nixos`, in that order. This is equivalent to setting the
+ `NIX_PATH` environment variable to
+
+ ```
+ /home/eelco/Dev:/etc/nixos
+ ```
+
+ It is also possible to match paths against a prefix. For example,
+ passing
+
+ ```
+ -I nixpkgs=/home/eelco/Dev/nixpkgs-branch
+ -I /etc/nixos
+ ```
+
+ will cause Nix to search for `<nixpkgs/path>` in
+ `/home/eelco/Dev/nixpkgs-branch/path` and `/etc/nixos/nixpkgs/path`.
+
+ If a path in the Nix search path starts with `http://` or `https://`,
+ it is interpreted as the URL of a tarball that will be downloaded and
+ unpacked to a temporary location. The tarball must consist of a single
+ top-level directory. For example, passing
+
+ ```
+ -I nixpkgs=https://github.com/NixOS/nixpkgs/archive/master.tar.gz
+ ```
+
+ tells Nix to download and use the current contents of the `master`
+ branch in the `nixpkgs` repository.
+
+ The URLs of the tarballs from the official `nixos.org` channels
+ (see [the manual page for `nix-channel`](../nix-channel.md)) can be
+ abbreviated as `channel:<channel-name>`. For instance, the
+ following two flags are equivalent:
+
+ ```
+ -I nixpkgs=channel:nixos-21.05
+ -I nixpkgs=https://nixos.org/channels/nixos-21.05/nixexprs.tar.xz
+ ```
+
+ You can also fetch source trees using [flake URLs](./nix3-flake.md#url-like-syntax) and add them to the
+ search path. For instance,
+
+ ```
+ -I nixpkgs=flake:nixpkgs
+ ```
+
+ specifies that the prefix `nixpkgs` shall refer to the source tree
+ downloaded from the `nixpkgs` entry in the flake registry. Similarly,
+
+ ```
+ -I nixpkgs=flake:github:NixOS/nixpkgs/nixos-22.05
+ ```
+
+ makes `<nixpkgs>` refer to a particular branch of the
+ `NixOS/nixpkgs` repository on GitHub.
+ )",
.category = category,
.labels = {"path"},
.handler = {[&](std::string s) { searchPath.push_back(s); }}
@@ -89,14 +159,25 @@ Bindings * MixEvalArgs::getAutoArgs(EvalState & state)
Path lookupFileArg(EvalState & state, std::string_view s)
{
- if (isUri(s)) {
- return state.store->toRealPath(
- fetchers::downloadTarball(
- state.store, resolveUri(s), "source", false).first.storePath);
- } else if (s.size() > 2 && s.at(0) == '<' && s.at(s.size() - 1) == '>') {
+ if (EvalSettings::isPseudoUrl(s)) {
+ auto storePath = fetchers::downloadTarball(
+ state.store, EvalSettings::resolvePseudoUrl(s), "source", false).first.storePath;
+ return state.store->toRealPath(storePath);
+ }
+
+ else if (hasPrefix(s, "flake:")) {
+ settings.requireExperimentalFeature(Xp::Flakes);
+ auto flakeRef = parseFlakeRef(std::string(s.substr(6)), {}, true, false);
+ auto storePath = flakeRef.resolve(state.store).fetchTree(state.store).first.storePath;
+ return state.store->toRealPath(storePath);
+ }
+
+ else if (s.size() > 2 && s.at(0) == '<' && s.at(s.size() - 1) == '>') {
Path p(s.substr(1, s.size() - 2));
return state.findFile(p);
- } else
+ }
+
+ else
return absPath(std::string(s));
}
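The rewritten `lookupFileArg` above now distinguishes four argument shapes: pseudo-URLs fetched as tarballs, `flake:` references resolved through the flake machinery, `<...>` lookups against the search path, and plain paths. The standalone sketch below is not part of the patch; it uses hypothetical stand-in handlers instead of Nix's store and fetcher APIs, and a simplified pseudo-URL check, purely to illustrate the dispatch order.

```cpp
#include <iostream>
#include <string>
#include <string_view>

// Hypothetical stand-in for the Nix-specific action taken in each branch.
static std::string handle(std::string_view kind, std::string_view arg)
{
    return std::string(kind) + "(" + std::string(arg) + ")";
}

// Mirrors the dispatch order of lookupFileArg: pseudo-URL, then "flake:",
// then <...> search-path lookup, then plain path.
static std::string classifyFileArg(std::string_view s)
{
    auto isPseudoUrl = [](std::string_view v) {
        // Simplified: the real check whitelists specific URL schemes.
        return v.rfind("channel:", 0) == 0 || v.find("://") != std::string_view::npos;
    };

    if (isPseudoUrl(s))
        return handle("downloadTarball", s);
    else if (s.rfind("flake:", 0) == 0)
        return handle("fetchFlake", s.substr(6));
    else if (s.size() > 2 && s.front() == '<' && s.back() == '>')
        return handle("findFile", s.substr(1, s.size() - 2));
    else
        return handle("absPath", s);
}

int main()
{
    for (auto arg : {"https://example.org/src.tar.gz", "flake:nixpkgs",
                     "<nixpkgs>", "./default.nix"})
        std::cout << arg << " -> " << classifyFileArg(arg) << "\n";
}
```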
diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc
index ca656f3c5..8609a8c8b 100644
--- a/src/libcmd/installables.cc
+++ b/src/libcmd/installables.cc
@@ -475,44 +475,56 @@ static StorePath getDeriver(
struct InstallableStorePath : Installable
{
ref<Store> store;
- StorePath storePath;
+ DerivedPath req;
InstallableStorePath(ref<Store> store, StorePath && storePath)
- : store(store), storePath(std::move(storePath)) { }
+ : store(store),
+ req(storePath.isDerivation()
+ ? (DerivedPath) DerivedPath::Built {
+ .drvPath = std::move(storePath),
+ .outputs = {},
+ }
+ : (DerivedPath) DerivedPath::Opaque {
+ .path = std::move(storePath),
+ })
+ { }
- std::string what() const override { return store->printStorePath(storePath); }
+ InstallableStorePath(ref<Store> store, DerivedPath && req)
+ : store(store), req(std::move(req))
+ { }
+
+ std::string what() const override
+ {
+ return req.to_string(*store);
+ }
DerivedPaths toDerivedPaths() override
{
- if (storePath.isDerivation()) {
- auto drv = store->readDerivation(storePath);
- return {
- DerivedPath::Built {
- .drvPath = storePath,
- .outputs = drv.outputNames(),
- }
- };
- } else {
- return {
- DerivedPath::Opaque {
- .path = storePath,
- }
- };
- }
+ return { req };
}
StorePathSet toDrvPaths(ref<Store> store) override
{
- if (storePath.isDerivation()) {
- return {storePath};
- } else {
- return {getDeriver(store, *this, storePath)};
- }
+ return std::visit(overloaded {
+ [&](const DerivedPath::Built & bfd) -> StorePathSet {
+ return { bfd.drvPath };
+ },
+ [&](const DerivedPath::Opaque & bo) -> StorePathSet {
+ return { getDeriver(store, *this, bo.path) };
+ },
+ }, req.raw());
}
std::optional<StorePath> getStorePath() override
{
- return storePath;
+ return std::visit(overloaded {
+ [&](const DerivedPath::Built & bfd) {
+ return bfd.drvPath;
+ },
+ [&](const DerivedPath::Opaque & bo) {
+ return bo.path;
+ },
+ }, req.raw());
}
};
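Worth noting for readers of this hunk: `DerivedPath` is a variant of `DerivedPath::Built` and `DerivedPath::Opaque`, and the rewritten accessors dispatch on it with `std::visit` plus the `overloaded` helper. The standalone sketch below is not part of the patch; it uses simplified stand-in types (not Nix's real `DerivedPath`) purely to illustrate that idiom.

```cpp
#include <iostream>
#include <string>
#include <variant>

// Simplified stand-ins for DerivedPath::Built / DerivedPath::Opaque.
struct Built  { std::string drvPath; };
struct Opaque { std::string path; };
using DerivedPath = std::variant<Built, Opaque>;

// The 'overloaded' helper: bundles several lambdas into a single visitor.
template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;

std::string what(const DerivedPath & req)
{
    return std::visit(overloaded {
        [](const Built & b)  { return "build " + b.drvPath; },
        [](const Opaque & o) { return "copy " + o.path; },
    }, req);
}

int main()
{
    DerivedPath a = Built{"/nix/store/example-glibc.drv"};
    DerivedPath b = Opaque{"/nix/store/example-glibc"};
    std::cout << what(a) << "\n" << what(b) << "\n";
}
```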
@@ -889,7 +901,8 @@ std::vector<std::shared_ptr<Installable>> SourceExprCommand::parseInstallables(
if (file == "-") {
auto e = state->parseStdin();
state->eval(e, *vFile);
- } else if (file)
+ }
+ else if (file)
state->evalFile(lookupFileArg(*state, *file), *vFile);
else {
auto e = state->parseExprFromString(*expr, absPath("."));
@@ -953,6 +966,22 @@ std::vector<std::shared_ptr<Installable>> SourceExprCommand::parseInstallables(
/*if (modifyInstallable) {
throw Error("cannot apply function: installable cannot be evaluated");
}*/
+ auto found = s.rfind('^');
+ if (found != std::string::npos) {
+ try {
+ result.push_back(std::make_shared<InstallableStorePath>(
+ store,
+ DerivedPath::Built::parse(*store, s.substr(0, found), s.substr(found + 1))));
+ continue;
+ } catch (BadStorePath &) {
+ } catch (...) {
+ if (!ex)
+ ex = std::current_exception();
+ }
+ }
+
+ found = s.find('/');
+ if (found != std::string::npos) {
try {
result.push_back(std::make_shared<InstallableStorePath>(store, store->followLinksToStorePath(s)));
continue;
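The new branch above splits an installable argument at the last `^` and hands the two halves to `DerivedPath::Built::parse`, so the `glibc-2.33-78.drv^dev` example from the release notes selects specific outputs. The following sketch is illustrative only (the real output-spec parsing lives in `DerivedPath::Built::parse`); it just shows the split and a comma-separated output list.

```cpp
#include <iostream>
#include <set>
#include <sstream>
#include <string>
#include <utility>

// Splits "path^out1,out2" into the path and the selected output names.
static std::pair<std::string, std::set<std::string>> splitOutputsSpec(const std::string & s)
{
    auto caret = s.rfind('^');
    if (caret == std::string::npos)
        return {s, {}};
    std::set<std::string> outputs;
    std::istringstream rest(s.substr(caret + 1));
    for (std::string name; std::getline(rest, name, ',');)
        outputs.insert(name);
    return {s.substr(0, caret), outputs};
}

int main()
{
    auto [path, outputs] = splitOutputsSpec("/nix/store/aaaa-glibc-2.33-78.drv^dev,static");
    std::cout << path << "\n";
    for (auto & o : outputs) std::cout << "  output: " << o << "\n";
}
```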
diff --git a/src/libcmd/repl.cc b/src/libcmd/repl.cc
index f18a07ff4..80f63323e 100644
--- a/src/libcmd/repl.cc
+++ b/src/libcmd/repl.cc
@@ -787,7 +787,7 @@ void NixRepl::loadFlake(const std::string & flakeRefS)
flake::LockFlags {
.updateLockFile = false,
.useRegistries = !evalSettings.pureEval,
- .allowMutable = !evalSettings.pureEval,
+ .allowUnlocked = !evalSettings.pureEval,
}),
v);
addAttrsToScope(v);
diff --git a/src/libexpr/eval-cache.cc b/src/libexpr/eval-cache.cc
index b259eec63..3e2a8665e 100644
--- a/src/libexpr/eval-cache.cc
+++ b/src/libexpr/eval-cache.cc
@@ -645,17 +645,17 @@ NixInt AttrCursor::getInt()
cachedValue = root->db->getAttr(getKey());
if (cachedValue && !std::get_if<placeholder_t>(&cachedValue->second)) {
if (auto i = std::get_if<int_t>(&cachedValue->second)) {
- debug("using cached Integer attribute '%s'", getAttrPathStr());
+ debug("using cached integer attribute '%s'", getAttrPathStr());
return i->x;
} else
- throw TypeError("'%s' is not an Integer", getAttrPathStr());
+ throw TypeError("'%s' is not an integer", getAttrPathStr());
}
}
auto & v = forceValue();
if (v.type() != nInt)
- throw TypeError("'%s' is not an Integer", getAttrPathStr());
+ throw TypeError("'%s' is not an integer", getAttrPathStr());
return v.integer;
}
diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc
index c759acc10..aee0636b0 100644
--- a/src/libexpr/eval.cc
+++ b/src/libexpr/eval.cc
@@ -45,7 +45,7 @@ static char * allocString(size_t size)
#if HAVE_BOEHMGC
t = (char *) GC_MALLOC_ATOMIC(size);
#else
- t = malloc(size);
+ t = (char *) malloc(size);
#endif
if (!t) throw std::bad_alloc();
return t;
@@ -402,7 +402,8 @@ static Strings parseNixPath(const std::string & s)
}
if (*p == ':') {
- if (isUri(std::string(start2, s.end()))) {
+ auto prefix = std::string(start2, s.end());
+ if (EvalSettings::isPseudoUrl(prefix) || hasPrefix(prefix, "flake:")) {
++p;
while (p != s.end() && *p != ':') ++p;
}
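Because `NIX_PATH` is colon-separated while URLs and `flake:` references contain colons of their own, the hunk above makes `parseNixPath` consult `EvalSettings::isPseudoUrl` (or check for a `flake:` prefix) before treating a `:` as a separator, skipping one extra segment for such entries. The sketch below is a simplified, self-contained reimplementation of that splitting rule, not the real parser; it assumes entries of the form `prefix=value` or plain paths.

```cpp
#include <iostream>
#include <string>
#include <vector>

// Does the entry's value look like a URL, channel, or flake reference?
static bool urlLike(const std::string & value)
{
    if (value.rfind("flake:", 0) == 0 || value.rfind("channel:", 0) == 0) return true;
    auto scheme = value.find("://");
    return scheme != std::string::npos && value.find(':') == scheme;
}

// Split on ':' while keeping URL-like entries in one piece.
static std::vector<std::string> splitSearchPath(const std::string & s)
{
    std::vector<std::string> res;
    size_t p = 0;
    while (p < s.size()) {
        size_t colon = s.find(':', p);
        // The value starts after an optional "prefix=".
        size_t eq = s.find('=', p);
        size_t valueStart = (eq != std::string::npos && (colon == std::string::npos || eq < colon)) ? eq + 1 : p;
        if (urlLike(s.substr(valueStart)) && colon != std::string::npos)
            colon = s.find(':', colon + 1);   // skip the ':' that belongs to the value itself
        if (colon == std::string::npos) {
            res.push_back(s.substr(p));
            break;
        }
        res.push_back(s.substr(p, colon - p));
        p = colon + 1;
    }
    return res;
}

int main()
{
    auto entries = splitSearchPath(
        "nixpkgs=flake:nixpkgs:/etc/nixos:foo=https://example.org/x.tar.gz");
    for (auto & e : entries) std::cout << e << "\n";
    // Prints: nixpkgs=flake:nixpkgs, /etc/nixos, foo=https://example.org/x.tar.gz
}
```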
@@ -470,9 +471,6 @@ EvalState::EvalState(
#if HAVE_BOEHMGC
, valueAllocCache(std::allocate_shared<void *>(traceable_allocator<void *>(), nullptr))
, env1AllocCache(std::allocate_shared<void *>(traceable_allocator<void *>(), nullptr))
-#else
- , valueAllocCache(std::make_shared<void *>(nullptr))
- , env1AllocCache(std::make_shared<void *>(nullptr))
#endif
, baseEnv(allocEnv(128))
, staticBaseEnv{std::make_shared(false, nullptr)}
@@ -1646,7 +1644,7 @@ void EvalState::callFunction(Value & fun, size_t nrArgs, Value * * args, Value &
auto dts = debugRepl
? makeDebugTraceStacker(
*this, *lambda.body, env2, positions[lambda.pos],
- "while evaluating %s",
+ "while calling %s",
lambda.name
? concatStrings("'", symbols[lambda.name], "'")
: "anonymous lambda")
@@ -1655,11 +1653,11 @@ void EvalState::callFunction(Value & fun, size_t nrArgs, Value * * args, Value &
lambda.body->eval(*this, env2, vCur);
} catch (Error & e) {
if (loggerSettings.showTrace.get()) {
- addErrorTrace(e, lambda.pos, "while evaluating %s",
+ addErrorTrace(e, lambda.pos, "while calling %s",
(lambda.name
? concatStrings("'", symbols[lambda.name], "'")
: "anonymous lambda"));
- addErrorTrace(e, pos, "from call site%s", "");
+ addErrorTrace(e, pos, "while evaluating call site%s", "");
}
throw;
}
@@ -1806,7 +1804,7 @@ void EvalState::autoCallFunction(Bindings & args, Value & fun, Value & res)
Nix attempted to evaluate a function as a top level expression; in
this case it must have its arguments supplied either by default
values, or passed explicitly with '--arg' or '--argstr'. See
-https://nixos.org/manual/nix/stable/expressions/language-constructs.html#functions.)", symbols[i.name],
+https://nixos.org/manual/nix/stable/language/constructs.html#functions.)", symbols[i.name],
*fun.lambda.env, *fun.lambda.fun);
}
}
@@ -2583,6 +2581,23 @@ Strings EvalSettings::getDefaultNixPath()
return res;
}
+bool EvalSettings::isPseudoUrl(std::string_view s)
+{
+ if (s.compare(0, 8, "channel:") == 0) return true;
+ size_t pos = s.find("://");
+ if (pos == std::string::npos) return false;
+ std::string scheme(s, 0, pos);
+ return scheme == "http" || scheme == "https" || scheme == "file" || scheme == "channel" || scheme == "git" || scheme == "s3" || scheme == "ssh";
+}
+
+std::string EvalSettings::resolvePseudoUrl(std::string_view url)
+{
+ if (hasPrefix(url, "channel:"))
+ return "https://nixos.org/channels/" + std::string(url.substr(8)) + "/nixexprs.tar.xz";
+ else
+ return std::string(url);
+}
+
EvalSettings evalSettings;
static GlobalConfig::Register rEvalSettings(&evalSettings);
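For reference, the two helpers added above are small enough to reproduce standalone. The sketch below copies their logic, lightly adapted so it compiles without Nix's internal headers, and checks a few representative inputs.

```cpp
#include <cassert>
#include <iostream>
#include <string>
#include <string_view>

// Stand-alone copy of EvalSettings::isPseudoUrl for illustration.
static bool isPseudoUrl(std::string_view s)
{
    if (s.compare(0, 8, "channel:") == 0) return true;
    size_t pos = s.find("://");
    if (pos == std::string_view::npos) return false;
    std::string scheme(s, 0, pos);
    return scheme == "http" || scheme == "https" || scheme == "file"
        || scheme == "channel" || scheme == "git" || scheme == "s3" || scheme == "ssh";
}

// Stand-alone copy of EvalSettings::resolvePseudoUrl for illustration.
static std::string resolvePseudoUrl(std::string_view url)
{
    if (url.compare(0, 8, "channel:") == 0)
        return "https://nixos.org/channels/" + std::string(url.substr(8)) + "/nixexprs.tar.xz";
    return std::string(url);
}

int main()
{
    assert(isPseudoUrl("channel:nixos-22.05"));
    assert(isPseudoUrl("https://example.org/src.tar.gz"));
    assert(!isPseudoUrl("flake:nixpkgs"));   // handled by the separate "flake:" branch
    assert(!isPseudoUrl("/etc/nixos"));
    std::cout << resolvePseudoUrl("channel:nixos-22.05") << "\n";
}
```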
diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh
index f07f15d43..cf307d820 100644
--- a/src/libexpr/eval.hh
+++ b/src/libexpr/eval.hh
@@ -590,6 +590,10 @@ struct EvalSettings : Config
static Strings getDefaultNixPath();
+ static bool isPseudoUrl(std::string_view s);
+
+ static std::string resolvePseudoUrl(std::string_view url);
+
Setting<bool> enableNativeCode{this, false, "allow-unsafe-native-code-during-evaluation",
"Whether builtin functions that allow executing native code should be enabled."};
diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc
index b3a77b860..f960dd1f2 100644
--- a/src/libexpr/flake/flake.cc
+++ b/src/libexpr/flake/flake.cc
@@ -143,7 +143,7 @@ static FlakeInput parseFlakeInput(EvalState & state,
} catch (Error & e) {
e.addTrace(
state.positions[attr.pos],
- hintfmt("in flake attribute '%s'", state.symbols[attr.name]));
+ hintfmt("while evaluating flake attribute '%s'", state.symbols[attr.name]));
throw;
}
}
@@ -152,7 +152,7 @@ static FlakeInput parseFlakeInput(EvalState & state,
try {
input.ref = FlakeRef::fromAttrs(attrs);
} catch (Error & e) {
- e.addTrace(state.positions[pos], hintfmt("in flake input"));
+ e.addTrace(state.positions[pos], hintfmt("while evaluating flake input"));
throw;
}
else {
@@ -353,7 +353,7 @@ LockedFlake lockFlake(
std::function<void(
const FlakeInputs & flakeInputs,
- std::shared_ptr<Node> node,
+ ref<Node> node,
const InputPath & inputPathPrefix,
std::shared_ptr<const Node> oldNode,
const InputPath & lockRootPath,
@@ -362,9 +362,15 @@ LockedFlake lockFlake(
computeLocks;
computeLocks = [&](
+ /* The inputs of this node, either from flake.nix or
+ flake.lock. */
const FlakeInputs & flakeInputs,
- std::shared_ptr<Node> node,
+ /* The node whose locks are to be updated.*/
+ ref<Node> node,
+ /* The path to this node in the lock file graph. */
const InputPath & inputPathPrefix,
+ /* The old node, if any, from which locks can be
+ copied. */
std::shared_ptr<const Node> oldNode,
const InputPath & lockRootPath,
const Path & parentPath,
@@ -452,7 +458,7 @@ LockedFlake lockFlake(
/* Copy the input from the old lock since its flakeref
didn't change and there is no override from a
higher level flake. */
- auto childNode = std::make_shared<LockedNode>(
+ auto childNode = make_ref<LockedNode>(
oldLock->lockedRef, oldLock->originalRef, oldLock->isFlake);
node->inputs.insert_or_assign(id, childNode);
@@ -481,7 +487,7 @@ LockedFlake lockFlake(
.isFlake = (*lockedNode)->isFlake,
});
} else if (auto follows = std::get_if<1>(&i.second)) {
- if (! trustLock) {
+ if (!trustLock) {
// It is possible that the flake has changed,
// so we must confirm all the follows that are in the lock file are also in the flake.
auto overridePath(inputPath);
@@ -521,8 +527,8 @@ LockedFlake lockFlake(
this input. */
debug("creating new input '%s'", inputPathS);
- if (!lockFlags.allowMutable && !input.ref->input.isLocked())
- throw Error("cannot update flake input '%s' in pure mode", inputPathS);
+ if (!lockFlags.allowUnlocked && !input.ref->input.isLocked())
+ throw Error("cannot update unlocked flake input '%s' in pure mode", inputPathS);
/* Note: in case of an --override-input, we use
the *original* ref (input2.ref) for the
@@ -544,7 +550,7 @@ LockedFlake lockFlake(
auto inputFlake = getFlake(state, localRef, useRegistries, flakeCache, inputPath);
- auto childNode = std::make_shared<LockedNode>(inputFlake.lockedRef, ref);
+ auto childNode = make_ref<LockedNode>(inputFlake.lockedRef, ref);
node->inputs.insert_or_assign(id, childNode);
@@ -564,15 +570,19 @@ LockedFlake lockFlake(
oldLock
? std::dynamic_pointer_cast<const Node>(oldLock)
: LockFile::read(
- inputFlake.sourceInfo->actualPath + "/" + inputFlake.lockedRef.subdir + "/flake.lock").root,
- oldLock ? lockRootPath : inputPath, localPath, false);
+ inputFlake.sourceInfo->actualPath + "/" + inputFlake.lockedRef.subdir + "/flake.lock").root.get_ptr(),
+ oldLock ? lockRootPath : inputPath,
+ localPath,
+ false);
}
else {
auto [sourceInfo, resolvedRef, lockedRef] = fetchOrSubstituteTree(
state, *input.ref, useRegistries, flakeCache);
- node->inputs.insert_or_assign(id,
- std::make_shared<LockedNode>(lockedRef, ref, false));
+
+ auto childNode = make_ref<LockedNode>(lockedRef, ref, false);
+
+ node->inputs.insert_or_assign(id, childNode);
}
}
@@ -587,8 +597,13 @@ LockedFlake lockFlake(
auto parentPath = canonPath(flake.sourceInfo->actualPath + "/" + flake.lockedRef.subdir, true);
computeLocks(
- flake.inputs, newLockFile.root, {},
- lockFlags.recreateLockFile ? nullptr : oldLockFile.root, {}, parentPath, false);
+ flake.inputs,
+ newLockFile.root,
+ {},
+ lockFlags.recreateLockFile ? nullptr : oldLockFile.root.get_ptr(),
+ {},
+ parentPath,
+ false);
for (auto & i : lockFlags.inputOverrides)
if (!overridesUsed.count(i.first))
@@ -611,9 +626,9 @@ LockedFlake lockFlake(
if (lockFlags.writeLockFile) {
if (auto sourcePath = topRef.input.getSourcePath()) {
- if (!newLockFile.isImmutable()) {
+ if (auto unlockedInput = newLockFile.isUnlocked()) {
if (fetchSettings.warnDirty)
- warn("will not write lock file of flake '%s' because it has a mutable input", topRef);
+ warn("will not write lock file of flake '%s' because it has an unlocked input ('%s')", topRef, *unlockedInput);
} else {
if (!lockFlags.updateLockFile)
throw Error("flake '%s' requires lock file changes but they're not allowed due to '--no-update-lock-file'", topRef);
@@ -737,7 +752,7 @@ static void prim_getFlake(EvalState & state, const PosIdx pos, Value * * args, V
.updateLockFile = false,
.writeLockFile = false,
.useRegistries = !evalSettings.pureEval && fetchSettings.useRegistries,
- .allowMutable = !evalSettings.pureEval,
+ .allowUnlocked = !evalSettings.pureEval,
}),
v);
}
diff --git a/src/libexpr/flake/flake.hh b/src/libexpr/flake/flake.hh
index 524b18af1..10301d8aa 100644
--- a/src/libexpr/flake/flake.hh
+++ b/src/libexpr/flake/flake.hh
@@ -108,11 +108,11 @@ struct LockFlags
bool applyNixConfig = false;
- /* Whether mutable flake references (i.e. those without a Git
+ /* Whether unlocked flake references (i.e. those without a Git
revision or similar) without a corresponding lock are
- allowed. Mutable flake references with a lock are always
+ allowed. Unlocked flake references with a lock are always
allowed. */
- bool allowMutable = true;
+ bool allowUnlocked = true;
/* Whether to commit changes to flake.lock. */
bool commitLockFile = false;
diff --git a/src/libexpr/flake/flakeref.hh b/src/libexpr/flake/flakeref.hh
index fe4f67193..a36d852a8 100644
--- a/src/libexpr/flake/flakeref.hh
+++ b/src/libexpr/flake/flakeref.hh
@@ -35,7 +35,7 @@ typedef std::string FlakeId;
struct FlakeRef
{
- /* fetcher-specific representation of the input, sufficient to
+ /* Fetcher-specific representation of the input, sufficient to
perform the fetch operation. */
fetchers::Input input;
diff --git a/src/libexpr/flake/lockfile.cc b/src/libexpr/flake/lockfile.cc
index 629d2e669..a3ed90e1f 100644
--- a/src/libexpr/flake/lockfile.cc
+++ b/src/libexpr/flake/lockfile.cc
@@ -31,7 +31,7 @@ FlakeRef getFlakeRef(
}
LockedNode::LockedNode(const nlohmann::json & json)
- : lockedRef(getFlakeRef(json, "locked", "info"))
+ : lockedRef(getFlakeRef(json, "locked", "info")) // FIXME: remove "info"
, originalRef(getFlakeRef(json, "original", nullptr))
, isFlake(json.find("flake") != json.end() ? (bool) json["flake"] : true)
{
@@ -49,15 +49,15 @@ std::shared_ptr<Node> LockFile::findInput(const InputPath & path)
{
auto pos = root;
- if (!pos) return {};
-
for (auto & elem : path) {
if (auto i = get(pos->inputs, elem)) {
if (auto node = std::get_if<0>(&*i))
pos = *node;
else if (auto follows = std::get_if<1>(&*i)) {
- pos = findInput(*follows);
- if (!pos) return {};
+ if (auto p = findInput(*follows))
+ pos = ref(p);
+ else
+ return {};
}
} else
return {};
@@ -72,7 +72,7 @@ LockFile::LockFile(const nlohmann::json & json, const Path & path)
if (version < 5 || version > 7)
throw Error("lock file '%s' has unsupported version %d", path, version);
- std::unordered_map<std::string, std::shared_ptr<Node>> nodeMap;
+ std::map<std::string, ref<Node>> nodeMap;
std::function<void(Node & node, const nlohmann::json & jsonNode)> getInputs;
@@ -93,12 +93,12 @@ LockFile::LockFile(const nlohmann::json & json, const Path & path)
auto jsonNode2 = nodes.find(inputKey);
if (jsonNode2 == nodes.end())
throw Error("lock file references missing node '%s'", inputKey);
- auto input = std::make_shared<LockedNode>(*jsonNode2);
+ auto input = make_ref<LockedNode>(*jsonNode2);
k = nodeMap.insert_or_assign(inputKey, input).first;
getInputs(*input, *jsonNode2);
}
- if (auto child = std::dynamic_pointer_cast<LockedNode>(k->second))
- node.inputs.insert_or_assign(i.key(), child);
+ if (auto child = k->second.dynamic_pointer_cast<LockedNode>())
+ node.inputs.insert_or_assign(i.key(), ref(child));
else
// FIXME: replace by follows node
throw Error("lock file contains cycle to root node");
@@ -122,9 +122,9 @@ nlohmann::json LockFile::toJSON() const
std::unordered_map<std::shared_ptr<const Node>, std::string> nodeKeys;
std::unordered_set<std::string> keys;
- std::function<std::string(std::string key, std::shared_ptr<const Node> node)> dumpNode;
+ std::function<std::string(std::string key, ref<const Node> node)> dumpNode;
- dumpNode = [&](std::string key, std::shared_ptr<const Node> node) -> std::string
+ dumpNode = [&](std::string key, ref<const Node> node) -> std::string
{
auto k = nodeKeys.find(node);
if (k != nodeKeys.end())
@@ -159,10 +159,11 @@ nlohmann::json LockFile::toJSON() const
n["inputs"] = std::move(inputs);
}
- if (auto lockedNode = std::dynamic_pointer_cast<const LockedNode>(node)) {
+ if (auto lockedNode = node.dynamic_pointer_cast<const LockedNode>()) {
n["original"] = fetchers::attrsToJSON(lockedNode->originalRef.toAttrs());
n["locked"] = fetchers::attrsToJSON(lockedNode->lockedRef.toAttrs());
- if (!lockedNode->isFlake) n["flake"] = false;
+ if (!lockedNode->isFlake)
+ n["flake"] = false;
}
nodes[key] = std::move(n);
@@ -201,13 +202,13 @@ void LockFile::write(const Path & path) const
writeFile(path, fmt("%s\n", *this));
}
-bool LockFile::isImmutable() const
+std::optional<FlakeRef> LockFile::isUnlocked() const
{
- std::unordered_set<std::shared_ptr<const Node>> nodes;
+ std::set<ref<const Node>> nodes;
- std::function<void(std::shared_ptr<const Node> node)> visit;
+ std::function<void(ref<const Node> node)> visit;
- visit = [&](std::shared_ptr<const Node> node)
+ visit = [&](ref<const Node> node)
{
if (!nodes.insert(node).second) return;
for (auto & i : node->inputs)
@@ -219,11 +220,12 @@ bool LockFile::isImmutable() const
for (auto & i : nodes) {
if (i == root) continue;
- auto lockedNode = std::dynamic_pointer_cast<const LockedNode>(i);
- if (lockedNode && !lockedNode->lockedRef.input.isLocked()) return false;
+ auto node = i.dynamic_pointer_cast<const LockedNode>();
+ if (node && !node->lockedRef.input.isLocked())
+ return node->lockedRef;
}
- return true;
+ return {};
}
bool LockFile::operator ==(const LockFile & other) const
@@ -247,12 +249,12 @@ InputPath parseInputPath(std::string_view s)
std::map<InputPath, Node::Edge> LockFile::getAllInputs() const
{
- std::unordered_set<std::shared_ptr<Node>> done;
+ std::set<ref<Node>> done;
std::map<InputPath, Node::Edge> res;
- std::function<void(const InputPath & prefix, std::shared_ptr<Node> node)> recurse;
+ std::function<void(const InputPath & prefix, ref<Node> node)> recurse;
- recurse = [&](const InputPath & prefix, std::shared_ptr<Node> node)
+ recurse = [&](const InputPath & prefix, ref<Node> node)
{
if (!done.insert(node).second) return;
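Most of this refactor swaps `std::shared_ptr<Node>` for Nix's `ref<Node>`, a non-nullable smart pointer that wraps a `shared_ptr` and throws if given a null pointer. The sketch below is a simplified illustration of the idea, not the real `ref` from `src/libutil/ref.hh` (which has more conversions and operators).

```cpp
#include <iostream>
#include <memory>
#include <stdexcept>

// Simplified non-nullable shared pointer in the spirit of Nix's ref<T>.
template<typename T>
class ref
{
    std::shared_ptr<T> p;
public:
    explicit ref(const std::shared_ptr<T> & ptr) : p(ptr)
    {
        if (!ptr) throw std::invalid_argument("null pointer cast to ref");
    }
    T * operator ->() const { return p.get(); }
    T & operator *() const { return *p; }
    std::shared_ptr<T> get_ptr() const { return p; }
};

template<typename T, typename... Args>
ref<T> make_ref(Args &&... args)
{
    return ref<T>(std::make_shared<T>(std::forward<Args>(args)...));
}

struct Node { int id = 0; };

int main()
{
    auto n = make_ref<Node>();
    n->id = 42;
    std::cout << n->id << "\n";

    std::shared_ptr<Node> empty;
    try { ref<Node> bad(empty); }
    catch (const std::invalid_argument & e) { std::cout << e.what() << "\n"; }
}
```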
diff --git a/src/libexpr/flake/lockfile.hh b/src/libexpr/flake/lockfile.hh
index 96f1edc76..02e9bdfbc 100644
--- a/src/libexpr/flake/lockfile.hh
+++ b/src/libexpr/flake/lockfile.hh
@@ -20,7 +20,7 @@ struct LockedNode;
type LockedNode. */
struct Node : std::enable_shared_from_this<Node>
{
- typedef std::variant<std::shared_ptr<LockedNode>, InputPath> Edge;
+ typedef std::variant<ref<LockedNode>, InputPath> Edge;
std::map<FlakeId, Edge> inputs;
@@ -47,11 +47,13 @@ struct LockedNode : Node
struct LockFile
{
- std::shared_ptr<Node> root = std::make_shared<Node>();
+ ref<Node> root = make_ref<Node>();
LockFile() {};
LockFile(const nlohmann::json & json, const Path & path);
+ typedef std::map<ref<const Node>, std::string> KeyMap;
+
nlohmann::json toJSON() const;
std::string to_string() const;
@@ -60,7 +62,8 @@ struct LockFile
void write(const Path & path) const;
- bool isImmutable() const;
+ /* Check whether this lock file has any unlocked inputs. */
+ std::optional<FlakeRef> isUnlocked() const;
bool operator ==(const LockFile & other) const;
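`isUnlocked` now returns the offending flake reference rather than a plain `bool`, which is what lets the caller in `flake.cc` above name the unlocked input in its warning. The sketch below shows that optional-return pattern with simplified stand-in types (the real method returns `std::optional<FlakeRef>`).

```cpp
#include <iostream>
#include <optional>
#include <string>
#include <vector>

// Stand-in for a flake input: just a name and a locked flag.
struct Input { std::string name; bool locked; };

// Return the first unlocked entry, if any, so the caller can report it.
std::optional<Input> firstUnlocked(const std::vector<Input> & inputs)
{
    for (auto & i : inputs)
        if (!i.locked) return i;
    return std::nullopt;
}

int main()
{
    std::vector<Input> inputs{{"nixpkgs", true}, {"my-local-tree", false}};
    if (auto unlockedInput = firstUnlocked(inputs))
        std::cout << "will not write lock file because it has an unlocked input ('"
                  << unlockedInput->name << "')\n";
}
```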
diff --git a/src/libexpr/get-drvs.cc b/src/libexpr/get-drvs.cc
index 346741dd5..5ad5d1fd4 100644
--- a/src/libexpr/get-drvs.cc
+++ b/src/libexpr/get-drvs.cc
@@ -150,7 +150,7 @@ DrvInfo::Outputs DrvInfo::queryOutputs(bool withPaths, bool onlyOutputsToInstall
/* Check for `meta.outputsToInstall` and return `outputs` reduced to that. */
const Value * outTI = queryMeta("outputsToInstall");
if (!outTI) return outputs;
- const auto errMsg = Error("this derivation has bad 'meta.outputsToInstall'");
+ auto errMsg = Error("this derivation has bad 'meta.outputsToInstall'");
/* ^ this shows during `nix-env -i` right under the bad derivation */
if (!outTI->isList()) throw errMsg;
Outputs result;
diff --git a/src/libexpr/nixexpr.cc b/src/libexpr/nixexpr.cc
index 7c623a07d..2be560d76 100644
--- a/src/libexpr/nixexpr.cc
+++ b/src/libexpr/nixexpr.cc
@@ -289,7 +289,6 @@ std::string showAttrPath(const SymbolTable & symbols, const AttrPath & attrPath)
}
-
/* Computing levels/displacements for variables. */
void Expr::bindVars(EvalState & es, const std::shared_ptr<const StaticEnv> & env)
diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y
index b68d4620b..33f208639 100644
--- a/src/libexpr/parser.y
+++ b/src/libexpr/parser.y
@@ -645,6 +645,7 @@ formal
#include "filetransfer.hh"
#include "fetchers.hh"
#include "store-api.hh"
+#include "flake/flake.hh"
namespace nix {
@@ -807,17 +808,28 @@ std::pair<bool, std::string> EvalState::resolveSearchPathElem(const SearchPathElem & elem)
std::pair<bool, std::string> res;
- if (isUri(elem.second)) {
+ if (EvalSettings::isPseudoUrl(elem.second)) {
try {
- res = { true, store->toRealPath(fetchers::downloadTarball(
- store, resolveUri(elem.second), "source", false).first.storePath) };
+ auto storePath = fetchers::downloadTarball(
+ store, EvalSettings::resolvePseudoUrl(elem.second), "source", false).first.storePath;
+ res = { true, store->toRealPath(storePath) };
} catch (FileTransferError & e) {
logWarning({
.msg = hintfmt("Nix search path entry '%1%' cannot be downloaded, ignoring", elem.second)
});
res = { false, "" };
}
- } else {
+ }
+
+ else if (hasPrefix(elem.second, "flake:")) {
+ settings.requireExperimentalFeature(Xp::Flakes);
+ auto flakeRef = parseFlakeRef(elem.second.substr(6), {}, true, false);
+ debug("fetching flake search path element '%s''", elem.second);
+ auto storePath = flakeRef.resolve(store).fetchTree(store).first.storePath;
+ res = { true, store->toRealPath(storePath) };
+ }
+
+ else {
auto path = absPath(elem.second);
if (pathExists(path))
res = { true, path };
diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc
index d6bbfc8fc..e920c0ef1 100644
--- a/src/libexpr/primops.cc
+++ b/src/libexpr/primops.cc
@@ -1461,10 +1461,10 @@ static RegisterPrimOp primop_storePath({
static void prim_pathExists(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
/* We don’t check the path right now, because we don’t want to
- throw if the path isn’t allowed, but just return false (and we
- can’t just catch the exception here because we still want to
- throw if something in the evaluation of `*args[0]` tries to
- access an unauthorized path). */
+ throw if the path isn’t allowed, but just return false (and we
+ can’t just catch the exception here because we still want to
+ throw if something in the evaluation of `*args[0]` tries to
+ access an unauthorized path). */
auto path = realisePath(state, pos, *args[0], { .checkForPureEval = false });
try {
@@ -2420,12 +2420,18 @@ static RegisterPrimOp primop_listToAttrs({
Construct a set from a list specifying the names and values of each
attribute. Each element of the list should be a set consisting of a
string-valued attribute `name` specifying the name of the attribute,
- and an attribute `value` specifying its value. Example:
+ and an attribute `value` specifying its value.
+
+ In case of duplicate occurrences of the same name, the first
+ takes precedence.
+
+ Example:
```nix
builtins.listToAttrs
[ { name = "foo"; value = 123; }
{ name = "bar"; value = 456; }
+ { name = "bar"; value = 420; }
]
```
diff --git a/src/libexpr/primops/fetchTree.cc b/src/libexpr/primops/fetchTree.cc
index 84e7f5c02..680446787 100644
--- a/src/libexpr/primops/fetchTree.cc
+++ b/src/libexpr/primops/fetchTree.cc
@@ -220,8 +220,6 @@ static void fetch(EvalState & state, const PosIdx pos, Value * * args, Value & v
} else
url = state.forceStringNoCtx(*args[0], pos);
- url = resolveUri(*url);
-
state.checkURI(*url);
if (name == "")
diff --git a/src/libfetchers/fetch-settings.hh b/src/libfetchers/fetch-settings.hh
index 6452143a1..f33cbdcfc 100644
--- a/src/libfetchers/fetch-settings.hh
+++ b/src/libfetchers/fetch-settings.hh
@@ -71,7 +71,12 @@ struct FetchSettings : public Config
"Whether to warn about dirty Git/Mercurial trees."};
Setting flakeRegistry{this, "https://channels.nixos.org/flake-registry.json", "flake-registry",
- "Path or URI of the global flake registry."};
+ R"(
+ Path or URI of the global flake registry.
+
+ When empty, disables the global flake registry.
+ )"};
+
Setting useRegistries{this, true, "use-registries",
"Whether to use flake registries to resolve flake references."};
diff --git a/src/libfetchers/fetchers.cc b/src/libfetchers/fetchers.cc
index 6957d2da4..c767e72e5 100644
--- a/src/libfetchers/fetchers.cc
+++ b/src/libfetchers/fetchers.cc
@@ -266,7 +266,7 @@ std::optional Input::getLastModified() const
return {};
}
-ParsedURL InputScheme::toURL(const Input & input)
+ParsedURL InputScheme::toURL(const Input & input) const
{
throw Error("don't know how to convert input '%s' to a URL", attrsToJSON(input.attrs));
}
@@ -274,7 +274,7 @@ ParsedURL InputScheme::toURL(const Input & input)
Input InputScheme::applyOverrides(
const Input & input,
std::optional<std::string> ref,
- std::optional<Hash> rev)
+ std::optional<Hash> rev) const
{
if (ref)
throw Error("don't know how to set branch/tag name of input '%s' to '%s'", input.to_string(), *ref);
@@ -293,7 +293,7 @@ void InputScheme::markChangedFile(const Input & input, std::string_view file, st
assert(false);
}
-void InputScheme::clone(const Input & input, const Path & destDir)
+void InputScheme::clone(const Input & input, const Path & destDir) const
{
throw Error("do not know how to clone input '%s'", input.to_string());
}
diff --git a/src/libfetchers/fetchers.hh b/src/libfetchers/fetchers.hh
index bc9a76b0b..17da37f47 100644
--- a/src/libfetchers/fetchers.hh
+++ b/src/libfetchers/fetchers.hh
@@ -107,26 +107,25 @@ public:
* recognized. The Input object contains the information the fetcher
* needs to actually perform the "fetch()" when called.
*/
-
struct InputScheme
{
virtual ~InputScheme()
{ }
- virtual std::optional<Input> inputFromURL(const ParsedURL & url) = 0;
+ virtual std::optional<Input> inputFromURL(const ParsedURL & url) const = 0;
- virtual std::optional<Input> inputFromAttrs(const Attrs & attrs) = 0;
+ virtual std::optional<Input> inputFromAttrs(const Attrs & attrs) const = 0;
- virtual ParsedURL toURL(const Input & input);
+ virtual ParsedURL toURL(const Input & input) const;
- virtual bool hasAllInfo(const Input & input) = 0;
+ virtual bool hasAllInfo(const Input & input) const = 0;
virtual Input applyOverrides(
const Input & input,
std::optional<std::string> ref,
- std::optional<Hash> rev);
+ std::optional<Hash> rev) const;
- virtual void clone(const Input & input, const Path & destDir);
+ virtual void clone(const Input & input, const Path & destDir) const;
virtual std::optional<Path> getSourcePath(const Input & input);
diff --git a/src/libfetchers/git.cc b/src/libfetchers/git.cc
index d7df57cf8..a6b8f15eb 100644
--- a/src/libfetchers/git.cc
+++ b/src/libfetchers/git.cc
@@ -18,6 +18,7 @@
using namespace std::string_literals;
namespace nix::fetchers {
+
namespace {
// Explicit initial branch of our bare repo to suppress warnings from new version of git.
@@ -26,23 +27,23 @@ namespace {
// old version of git, which will ignore unrecognized `-c` options.
const std::string gitInitialBranch = "__nix_dummy_branch";
-bool isCacheFileWithinTtl(const time_t now, const struct stat & st)
+bool isCacheFileWithinTtl(time_t now, const struct stat & st)
{
return st.st_mtime + settings.tarballTtl > now;
}
-bool touchCacheFile(const Path& path, const time_t& touch_time)
+bool touchCacheFile(const Path & path, time_t touch_time)
{
- struct timeval times[2];
- times[0].tv_sec = touch_time;
- times[0].tv_usec = 0;
- times[1].tv_sec = touch_time;
- times[1].tv_usec = 0;
+ struct timeval times[2];
+ times[0].tv_sec = touch_time;
+ times[0].tv_usec = 0;
+ times[1].tv_sec = touch_time;
+ times[1].tv_usec = 0;
- return lutimes(path.c_str(), times) == 0;
+ return lutimes(path.c_str(), times) == 0;
}
-Path getCachePath(std::string key)
+Path getCachePath(std::string_view key)
{
return getCacheDir() + "/nix/gitv3/" +
hashString(htSHA256, key).to_string(Base32, false);
@@ -57,13 +58,12 @@ Path getCachePath(std::string key)
// ...
std::optional readHead(const Path & path)
{
- auto [exit_code, output] = runProgram(RunOptions {
+ auto [status, output] = runProgram(RunOptions {
.program = "git",
+ // FIXME: use 'HEAD' to avoid returning all refs
.args = {"ls-remote", "--symref", path},
});
- if (exit_code != 0) {
- return std::nullopt;
- }
+ if (status != 0) return std::nullopt;
std::string_view line = output;
line = line.substr(0, line.find("\n"));
@@ -82,12 +82,11 @@ std::optional readHead(const Path & path)
}
// Persist the HEAD ref from the remote repo in the local cached repo.
-bool storeCachedHead(const std::string& actualUrl, const std::string& headRef)
+bool storeCachedHead(const std::string & actualUrl, const std::string & headRef)
{
Path cacheDir = getCachePath(actualUrl);
- auto gitDir = ".";
try {
- runProgram("git", true, { "-C", cacheDir, "--git-dir", gitDir, "symbolic-ref", "--", "HEAD", headRef });
+ runProgram("git", true, { "-C", cacheDir, "--git-dir", ".", "symbolic-ref", "--", "HEAD", headRef });
} catch (ExecError &e) {
if (!WIFEXITED(e.status)) throw;
return false;
@@ -96,7 +95,7 @@ bool storeCachedHead(const std::string& actualUrl, const std::string& headRef)
return true;
}
-std::optional<std::string> readHeadCached(const std::string& actualUrl)
+std::optional<std::string> readHeadCached(const std::string & actualUrl)
{
// Create a cache path to store the branch of the HEAD ref. Append something
// in front of the URL to prevent collision with the repository itself.
@@ -110,16 +109,15 @@ std::optional readHeadCached(const std::string& actualUrl)
cachedRef = readHead(cacheDir);
if (cachedRef != std::nullopt &&
*cachedRef != gitInitialBranch &&
- isCacheFileWithinTtl(now, st)) {
+ isCacheFileWithinTtl(now, st))
+ {
debug("using cached HEAD ref '%s' for repo '%s'", *cachedRef, actualUrl);
return cachedRef;
}
}
auto ref = readHead(actualUrl);
- if (ref) {
- return ref;
- }
+ if (ref) return ref;
if (cachedRef) {
// If the cached git ref is expired in fetch() below, and the 'git fetch'
@@ -250,7 +248,7 @@ std::pair fetchFromWorkdir(ref store, Input & input, co
struct GitInputScheme : InputScheme
{
- std::optional<Input> inputFromURL(const ParsedURL & url) override
+ std::optional<Input> inputFromURL(const ParsedURL & url) const override
{
if (url.scheme != "git" &&
url.scheme != "git+http" &&
@@ -266,7 +264,7 @@ struct GitInputScheme : InputScheme
Attrs attrs;
attrs.emplace("type", "git");
- for (auto &[name, value] : url.query) {
+ for (auto & [name, value] : url.query) {
if (name == "rev" || name == "ref")
attrs.emplace(name, value);
else if (name == "shallow" || name == "submodules")
@@ -280,7 +278,7 @@ struct GitInputScheme : InputScheme
return inputFromAttrs(attrs);
}
- std::optional<Input> inputFromAttrs(const Attrs & attrs) override
+ std::optional<Input> inputFromAttrs(const Attrs & attrs) const override
{
if (maybeGetStrAttr(attrs, "type") != "git") return {};
@@ -303,7 +301,7 @@ struct GitInputScheme : InputScheme
return input;
}
- ParsedURL toURL(const Input & input) override
+ ParsedURL toURL(const Input & input) const override
{
auto url = parseURL(getStrAttr(input.attrs, "url"));
if (url.scheme != "git") url.scheme = "git+" + url.scheme;
@@ -314,7 +312,7 @@ struct GitInputScheme : InputScheme
return url;
}
- bool hasAllInfo(const Input & input) override
+ bool hasAllInfo(const Input & input) const override
{
bool maybeDirty = !input.getRef();
bool shallow = maybeGetBoolAttr(input.attrs, "shallow").value_or(false);
@@ -326,7 +324,7 @@ struct GitInputScheme : InputScheme
Input applyOverrides(
const Input & input,
std::optional<std::string> ref,
- std::optional<Hash> rev) override
+ std::optional<Hash> rev) const override
{
auto res(input);
if (rev) res.attrs.insert_or_assign("rev", rev->gitRev());
@@ -336,7 +334,7 @@ struct GitInputScheme : InputScheme
return res;
}
- void clone(const Input & input, const Path & destDir) override
+ void clone(const Input & input, const Path & destDir) const override
{
auto [isLocal, actualUrl] = getActualUrl(input);
@@ -604,9 +602,9 @@ struct GitInputScheme : InputScheme
{
throw Error(
"Cannot find Git revision '%s' in ref '%s' of repository '%s'! "
- "Please make sure that the " ANSI_BOLD "rev" ANSI_NORMAL " exists on the "
- ANSI_BOLD "ref" ANSI_NORMAL " you've specified or add " ANSI_BOLD
- "allRefs = true;" ANSI_NORMAL " to " ANSI_BOLD "fetchGit" ANSI_NORMAL ".",
+ "Please make sure that the " ANSI_BOLD "rev" ANSI_NORMAL " exists on the "
+ ANSI_BOLD "ref" ANSI_NORMAL " you've specified or add " ANSI_BOLD
+ "allRefs = true;" ANSI_NORMAL " to " ANSI_BOLD "fetchGit" ANSI_NORMAL ".",
input.getRev()->gitRev(),
*input.getRef(),
actualUrl
diff --git a/src/libfetchers/github.cc b/src/libfetchers/github.cc
index 2115ce2f5..1ed09d30d 100644
--- a/src/libfetchers/github.cc
+++ b/src/libfetchers/github.cc
@@ -26,11 +26,11 @@ std::regex hostRegex(hostRegexS, std::regex::ECMAScript);
struct GitArchiveInputScheme : InputScheme
{
- virtual std::string type() = 0;
+ virtual std::string type() const = 0;
virtual std::optional> accessHeaderFromToken(const std::string & token) const = 0;
- std::optional<Input> inputFromURL(const ParsedURL & url) override
+ std::optional<Input> inputFromURL(const ParsedURL & url) const override
{
if (url.scheme != type()) return {};
@@ -100,7 +100,7 @@ struct GitArchiveInputScheme : InputScheme
return input;
}
- std::optional<Input> inputFromAttrs(const Attrs & attrs) override
+ std::optional<Input> inputFromAttrs(const Attrs & attrs) const override
{
if (maybeGetStrAttr(attrs, "type") != type()) return {};
@@ -116,7 +116,7 @@ struct GitArchiveInputScheme : InputScheme
return input;
}
- ParsedURL toURL(const Input & input) override
+ ParsedURL toURL(const Input & input) const override
{
auto owner = getStrAttr(input.attrs, "owner");
auto repo = getStrAttr(input.attrs, "repo");
@@ -132,7 +132,7 @@ struct GitArchiveInputScheme : InputScheme
};
}
- bool hasAllInfo(const Input & input) override
+ bool hasAllInfo(const Input & input) const override
{
return input.getRev() && maybeGetIntAttr(input.attrs, "lastModified");
}
@@ -140,7 +140,7 @@ struct GitArchiveInputScheme : InputScheme
Input applyOverrides(
const Input & _input,
std::optional<std::string> ref,
- std::optional<Hash> rev) override
+ std::optional<Hash> rev) const override
{
auto input(_input);
if (rev && ref)
@@ -227,7 +227,7 @@ struct GitArchiveInputScheme : InputScheme
struct GitHubInputScheme : GitArchiveInputScheme
{
- std::string type() override { return "github"; }
+ std::string type() const override { return "github"; }
std::optional> accessHeaderFromToken(const std::string & token) const override
{
@@ -240,14 +240,29 @@ struct GitHubInputScheme : GitArchiveInputScheme
return std::pair("Authorization", fmt("token %s", token));
}
+ std::string getHost(const Input & input) const
+ {
+ return maybeGetStrAttr(input.attrs, "host").value_or("github.com");
+ }
+
+ std::string getOwner(const Input & input) const
+ {
+ return getStrAttr(input.attrs, "owner");
+ }
+
+ std::string getRepo(const Input & input) const
+ {
+ return getStrAttr(input.attrs, "repo");
+ }
+
Hash getRevFromRef(nix::ref store, const Input & input) const override
{
- auto host = maybeGetStrAttr(input.attrs, "host").value_or("github.com");
+ auto host = getHost(input);
auto url = fmt(
host == "github.com"
? "https://api.%s/repos/%s/%s/commits/%s"
: "https://%s/api/v3/repos/%s/%s/commits/%s",
- host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), *input.getRef());
+ host, getOwner(input), getRepo(input), *input.getRef());
Headers headers = makeHeadersWithAuthTokens(host);
@@ -262,8 +277,10 @@ struct GitHubInputScheme : GitArchiveInputScheme
DownloadUrl getDownloadUrl(const Input & input) const override
{
- auto host = maybeGetStrAttr(input.attrs, "host").value_or("github.com");
+ auto host = getHost(input);
+
Headers headers = makeHeadersWithAuthTokens(host);
+
// If we have no auth headers then we default to the public archive
// urls so we do not run into rate limits.
const auto urlFmt =
@@ -273,17 +290,17 @@ struct GitHubInputScheme : GitArchiveInputScheme
? "https://%s/%s/%s/archive/%s.tar.gz"
: "https://api.%s/repos/%s/%s/tarball/%s";
- const auto url = fmt(urlFmt, host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"),
+ const auto url = fmt(urlFmt, host, getOwner(input), getRepo(input),
input.getRev()->to_string(Base16, false));
return DownloadUrl { url, headers };
}
- void clone(const Input & input, const Path & destDir) override
+ void clone(const Input & input, const Path & destDir) const override
{
- auto host = maybeGetStrAttr(input.attrs, "host").value_or("github.com");
+ auto host = getHost(input);
Input::fromURL(fmt("git+https://%s/%s/%s.git",
- host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo")))
+ host, getOwner(input), getRepo(input)))
.applyOverrides(input.getRef(), input.getRev())
.clone(destDir);
}
@@ -291,7 +308,7 @@ struct GitHubInputScheme : GitArchiveInputScheme
struct GitLabInputScheme : GitArchiveInputScheme
{
- std::string type() override { return "gitlab"; }
+ std::string type() const override { return "gitlab"; }
std::optional> accessHeaderFromToken(const std::string & token) const override
{
@@ -346,7 +363,7 @@ struct GitLabInputScheme : GitArchiveInputScheme
return DownloadUrl { url, headers };
}
- void clone(const Input & input, const Path & destDir) override
+ void clone(const Input & input, const Path & destDir) const override
{
auto host = maybeGetStrAttr(input.attrs, "host").value_or("gitlab.com");
// FIXME: get username somewhere
@@ -359,7 +376,7 @@ struct GitLabInputScheme : GitArchiveInputScheme
struct SourceHutInputScheme : GitArchiveInputScheme
{
- std::string type() override { return "sourcehut"; }
+ std::string type() const override { return "sourcehut"; }
std::optional> accessHeaderFromToken(const std::string & token) const override
{
@@ -433,7 +450,7 @@ struct SourceHutInputScheme : GitArchiveInputScheme
return DownloadUrl { url, headers };
}
- void clone(const Input & input, const Path & destDir) override
+ void clone(const Input & input, const Path & destDir) const override
{
auto host = maybeGetStrAttr(input.attrs, "host").value_or("git.sr.ht");
Input::fromURL(fmt("git+https://%s/%s/%s",
diff --git a/src/libfetchers/indirect.cc b/src/libfetchers/indirect.cc
index 9288fc6cf..b99504a16 100644
--- a/src/libfetchers/indirect.cc
+++ b/src/libfetchers/indirect.cc
@@ -7,7 +7,7 @@ std::regex flakeRegex("[a-zA-Z][a-zA-Z0-9_-]*", std::regex::ECMAScript);
struct IndirectInputScheme : InputScheme
{
- std::optional<Input> inputFromURL(const ParsedURL & url) override
+ std::optional<Input> inputFromURL(const ParsedURL & url) const override
{
if (url.scheme != "flake") return {};
@@ -50,7 +50,7 @@ struct IndirectInputScheme : InputScheme
return input;
}
- std::optional<Input> inputFromAttrs(const Attrs & attrs) override
+ std::optional<Input> inputFromAttrs(const Attrs & attrs) const override
{
if (maybeGetStrAttr(attrs, "type") != "indirect") return {};
@@ -68,7 +68,7 @@ struct IndirectInputScheme : InputScheme
return input;
}
- ParsedURL toURL(const Input & input) override
+ ParsedURL toURL(const Input & input) const override
{
ParsedURL url;
url.scheme = "flake";
@@ -78,7 +78,7 @@ struct IndirectInputScheme : InputScheme
return url;
}
- bool hasAllInfo(const Input & input) override
+ bool hasAllInfo(const Input & input) const override
{
return false;
}
@@ -86,7 +86,7 @@ struct IndirectInputScheme : InputScheme
Input applyOverrides(
const Input & _input,
std::optional<std::string> ref,
- std::optional<Hash> rev) override
+ std::optional<Hash> rev) const override
{
auto input(_input);
if (rev) input.attrs.insert_or_assign("rev", rev->gitRev());
diff --git a/src/libfetchers/mercurial.cc b/src/libfetchers/mercurial.cc
index 5c5671681..86e8f81f4 100644
--- a/src/libfetchers/mercurial.cc
+++ b/src/libfetchers/mercurial.cc
@@ -43,7 +43,7 @@ static std::string runHg(const Strings & args, const std::optional
struct MercurialInputScheme : InputScheme
{
- std::optional<Input> inputFromURL(const ParsedURL & url) override
+ std::optional<Input> inputFromURL(const ParsedURL & url) const override
{
if (url.scheme != "hg+http" &&
url.scheme != "hg+https" &&
@@ -69,7 +69,7 @@ struct MercurialInputScheme : InputScheme
return inputFromAttrs(attrs);
}
- std::optional<Input> inputFromAttrs(const Attrs & attrs) override
+ std::optional<Input> inputFromAttrs(const Attrs & attrs) const override
{
if (maybeGetStrAttr(attrs, "type") != "hg") return {};
@@ -89,7 +89,7 @@ struct MercurialInputScheme : InputScheme
return input;
}
- ParsedURL toURL(const Input & input) override
+ ParsedURL toURL(const Input & input) const override
{
auto url = parseURL(getStrAttr(input.attrs, "url"));
url.scheme = "hg+" + url.scheme;
@@ -98,7 +98,7 @@ struct MercurialInputScheme : InputScheme
return url;
}
- bool hasAllInfo(const Input & input) override
+ bool hasAllInfo(const Input & input) const override
{
// FIXME: ugly, need to distinguish between dirty and clean
// default trees.
@@ -108,7 +108,7 @@ struct MercurialInputScheme : InputScheme
Input applyOverrides(
const Input & input,
std::optional<std::string> ref,
- std::optional<Hash> rev) override
+ std::optional<Hash> rev) const override
{
auto res(input);
if (rev) res.attrs.insert_or_assign("rev", rev->gitRev());
diff --git a/src/libfetchers/path.cc b/src/libfetchers/path.cc
index f0ef97da5..61541e69d 100644
--- a/src/libfetchers/path.cc
+++ b/src/libfetchers/path.cc
@@ -6,7 +6,7 @@ namespace nix::fetchers {
struct PathInputScheme : InputScheme
{
- std::optional<Input> inputFromURL(const ParsedURL & url) override
+ std::optional<Input> inputFromURL(const ParsedURL & url) const override
{
if (url.scheme != "path") return {};
@@ -32,7 +32,7 @@ struct PathInputScheme : InputScheme
return input;
}
- std::optional<Input> inputFromAttrs(const Attrs & attrs) override
+ std::optional<Input> inputFromAttrs(const Attrs & attrs) const override
{
if (maybeGetStrAttr(attrs, "type") != "path") return {};
@@ -54,7 +54,7 @@ struct PathInputScheme : InputScheme
return input;
}
- ParsedURL toURL(const Input & input) override
+ ParsedURL toURL(const Input & input) const override
{
auto query = attrsToQuery(input.attrs);
query.erase("path");
@@ -66,7 +66,7 @@ struct PathInputScheme : InputScheme
};
}
- bool hasAllInfo(const Input & input) override
+ bool hasAllInfo(const Input & input) const override
{
return true;
}
diff --git a/src/libfetchers/registry.cc b/src/libfetchers/registry.cc
index acd1ff866..43c03beec 100644
--- a/src/libfetchers/registry.cc
+++ b/src/libfetchers/registry.cc
@@ -153,6 +153,9 @@ static std::shared_ptr getGlobalRegistry(ref store)
{
static auto reg = [&]() {
auto path = fetchSettings.flakeRegistry.get();
+ if (path == "") {
+ return std::make_shared<Registry>(Registry::Global); // empty registry
+ }
if (!hasPrefix(path, "/")) {
auto storePath = downloadFile(store, path, "flake-registry.json", false).storePath;
diff --git a/src/libfetchers/tarball.cc b/src/libfetchers/tarball.cc
index 6c551bd93..e9686262a 100644
--- a/src/libfetchers/tarball.cc
+++ b/src/libfetchers/tarball.cc
@@ -185,7 +185,7 @@ struct CurlInputScheme : InputScheme
virtual bool isValidURL(const ParsedURL & url) const = 0;
- std::optional<Input> inputFromURL(const ParsedURL & url) override
+ std::optional<Input> inputFromURL(const ParsedURL & url) const override
{
if (!isValidURL(url))
return std::nullopt;
@@ -203,7 +203,7 @@ struct CurlInputScheme : InputScheme
return input;
}
- std::optional<Input> inputFromAttrs(const Attrs & attrs) override
+ std::optional<Input> inputFromAttrs(const Attrs & attrs) const override
{
auto type = maybeGetStrAttr(attrs, "type");
if (type != inputType()) return {};
@@ -220,16 +220,17 @@ struct CurlInputScheme : InputScheme
return input;
}
- ParsedURL toURL(const Input & input) override
+ ParsedURL toURL(const Input & input) const override
{
auto url = parseURL(getStrAttr(input.attrs, "url"));
- // NAR hashes are preferred over file hashes since tar/zip files // don't have a canonical representation.
+ // NAR hashes are preferred over file hashes since tar/zip
+ // files don't have a canonical representation.
if (auto narHash = input.getNarHash())
url.query.insert_or_assign("narHash", narHash->to_string(SRI, true));
return url;
}
- bool hasAllInfo(const Input & input) override
+ bool hasAllInfo(const Input & input) const override
{
return true;
}
diff --git a/src/libmain/progress-bar.cc b/src/libmain/progress-bar.cc
index 961f4e18a..e9205a5e5 100644
--- a/src/libmain/progress-bar.cc
+++ b/src/libmain/progress-bar.cc
@@ -132,7 +132,7 @@ public:
log(*state, lvl, fs.s);
}
- void logEI(const ErrorInfo &ei) override
+ void logEI(const ErrorInfo & ei) override
{
auto state(state_.lock());
@@ -180,10 +180,12 @@ public:
auto machineName = getS(fields, 1);
if (machineName != "")
i->s += fmt(" on " ANSI_BOLD "%s" ANSI_NORMAL, machineName);
- auto curRound = getI(fields, 2);
- auto nrRounds = getI(fields, 3);
- if (nrRounds != 1)
- i->s += fmt(" (round %d/%d)", curRound, nrRounds);
+
+ // Used to be curRound and nrRounds, but the
+ // implementation was broken for a long time.
+ if (getI(fields, 2) != 1 || getI(fields, 3) != 1) {
+ throw Error("log message indicated repeating builds, but this is not currently implemented");
+ }
i->name = DrvName(name).name;
}
diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc
index 12d0c32fb..149d414d3 100644
--- a/src/libstore/binary-cache-store.cc
+++ b/src/libstore/binary-cache-store.cc
@@ -346,7 +346,7 @@ void BinaryCacheStore::narFromPath(const StorePath & storePath, Sink & sink)
try {
getFile(info->url, *decompressor);
} catch (NoSuchBinaryCacheFile & e) {
- throw SubstituteGone(e.info());
+ throw SubstituteGone(std::move(e.info()));
}
decompressor->finish();
diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc
index 67cfc38af..d3b995a4f 100644
--- a/src/libstore/build/derivation-goal.cc
+++ b/src/libstore/build/derivation-goal.cc
@@ -134,7 +134,7 @@ void DerivationGoal::killChild()
void DerivationGoal::timedOut(Error && ex)
{
killChild();
- done(BuildResult::TimedOut, {}, ex);
+ done(BuildResult::TimedOut, {}, std::move(ex));
}
@@ -501,6 +501,14 @@ void DerivationGoal::inputsRealised()
now-known results of dependencies. If so, we become a
stub goal aliasing that resolved derivation goal. */
std::optional attempt = fullDrv.tryResolve(worker.store, inputDrvOutputs);
+ if (!attempt) {
+ /* TODO (impure derivations-induced tech debt) (see below):
+ The above attempt should have found it, but because we manage
+ inputDrvOutputs statefully, sometimes it gets out of sync with
+ the real source of truth (store). So we query the store
+ directly if there's a problem. */
+ attempt = fullDrv.tryResolve(worker.store);
+ }
assert(attempt);
Derivation drvResolved { *std::move(attempt) };
@@ -563,10 +571,6 @@ void DerivationGoal::inputsRealised()
/* What type of derivation are we building? */
derivationType = drv->type();
- /* Don't repeat fixed-output derivations since they're already
- verified by their output hash.*/
- nrRounds = derivationType.isFixed() ? 1 : settings.buildRepeat + 1;
-
/* Okay, try to build. Note that here we don't wait for a build
slot to become available, since we don't need one if there is a
build hook. */
@@ -581,12 +585,11 @@ void DerivationGoal::started()
auto msg = fmt(
buildMode == bmRepair ? "repairing outputs of '%s'" :
buildMode == bmCheck ? "checking outputs of '%s'" :
- nrRounds > 1 ? "building '%s' (round %d/%d)" :
- "building '%s'", worker.store.printStorePath(drvPath), curRound, nrRounds);
+ "building '%s'", worker.store.printStorePath(drvPath));
fmt("building '%s'", worker.store.printStorePath(drvPath));
if (hook) msg += fmt(" on '%s'", machineName);
act = std::make_unique(*logger, lvlInfo, actBuild, msg,
- Logger::Fields{worker.store.printStorePath(drvPath), hook ? machineName : "", curRound, nrRounds});
+ Logger::Fields{worker.store.printStorePath(drvPath), hook ? machineName : "", 1, 1});
mcRunningBuilds = std::make_unique>(worker.runningBuilds);
worker.updateProgress();
}
@@ -940,14 +943,6 @@ void DerivationGoal::buildDone()
cleanupPostOutputsRegisteredModeNonCheck();
- /* Repeat the build if necessary. */
- if (curRound++ < nrRounds) {
- outputLocks.unlock();
- state = &DerivationGoal::tryToBuild;
- worker.wakeUp(shared_from_this());
- return;
- }
-
/* It is now safe to delete the lock files, since all future
lockers will see that the output paths are valid; they will
not create new lock files with the same names as the old
@@ -976,7 +971,7 @@ void DerivationGoal::buildDone()
BuildResult::PermanentFailure;
}
- done(st, {}, e);
+ done(st, {}, std::move(e));
return;
}
}
@@ -1008,22 +1003,34 @@ void DerivationGoal::resolvedFinished()
throw Error(
"derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolvedFinished,resolve)",
worker.store.printStorePath(drvPath), wantedOutput);
- auto realisation = get(resolvedResult.builtOutputs, DrvOutput { *resolvedHash, wantedOutput });
- if (!realisation)
- throw Error(
- "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolvedFinished,realisation)",
- worker.store.printStorePath(resolvedDrvGoal->drvPath), wantedOutput);
+
+ auto realisation = [&]{
+ auto take1 = get(resolvedResult.builtOutputs, DrvOutput { *resolvedHash, wantedOutput });
+ if (take1) return *take1;
+
+ /* The above `get` should work. But due to stateful tracking of
+ outputs in resolvedResult, it can get out of sync with the
+ store, which is our actual source of truth. For now we just
+ check the store directly if it fails. */
+ auto take2 = worker.evalStore.queryRealisation(DrvOutput { *resolvedHash, wantedOutput });
+ if (take2) return *take2;
+
+ throw Error(
+ "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolvedFinished,realisation)",
+ worker.store.printStorePath(resolvedDrvGoal->drvPath), wantedOutput);
+ }();
+
if (drv->type().isPure()) {
- auto newRealisation = *realisation;
+ auto newRealisation = realisation;
newRealisation.id = DrvOutput { initialOutput->outputHash, wantedOutput };
newRealisation.signatures.clear();
if (!drv->type().isFixed())
- newRealisation.dependentRealisations = drvOutputReferences(worker.store, *drv, realisation->outPath);
+ newRealisation.dependentRealisations = drvOutputReferences(worker.store, *drv, realisation.outPath);
signRealisation(newRealisation);
worker.store.registerDrvOutput(newRealisation);
}
- outputPaths.insert(realisation->outPath);
- builtOutputs.emplace(realisation->id, *realisation);
+ outputPaths.insert(realisation.outPath);
+ builtOutputs.emplace(realisation.id, realisation);
}
runPostBuildHook(
@@ -1427,7 +1434,7 @@ void DerivationGoal::done(
fs << worker.store.printStorePath(drvPath) << "\t" << buildResult.toString() << std::endl;
}
- amDone(buildResult.success() ? ecSuccess : ecFailed, ex);
+ amDone(buildResult.success() ? ecSuccess : ecFailed, std::move(ex));
}
diff --git a/src/libstore/build/derivation-goal.hh b/src/libstore/build/derivation-goal.hh
index 2d8bfd592..d33e04cbc 100644
--- a/src/libstore/build/derivation-goal.hh
+++ b/src/libstore/build/derivation-goal.hh
@@ -115,11 +115,6 @@ struct DerivationGoal : public Goal
BuildMode buildMode;
- /* The current round, if we're building multiple times. */
- size_t curRound = 1;
-
- size_t nrRounds;
-
std::unique_ptr> mcExpectedBuilds, mcRunningBuilds;
std::unique_ptr act;
diff --git a/src/libstore/build/entry-points.cc b/src/libstore/build/entry-points.cc
index bea7363db..e1b80165e 100644
--- a/src/libstore/build/entry-points.cc
+++ b/src/libstore/build/entry-points.cc
@@ -30,7 +30,7 @@ void Store::buildPaths(const std::vector & reqs, BuildMode buildMod
if (ex)
logError(i->ex->info());
else
- ex = i->ex;
+ ex = std::move(i->ex);
}
if (i->exitCode != Goal::ecSuccess) {
if (auto i2 = dynamic_cast(i.get())) failed.insert(i2->drvPath);
@@ -40,7 +40,7 @@ void Store::buildPaths(const std::vector & reqs, BuildMode buildMod
if (failed.size() == 1 && ex) {
ex->status = worker.exitStatus();
- throw *ex;
+ throw std::move(*ex);
} else if (!failed.empty()) {
if (ex) logError(ex->info());
throw Error(worker.exitStatus(), "build of %s failed", showPaths(failed));
@@ -109,7 +109,7 @@ void Store::ensurePath(const StorePath & path)
if (goal->exitCode != Goal::ecSuccess) {
if (goal->ex) {
goal->ex->status = worker.exitStatus();
- throw *goal->ex;
+ throw std::move(*goal->ex);
} else
throw Error(worker.exitStatus(), "path '%s' does not exist and cannot be created", printStorePath(path));
}
diff --git a/src/libstore/build/local-derivation-goal.cc b/src/libstore/build/local-derivation-goal.cc
index c9b7b24f3..dccd096ec 100644
--- a/src/libstore/build/local-derivation-goal.cc
+++ b/src/libstore/build/local-derivation-goal.cc
@@ -230,7 +230,7 @@ void LocalDerivationGoal::tryLocalBuild() {
outputLocks.unlock();
buildUser.reset();
worker.permanentFailure = true;
- done(BuildResult::InputRejected, {}, e);
+ done(BuildResult::InputRejected, {}, std::move(e));
return;
}
@@ -409,12 +409,16 @@ void LocalDerivationGoal::startBuilder()
#if __linux__
settings.requireExperimentalFeature(Xp::Cgroups);
+ auto cgroupFS = getCgroupFS();
+ if (!cgroupFS)
+ throw Error("cannot determine the cgroups file system");
+
auto ourCgroups = getCgroups("/proc/self/cgroup");
auto ourCgroup = ourCgroups[""];
if (ourCgroup == "")
throw Error("cannot determine cgroup name from /proc/self/cgroup");
- auto ourCgroupPath = canonPath("/sys/fs/cgroup/" + ourCgroup);
+ auto ourCgroupPath = canonPath(*cgroupFS + "/" + ourCgroup);
if (!pathExists(ourCgroupPath))
throw Error("expected cgroup directory '%s'", ourCgroupPath);
@@ -2256,7 +2260,6 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
InodesSeen inodesSeen;
Path checkSuffix = ".check";
- bool keepPreviousRound = settings.keepFailed || settings.runDiffHook;
std::exception_ptr delayedException;
@@ -2684,10 +2687,8 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
debug("unreferenced input: '%1%'", worker.store.printStorePath(i));
}
- if (curRound == nrRounds) {
- localStore.optimisePath(actualPath, NoRepair); // FIXME: combine with scanForReferences()
- worker.markContentsGood(newInfo.path);
- }
+ localStore.optimisePath(actualPath, NoRepair); // FIXME: combine with scanForReferences()
+ worker.markContentsGood(newInfo.path);
newInfo.deriver = drvPath;
newInfo.ultimate = true;
@@ -2716,61 +2717,6 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
/* Apply output checks. */
checkOutputs(infos);
- /* Compare the result with the previous round, and report which
- path is different, if any.*/
- if (curRound > 1 && prevInfos != infos) {
- assert(prevInfos.size() == infos.size());
- for (auto i = prevInfos.begin(), j = infos.begin(); i != prevInfos.end(); ++i, ++j)
- if (!(*i == *j)) {
- buildResult.isNonDeterministic = true;
- Path prev = worker.store.printStorePath(i->second.path) + checkSuffix;
- bool prevExists = keepPreviousRound && pathExists(prev);
- hintformat hint = prevExists
- ? hintfmt("output '%s' of '%s' differs from '%s' from previous round",
- worker.store.printStorePath(i->second.path), worker.store.printStorePath(drvPath), prev)
- : hintfmt("output '%s' of '%s' differs from previous round",
- worker.store.printStorePath(i->second.path), worker.store.printStorePath(drvPath));
-
- handleDiffHook(
- buildUser ? buildUser->getUID() : getuid(),
- buildUser ? buildUser->getGID() : getgid(),
- prev, worker.store.printStorePath(i->second.path),
- worker.store.printStorePath(drvPath), tmpDir);
-
- if (settings.enforceDeterminism)
- throw NotDeterministic(hint);
-
- printError(hint);
-
- curRound = nrRounds; // we know enough, bail out early
- }
- }
-
- /* If this is the first round of several, then move the output out of the way. */
- if (nrRounds > 1 && curRound == 1 && curRound < nrRounds && keepPreviousRound) {
- for (auto & [_, outputStorePath] : finalOutputs) {
- auto path = worker.store.printStorePath(outputStorePath);
- Path prev = path + checkSuffix;
- deletePath(prev);
- Path dst = path + checkSuffix;
- renameFile(path, dst);
- }
- }
-
- if (curRound < nrRounds) {
- prevInfos = std::move(infos);
- return {};
- }
-
- /* Remove the .check directories if we're done. FIXME: keep them
- if the result was not determistic? */
- if (curRound == nrRounds) {
- for (auto & [_, outputStorePath] : finalOutputs) {
- Path prev = worker.store.printStorePath(outputStorePath) + checkSuffix;
- deletePath(prev);
- }
- }
-
/* Register each output path as valid, and register the sets of
paths referenced by each of them. If there are cycles in the
outputs, this will fail. */
diff --git a/src/libstore/daemon.cc b/src/libstore/daemon.cc
index 48dd5c247..12596ba49 100644
--- a/src/libstore/daemon.cc
+++ b/src/libstore/daemon.cc
@@ -238,7 +238,6 @@ struct ClientSettings
}
else if (trusted
|| name == settings.buildTimeout.name
- || name == settings.buildRepeat.name
|| name == settings.maxSilentTime.name
|| name == settings.pollInterval.name
|| name == "connect-timeout"
diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc
index fe99c3c5e..42a53912e 100644
--- a/src/libstore/derivations.cc
+++ b/src/libstore/derivations.cc
@@ -448,7 +448,7 @@ std::string Derivation::unparse(const Store & store, bool maskOutputs,
// FIXME: remove
-bool isDerivation(const std::string & fileName)
+bool isDerivation(std::string_view fileName)
{
return hasSuffix(fileName, drvExtension);
}
diff --git a/src/libstore/derivations.hh b/src/libstore/derivations.hh
index af198a767..f3cd87fb1 100644
--- a/src/libstore/derivations.hh
+++ b/src/libstore/derivations.hh
@@ -224,7 +224,7 @@ StorePath writeDerivation(Store & store,
Derivation parseDerivation(const Store & store, std::string && s, std::string_view name);
// FIXME: remove
-bool isDerivation(const std::string & fileName);
+bool isDerivation(std::string_view fileName);
/* Calculate the name that will be used for the store path for this
output.
diff --git a/src/libstore/derived-path.cc b/src/libstore/derived-path.cc
index 88b59f615..3fa5ae4f7 100644
--- a/src/libstore/derived-path.cc
+++ b/src/libstore/derived-path.cc
@@ -20,11 +20,12 @@ nlohmann::json DerivedPath::Built::toJSON(ref store) const {
// Fallback for the input-addressed derivation case: We expect to always be
// able to print the output paths, so let’s do it
const auto knownOutputs = store->queryPartialDerivationOutputMap(drvPath);
- for (const auto& output : outputs) {
+ for (const auto & output : outputs) {
auto knownOutput = get(knownOutputs, output);
- res["outputs"][output] = (knownOutput && *knownOutput)
- ? store->printStorePath(**knownOutput)
- : nullptr;
+ if (knownOutput && *knownOutput)
+ res["outputs"][output] = store->printStorePath(**knownOutput);
+ else
+ res["outputs"][output] = nullptr;
}
return res;
}
@@ -78,15 +79,16 @@ DerivedPath::Opaque DerivedPath::Opaque::parse(const Store & store, std::string_
return {store.parseStorePath(s)};
}
-DerivedPath::Built DerivedPath::Built::parse(const Store & store, std::string_view s)
+DerivedPath::Built DerivedPath::Built::parse(const Store & store, std::string_view drvS, std::string_view outputsS)
{
- size_t n = s.find("!");
- assert(n != s.npos);
- auto drvPath = store.parseStorePath(s.substr(0, n));
- auto outputsS = s.substr(n + 1);
+ auto drvPath = store.parseStorePath(drvS);
std::set outputs;
- if (outputsS != "*")
+ if (outputsS != "*") {
outputs = tokenizeString>(outputsS, ",");
+ if (outputs.empty())
+ throw Error(
+ "Explicit list of wanted outputs '%s' must not be empty. Consider using '*' as a wildcard meaning all outputs if no output in particular is wanted.", outputsS);
+ }
return {drvPath, outputs};
}
@@ -95,7 +97,7 @@ DerivedPath DerivedPath::parse(const Store & store, std::string_view s)
size_t n = s.find("!");
return n == s.npos
? (DerivedPath) DerivedPath::Opaque::parse(store, s)
- : (DerivedPath) DerivedPath::Built::parse(store, s);
+ : (DerivedPath) DerivedPath::Built::parse(store, s.substr(0, n), s.substr(n + 1));
}
RealisedPath::Set BuiltPath::toRealisedPaths(Store & store) const
diff --git a/src/libstore/derived-path.hh b/src/libstore/derived-path.hh
index 878696136..706e5dcb4 100644
--- a/src/libstore/derived-path.hh
+++ b/src/libstore/derived-path.hh
@@ -47,7 +47,7 @@ struct DerivedPathBuilt {
std::set outputs;
std::string to_string(const Store & store) const;
- static DerivedPathBuilt parse(const Store & store, std::string_view);
+ static DerivedPathBuilt parse(const Store & store, std::string_view, std::string_view);
nlohmann::json toJSON(ref store) const;
bool operator < (const DerivedPathBuilt & b) const
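For context on the split above: a derived path string has the form `<store path of .drv>!<outputs>`, where `<outputs>` is either `*` (meaning all outputs) or a non-empty comma-separated list. The following is a minimal standalone sketch of that parsing logic; it deliberately uses plain `std::string` instead of Nix's `Store`/`StorePath` types, and the helper name `parseBuilt` is made up for illustration.

```cpp
#include <iostream>
#include <set>
#include <sstream>
#include <stdexcept>
#include <string>
#include <string_view>

struct ParsedBuilt {
    std::string drvPath;
    std::set<std::string> outputs; // empty set stands for "*" (all outputs)
};

ParsedBuilt parseBuilt(std::string_view s)
{
    auto n = s.find('!');
    if (n == s.npos) throw std::invalid_argument("expected '<drv>!<outputs>'");
    ParsedBuilt res{std::string(s.substr(0, n)), {}};
    auto outputsS = s.substr(n + 1);
    if (outputsS != "*") {
        std::stringstream ss{std::string(outputsS)};
        for (std::string out; std::getline(ss, out, ','); )
            if (!out.empty()) res.outputs.insert(out);
        if (res.outputs.empty())
            throw std::invalid_argument("explicit output list must not be empty; use '*' for all outputs");
    }
    return res;
}

int main()
{
    auto p = parseBuilt("/nix/store/aaaa-foo.drv!out,dev");
    std::cout << p.drvPath << " -> " << p.outputs.size() << " outputs\n"; // 2 outputs
}
```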
diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc
index 5746c32a3..756bd4423 100644
--- a/src/libstore/filetransfer.cc
+++ b/src/libstore/filetransfer.cc
@@ -33,14 +33,6 @@ FileTransferSettings fileTransferSettings;
static GlobalConfig::Register rFileTransferSettings(&fileTransferSettings);
-std::string resolveUri(std::string_view uri)
-{
- if (uri.compare(0, 8, "channel:") == 0)
- return "https://nixos.org/channels/" + std::string(uri.substr(8)) + "/nixexprs.tar.xz";
- else
- return std::string(uri);
-}
-
struct curlFileTransfer : public FileTransfer
{
CURLM * curlm = 0;
@@ -142,9 +134,9 @@ struct curlFileTransfer : public FileTransfer
}
template
- void fail(const T & e)
+ void fail(T && e)
{
- failEx(std::make_exception_ptr(e));
+ failEx(std::make_exception_ptr(std::move(e)));
}
LambdaSink finalSink;
@@ -472,7 +464,7 @@ struct curlFileTransfer : public FileTransfer
fileTransfer.enqueueItem(shared_from_this());
}
else
- fail(exc);
+ fail(std::move(exc));
}
}
};
@@ -873,14 +865,4 @@ FileTransferError::FileTransferError(FileTransfer::Error error, std::optional response, const Args & ... args);
};
-bool isUri(std::string_view s);
-
-/* Resolve deprecated 'channel:' URLs. */
-std::string resolveUri(std::string_view uri);
-
}
diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh
index b40dcfa77..274a15dd7 100644
--- a/src/libstore/globals.hh
+++ b/src/libstore/globals.hh
@@ -281,10 +281,28 @@ public:
`NIX_REMOTE` is empty, the uid under which the Nix daemon runs if
`NIX_REMOTE` is `daemon`). Obviously, this should not be used in
multi-user settings with untrusted users.
- )"};
+
+ Defaults to `nixbld` when running as root, *empty* otherwise.
+ )",
+ {}, false};
Setting autoAllocateUids{this, false, "auto-allocate-uids",
- "Whether to allocate UIDs for builders automatically."};
+ R"(
+ Whether to select UIDs for builds automatically, instead of using the
+ users in `build-users-group`.
+
+ UIDs are allocated starting at 872415232 (0x34000000) on Linux and 56930 on macOS.
+
+ > **Warning**
+ > This is an experimental feature.
+
+ To enable it, add the following to [`nix.conf`](#):
+
+ ```
+ extra-experimental-features = auto-allocate-uids
+ auto-allocate-uids = true
+ ```
+ )"};
Setting startId{this,
#if __linux__
@@ -308,11 +326,22 @@ public:
Setting useCgroups{
this, false, "use-cgroups",
R"(
- Whether to execute builds inside cgroups. Cgroups are
- enabled automatically for derivations that require the
- `uid-range` system feature.
- )"
- };
+ Whether to execute builds inside cgroups.
+ This is only supported on Linux.
+
+ Cgroups are required and enabled automatically for derivations
+ that require the `uid-range` system feature.
+
+ > **Warning**
+ > This is an experimental feature.
+
+ To enable it, add the following to [`nix.conf`](#):
+
+ ```
+ extra-experimental-features = cgroups
+ use-cgroups = true
+ ```
+ )"};
#endif
Setting impersonateLinux26{this, false, "impersonate-linux-26",
@@ -347,11 +376,6 @@ public:
)",
{"build-max-log-size"}};
- /* When buildRepeat > 0 and verboseBuild == true, whether to print
- repeated builds (i.e. builds other than the first one) to
- stderr. Hack to prevent Hydra logs from being polluted. */
- bool printRepeatedBuilds = true;
-
Setting pollInterval{this, 5, "build-poll-interval",
"How often (in seconds) to poll for locks."};
@@ -475,19 +499,6 @@ public:
Setting sandboxFallback{this, true, "sandbox-fallback",
"Whether to disable sandboxing when the kernel doesn't allow it."};
- Setting buildRepeat{
- this, 0, "repeat",
- R"(
- How many times to repeat builds to check whether they are
- deterministic. The default value is 0. If the value is non-zero,
- every build is repeated the specified number of times. If the
- contents of any of the runs differs from the previous ones and
- `enforce-determinism` is true, the build is rejected and the
- resulting store paths are not registered as “valid” in Nix’s
- database.
- )",
- {"build-repeat"}};
-
#if __linux__
Setting sandboxShmSize{
this, "50%", "sandbox-dev-shm-size",
@@ -551,10 +562,6 @@ public:
configuration file, and cannot be passed at the command line.
)"};
- Setting enforceDeterminism{
- this, true, "enforce-determinism",
- "Whether to fail if repeated builds produce different output. See `repeat`."};
-
Setting trustedPublicKeys{
this,
{"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="},
diff --git a/src/libstore/legacy-ssh-store.cc b/src/libstore/legacy-ssh-store.cc
index dd34b19c6..4d398b21d 100644
--- a/src/libstore/legacy-ssh-store.cc
+++ b/src/libstore/legacy-ssh-store.cc
@@ -255,8 +255,8 @@ private:
<< settings.maxLogSize;
if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 3)
conn.to
- << settings.buildRepeat
- << settings.enforceDeterminism;
+ << 0 // buildRepeat hasn't worked for ages anyway
+ << 0;
if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 7) {
conn.to << ((int) settings.keepFailed);
diff --git a/src/libstore/lock.cc b/src/libstore/lock.cc
index 2858137d6..d02d20b4c 100644
--- a/src/libstore/lock.cc
+++ b/src/libstore/lock.cc
@@ -185,7 +185,7 @@ std::unique_ptr acquireUserLock(uid_t nrIds, bool useChroot)
bool useBuildUsers()
{
#if __linux__
- static bool b = (settings.buildUsersGroup != "" || settings.startId.get() != 0) && getuid() == 0;
+ static bool b = (settings.buildUsersGroup != "" || settings.autoAllocateUids) && getuid() == 0;
return b;
#elif __APPLE__
static bool b = settings.buildUsersGroup != "" && getuid() == 0;
diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc
index 96a29155c..48cf731a8 100644
--- a/src/libstore/remote-store.cc
+++ b/src/libstore/remote-store.cc
@@ -447,7 +447,7 @@ void RemoteStore::queryPathInfoUncached(const StorePath & path,
} catch (Error & e) {
// Ugly backwards compatibility hack.
if (e.msg().find("is not valid") != std::string::npos)
- throw InvalidPath(e.info());
+ throw InvalidPath(std::move(e.info()));
throw;
}
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 17) {
diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc
index 8811ab578..80b60ca1b 100644
--- a/src/libstore/store-api.cc
+++ b/src/libstore/store-api.cc
@@ -19,21 +19,21 @@ using json = nlohmann::json;
namespace nix {
-bool Store::isInStore(const Path & path) const
+bool Store::isInStore(PathView path) const
{
return isInDir(path, storeDir);
}
-std::pair Store::toStorePath(const Path & path) const
+std::pair Store::toStorePath(PathView path) const
{
if (!isInStore(path))
throw Error("path '%1%' is not in the Nix store", path);
- Path::size_type slash = path.find('/', storeDir.size() + 1);
+ auto slash = path.find('/', storeDir.size() + 1);
if (slash == Path::npos)
return {parseStorePath(path), ""};
else
- return {parseStorePath(std::string_view(path).substr(0, slash)), path.substr(slash)};
+ return {parseStorePath(path.substr(0, slash)), (Path) path.substr(slash)};
}
diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh
index 151ec10d6..4a88d7216 100644
--- a/src/libstore/store-api.hh
+++ b/src/libstore/store-api.hh
@@ -179,7 +179,7 @@ public:
/* Return true if ‘path’ is in the Nix store (but not the Nix
store itself). */
- bool isInStore(const Path & path) const;
+ bool isInStore(PathView path) const;
/* Return true if ‘path’ is a store path, i.e. a direct child of
the Nix store. */
@@ -187,7 +187,7 @@ public:
/* Split a path like /nix/store/-/ into
/nix/store/- and /. */
- std::pair toStorePath(const Path & path) const;
+ std::pair toStorePath(PathView path) const;
/* Follow symlinks until we end up with a path in the Nix store. */
Path followLinksToStore(std::string_view path) const;
diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc
index 4b0636129..0e2b9d12c 100644
--- a/src/libutil/archive.cc
+++ b/src/libutil/archive.cc
@@ -35,10 +35,6 @@ static ArchiveSettings archiveSettings;
static GlobalConfig::Register rArchiveSettings(&archiveSettings);
-const std::string narVersionMagic1 = "nix-archive-1";
-
-static std::string caseHackSuffix = "~nix~case~hack~";
-
PathFilter defaultPathFilter = [](const Path &) { return true; };
diff --git a/src/libutil/archive.hh b/src/libutil/archive.hh
index ac4183bf5..e42dea540 100644
--- a/src/libutil/archive.hh
+++ b/src/libutil/archive.hh
@@ -103,7 +103,9 @@ void copyNAR(Source & source, Sink & sink);
void copyPath(const Path & from, const Path & to);
-extern const std::string narVersionMagic1;
+inline constexpr std::string_view narVersionMagic1 = "nix-archive-1";
+
+inline constexpr std::string_view caseHackSuffix = "~nix~case~hack~";
}
diff --git a/src/libstore/cgroup.cc b/src/libutil/cgroup.cc
similarity index 89%
rename from src/libstore/cgroup.cc
rename to src/libutil/cgroup.cc
index f693d77be..a008481ca 100644
--- a/src/libstore/cgroup.cc
+++ b/src/libutil/cgroup.cc
@@ -2,6 +2,7 @@
#include "cgroup.hh"
#include "util.hh"
+#include "finally.hh"
#include
#include
@@ -10,9 +11,25 @@
#include
#include
+#include <mntent.h>
namespace nix {
+std::optional<Path> getCgroupFS()
+{
+ static auto res = [&]() -> std::optional<Path> {
+ auto fp = fopen("/proc/mounts", "r");
+ if (!fp) return std::nullopt;
+ Finally delFP = [&]() { fclose(fp); };
+ while (auto ent = getmntent(fp))
+ if (std::string_view(ent->mnt_type) == "cgroup2")
+ return ent->mnt_dir;
+
+ return std::nullopt;
+ }();
+ return res;
+}
+
// FIXME: obsolete, check for cgroup2
std::map getCgroups(const Path & cgroupFile)
{
diff --git a/src/libstore/cgroup.hh b/src/libutil/cgroup.hh
similarity index 93%
rename from src/libstore/cgroup.hh
rename to src/libutil/cgroup.hh
index 3ead4735f..d08c8ad29 100644
--- a/src/libstore/cgroup.hh
+++ b/src/libutil/cgroup.hh
@@ -9,6 +9,8 @@
namespace nix {
+std::optional<Path> getCgroupFS();
+
std::map getCgroups(const Path & cgroupFile);
struct CgroupStats
diff --git a/src/libutil/error.cc b/src/libutil/error.cc
index 9172f67a6..3bb3efb0e 100644
--- a/src/libutil/error.cc
+++ b/src/libutil/error.cc
@@ -262,6 +262,28 @@ std::ostream & showErrorInfo(std::ostream & out, const ErrorInfo & einfo, bool s
prefix += ":" ANSI_NORMAL " ";
std::ostringstream oss;
+
+ // traces
+ if (showTrace && !einfo.traces.empty()) {
+ for (const auto & trace : einfo.traces) {
+ oss << "\n" << "… " << trace.hint.str() << "\n";
+
+ if (trace.pos.has_value() && (*trace.pos)) {
+ auto pos = trace.pos.value();
+ oss << "\n";
+ printAtPos(pos, oss);
+
+ auto loc = getCodeLines(pos);
+ if (loc.has_value()) {
+ oss << "\n";
+ printCodeLines(oss, "", pos, *loc);
+ oss << "\n";
+ }
+ }
+ }
+ oss << "\n" << prefix;
+ }
+
oss << einfo.msg << "\n";
if (einfo.errPos.has_value() && *einfo.errPos) {
@@ -285,26 +307,6 @@ std::ostream & showErrorInfo(std::ostream & out, const ErrorInfo & einfo, bool s
"?" << std::endl;
}
- // traces
- if (showTrace && !einfo.traces.empty()) {
- for (auto iter = einfo.traces.rbegin(); iter != einfo.traces.rend(); ++iter) {
- oss << "\n" << "… " << iter->hint.str() << "\n";
-
- if (iter->pos.has_value() && (*iter->pos)) {
- auto pos = iter->pos.value();
- oss << "\n";
- printAtPos(pos, oss);
-
- auto loc = getCodeLines(pos);
- if (loc.has_value()) {
- oss << "\n";
- printCodeLines(oss, "", pos, *loc);
- oss << "\n";
- }
- }
- }
- }
-
out << indent(prefix, std::string(filterANSIEscapes(prefix, true).size(), ' '), chomp(oss.str()));
return out;
diff --git a/src/libutil/fmt.hh b/src/libutil/fmt.hh
index 7664e5c04..e879fd3b8 100644
--- a/src/libutil/fmt.hh
+++ b/src/libutil/fmt.hh
@@ -148,7 +148,7 @@ inline hintformat hintfmt(const std::string & fs, const Args & ... args)
return f;
}
-inline hintformat hintfmt(std::string plain_string)
+inline hintformat hintfmt(const std::string & plain_string)
{
// we won't be receiving any args in this case, so just print the original string
return hintfmt("%s", normaltxt(plain_string));
diff --git a/src/libutil/logging.cc b/src/libutil/logging.cc
index cb2b15b41..ac86d8ac2 100644
--- a/src/libutil/logging.cc
+++ b/src/libutil/logging.cc
@@ -105,14 +105,6 @@ public:
Verbosity verbosity = lvlInfo;
-void warnOnce(bool & haveWarned, const FormatOrString & fs)
-{
- if (!haveWarned) {
- warn(fs.s);
- haveWarned = true;
- }
-}
-
void writeToStderr(std::string_view s)
{
try {
@@ -130,11 +122,11 @@ Logger * makeSimpleLogger(bool printBuildLogs)
return new SimpleLogger(printBuildLogs);
}
-std::atomic<uint64_t> nextId{(uint64_t) getpid() << 32};
+std::atomic<uint64_t> nextId{0};
Activity::Activity(Logger & logger, Verbosity lvl, ActivityType type,
const std::string & s, const Logger::Fields & fields, ActivityId parent)
- : logger(logger), id(nextId++)
+ : logger(logger), id(nextId++ + (((uint64_t) getpid()) << 32))
{
logger.startActivity(id, lvl, type, s, fields, parent);
}
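As the new constructor above shows, an activity id is now composed of a per-process counter in the low 32 bits plus the pid shifted into the high 32 bits, instead of seeding the counter itself with the pid. A small sketch of that layout, with illustrative names only:

```cpp
#include <atomic>
#include <cstdint>
#include <cstdio>
#include <unistd.h>

static std::atomic<uint64_t> nextLocalId{0}; // per-process counter (illustrative name)

uint64_t makeActivityId()
{
    return nextLocalId++ + (((uint64_t) getpid()) << 32);
}

int main()
{
    auto id = makeActivityId();
    std::printf("pid=%llu counter=%llu\n",
        (unsigned long long) (id >> 32),
        (unsigned long long) (id & 0xffffffff));
}
```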
diff --git a/src/libutil/logging.hh b/src/libutil/logging.hh
index d0817b4a9..4642c49f7 100644
--- a/src/libutil/logging.hh
+++ b/src/libutil/logging.hh
@@ -82,7 +82,7 @@ public:
log(lvlInfo, fs);
}
- virtual void logEI(const ErrorInfo &ei) = 0;
+ virtual void logEI(const ErrorInfo & ei) = 0;
void logEI(Verbosity lvl, ErrorInfo ei)
{
@@ -225,7 +225,11 @@ inline void warn(const std::string & fs, const Args & ... args)
logger->warn(f.str());
}
-void warnOnce(bool & haveWarned, const FormatOrString & fs);
+#define warnOnce(haveWarned, args...) \
+ if (!haveWarned) { \
+ haveWarned = true; \
+ warn(args); \
+ }
void writeToStderr(std::string_view s);
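The `warnOnce` macro above replaces the old `warnOnce(bool &, const FormatOrString &)` function and forwards arbitrary format arguments to `warn()`. A minimal standalone illustration of the intended usage pattern follows; the `printf` here merely stands in for Nix's `warn()` so the sketch compiles on its own.

```cpp
#include <cstdio>

// Stand-in for the real macro in logging.hh, which forwards to warn().
#define warnOnce(haveWarned, ...) \
    if (!haveWarned) { \
        haveWarned = true; \
        std::printf(__VA_ARGS__); \
        std::printf("\n"); \
    }

int main()
{
    static bool warned = false;
    for (int i = 0; i < 3; ++i)
        warnOnce(warned, "this warning is only printed once (iteration %d)", i);
}
```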
diff --git a/src/libutil/ref.hh b/src/libutil/ref.hh
index bf26321db..7d38b059c 100644
--- a/src/libutil/ref.hh
+++ b/src/libutil/ref.hh
@@ -83,6 +83,11 @@ public:
return p != other.p;
}
+ bool operator < (const ref & other) const
+ {
+ return p < other.p;
+ }
+
private:
template
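The new `operator <` orders `ref`s by their underlying pointer, which is what allows `ref<T>` to be used as a key in ordered containers such as the `std::set<ref<Node>>` introduced in `src/nix/flake.cc` below. A minimal sketch, using a simplified stand-in for `nix::ref`:

```cpp
#include <cassert>
#include <memory>
#include <set>

// Simplified stand-in for nix::ref: a non-null shared_ptr wrapper.
template<typename T>
class Ref {
    std::shared_ptr<T> p;
public:
    explicit Ref(std::shared_ptr<T> q) : p(std::move(q)) { assert(p); }
    T & operator *() const { return *p; }
    bool operator < (const Ref & other) const { return p < other.p; }
};

int main()
{
    std::set<Ref<int>> visited;    // needs operator< to compile
    Ref<int> node(std::make_shared<int>(42));
    visited.insert(node);
    bool firstVisit = visited.insert(node).second;
    return firstVisit ? 1 : 0;     // 0: the node was already visited
}
```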
diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc
index 2c3597775..c653db9d0 100644
--- a/src/libutil/serialise.cc
+++ b/src/libutil/serialise.cc
@@ -338,7 +338,7 @@ Sink & operator << (Sink & sink, const StringSet & s)
Sink & operator << (Sink & sink, const Error & ex)
{
- auto info = ex.info();
+ auto & info = ex.info();
sink
<< "Error"
<< info.level
diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh
index 84847835a..7da5b07fd 100644
--- a/src/libutil/serialise.hh
+++ b/src/libutil/serialise.hh
@@ -331,17 +331,9 @@ T readNum(Source & source)
unsigned char buf[8];
source((char *) buf, sizeof(buf));
- uint64_t n =
- ((uint64_t) buf[0]) |
- ((uint64_t) buf[1] << 8) |
- ((uint64_t) buf[2] << 16) |
- ((uint64_t) buf[3] << 24) |
- ((uint64_t) buf[4] << 32) |
- ((uint64_t) buf[5] << 40) |
- ((uint64_t) buf[6] << 48) |
- ((uint64_t) buf[7] << 56);
+ auto n = readLittleEndian<uint64_t>(buf);
- if (n > (uint64_t)std::numeric_limits<T>::max())
+ if (n > (uint64_t) std::numeric_limits<T>::max())
throw SerialisationError("serialised integer %d is too large for type '%s'", n, typeid(T).name());
return (T) n;
diff --git a/src/libutil/util.cc b/src/libutil/util.cc
index 623b74bdd..993dc1cb6 100644
--- a/src/libutil/util.cc
+++ b/src/libutil/util.cc
@@ -2,6 +2,7 @@
#include "sync.hh"
#include "finally.hh"
#include "serialise.hh"
+#include "cgroup.hh"
#include
#include
@@ -36,7 +37,6 @@
#include
#include
-#include <mntent.h>
#include
#endif
@@ -727,45 +727,22 @@ unsigned int getMaxCPU()
{
#if __linux__
try {
- FILE *fp = fopen("/proc/mounts", "r");
- if (!fp)
- return 0;
+ auto cgroupFS = getCgroupFS();
+ if (!cgroupFS) return 0;
- Strings cgPathParts;
+ auto cgroups = getCgroups("/proc/self/cgroup");
+ auto cgroup = cgroups[""];
+ if (cgroup == "") return 0;
- struct mntent *ent;
- while ((ent = getmntent(fp))) {
- std::string mountType, mountPath;
+ auto cpuFile = *cgroupFS + "/" + cgroup + "/cpu.max";
- mountType = ent->mnt_type;
- mountPath = ent->mnt_dir;
-
- if (mountType == "cgroup2") {
- cgPathParts.push_back(mountPath);
- break;
- }
- }
-
- fclose(fp);
-
- if (cgPathParts.size() > 0 && pathExists("/proc/self/cgroup")) {
- std::string currentCgroup = readFile("/proc/self/cgroup");
- Strings cgValues = tokenizeString(currentCgroup, ":");
- cgPathParts.push_back(trim(cgValues.back(), "\n"));
- cgPathParts.push_back("cpu.max");
- std::string fullCgPath = canonPath(concatStringsSep("/", cgPathParts));
-
- if (pathExists(fullCgPath)) {
- std::string cpuMax = readFile(fullCgPath);
- std::vector cpuMaxParts = tokenizeString>(cpuMax, " ");
- std::string quota = cpuMaxParts[0];
- std::string period = trim(cpuMaxParts[1], "\n");
-
- if (quota != "max")
- return std::ceil(std::stoi(quota) / std::stof(period));
- }
- }
- } catch (Error &) { ignoreException(); }
+ auto cpuMax = readFile(cpuFile);
+ auto cpuMaxParts = tokenizeString<std::vector<std::string>>(cpuMax, " \n");
+ auto quota = cpuMaxParts[0];
+ auto period = cpuMaxParts[1];
+ if (quota != "max")
+ return std::ceil(std::stoi(quota) / std::stof(period));
+ } catch (Error &) { ignoreException(lvlDebug); }
#endif
return 0;
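For reference, the cgroup v2 `cpu.max` file read above contains `<quota> <period>` in microseconds (or `max` for no limit), and the effective CPU count is `ceil(quota / period)`; for example `150000 100000` yields 2. A standalone sketch of just that arithmetic, with simplified error handling compared to the real `getMaxCPU()`:

```cpp
#include <cmath>
#include <iostream>
#include <sstream>
#include <string>

// 0 means "unlimited or unknown", matching getMaxCPU()'s fallback above.
unsigned int cpusFromCpuMax(const std::string & cpuMax)
{
    std::istringstream in(cpuMax);
    std::string quota, period;
    if (!(in >> quota >> period) || quota == "max") return 0;
    return std::ceil(std::stoi(quota) / std::stof(period));
}

int main()
{
    std::cout << cpusFromCpuMax("150000 100000") << "\n"; // 2
    std::cout << cpusFromCpuMax("max 100000") << "\n";    // 0
}
```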
@@ -1427,7 +1404,7 @@ std::string shellEscape(const std::string_view s)
}
-void ignoreException()
+void ignoreException(Verbosity lvl)
{
/* Make sure no exceptions leave this function.
printError() also throws when remote is closed. */
@@ -1435,7 +1412,7 @@ void ignoreException()
try {
throw;
} catch (std::exception & e) {
- printError("error (ignored): %1%", e.what());
+ printMsg(lvl, "error (ignored): %1%", e.what());
}
} catch (...) { }
}
@@ -1617,6 +1594,21 @@ std::string stripIndentation(std::string_view s)
}
+std::pair<std::string_view, std::string_view> getLine(std::string_view s)
+{
+ auto newline = s.find('\n');
+
+ if (newline == s.npos) {
+ return {s, ""};
+ } else {
+ auto line = s.substr(0, newline);
+ if (!line.empty() && line[line.size() - 1] == '\r')
+ line = line.substr(0, line.size() - 1);
+ return {line, s.substr(newline + 1)};
+ }
+}
+
+
//////////////////////////////////////////////////////////////////////
static Sync> windowSize{{0, 0}};
diff --git a/src/libutil/util.hh b/src/libutil/util.hh
index e5c678682..9b149de80 100644
--- a/src/libutil/util.hh
+++ b/src/libutil/util.hh
@@ -510,6 +510,18 @@ std::optional string2Float(const std::string_view s)
}
+/* Convert a little-endian integer to host order. */
+template<typename T>
+T readLittleEndian(unsigned char * p)
+{
+ T x = 0;
+ for (size_t i = 0; i < sizeof(x); ++i, ++p) {
+ x |= ((T) *p) << (i * 8);
+ }
+ return x;
+}
+
+
/* Return true iff `s' starts with `prefix'. */
bool hasPrefix(std::string_view s, std::string_view prefix);
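A quick standalone check of the `readLittleEndian` helper introduced above, decoding an 8-byte little-endian buffer the way `readNum` in `serialise.hh` now does:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

template<typename T>
T readLittleEndian(unsigned char * p)
{
    T x = 0;
    for (size_t i = 0; i < sizeof(x); ++i, ++p)
        x |= ((T) *p) << (i * 8);
    return x;
}

int main()
{
    unsigned char buf[8] = {0x39, 0x30, 0, 0, 0, 0, 0, 0}; // 12345 in little-endian
    std::printf("%llu\n", (unsigned long long) readLittleEndian<uint64_t>(buf));
}
```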
@@ -528,7 +540,7 @@ std::string shellEscape(const std::string_view s);
/* Exception handling in destructors: print an error message, then
ignore the exception. */
-void ignoreException();
+void ignoreException(Verbosity lvl = lvlError);
@@ -563,6 +575,12 @@ std::string base64Decode(std::string_view s);
std::string stripIndentation(std::string_view s);
+/* Get the prefix of 's' up to and excluding the next line break (LF
+ optionally preceded by CR), and the remainder following the line
+ break. */
+std::pair<std::string_view, std::string_view> getLine(std::string_view s);
+
+
/* Get a value for the specified key from an associate container. */
template <class T>
const typename T::mapped_type * get(const T & map, const typename T::key_type & key)
@@ -737,4 +755,13 @@ inline std::string operator + (std::string && s, std::string_view s2)
return std::move(s);
}
+inline std::string operator + (std::string_view s1, const char * s2)
+{
+ std::string s;
+ s.reserve(s1.size() + strlen(s2));
+ s.append(s1);
+ s.append(s2);
+ return s;
+}
+
}
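The new `operator +` overload above avoids materialising an intermediate `std::string` before appending a C string to a `std::string_view`. A small stand-alone usage sketch (illustrative only):

```cpp
#include <cassert>
#include <cstring>
#include <string>
#include <string_view>

// Copy of the overload added above: reserve once, then append both parts.
inline std::string operator + (std::string_view s1, const char * s2)
{
    std::string s;
    s.reserve(s1.size() + strlen(s2));
    s.append(s1);
    s.append(s2);
    return s;
}

int main()
{
    std::string_view name = "multiple-outputs-a";
    assert((name + ".drv") == "multiple-outputs-a.drv");
}
```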
diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc
index b59a6d026..3bbefedbe 100644
--- a/src/nix-store/nix-store.cc
+++ b/src/nix-store/nix-store.cc
@@ -808,14 +808,23 @@ static void opServe(Strings opFlags, Strings opArgs)
if (GET_PROTOCOL_MINOR(clientVersion) >= 2)
settings.maxLogSize = readNum(in);
if (GET_PROTOCOL_MINOR(clientVersion) >= 3) {
- settings.buildRepeat = readInt(in);
- settings.enforceDeterminism = readInt(in);
+ auto nrRepeats = readInt(in);
+ if (nrRepeats != 0) {
+ throw Error("client requested repeating builds, but this is not currently implemented");
+ }
+ // Ignore 'enforceDeterminism'. It used to be true by
+ // default, but it never had any effect when
+ // `nrRepeats == 0`. We have already asserted that
+ // `nrRepeats` is in fact 0, so ignoring it does not
+ // change what the client asked for.
+ readInt(in);
+
settings.runDiffHook = true;
}
if (GET_PROTOCOL_MINOR(clientVersion) >= 7) {
settings.keepFailed = (bool) readInt(in);
}
- settings.printRepeatedBuilds = false;
};
while (true) {
@@ -926,7 +935,6 @@ static void opServe(Strings opFlags, Strings opArgs)
worker_proto::write(*store, out, status.builtOutputs);
}
-
break;
}
diff --git a/src/nix/daemon.cc b/src/nix/daemon.cc
index 940923d3b..c527fdb0a 100644
--- a/src/nix/daemon.cc
+++ b/src/nix/daemon.cc
@@ -257,7 +257,7 @@ static void daemonLoop()
} catch (Interrupted & e) {
return;
} catch (Error & error) {
- ErrorInfo ei = error.info();
+ auto ei = error.info();
// FIXME: add to trace?
ei.msg = hintfmt("error processing connection: %1%", ei.msg.str());
logError(ei);
diff --git a/src/nix/flake-update.md b/src/nix/flake-update.md
index 2ee8a707d..8c6042d94 100644
--- a/src/nix/flake-update.md
+++ b/src/nix/flake-update.md
@@ -16,7 +16,7 @@ R""(
# Description
This command recreates the lock file of a flake (`flake.lock`), thus
-updating the lock for every mutable input (like `nixpkgs`) to its
+updating the lock for every unlocked input (like `nixpkgs`) to its
current version. This is equivalent to passing `--recreate-lock-file`
to any command that operates on a flake. That is,
diff --git a/src/nix/flake.cc b/src/nix/flake.cc
index fbeb137f4..95a9e1774 100644
--- a/src/nix/flake.cc
+++ b/src/nix/flake.cc
@@ -215,7 +215,7 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON
if (!lockedFlake.lockFile.root->inputs.empty())
logger->cout(ANSI_BOLD "Inputs:" ANSI_NORMAL);
- std::unordered_set<std::shared_ptr<Node>> visited;
+ std::set<ref<Node>> visited;
std::function<void(const Node & node, const std::string & prefix)> recurse;
@@ -227,7 +227,7 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON
if (auto lockedNode = std::get_if<0>(&input.second)) {
logger->cout("%s" ANSI_BOLD "%s" ANSI_NORMAL ": %s",
prefix + (last ? treeLast : treeConn), input.first,
- *lockedNode ? (*lockedNode)->lockedRef : flake.lockedRef);
+ (*lockedNode)->lockedRef);
bool firstVisit = visited.insert(*lockedNode).second;
diff --git a/src/nix/flake.md b/src/nix/flake.md
index a1ab43281..810e9ebea 100644
--- a/src/nix/flake.md
+++ b/src/nix/flake.md
@@ -18,51 +18,56 @@ values such as packages or NixOS modules provided by the flake).
Flake references (*flakerefs*) are a way to specify the location of a
flake. These have two different forms:
-* An attribute set representation, e.g.
- ```nix
- {
- type = "github";
- owner = "NixOS";
- repo = "nixpkgs";
- }
- ```
+## Attribute set representation
- The only required attribute is `type`. The supported types are
- listed below.
+Example:
-* A URL-like syntax, e.g.
+```nix
+{
+ type = "github";
+ owner = "NixOS";
+ repo = "nixpkgs";
+}
+```
- ```
- github:NixOS/nixpkgs
- ```
+The only required attribute is `type`. The supported types are
+listed below.
- These are used on the command line as a more convenient alternative
- to the attribute set representation. For instance, in the command
+## URL-like syntax
- ```console
- # nix build github:NixOS/nixpkgs#hello
- ```
+Example:
- `github:NixOS/nixpkgs` is a flake reference (while `hello` is an
- output attribute). They are also allowed in the `inputs` attribute
- of a flake, e.g.
+```
+github:NixOS/nixpkgs
+```
- ```nix
- inputs.nixpkgs.url = github:NixOS/nixpkgs;
- ```
+These are used on the command line as a more convenient alternative
+to the attribute set representation. For instance, in the command
- is equivalent to
+```console
+# nix build github:NixOS/nixpkgs#hello
+```
- ```nix
- inputs.nixpkgs = {
- type = "github";
- owner = "NixOS";
- repo = "nixpkgs";
- };
- ```
+`github:NixOS/nixpkgs` is a flake reference (while `hello` is an
+output attribute). They are also allowed in the `inputs` attribute
+of a flake, e.g.
-## Examples
+```nix
+inputs.nixpkgs.url = github:NixOS/nixpkgs;
+```
+
+is equivalent to
+
+```nix
+inputs.nixpkgs = {
+ type = "github";
+ owner = "NixOS";
+ repo = "nixpkgs";
+};
+```
+
+### Examples
Here are some examples of flake references in their URL-like representation:
diff --git a/src/nix/nix.md b/src/nix/nix.md
index d48682a94..723d3c87e 100644
--- a/src/nix/nix.md
+++ b/src/nix/nix.md
@@ -164,6 +164,13 @@ operate are determined as follows:
…
```
+ and likewise, using a store path to a "drv" file to specify the derivation:
+
+ ```console
+ # nix build '/nix/store/gzaflydcr6sb3567hap9q6srzx8ggdgg-glibc-2.33-78.drv^dev,static'
+ …
+ ```
+
* You can also specify that *all* outputs should be used using the
syntax *installable*`^*`. For example, the following shows the size
of all outputs of the `glibc` package in the binary cache:
@@ -177,6 +184,12 @@ operate are determined as follows:
/nix/store/q6580lr01jpcsqs4r5arlh4ki2c1m9rv-glibc-2.33-123-dev 44200560
```
+ and likewise, using a store path to a "drv" file to specify the derivation:
+
+ ```console
+ # nix path-info -S '/nix/store/gzaflydcr6sb3567hap9q6srzx8ggdgg-glibc-2.33-78.drv^*'
+ …
+ ```
* If you didn't specify the desired outputs, but the derivation has an
attribute `meta.outputsToInstall`, Nix will use those outputs. For
example, since the package `nixpkgs#libxml2` has this attribute:
@@ -189,6 +202,9 @@ operate are determined as follows:
a command like `nix shell nixpkgs#libxml2` will provide only those
two outputs by default.
+ Note that a store derivation (given by `.drv` file store path) doesn't have
+ any attributes like `meta`, and thus this case doesn't apply to it.
+
* Otherwise, Nix will use all outputs of the derivation.
# Nix stores
diff --git a/src/nix/profile-list.md b/src/nix/profile-list.md
index bdab9a208..fa786162f 100644
--- a/src/nix/profile-list.md
+++ b/src/nix/profile-list.md
@@ -20,11 +20,11 @@ following fields:
* An integer that can be used to unambiguously identify the package in
invocations of `nix profile remove` and `nix profile upgrade`.
-* The original ("mutable") flake reference and output attribute path
+* The original ("unlocked") flake reference and output attribute path
used at installation time.
-* The immutable flake reference to which the mutable flake reference
- was resolved.
+* The locked flake reference to which the unlocked flake reference was
+ resolved.
* The store path(s) of the package.
diff --git a/src/nix/profile-upgrade.md b/src/nix/profile-upgrade.md
index e06e74abe..39cca428b 100644
--- a/src/nix/profile-upgrade.md
+++ b/src/nix/profile-upgrade.md
@@ -2,7 +2,7 @@ R""(
# Examples
-* Upgrade all packages that were installed using a mutable flake
+* Upgrade all packages that were installed using an unlocked flake
reference:
```console
@@ -32,9 +32,9 @@ the package was installed.
> **Warning**
>
-> This only works if you used a *mutable* flake reference at
+> This only works if you used an *unlocked* flake reference at
> installation time, e.g. `nixpkgs#hello`. It does not work if you
-> used an *immutable* flake reference
+> used a *locked* flake reference
> (e.g. `github:NixOS/nixpkgs/13d0c311e3ae923a00f734b43fd1d35b47d8943a#hello`),
> since in that case the "latest version" is always the same.
diff --git a/src/nix/profile.md b/src/nix/profile.md
index be3c5ba1a..273e02280 100644
--- a/src/nix/profile.md
+++ b/src/nix/profile.md
@@ -88,8 +88,7 @@ has the following fields:
the user at the time of installation (e.g. `nixpkgs`). This is also
the flake reference that will be used by `nix profile upgrade`.
-* `uri`: The immutable flake reference to which `originalUrl`
- resolved.
+* `uri`: The locked flake reference to which `originalUrl` resolved.
* `attrPath`: The flake output attribute that provided this
package. Note that this is not necessarily the attribute that the
diff --git a/src/nix/registry.cc b/src/nix/registry.cc
index c496f94f8..b5bdfba95 100644
--- a/src/nix/registry.cc
+++ b/src/nix/registry.cc
@@ -183,14 +183,12 @@ struct CmdRegistryPin : RegistryCommand, EvalCommand
void run(nix::ref<Store> store) override
{
- if (locked.empty()) {
- locked = url;
- }
+ if (locked.empty()) locked = url;
auto registry = getRegistry();
auto ref = parseFlakeRef(url);
- auto locked_ref = parseFlakeRef(locked);
+ auto lockedRef = parseFlakeRef(locked);
registry->remove(ref.input);
- auto [tree, resolved] = locked_ref.resolve(store).input.fetch(store);
+ auto [tree, resolved] = lockedRef.resolve(store).input.fetch(store);
fetchers::Attrs extraAttrs;
if (ref.subdir != "") extraAttrs["dir"] = ref.subdir;
registry->add(ref.input, resolved, extraAttrs);
diff --git a/tests/build.sh b/tests/build.sh
index c7db039b4..036fb037e 100644
--- a/tests/build.sh
+++ b/tests/build.sh
@@ -8,13 +8,15 @@ set -o pipefail
nix build -f multiple-outputs.nix --json a b --no-link | jq --exit-status '
(.[0] |
(.drvPath | match(".*multiple-outputs-a.drv")) and
- (.outputs | keys | length == 2) and
- (.outputs.first | match(".*multiple-outputs-a-first")) and
- (.outputs.second | match(".*multiple-outputs-a-second")))
+ (.outputs |
+ (keys | length == 2) and
+ (.first | match(".*multiple-outputs-a-first")) and
+ (.second | match(".*multiple-outputs-a-second"))))
and (.[1] |
(.drvPath | match(".*multiple-outputs-b.drv")) and
- (.outputs | keys | length == 1) and
- (.outputs.out | match(".*multiple-outputs-b")))
+ (.outputs |
+ (keys | length == 1) and
+ (.out | match(".*multiple-outputs-b"))))
'
# Test output selection using the '^' syntax.
@@ -56,6 +58,48 @@ nix build -f multiple-outputs.nix --json 'e^*' --no-link | jq --exit-status '
(.outputs | keys == ["a", "b", "c"]))
'
+# Test building from a raw .drv store path rather than from a Nix expression.
+
+drv=$(nix eval -f multiple-outputs.nix --raw a.drvPath)
+if nix build "$drv^not-an-output" --no-link --json; then
+ fail "'not-an-output' should fail to build"
+fi
+
+if nix build "$drv^" --no-link --json; then
+ fail "'empty outputs list' should fail to build"
+fi
+
+if nix build "$drv^*nope" --no-link --json; then
+ fail "'* must be entire string' should fail to build"
+fi
+
+nix build "$drv^first" --no-link --json | jq --exit-status '
+ (.[0] |
+ (.drvPath | match(".*multiple-outputs-a.drv")) and
+ (.outputs |
+ (keys | length == 1) and
+ (.first | match(".*multiple-outputs-a-first")) and
+ (has("second") | not)))
+'
+
+nix build "$drv^first,second" --no-link --json | jq --exit-status '
+ (.[0] |
+ (.drvPath | match(".*multiple-outputs-a.drv")) and
+ (.outputs |
+ (keys | length == 2) and
+ (.first | match(".*multiple-outputs-a-first")) and
+ (.second | match(".*multiple-outputs-a-second"))))
+'
+
+nix build "$drv^*" --no-link --json | jq --exit-status '
+ (.[0] |
+ (.drvPath | match(".*multiple-outputs-a.drv")) and
+ (.outputs |
+ (keys | length == 2) and
+ (.first | match(".*multiple-outputs-a-first")) and
+ (.second | match(".*multiple-outputs-a-second"))))
+'
+
# Make sure that `--impure` works (regression test for https://github.com/NixOS/nix/issues/6488)
nix build --impure -f multiple-outputs.nix --json e --no-link | jq --exit-status '
(.[0] |
diff --git a/tests/check.nix b/tests/check.nix
index ed91ff845..ddab8eea9 100644
--- a/tests/check.nix
+++ b/tests/check.nix
@@ -44,7 +44,7 @@ with import ./config.nix;
};
hashmismatch = import {
- url = "file://" + builtins.getEnv "TMPDIR" + "/dummy";
+ url = "file://" + builtins.getEnv "TEST_ROOT" + "/dummy";
sha256 = "0mdqa9w1p6cmli6976v4wi0sw9r4p5prkj7lzfd1877wk11c9c73";
};
diff --git a/tests/check.sh b/tests/check.sh
index 495202781..e77c0405d 100644
--- a/tests/check.sh
+++ b/tests/check.sh
@@ -40,14 +40,6 @@ nix-build check.nix -A deterministic --argstr checkBuildId $checkBuildId \
if grep -q 'may not be deterministic' $TEST_ROOT/log; then false; fi
checkBuildTempDirRemoved $TEST_ROOT/log
-nix build -f check.nix deterministic --rebuild --repeat 1 \
- --argstr checkBuildId $checkBuildId --keep-failed --no-link \
- 2> $TEST_ROOT/log
-if grep -q 'checking is not possible' $TEST_ROOT/log; then false; fi
-# Repeat is set to 1, ie. nix should build deterministic twice.
-if [ "$(grep "checking outputs" $TEST_ROOT/log | wc -l)" -ne 2 ]; then false; fi
-checkBuildTempDirRemoved $TEST_ROOT/log
-
nix-build check.nix -A nondeterministic --argstr checkBuildId $checkBuildId \
--no-out-link 2> $TEST_ROOT/log
checkBuildTempDirRemoved $TEST_ROOT/log
@@ -58,12 +50,6 @@ grep 'may not be deterministic' $TEST_ROOT/log
[ "$status" = "104" ]
checkBuildTempDirRemoved $TEST_ROOT/log
-nix build -f check.nix nondeterministic --rebuild --repeat 1 \
- --argstr checkBuildId $checkBuildId --keep-failed --no-link \
- 2> $TEST_ROOT/log || status=$?
-grep 'may not be deterministic' $TEST_ROOT/log
-checkBuildTempDirRemoved $TEST_ROOT/log
-
nix-build check.nix -A nondeterministic --argstr checkBuildId $checkBuildId \
--no-out-link --check --keep-failed 2> $TEST_ROOT/log || status=$?
grep 'may not be deterministic' $TEST_ROOT/log
@@ -72,12 +58,6 @@ if checkBuildTempDirRemoved $TEST_ROOT/log; then false; fi
clearStore
-nix-build dependencies.nix --no-out-link --repeat 3
-
-nix-build check.nix -A nondeterministic --no-out-link --repeat 1 2> $TEST_ROOT/log || status=$?
-[ "$status" = "1" ]
-grep 'differs from previous round' $TEST_ROOT/log
-
path=$(nix-build check.nix -A fetchurl --no-out-link)
chmod +w $path
@@ -91,13 +71,13 @@ nix-build check.nix -A fetchurl --no-out-link --check
nix-build check.nix -A fetchurl --no-out-link --repair
[[ $(cat $path) != foo ]]
-echo 'Hello World' > $TMPDIR/dummy
+echo 'Hello World' > $TEST_ROOT/dummy
nix-build check.nix -A hashmismatch --no-out-link || status=$?
[ "$status" = "102" ]
-echo -n > $TMPDIR/dummy
+echo -n > $TEST_ROOT/dummy
nix-build check.nix -A hashmismatch --no-out-link
-echo 'Hello World' > $TMPDIR/dummy
+echo 'Hello World' > $TEST_ROOT/dummy
nix-build check.nix -A hashmismatch --no-out-link --check || status=$?
[ "$status" = "102" ]
diff --git a/tests/eval.sh b/tests/eval.sh
index d74976019..ffae08a6a 100644
--- a/tests/eval.sh
+++ b/tests/eval.sh
@@ -29,3 +29,7 @@ nix-instantiate --eval -E 'assert 1 + 2 == 3; true'
[[ $(nix-instantiate -A attr --eval "./eval.nix") == '{ foo = "bar"; }' ]]
[[ $(nix-instantiate -A attr --eval --json "./eval.nix") == '{"foo":"bar"}' ]]
[[ $(nix-instantiate -A int --eval - < "./eval.nix") == 123 ]]
+
+# Check that symlink cycles don't cause a hang.
+ln -sfn cycle.nix $TEST_ROOT/cycle.nix
+(! nix eval --file $TEST_ROOT/cycle.nix)
diff --git a/tests/fetchGit.sh b/tests/fetchGit.sh
index 4ceba0293..da09c3f37 100644
--- a/tests/fetchGit.sh
+++ b/tests/fetchGit.sh
@@ -122,6 +122,7 @@ git -C $repo commit -m 'Bla3' -a
path4=$(nix eval --impure --refresh --raw --expr "(builtins.fetchGit file://$repo).outPath")
[[ $path2 = $path4 ]]
+status=0
nix eval --impure --raw --expr "(builtins.fetchGit { url = $repo; rev = \"$rev2\"; narHash = \"sha256-B5yIPHhEm0eysJKEsO7nqxprh9vcblFxpJG11gXJus1=\"; }).outPath" || status=$?
[[ "$status" = "102" ]]
diff --git a/tests/flakes/absolute-paths.sh b/tests/flakes/absolute-paths.sh
new file mode 100644
index 000000000..e7bfba12d
--- /dev/null
+++ b/tests/flakes/absolute-paths.sh
@@ -0,0 +1,17 @@
+source ./common.sh
+
+requireGit
+
+flake1Dir=$TEST_ROOT/flake1
+flake2Dir=$TEST_ROOT/flake2
+
+createGitRepo $flake1Dir
+cat > $flake1Dir/flake.nix <<EOF
…
diff --git a/tests/flakes/flakes.sh b/tests/flakes/flakes.sh
--- a/tests/flakes/flakes.sh
+++ b/tests/flakes/flakes.sh
@@ -468,3 +485,9 @@ nix store delete $(nix store add-path $badFlakeDir)
[[ $(nix path-info $(nix store add-path $flake1Dir)) =~ flake1 ]]
[[ $(nix path-info path:$(nix store add-path $flake1Dir)) =~ simple ]]
+
+# Test fetching flakerefs in the legacy CLI.
+[[ $(nix-instantiate --eval flake:flake3 -A x) = 123 ]]
+[[ $(nix-instantiate --eval flake:git+file://$flake3Dir -A x) = 123 ]]
+[[ $(nix-instantiate -I flake3=flake:flake3 --eval '' -A x) = 123 ]]
+[[ $(NIX_PATH=flake3=flake:flake3 nix-instantiate --eval '' -A x) = 123 ]]
diff --git a/tests/flakes/unlocked-override.sh b/tests/flakes/unlocked-override.sh
new file mode 100644
index 000000000..8abc8b7d3
--- /dev/null
+++ b/tests/flakes/unlocked-override.sh
@@ -0,0 +1,30 @@
+source ./common.sh
+
+requireGit
+
+flake1Dir=$TEST_ROOT/flake1
+flake2Dir=$TEST_ROOT/flake2
+
+createGitRepo $flake1Dir
+cat > $flake1Dir/flake.nix <<EOF
…
EOF
… > $flake1Dir/x.nix
+git -C $flake1Dir add flake.nix x.nix
+git -C $flake1Dir commit -m Initial
+
+createGitRepo $flake2Dir
+cat > $flake2Dir/flake.nix <<EOF
…
EOF
… > $flake1Dir/x.nix
+
+[[ $(nix eval --json $flake2Dir#x --override-input flake1 $TEST_ROOT/flake1) = 456 ]]
diff --git a/tests/function-trace.sh b/tests/function-trace.sh
index 0b7f49d82..d68e10df5 100755
--- a/tests/function-trace.sh
+++ b/tests/function-trace.sh
@@ -11,7 +11,7 @@ expect_trace() {
--expr "$expr" 2>&1 \
| grep "function-trace" \
| sed -e 's/ [0-9]*$//'
- );
+ )
echo -n "Tracing expression '$expr'"
set +e
diff --git a/tests/impure-derivations.sh b/tests/impure-derivations.sh
index 7ca9ce742..23a193833 100644
--- a/tests/impure-derivations.sh
+++ b/tests/impure-derivations.sh
@@ -12,6 +12,7 @@ clearStore
# Basic test of impure derivations: building one a second time should not use the previous result.
printf 0 > $TEST_ROOT/counter
+nix build --dry-run --json --file ./impure-derivations.nix impure.all
json=$(nix build -L --no-link --json --file ./impure-derivations.nix impure.all)
path1=$(echo $json | jq -r .[].outputs.out)
path1_stuff=$(echo $json | jq -r .[].outputs.stuff)
diff --git a/tests/lang.sh b/tests/lang.sh
index c0b0fc58c..95e795e2e 100644
--- a/tests/lang.sh
+++ b/tests/lang.sh
@@ -2,6 +2,7 @@ source common.sh
export TEST_VAR=foo # for eval-okay-getenv.nix
export NIX_REMOTE=dummy://
+export NIX_STORE_DIR=/nix/store
nix-instantiate --eval -E 'builtins.trace "Hello" 123' 2>&1 | grep -q Hello
nix-instantiate --eval -E 'builtins.addErrorContext "Hello" 123' 2>&1
@@ -50,10 +51,10 @@ for i in lang/eval-okay-*.nix; do
if test -e lang/$i.flags; then
flags=$(cat lang/$i.flags)
fi
- if ! expect 0 env NIX_PATH=lang/dir3:lang/dir4 nix-instantiate $flags --eval --strict lang/$i.nix > lang/$i.out; then
+ if ! expect 0 env NIX_PATH=lang/dir3:lang/dir4 HOME=/fake-home nix-instantiate $flags --eval --strict lang/$i.nix > lang/$i.out; then
echo "FAIL: $i should evaluate"
fail=1
- elif ! diff lang/$i.out lang/$i.exp; then
+ elif ! diff <(< lang/$i.out sed -e "s|$(pwd)|/pwd|g") lang/$i.exp; then
echo "FAIL: evaluation result of $i not as expected"
fail=1
fi
diff --git a/tests/lang/eval-okay-closure.exp b/tests/lang/eval-okay-closure.exp
new file mode 100644
index 000000000..e7dbf9781
--- /dev/null
+++ b/tests/lang/eval-okay-closure.exp
@@ -0,0 +1 @@
+[ { foo = true; key = -13; } { foo = true; key = -12; } { foo = true; key = -11; } { foo = true; key = -9; } { foo = true; key = -8; } { foo = true; key = -7; } { foo = true; key = -5; } { foo = true; key = -4; } { foo = true; key = -3; } { key = -1; } { foo = true; key = 0; } { foo = true; key = 1; } { foo = true; key = 2; } { foo = true; key = 4; } { foo = true; key = 5; } { foo = true; key = 6; } { key = 8; } { foo = true; key = 9; } { foo = true; key = 10; } { foo = true; key = 13; } { foo = true; key = 14; } { foo = true; key = 15; } { key = 17; } { foo = true; key = 18; } { foo = true; key = 19; } { foo = true; key = 22; } { foo = true; key = 23; } { key = 26; } { foo = true; key = 27; } { foo = true; key = 28; } { foo = true; key = 31; } { foo = true; key = 32; } { key = 35; } { foo = true; key = 36; } { foo = true; key = 40; } { foo = true; key = 41; } { key = 44; } { foo = true; key = 45; } { foo = true; key = 49; } { key = 53; } { foo = true; key = 54; } { foo = true; key = 58; } { key = 62; } { foo = true; key = 67; } { key = 71; } { key = 80; } ]
diff --git a/tests/lang/eval-okay-functionargs.exp b/tests/lang/eval-okay-functionargs.exp
new file mode 100644
index 000000000..c1c9f8ffa
--- /dev/null
+++ b/tests/lang/eval-okay-functionargs.exp
@@ -0,0 +1 @@
+[ "stdenv" "fetchurl" "aterm-stdenv" "aterm-stdenv2" "libX11" "libXv" "mplayer-stdenv2.libXv-libX11" "mplayer-stdenv2.libXv-libX11_2" "nix-stdenv-aterm-stdenv" "nix-stdenv2-aterm2-stdenv2" ]
diff --git a/tests/lang/eval-okay-path-antiquotation.exp b/tests/lang/eval-okay-path-antiquotation.exp
new file mode 100644
index 000000000..5b8ea0243
--- /dev/null
+++ b/tests/lang/eval-okay-path-antiquotation.exp
@@ -0,0 +1 @@
+{ absolute = /foo; expr = /pwd/lang/foo/bar; home = /fake-home/foo; notfirst = /pwd/lang/bar/foo; simple = /pwd/lang/foo; slashes = /foo/bar; surrounded = /pwd/lang/a-foo-b; }
diff --git a/tests/lang/eval-okay-path.exp b/tests/lang/eval-okay-path.exp
new file mode 100644
index 000000000..3ce7f8283
--- /dev/null
+++ b/tests/lang/eval-okay-path.exp
@@ -0,0 +1 @@
+"/nix/store/ya937r4ydw0l6kayq8jkyqaips9c75jm-output"
diff --git a/tests/local.mk b/tests/local.mk
index 027f4780c..911d1458d 100644
--- a/tests/local.mk
+++ b/tests/local.mk
@@ -7,6 +7,8 @@ nix_tests = \
flakes/follow-paths.sh \
flakes/bundle.sh \
flakes/check.sh \
+ flakes/unlocked-override.sh \
+ flakes/absolute-paths.sh \
ca/gc.sh \
gc.sh \
remote-store.sh \
@@ -123,7 +125,8 @@ nix_tests = \
fetchClosure.sh \
completions.sh \
impure-derivations.sh \
- path-from-hash-part.sh
+ path-from-hash-part.sh \
+ toString-path.sh
ifeq ($(HAVE_LIBCPUID), 1)
nix_tests += compute-levels.sh
diff --git a/tests/nix_path.sh b/tests/nix_path.sh
index d3657abf0..2b222b4a1 100644
--- a/tests/nix_path.sh
+++ b/tests/nix_path.sh
@@ -9,3 +9,6 @@ nix-instantiate --eval -E '' --restrict-eval
# Should ideally also test this, but there’s no pure way to do it, so just trust me that it works
# nix-instantiate --eval -E '' -I nixpkgs=channel:nixos-unstable --restrict-eval
+
+[[ $(nix-instantiate --find-file by-absolute-path/simple.nix) = $PWD/simple.nix ]]
+[[ $(nix-instantiate --find-file by-relative-path/simple.nix) = $PWD/simple.nix ]]
diff --git a/tests/restricted.sh b/tests/restricted.sh
index 242b901dd..9bd16cf51 100644
--- a/tests/restricted.sh
+++ b/tests/restricted.sh
@@ -3,7 +3,7 @@ source common.sh
clearStore
nix-instantiate --restrict-eval --eval -E '1 + 2'
-(! nix-instantiate --restrict-eval ./restricted.nix)
+(! nix-instantiate --eval --restrict-eval ./restricted.nix)
(! nix-instantiate --eval --restrict-eval <(echo '1 + 2'))
nix-instantiate --restrict-eval ./simple.nix -I src=.
nix-instantiate --restrict-eval ./simple.nix -I src1=simple.nix -I src2=config.nix -I src3=./simple.builder.sh
diff --git a/tests/toString-path.sh b/tests/toString-path.sh
new file mode 100644
index 000000000..07eb87465
--- /dev/null
+++ b/tests/toString-path.sh
@@ -0,0 +1,8 @@
+source common.sh
+
+mkdir -p $TEST_ROOT/foo
+echo bla > $TEST_ROOT/foo/bar
+
+[[ $(nix eval --raw --impure --expr "builtins.readFile (builtins.toString (builtins.fetchTree { type = \"path\"; path = \"$TEST_ROOT/foo\"; } + \"/bar\"))") = bla ]]
+
+[[ $(nix eval --json --impure --expr "builtins.readDir (builtins.toString (builtins.fetchTree { type = \"path\"; path = \"$TEST_ROOT/foo\"; }))") = '{"bar":"regular"}' ]]