Merge remote-tracking branch 'nixos/master'

Max Headroom 2022-12-17 13:36:27 +01:00
commit 74ecb58b74
114 changed files with 1155 additions and 754 deletions

.github/CODEOWNERS (new file)

@@ -0,0 +1,15 @@
# Pull requests concerning the listed files will automatically invite the respective maintainers as reviewers.
# This file is not used for denoting any kind of ownership, but is merely a tool for handling notifications.
#
# Merge permissions are required for maintaining an entry in this file.
# For documentation on this mechanism, see https://help.github.com/articles/about-codeowners/
# Default reviewers if nothing else matches
* @edolstra @thufschmitt
# This file
.github/CODEOWNERS @edolstra
# Public documentation
/doc @fricklerhandwerk
*.md @fricklerhandwerk

@@ -30,3 +30,7 @@ A clear and concise description of what you expected to happen.
**Additional context**
Add any other context about the problem here.

**Priorities**

Add :+1: to [issues you find important](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc).

@@ -18,3 +18,7 @@ A clear and concise description of any alternative solutions or features you've
**Additional context**
Add any other context or screenshots about the feature request here.

**Priorities**

Add :+1: to [issues you find important](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc).

.github/ISSUE_TEMPLATE/installer.md (new file)

@@ -0,0 +1,36 @@
---
name: Installer issue
about: Report problems with installation
title: ''
labels: installer
assignees: ''
---
## Platform
<!-- select the platform on which you tried to install Nix -->
- [ ] Linux: <!-- state your distribution, e.g. Arch Linux, Ubuntu, ... -->
- [ ] macOS
- [ ] WSL
## Additional information
<!-- state special circumstances on your system or additional steps you have taken prior to installation -->
## Output
<details><summary>Output</summary>
```log
<!-- paste console output here and remove this comment -->
```
</details>
## Priorities
Add :+1: to [issues you find important](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc).

@@ -26,3 +26,6 @@ assignees: ''
<!-- propose a solution -->

## Priorities

Add :+1: to [issues you find important](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc).

@@ -5,3 +5,7 @@ Please include relevant [release notes](https://github.com/NixOS/nix/blob/master
**Testing**

If this issue is a regression or something that should block release, please consider including a test either in the [testsuite](https://github.com/NixOS/nix/tree/master/tests) or as a [hydraJob]( https://github.com/NixOS/nix/blob/master/flake.nix#L396) so that it can be part of the [automatic checks](https://hydra.nixos.org/jobset/nix/master).

**Priorities**

Add :+1: to [pull requests you find important](https://github.com/NixOS/nix/pulls?q=is%3Aopen+sort%3Areactions-%2B1-desc).

@@ -1,5 +0,0 @@
---
# This files is used by https://github.com/marketplace/actions/auto-assign-reviewer-by-files
# to assign maintainers
"doc/**/*":
- fricklerhandwerk

@@ -1,12 +0,0 @@
name: "Auto Assign"
on:
- pull_request
jobs:
assign_reviewer:
runs-on: ubuntu-latest
steps:
- uses: shufo/auto-assign-reviewer-by-files@v1.1.4
with:
config: ".github/assign-by-files.yml"
token: ${{ secrets.GITHUB_TOKEN }}

@@ -1 +1 @@
-2.12.0
+2.13.0

@@ -177,7 +177,7 @@ fi
PKG_CHECK_MODULES([OPENSSL], [libcrypto], [CXXFLAGS="$OPENSSL_CFLAGS $CXXFLAGS"])

-# Checks for libarchive
+# Look for libarchive.
PKG_CHECK_MODULES([LIBARCHIVE], [libarchive >= 3.1.2], [CXXFLAGS="$LIBARCHIVE_CFLAGS $CXXFLAGS"])
# Workaround until https://github.com/libarchive/libarchive/issues/1446 is fixed
if test "$shared" != yes; then

@@ -1,29 +1,41 @@
-with builtins;
-with import ./utils.nix;
-
-options:
-
-concatStrings (map
-  (name:
-    let option = options.${name}; in
-    "  - [`${name}`](#conf-${name})"
-    + "<p id=\"conf-${name}\"></p>\n\n"
-    + concatStrings (map (s: "    ${s}\n") (splitLines option.description)) + "\n\n"
-    + (if option.documentDefault
-       then "    **Default:** " + (
-            if option.defaultValue == "" || option.defaultValue == []
-            then "*empty*"
-            else if isBool option.defaultValue
-            then (if option.defaultValue then "`true`" else "`false`")
-            else
-              # n.b. a StringMap value type is specified as a string, but
-              # this shows the value type. The empty stringmap is "null" in
-              # JSON, but that converts to "{ }" here.
-              (if isAttrs option.defaultValue then "`\"\"`"
-              else "`" + toString option.defaultValue + "`")) + "\n\n"
-       else "    **Default:** *machine-specific*\n")
-    + (if option.aliases != []
-       then "    **Deprecated alias:** " + (concatStringsSep ", " (map (s: "`${s}`") option.aliases)) + "\n\n"
-       else "")
-  )
-  (attrNames options))
+let
+  inherit (builtins) attrNames concatStringsSep isAttrs isBool;
+  inherit (import ./utils.nix) concatStrings squash splitLines;
+in
+
+optionsInfo:
+let
+  showOption = name:
+    let
+      inherit (optionsInfo.${name}) description documentDefault defaultValue aliases;
+      result = squash ''
+        - <span id="conf-${name}">[`${name}`](#conf-${name})</span>
+
+          ${indent "  " body}
+      '';
+      # separate body to cleanly handle indentation
+      body = ''
+        ${description}
+
+        **Default:** ${showDefault documentDefault defaultValue}
+
+        ${showAliases aliases}
+      '';
+      showDefault = documentDefault: defaultValue:
+        if documentDefault then
+          # a StringMap value type is specified as a string, but
+          # this shows the value type. The empty stringmap is `null` in
+          # JSON, but that converts to `{ }` here.
+          if defaultValue == "" || defaultValue == [] || isAttrs defaultValue
+          then "*empty*"
+          else if isBool defaultValue then
+            if defaultValue then "`true`" else "`false`"
+          else "`${toString defaultValue}`"
+        else "*machine-specific*";
+      showAliases = aliases:
+        if aliases == [] then "" else
+          "**Deprecated alias:** ${(concatStringsSep ", " (map (s: "`${s}`") aliases))}";
+      indent = prefix: s:
+        concatStringsSep "\n" (map (x: if x == "" then x else "${prefix}${x}") (splitLines s));
+    in result;
+in concatStrings (map showOption (attrNames optionsInfo))
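For a concrete picture of what the rewritten generator consumes, here is a minimal sketch of an `optionsInfo` attribute set; only the attribute names (`description`, `documentDefault`, `defaultValue`, `aliases`) come from the code above, while the `cores` entry and its values are invented for illustration:

```nix
{
  cores = {
    # Free-form Markdown shown under the option's entry.
    description = "Number of CPU cores that a builder may use.";
    # When true, defaultValue is rendered; otherwise "*machine-specific*" is shown.
    documentDefault = true;
    defaultValue = 0;
    # Old option names, rendered as "**Deprecated alias:** ...".
    aliases = [ ];
  };
}
```

For such an entry, `showOption "cores"` would emit a list item anchored at `conf-cores`, a `**Default:**` line of `` `0` ``, and no alias line.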

@@ -29,19 +29,19 @@ nix-eval = $(dummy-env) $(bindir)/nix eval --experimental-features nix-command -
$(d)/%.1: $(d)/src/command-ref/%.md
	@printf "Title: %s\n\n" "$$(basename $@ .1)" > $^.tmp
	@cat $^ >> $^.tmp
-	$(trace-gen) lowdown -sT man -M section=1 $^.tmp -o $@
+	$(trace-gen) lowdown -sT man --nroff-nolinks -M section=1 $^.tmp -o $@
	@rm $^.tmp

$(d)/%.8: $(d)/src/command-ref/%.md
	@printf "Title: %s\n\n" "$$(basename $@ .8)" > $^.tmp
	@cat $^ >> $^.tmp
-	$(trace-gen) lowdown -sT man -M section=8 $^.tmp -o $@
+	$(trace-gen) lowdown -sT man --nroff-nolinks -M section=8 $^.tmp -o $@
	@rm $^.tmp

$(d)/nix.conf.5: $(d)/src/command-ref/conf-file.md
	@printf "Title: %s\n\n" "$$(basename $@ .5)" > $^.tmp
	@cat $^ >> $^.tmp
-	$(trace-gen) lowdown -sT man -M section=5 $^.tmp -o $@
+	$(trace-gen) lowdown -sT man --nroff-nolinks -M section=5 $^.tmp -o $@
	@rm $^.tmp

$(d)/src/SUMMARY.md: $(d)/src/SUMMARY.md.in $(d)/src/command-ref/new-cli

@@ -35,7 +35,6 @@ const redirects = {
  "conf-build-max-jobs": "command-ref/conf-file.html#conf-build-max-jobs",
  "conf-build-max-log-size": "command-ref/conf-file.html#conf-build-max-log-size",
  "conf-build-max-silent-time": "command-ref/conf-file.html#conf-build-max-silent-time",
-  "conf-build-repeat": "command-ref/conf-file.html#conf-build-repeat",
  "conf-build-timeout": "command-ref/conf-file.html#conf-build-timeout",
  "conf-build-use-chroot": "command-ref/conf-file.html#conf-build-use-chroot",
  "conf-build-use-sandbox": "command-ref/conf-file.html#conf-build-use-sandbox",

@@ -47,7 +46,6 @@ const redirects = {
  "conf-connect-timeout": "command-ref/conf-file.html#conf-connect-timeout",
  "conf-cores": "command-ref/conf-file.html#conf-cores",
  "conf-diff-hook": "command-ref/conf-file.html#conf-diff-hook",
-  "conf-enforce-determinism": "command-ref/conf-file.html#conf-enforce-determinism",
  "conf-env-keep-derivations": "command-ref/conf-file.html#conf-env-keep-derivations",
  "conf-extra-binary-caches": "command-ref/conf-file.html#conf-extra-binary-caches",
  "conf-extra-platforms": "command-ref/conf-file.html#conf-extra-platforms",

@@ -74,7 +72,6 @@ const redirects = {
  "conf-plugin-files": "command-ref/conf-file.html#conf-plugin-files",
  "conf-post-build-hook": "command-ref/conf-file.html#conf-post-build-hook",
  "conf-pre-build-hook": "command-ref/conf-file.html#conf-pre-build-hook",
-  "conf-repeat": "command-ref/conf-file.html#conf-repeat",
  "conf-require-sigs": "command-ref/conf-file.html#conf-require-sigs",
  "conf-restrict-eval": "command-ref/conf-file.html#conf-restrict-eval",
  "conf-run-diff-hook": "command-ref/conf-file.html#conf-run-diff-hook",

@@ -65,6 +65,7 @@
  - [CLI guideline](contributing/cli-guideline.md)
- [Release Notes](release-notes/release-notes.md)
  - [Release X.Y (202?-??-??)](release-notes/rl-next.md)
+  - [Release 2.12 (2022-12-06)](release-notes/rl-2.12.md)
  - [Release 2.11 (2022-08-25)](release-notes/rl-2.11.md)
  - [Release 2.10 (2022-07-11)](release-notes/rl-2.10.md)
  - [Release 2.9 (2022-05-30)](release-notes/rl-2.9.md)

@@ -121,37 +121,3 @@ error:
are not valid, so checking is not possible

Run the build without `--check`, and then try with `--check` again.
# Automatic and Optionally Enforced Determinism Verification
Automatically verify every build at build time by executing the build
multiple times.
Setting `repeat` and `enforce-determinism` in your `nix.conf` permits
the automated verification of every build Nix performs.
The following configuration will run each build three times, and will
require the build to be deterministic:
enforce-determinism = true
repeat = 2
Setting `enforce-determinism` to false as in the following
configuration will run the build multiple times, execute the build
hook, but will allow the build to succeed even if it does not build
reproducibly:
enforce-determinism = false
repeat = 1
An example output of this configuration:
```console
$ nix-build ./test.nix -A unstable
this derivation will be built:
/nix/store/ch6llwpr2h8c3jmnf3f2ghkhx59aa97f-unstable.drv
building '/nix/store/ch6llwpr2h8c3jmnf3f2ghkhx59aa97f-unstable.drv' (round 1/2)...
building '/nix/store/ch6llwpr2h8c3jmnf3f2ghkhx59aa97f-unstable.drv' (round 2/2)...
output '/nix/store/6xg356v9gl03hpbbg8gws77n19qanh02-unstable' of '/nix/store/ch6llwpr2h8c3jmnf3f2ghkhx59aa97f-unstable.drv' differs from '/nix/store/6xg356v9gl03hpbbg8gws77n19qanh02-unstable.check' from previous round
/nix/store/6xg356v9gl03hpbbg8gws77n19qanh02-unstable
```

@@ -33,12 +33,17 @@ distribute the public key for verifying the authenticity of the paths.
example-nix-cache-1:1/cKDz3QCCOmwcztD2eV6Coggp6rqc9DGjWv7C0G+rM=
```

-Then, add the public key and the cache URL to your `nix.conf`'s
-`trusted-public-keys` and `substituters` options:
+Then update [`nix.conf`](../command-ref/conf-file.md) on any machine that will access the cache.
+Add the cache URL to [`substituters`](../command-ref/conf-file.md#conf-substituters) and the public key to [`trusted-public-keys`](../command-ref/conf-file.md#conf-trusted-public-keys):

substituters = https://cache.nixos.org/ s3://example-nix-cache
trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= example-nix-cache-1:1/cKDz3QCCOmwcztD2eV6Coggp6rqc9DGjWv7C0G+rM=

Machines that build for the cache must sign derivations using the private key.
On those machines, add the path to the key file to the [`secret-key-files`](../command-ref/conf-file.md#conf-secret-key-files) field in their [`nix.conf`](../command-ref/conf-file.md):

secret-key-files = /etc/nix/key.private

We will restart the Nix daemon in a later step.
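As a minimal sketch, the pieces above might sit together in a single `nix.conf` on a machine that both uses the cache and signs what it uploads (the URL, key names, and key file path are the example values from this guide):

```
substituters = https://cache.nixos.org/ s3://example-nix-cache
trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= example-nix-cache-1:1/cKDz3QCCOmwcztD2eV6Coggp6rqc9DGjWv7C0G+rM=
secret-key-files = /etc/nix/key.private
```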
# Implementing the build hook

@@ -52,14 +57,12 @@ set -eu
set -f # disable globbing
export IFS=' '

-echo "Signing paths" $OUT_PATHS
-nix store sign --key-file /etc/nix/key.private $OUT_PATHS
echo "Uploading paths" $OUT_PATHS
-exec nix copy --to 's3://example-nix-cache' $OUT_PATHS
+exec nix copy --to "s3://example-nix-cache" $OUT_PATHS
```

> **Note**
>
> The `$OUT_PATHS` variable is a space-separated list of Nix store
> paths. In this case, we expect and want the shell to perform word
> splitting to make each output path its own argument to `nix

@@ -7,42 +7,11 @@ Most Nix commands interpret the following environment variables:
  `nix-shell`. It can have the values `pure` or `impure`.

- [`NIX_PATH`]{#env-NIX_PATH}\
-  A colon-separated list of directories used to look up Nix
-  expressions enclosed in angle brackets (i.e., `<path>`). For
-  instance, the value
-
-      /home/eelco/Dev:/etc/nixos
-
-  will cause Nix to look for paths relative to `/home/eelco/Dev` and
-  `/etc/nixos`, in this order. It is also possible to match paths
-  against a prefix. For example, the value
-
-      nixpkgs=/home/eelco/Dev/nixpkgs-branch:/etc/nixos
-
-  will cause Nix to search for `<nixpkgs/path>` in
-  `/home/eelco/Dev/nixpkgs-branch/path` and `/etc/nixos/nixpkgs/path`.
-
-  If a path in the Nix search path starts with `http://` or
-  `https://`, it is interpreted as the URL of a tarball that will be
-  downloaded and unpacked to a temporary location. The tarball must
-  consist of a single top-level directory. For example, setting
-  `NIX_PATH` to
-
-      nixpkgs=https://github.com/NixOS/nixpkgs/archive/master.tar.gz
-
-  tells Nix to download and use the current contents of the
-  `master` branch in the `nixpkgs` repository.
-
-  The URLs of the tarballs from the official nixos.org channels (see
-  [the manual for `nix-channel`](nix-channel.md)) can be abbreviated
-  as `channel:<channel-name>`. For instance, the following two
-  values of `NIX_PATH` are equivalent:
-
-      nixpkgs=channel:nixos-21.05
-      nixpkgs=https://nixos.org/channels/nixos-21.05/nixexprs.tar.xz
-
-  The Nix search path can also be extended using the `-I` option to
-  many Nix commands, which takes precedence over `NIX_PATH`.
+  A colon-separated list of directories used to look up the location of Nix
+  expressions using [paths](../language/values.md#type-path)
+  enclosed in angle brackets (i.e., `<path>`),
+  e.g. `/home/eelco/Dev:/etc/nixos`. It can be extended using the
+  [`-I` option](./opt-common#opt-I).

- [`NIX_IGNORE_SYMLINK_STORE`]{#env-NIX_IGNORE_SYMLINK_STORE}\
  Normally, the Nix store directory (typically `/nix/store`) is not

@@ -53,16 +53,18 @@ All options not listed here are passed to `nix-store
--realise`, except for `--arg` and `--attr` / `-A` which are passed to
`nix-instantiate`.

-  - [`--no-out-link`]{#opt-no-out-link}\
+  - <span id="opt-no-out-link">[`--no-out-link`](#opt-no-out-link)<span>

    Do not create a symlink to the output path. Note that as a result
    the output does not become a root of the garbage collector, and so
-    might be deleted by `nix-store
-    --gc`.
+    might be deleted by `nix-store --gc`.

-  - [`--dry-run`]{#opt-dry-run}\
+  - <span id="opt-dry-run">[`--dry-run`](#opt-dry-run)</span>

    Show what store paths would be built or downloaded.

-  - [`--out-link`]{#opt-out-link} / `-o` *outlink*\
+  - <span id="opt-out-link">[`--out-link`](#opt-out-link)</span> / `-o` *outlink*

    Change the name of the symlink to the output path created from
    `result` to *outlink*.

@@ -22,7 +22,8 @@ This section lists the options that are common to all operations. These
options are allowed for every subcommand, though they may not always
have an effect.

-  - [`--add-root`]{#opt-add-root} *path*\
+  - <span id="opt-add-root">[`--add-root`](#opt-add-root)</span> *path*

    Causes the result of a realisation (`--realise` and
    `--force-realise`) to be registered as a root of the garbage
    collector. *path* will be created as a symlink to the resulting

@@ -104,10 +105,6 @@ The following flags are available:
    previous build, the new output path is left in
    `/nix/store/name.check.`

-    See also the `build-repeat` configuration option, which repeats a
-    derivation a number of times and prevents its outputs from being
-    registered as “valid” in the Nix store unless they are identical.

  Special exit codes:

  - `100`\

@@ -88,6 +88,29 @@ extension. The installer will also create `/etc/profile.d/nix.sh`.

### Linux
If you are on Linux with systemd:
1. Remove the Nix daemon service:
```console
sudo systemctl stop nix-daemon.service
sudo systemctl disable nix-daemon.socket nix-daemon.service
sudo systemctl daemon-reload
```
1. Remove systemd service files:
```console
sudo rm /etc/systemd/system/nix-daemon.service /etc/systemd/system/nix-daemon.socket
```
1. The installer script uses systemd-tmpfiles to create the socket directory.
You may also want to remove the configuration for that:
```console
sudo rm /etc/tmpfiles.d/nix-daemon.conf
```
Remove files created by Nix:

```console

@@ -103,16 +126,6 @@ done
sudo groupdel 30000
```

-If you are on Linux with systemd, remove the Nix daemon service:
-
-```console
-sudo systemctl stop nix-daemon.socket
-sudo systemctl stop nix-daemon.service
-sudo systemctl disable nix-daemon.socket
-sudo systemctl disable nix-daemon.service
-sudo systemctl daemon-reload
-```

There may also be references to Nix in

- `/etc/profile`

@@ -0,0 +1,43 @@
# Release 2.12 (2022-12-06)
* On Linux, Nix can now run builds in a user namespace where they run
as root (UID 0) and have 65,536 UIDs available.
<!-- FIXME: move this to its own section about system features -->
This is primarily useful for running containers such as `systemd-nspawn`
inside a Nix build. For an example, see [`tests/systemd-nspawn/nix`][nspawn].
[nspawn]: https://github.com/NixOS/nix/blob/67bcb99700a0da1395fa063d7c6586740b304598/tests/systemd-nspawn.nix.
A build can enable this by setting the derivation attribute:
```
requiredSystemFeatures = [ "uid-range" ];
```
The `uid-range` [system feature] requires the [`auto-allocate-uids`]
setting to be enabled; a sketch of the corresponding `nix.conf` settings follows this list.
[system feature]: ../command-ref/conf-file.md#conf-system-features
* Nix can now automatically pick UIDs for builds, removing the need to
create `nixbld*` user accounts. See [`auto-allocate-uids`].
[`auto-allocate-uids`]: ../command-ref/conf-file.md#conf-auto-allocate-uids
* On Linux, Nix has experimental support for running builds inside a
cgroup. See
[`use-cgroups`](../command-ref/conf-file.md#conf-use-cgroups).
* `<nix/fetchurl.nix>` now accepts an additional argument `impure` which
defaults to `false`. If it is set to `true`, the `hash` and `sha256`
arguments will be ignored and the resulting derivation will have
`__impure` set to `true`, making it an impure derivation.
* If `builtins.readFile` is called on a file with context, then only
the parts of the context that appear in the content of the file are
retained. This avoids a lot of spurious errors where strings end up
having a context just because they are read from a store path
([#7260](https://github.com/NixOS/nix/pull/7260)).
* `nix build --json` now prints some statistics about top-level
derivations, such as CPU statistics when cgroups are enabled.
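As a rough sketch (not part of the release notes themselves), the two experimental features above could be enabled in `nix.conf` like this; the individual lines appear in the draft release notes further below, and folding them into a single `extra-experimental-features` entry is an assumption:

```
extra-experimental-features = auto-allocate-uids cgroups
auto-allocate-uids = true
use-cgroups = true
```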

@@ -1,54 +1,27 @@
# Release X.Y (202?-??-??)

-* `<nix/fetchurl.nix>` now accepts an additional argument `impure` which
-  defaults to `false`. If it is set to `true`, the `hash` and `sha256`
-  arguments will be ignored and the resulting derivation will have
-  `__impure` set to `true`, making it an impure derivation.
-
-* If `builtins.readFile` is called on a file with context, then only the parts
-  of that context that appear in the content of the file are retained.
-  This avoids a lot of spurious errors where some benign strings end-up having
-  a context just because they are read from a store path
-  ([#7260](https://github.com/NixOS/nix/pull/7260)).
-
-* Nix can now automatically pick UIDs for builds, removing the need to
-  create `nixbld*` user accounts. These UIDs are allocated starting at
-  872415232 (0x34000000) on Linux and 56930 on macOS.
-
-  This is an experimental feature. To enable it, add the following to
-  `nix.conf`:
-
-  ```
-  extra-experimental-features = auto-allocate-uids
-  auto-allocate-uids = true
-  ```
-
-* On Linux, Nix can now run builds in a user namespace where the build
-  runs as root (UID 0) and has 65,536 UIDs available. This is
-  primarily useful for running containers such as `systemd-nspawn`
-  inside a Nix build. For an example, see
-  https://github.com/NixOS/nix/blob/67bcb99700a0da1395fa063d7c6586740b304598/tests/systemd-nspawn.nix.
-
-  A build can enable this by requiring the `uid-range` system feature,
-  i.e. by setting the derivation attribute
-
-  ```
-  requiredSystemFeatures = [ "uid-range" ];
-  ```
-
-  The `uid-range` system feature requires the `auto-allocate-uids`
-  setting to be enabled (see above).
-
-* On Linux, Nix has experimental support for running builds inside a
-  cgroup. It can be enabled by adding
-
-  ```
-  extra-experimental-features = cgroups
-  use-cgroups = true
-  ```
-
-  to `nix.conf`. Cgroups are required for derivations that require the
-  `uid-range` system feature.
-
-* `nix build --json` now prints some statistics about top-level
-  derivations, such as CPU statistics when cgroups are enabled.
+* The `repeat` and `enforce-determinism` options have been removed
+  since they had been broken under many circumstances for a long time.
+
+* You can now use [flake references] in the [old command line interface], e.g.
+
+  [flake references]: ../command-ref/new-cli/nix3-flake.md#flake-references
+  [old command line interface]: ../command-ref/main-commands.md
+
+  ```
+  # nix-build flake:nixpkgs -A hello
+  # nix-build -I nixpkgs=flake:github:NixOS/nixpkgs/nixos-22.05 \
+      '<nixpkgs>' -A hello
+  # NIX_PATH=nixpkgs=flake:nixpkgs nix-build '<nixpkgs>' -A hello
+  ```
+
+* Allow explicitly selecting outputs in a store derivation installable, just like we can do with other sorts of installables.
+  For example,
+  ```shell-session
+  $ nix-build /nix/store/gzaflydcr6sb3567hap9q6srzx8ggdgg-glibc-2.33-78.drv^dev`
+  ```
+  now works just as
+  ```shell-session
+  $ nix-build glibc^dev`
+  ```
+  does already.

@@ -9,14 +9,14 @@
let
-  version = builtins.readFile ./.version + versionSuffix;
+  officialRelease = false;
+  version = nixpkgs.lib.fileContents ./.version + versionSuffix;
  versionSuffix =
    if officialRelease
    then ""
    else "pre${builtins.substring 0 8 (self.lastModifiedDate or self.lastModified or "19700101")}_${self.shortRev or "dirty"}";
-  officialRelease = false;
  linux64BitSystems = [ "x86_64-linux" "aarch64-linux" ];
  linuxSystems = linux64BitSystems ++ [ "i686-linux" ];
  systems = linuxSystems ++ [ "x86_64-darwin" "aarch64-darwin" ];

@@ -420,6 +420,8 @@
  buildCross = nixpkgs.lib.genAttrs crossSystems (crossSystem:
    nixpkgs.lib.genAttrs ["x86_64-linux"] (system: self.packages.${system}."nix-super-${crossSystem}"));

+  buildNoGc = nixpkgs.lib.genAttrs systems (system: self.packages.${system}.nix.overrideAttrs (a: { configureFlags = (a.configureFlags or []) ++ ["--enable-gc=no"];}));

  # Perl bindings for various platforms.
  perlBindings = nixpkgs.lib.genAttrs systems (system: self.packages.${system}.nix.perl-bindings);

maintainers/README.md (new file)

@@ -0,0 +1,79 @@
# Nix maintainers team
## Motivation
The goal of the team is to help other people to contribute to Nix.
## Members
- Eelco Dolstra (@edolstra) Team lead
- Théophane Hufschmitt (@thufschmitt)
- Valentin Gagarin (@fricklerhandwerk)
- Thomas Bereknyei (@tomberek)
- Robert Hensing (@roberth)
## Meeting protocol
The team meets twice a week:
- Discussion meeting: [Fridays 13:00-14:00 CET](https://calendar.google.com/calendar/event?eid=MHNtOGVuNWtrZXNpZHR2bW1sM3QyN2ZjaGNfMjAyMjExMjVUMTIwMDAwWiBiOW81MmZvYnFqYWs4b3E4bGZraGczdDBxZ0Bn)
1. Triage issues and pull requests from the _No Status_ column (30 min)
2. Discuss issues and pull requests from the _To discuss_ column (30 min)
- Work meeting: [Mondays 13:00-15:00 CET](https://calendar.google.com/calendar/event?eid=NTM1MG1wNGJnOGpmOTZhYms3bTB1bnY5cWxfMjAyMjExMjFUMTIwMDAwWiBiOW81MmZvYnFqYWs4b3E4bGZraGczdDBxZ0Bn)
1. Code review on pull requests from _In review_.
2. Other chores and tasks.
Meeting notes are collected on a [collaborative scratchpad](https://pad.lassul.us/Cv7FpYx-Ri-4VjUykQOLAw), and published on Discourse under the [Nix category](https://discourse.nixos.org/c/dev/nix/50).
## Project board protocol
The team uses a [GitHub project board](https://github.com/orgs/NixOS/projects/19/views/1) for tracking its work.
Issues on the board progress through the following states:
- No Status
Team members can add pull requests or issues to discuss or review together.
During the discussion meeting, the team triages new items.
If there is disagreement on the general idea behind an issue or pull request, it is moved to _To discuss_, otherwise to _In review_.
- To discuss
Pull requests and issues that are important and controversial are discussed by the team during discussion meetings.
This may be where the merit of the change itself or the implementation strategy is contested by a team member.
- In review
Pull requests in this column are reviewed together during work meetings.
This is both for spreading implementation knowledge and for establishing common values in code reviews.
When the overall direction is agreed upon, even when further changes are required, the pull request is assigned to one team member.
- Assigned for merging
One team member is assigned to each of these pull requests.
They will communicate with the authors, and make the final approval once all remaining issues are addressed.
If more substantive issues arise, the assignee can move the pull request back to _To discuss_ to involve the team again.
The process is illustrated in the following diagram:
```mermaid
flowchart TD
discuss[To discuss]
review[To review]
New --> |Disagreement on idea| discuss
New & discuss --> |Consensus on idea| review
review --> |Consensus on implementation| Assigned
Assigned --> |Implementation issues arise| review
Assigned --> |Remaining issues fixed| Merged
```

@@ -28,7 +28,7 @@
    <key>SoftResourceLimits</key>
    <dict>
      <key>NumberOfFiles</key>
-      <integer>4096</integer>
+      <integer>1048576</integer>
    </dict>
  </dict>
</plist>

@@ -9,7 +9,7 @@ ConditionPathIsReadWrite=@localstatedir@/nix/daemon-socket

[Service]
ExecStart=@@bindir@/nix-daemon nix-daemon --daemon
KillMode=process
-LimitNOFILE=4096
+LimitNOFILE=1048576

[Install]
WantedBy=multi-user.target

@@ -97,13 +97,10 @@ is_os_darwin() {
}

contact_us() {
-    echo "You can open an issue at https://github.com/nixos/nix/issues"
+    echo "You can open an issue at"
+    echo "https://github.com/NixOS/nix/issues/new?labels=installer&template=installer.md"
    echo ""
-    echo "Or feel free to contact the team:"
-    echo " - Matrix: #nix:nixos.org"
-    echo " - IRC: in #nixos on irc.libera.chat"
-    echo " - twitter: @nixos_org"
-    echo " - forum: https://discourse.nixos.org"
+    echo "Or get in touch with the community: https://nixos.org/community"
}

get_help() {
    echo "We'd love to help if you need it."

@@ -32,7 +32,77 @@ MixEvalArgs::MixEvalArgs()
    addFlag({
        .longName = "include",
        .shortName = 'I',
-        .description = "Add *path* to the list of locations used to look up `<...>` file names.",
+        .description = R"(
Add *path* to the Nix search path. The Nix search path is
initialized from the colon-separated [`NIX_PATH`](./env-common.md#env-NIX_PATH) environment
variable, and is used to look up the location of Nix expressions using [paths](../language/values.md#type-path) enclosed in angle
brackets (i.e., `<nixpkgs>`).
For instance, passing
```
-I /home/eelco/Dev
-I /etc/nixos
```
will cause Nix to look for paths relative to `/home/eelco/Dev` and
`/etc/nixos`, in that order. This is equivalent to setting the
`NIX_PATH` environment variable to
```
/home/eelco/Dev:/etc/nixos
```
It is also possible to match paths against a prefix. For example,
passing
```
-I nixpkgs=/home/eelco/Dev/nixpkgs-branch
-I /etc/nixos
```
will cause Nix to search for `<nixpkgs/path>` in
`/home/eelco/Dev/nixpkgs-branch/path` and `/etc/nixos/nixpkgs/path`.
If a path in the Nix search path starts with `http://` or `https://`,
it is interpreted as the URL of a tarball that will be downloaded and
unpacked to a temporary location. The tarball must consist of a single
top-level directory. For example, passing
```
-I nixpkgs=https://github.com/NixOS/nixpkgs/archive/master.tar.gz
```
tells Nix to download and use the current contents of the `master`
branch in the `nixpkgs` repository.
The URLs of the tarballs from the official `nixos.org` channels
(see [the manual page for `nix-channel`](../nix-channel.md)) can be
abbreviated as `channel:<channel-name>`. For instance, the
following two flags are equivalent:
```
-I nixpkgs=channel:nixos-21.05
-I nixpkgs=https://nixos.org/channels/nixos-21.05/nixexprs.tar.xz
```
You can also fetch source trees using [flake URLs](./nix3-flake.md#url-like-syntax) and add them to the
search path. For instance,
```
-I nixpkgs=flake:nixpkgs
```
specifies that the prefix `nixpkgs` shall refer to the source tree
downloaded from the `nixpkgs` entry in the flake registry. Similarly,
```
-I nixpkgs=flake:github:NixOS/nixpkgs/nixos-22.05
```
makes `<nixpkgs>` refer to a particular branch of the
`NixOS/nixpkgs` repository on GitHub.
)",
        .category = category,
        .labels = {"path"},
        .handler = {[&](std::string s) { searchPath.push_back(s); }}

@@ -89,14 +159,25 @@ Bindings * MixEvalArgs::getAutoArgs(EvalState & state)
Path lookupFileArg(EvalState & state, std::string_view s)
{
-    if (isUri(s)) {
-        return state.store->toRealPath(
-            fetchers::downloadTarball(
-                state.store, resolveUri(s), "source", false).first.storePath);
-    } else if (s.size() > 2 && s.at(0) == '<' && s.at(s.size() - 1) == '>') {
+    if (EvalSettings::isPseudoUrl(s)) {
+        auto storePath = fetchers::downloadTarball(
+            state.store, EvalSettings::resolvePseudoUrl(s), "source", false).first.storePath;
+        return state.store->toRealPath(storePath);
+    }
+    else if (hasPrefix(s, "flake:")) {
+        settings.requireExperimentalFeature(Xp::Flakes);
+        auto flakeRef = parseFlakeRef(std::string(s.substr(6)), {}, true, false);
+        auto storePath = flakeRef.resolve(state.store).fetchTree(state.store).first.storePath;
+        return state.store->toRealPath(storePath);
+    }
+    else if (s.size() > 2 && s.at(0) == '<' && s.at(s.size() - 1) == '>') {
        Path p(s.substr(1, s.size() - 2));
        return state.findFile(p);
-    } else
+    }
+    else
        return absPath(std::string(s));
}

@@ -475,44 +475,56 @@ static StorePath getDeriver(
struct InstallableStorePath : Installable
{
    ref<Store> store;
-    StorePath storePath;
+    DerivedPath req;

    InstallableStorePath(ref<Store> store, StorePath && storePath)
-        : store(store), storePath(std::move(storePath)) { }
+        : store(store),
+          req(storePath.isDerivation()
+              ? (DerivedPath) DerivedPath::Built {
+                  .drvPath = std::move(storePath),
+                  .outputs = {},
+              }
+              : (DerivedPath) DerivedPath::Opaque {
+                  .path = std::move(storePath),
+              })
+    { }

-    std::string what() const override { return store->printStorePath(storePath); }
+    InstallableStorePath(ref<Store> store, DerivedPath && req)
+        : store(store), req(std::move(req))
+    { }
+
+    std::string what() const override
+    {
+        return req.to_string(*store);
+    }

    DerivedPaths toDerivedPaths() override
    {
-        if (storePath.isDerivation()) {
-            auto drv = store->readDerivation(storePath);
-            return {
-                DerivedPath::Built {
-                    .drvPath = storePath,
-                    .outputs = drv.outputNames(),
-                }
-            };
-        } else {
-            return {
-                DerivedPath::Opaque {
-                    .path = storePath,
-                }
-            };
-        }
+        return { req };
    }

    StorePathSet toDrvPaths(ref<Store> store) override
    {
-        if (storePath.isDerivation()) {
-            return {storePath};
-        } else {
-            return {getDeriver(store, *this, storePath)};
-        }
+        return std::visit(overloaded {
+            [&](const DerivedPath::Built & bfd) -> StorePathSet {
+                return { bfd.drvPath };
+            },
+            [&](const DerivedPath::Opaque & bo) -> StorePathSet {
+                return { getDeriver(store, *this, bo.path) };
+            },
+        }, req.raw());
    }

    std::optional<StorePath> getStorePath() override
    {
-        return storePath;
+        return std::visit(overloaded {
+            [&](const DerivedPath::Built & bfd) {
+                return bfd.drvPath;
+            },
+            [&](const DerivedPath::Opaque & bo) {
+                return bo.path;
+            },
+        }, req.raw());
    }
};

@@ -889,7 +901,8 @@ std::vector<std::shared_ptr<Installable>> SourceExprCommand::parseInstallables(
        if (file == "-") {
            auto e = state->parseStdin();
            state->eval(e, *vFile);
-        } else if (file)
+        }
+        else if (file)
            state->evalFile(lookupFileArg(*state, *file), *vFile);
        else {
            auto e = state->parseExprFromString(*expr, absPath("."));

@@ -953,6 +966,22 @@ std::vector<std::shared_ptr<Installable>> SourceExprCommand::parseInstallables(
            /*if (modifyInstallable) {
                throw Error("cannot apply function: installable cannot be evaluated");
            }*/
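            // Illustration (not from the patch itself): with the block below, an
            // installable argument such as
            //   /nix/store/gzaflydcr6sb3567hap9q6srzx8ggdgg-glibc-2.33-78.drv^dev
            // is split at the last '^'; the part before it must be a store
            // derivation path, and the part after it names the outputs to build.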
auto found = s.rfind('^');
if (found != std::string::npos) {
try {
result.push_back(std::make_shared<InstallableStorePath>(
store,
DerivedPath::Built::parse(*store, s.substr(0, found), s.substr(found + 1))));
continue;
} catch (BadStorePath &) {
} catch (...) {
if (!ex)
ex = std::current_exception();
}
}
found = s.find('/');
if (found != std::string::npos) {
try {
    result.push_back(std::make_shared<InstallableStorePath>(store, store->followLinksToStorePath(s)));
    continue;

@@ -787,7 +787,7 @@ void NixRepl::loadFlake(const std::string & flakeRefS)
        flake::LockFlags {
            .updateLockFile = false,
            .useRegistries = !evalSettings.pureEval,
-            .allowMutable = !evalSettings.pureEval,
+            .allowUnlocked = !evalSettings.pureEval,
        }),
        v);
    addAttrsToScope(v);

@@ -645,17 +645,17 @@ NixInt AttrCursor::getInt()
            cachedValue = root->db->getAttr(getKey());
        if (cachedValue && !std::get_if<placeholder_t>(&cachedValue->second)) {
            if (auto i = std::get_if<int_t>(&cachedValue->second)) {
-                debug("using cached Integer attribute '%s'", getAttrPathStr());
+                debug("using cached integer attribute '%s'", getAttrPathStr());
                return i->x;
            } else
-                throw TypeError("'%s' is not an Integer", getAttrPathStr());
+                throw TypeError("'%s' is not an integer", getAttrPathStr());
        }
    }

    auto & v = forceValue();

    if (v.type() != nInt)
-        throw TypeError("'%s' is not an Integer", getAttrPathStr());
+        throw TypeError("'%s' is not an integer", getAttrPathStr());

    return v.integer;
}

@@ -45,7 +45,7 @@ static char * allocString(size_t size)
#if HAVE_BOEHMGC
    t = (char *) GC_MALLOC_ATOMIC(size);
#else
-    t = malloc(size);
+    t = (char *) malloc(size);
#endif
    if (!t) throw std::bad_alloc();
    return t;

@@ -402,7 +402,8 @@ static Strings parseNixPath(const std::string & s)
        }

        if (*p == ':') {
-            if (isUri(std::string(start2, s.end()))) {
+            auto prefix = std::string(start2, s.end());
+            if (EvalSettings::isPseudoUrl(prefix) || hasPrefix(prefix, "flake:")) {
                ++p;
                while (p != s.end() && *p != ':') ++p;
            }

@@ -470,9 +471,6 @@ EvalState::EvalState(
#if HAVE_BOEHMGC
    , valueAllocCache(std::allocate_shared<void *>(traceable_allocator<void *>(), nullptr))
    , env1AllocCache(std::allocate_shared<void *>(traceable_allocator<void *>(), nullptr))
-#else
-    , valueAllocCache(std::make_shared<void *>(nullptr))
-    , env1AllocCache(std::make_shared<void *>(nullptr))
#endif
    , baseEnv(allocEnv(128))
    , staticBaseEnv{std::make_shared<StaticEnv>(false, nullptr)}

@@ -1646,7 +1644,7 @@ void EvalState::callFunction(Value & fun, size_t nrArgs, Value * * args, Value &
        auto dts = debugRepl
            ? makeDebugTraceStacker(
                *this, *lambda.body, env2, positions[lambda.pos],
-                "while evaluating %s",
+                "while calling %s",
                lambda.name
                    ? concatStrings("'", symbols[lambda.name], "'")
                    : "anonymous lambda")

@@ -1655,11 +1653,11 @@
            lambda.body->eval(*this, env2, vCur);
        } catch (Error & e) {
            if (loggerSettings.showTrace.get()) {
-                addErrorTrace(e, lambda.pos, "while evaluating %s",
+                addErrorTrace(e, lambda.pos, "while calling %s",
                    (lambda.name
                        ? concatStrings("'", symbols[lambda.name], "'")
                        : "anonymous lambda"));
-                addErrorTrace(e, pos, "from call site%s", "");
+                addErrorTrace(e, pos, "while evaluating call site%s", "");
            }
            throw;
        }

@@ -1806,7 +1804,7 @@ void EvalState::autoCallFunction(Bindings & args, Value & fun, Value & res)
Nix attempted to evaluate a function as a top level expression; in
this case it must have its arguments supplied either by default
values, or passed explicitly with '--arg' or '--argstr'. See
-https://nixos.org/manual/nix/stable/expressions/language-constructs.html#functions.)", symbols[i.name],
+https://nixos.org/manual/nix/stable/language/constructs.html#functions.)", symbols[i.name],
        *fun.lambda.env, *fun.lambda.fun);
    }
}

@@ -2583,6 +2581,23 @@ Strings EvalSettings::getDefaultNixPath()
    return res;
}
bool EvalSettings::isPseudoUrl(std::string_view s)
{
if (s.compare(0, 8, "channel:") == 0) return true;
size_t pos = s.find("://");
if (pos == std::string::npos) return false;
std::string scheme(s, 0, pos);
return scheme == "http" || scheme == "https" || scheme == "file" || scheme == "channel" || scheme == "git" || scheme == "s3" || scheme == "ssh";
}
std::string EvalSettings::resolvePseudoUrl(std::string_view url)
{
if (hasPrefix(url, "channel:"))
return "https://nixos.org/channels/" + std::string(url.substr(8)) + "/nixexprs.tar.xz";
else
return std::string(url);
}
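// Illustrative examples (not from the patch; they follow directly from the two
// definitions above):
//   isPseudoUrl("channel:nixos-21.05")                 -> true
//   isPseudoUrl("https://example.org/nixexprs.tar.xz") -> true
//   isPseudoUrl("/home/eelco/Dev")                     -> false
//   resolvePseudoUrl("channel:nixos-21.05")
//       -> "https://nixos.org/channels/nixos-21.05/nixexprs.tar.xz"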
EvalSettings evalSettings;

static GlobalConfig::Register rEvalSettings(&evalSettings);

@@ -590,6 +590,10 @@ struct EvalSettings : Config

    static Strings getDefaultNixPath();

    static bool isPseudoUrl(std::string_view s);

    static std::string resolvePseudoUrl(std::string_view url);

    Setting<bool> enableNativeCode{this, false, "allow-unsafe-native-code-during-evaluation",
        "Whether builtin functions that allow executing native code should be enabled."};

@@ -143,7 +143,7 @@ static FlakeInput parseFlakeInput(EvalState & state,
            } catch (Error & e) {
                e.addTrace(
                    state.positions[attr.pos],
-                    hintfmt("in flake attribute '%s'", state.symbols[attr.name]));
+                    hintfmt("while evaluating flake attribute '%s'", state.symbols[attr.name]));
                throw;
            }
    }

@@ -152,7 +152,7 @@ static FlakeInput parseFlakeInput(EvalState & state,
        try {
            input.ref = FlakeRef::fromAttrs(attrs);
        } catch (Error & e) {
-            e.addTrace(state.positions[pos], hintfmt("in flake input"));
+            e.addTrace(state.positions[pos], hintfmt("while evaluating flake input"));
            throw;
        }
    else {

@@ -353,7 +353,7 @@ LockedFlake lockFlake(
    std::function<void(
        const FlakeInputs & flakeInputs,
-        std::shared_ptr<Node> node,
+        ref<Node> node,
        const InputPath & inputPathPrefix,
        std::shared_ptr<const Node> oldNode,
        const InputPath & lockRootPath,

@@ -362,9 +362,15 @@ LockedFlake lockFlake(
        computeLocks;

    computeLocks = [&](
+        /* The inputs of this node, either from flake.nix or
+           flake.lock. */
        const FlakeInputs & flakeInputs,
-        std::shared_ptr<Node> node,
+        /* The node whose locks are to be updated.*/
+        ref<Node> node,
+        /* The path to this node in the lock file graph. */
        const InputPath & inputPathPrefix,
+        /* The old node, if any, from which locks can be
+           copied. */
        std::shared_ptr<const Node> oldNode,
        const InputPath & lockRootPath,
        const Path & parentPath,

@@ -452,7 +458,7 @@ LockedFlake lockFlake(
                    /* Copy the input from the old lock since its flakeref
                       didn't change and there is no override from a
                       higher level flake. */
-                    auto childNode = std::make_shared<LockedNode>(
+                    auto childNode = make_ref<LockedNode>(
                        oldLock->lockedRef, oldLock->originalRef, oldLock->isFlake);
                    node->inputs.insert_or_assign(id, childNode);

@@ -481,7 +487,7 @@ LockedFlake lockFlake(
                                .isFlake = (*lockedNode)->isFlake,
                            });
                    } else if (auto follows = std::get_if<1>(&i.second)) {
-                        if (! trustLock) {
+                        if (!trustLock) {
                            // It is possible that the flake has changed,
                            // so we must confirm all the follows that are in the lock file are also in the flake.
                            auto overridePath(inputPath);

@@ -521,8 +527,8 @@ LockedFlake lockFlake(
                       this input. */
                    debug("creating new input '%s'", inputPathS);
-                    if (!lockFlags.allowMutable && !input.ref->input.isLocked())
-                        throw Error("cannot update flake input '%s' in pure mode", inputPathS);
+                    if (!lockFlags.allowUnlocked && !input.ref->input.isLocked())
+                        throw Error("cannot update unlocked flake input '%s' in pure mode", inputPathS);
                    /* Note: in case of an --override-input, we use
                       the *original* ref (input2.ref) for the

@@ -544,7 +550,7 @@ LockedFlake lockFlake(
                        auto inputFlake = getFlake(state, localRef, useRegistries, flakeCache, inputPath);
-                        auto childNode = std::make_shared<LockedNode>(inputFlake.lockedRef, ref);
+                        auto childNode = make_ref<LockedNode>(inputFlake.lockedRef, ref);
                        node->inputs.insert_or_assign(id, childNode);

@@ -564,15 +570,19 @@ LockedFlake lockFlake(
                            oldLock
                            ? std::dynamic_pointer_cast<const Node>(oldLock)
                            : LockFile::read(
-                                inputFlake.sourceInfo->actualPath + "/" + inputFlake.lockedRef.subdir + "/flake.lock").root,
-                            oldLock ? lockRootPath : inputPath, localPath, false);
+                                inputFlake.sourceInfo->actualPath + "/" + inputFlake.lockedRef.subdir + "/flake.lock").root.get_ptr(),
+                            oldLock ? lockRootPath : inputPath,
+                            localPath,
+                            false);
                    }
                    else {
                        auto [sourceInfo, resolvedRef, lockedRef] = fetchOrSubstituteTree(
                            state, *input.ref, useRegistries, flakeCache);
-                        node->inputs.insert_or_assign(id,
-                            std::make_shared<LockedNode>(lockedRef, ref, false));
+                        auto childNode = make_ref<LockedNode>(lockedRef, ref, false);
+                        node->inputs.insert_or_assign(id, childNode);
                    }
                }

@@ -587,8 +597,13 @@ LockedFlake lockFlake(
        auto parentPath = canonPath(flake.sourceInfo->actualPath + "/" + flake.lockedRef.subdir, true);
        computeLocks(
-            flake.inputs, newLockFile.root, {},
-            lockFlags.recreateLockFile ? nullptr : oldLockFile.root, {}, parentPath, false);
+            flake.inputs,
+            newLockFile.root,
+            {},
+            lockFlags.recreateLockFile ? nullptr : oldLockFile.root.get_ptr(),
+            {},
+            parentPath,
+            false);

        for (auto & i : lockFlags.inputOverrides)
            if (!overridesUsed.count(i.first))

@@ -611,9 +626,9 @@ LockedFlake lockFlake(
        if (lockFlags.writeLockFile) {
            if (auto sourcePath = topRef.input.getSourcePath()) {
-                if (!newLockFile.isImmutable()) {
+                if (auto unlockedInput = newLockFile.isUnlocked()) {
                    if (fetchSettings.warnDirty)
-                        warn("will not write lock file of flake '%s' because it has a mutable input", topRef);
+                        warn("will not write lock file of flake '%s' because it has an unlocked input ('%s')", topRef, *unlockedInput);
                } else {
                    if (!lockFlags.updateLockFile)
                        throw Error("flake '%s' requires lock file changes but they're not allowed due to '--no-update-lock-file'", topRef);

@@ -737,7 +752,7 @@ static void prim_getFlake(EvalState & state, const PosIdx pos, Value * * args, V
            .updateLockFile = false,
            .writeLockFile = false,
            .useRegistries = !evalSettings.pureEval && fetchSettings.useRegistries,
-            .allowMutable = !evalSettings.pureEval,
+            .allowUnlocked = !evalSettings.pureEval,
        }),
        v);
}

@@ -108,11 +108,11 @@ struct LockFlags
    bool applyNixConfig = false;

-    /* Whether mutable flake references (i.e. those without a Git
+    /* Whether unlocked flake references (i.e. those without a Git
       revision or similar) without a corresponding lock are
-       allowed. Mutable flake references with a lock are always
+       allowed. Unlocked flake references with a lock are always
       allowed. */
-    bool allowMutable = true;
+    bool allowUnlocked = true;

    /* Whether to commit changes to flake.lock. */
    bool commitLockFile = false;

@@ -35,7 +35,7 @@ typedef std::string FlakeId;
struct FlakeRef
{
-    /* fetcher-specific representation of the input, sufficient to
+    /* Fetcher-specific representation of the input, sufficient to
       perform the fetch operation. */
    fetchers::Input input;

View file

@ -31,7 +31,7 @@ FlakeRef getFlakeRef(
} }
LockedNode::LockedNode(const nlohmann::json & json) LockedNode::LockedNode(const nlohmann::json & json)
: lockedRef(getFlakeRef(json, "locked", "info")) : lockedRef(getFlakeRef(json, "locked", "info")) // FIXME: remove "info"
, originalRef(getFlakeRef(json, "original", nullptr)) , originalRef(getFlakeRef(json, "original", nullptr))
, isFlake(json.find("flake") != json.end() ? (bool) json["flake"] : true) , isFlake(json.find("flake") != json.end() ? (bool) json["flake"] : true)
{ {
@ -49,15 +49,15 @@ std::shared_ptr<Node> LockFile::findInput(const InputPath & path)
{ {
auto pos = root; auto pos = root;
if (!pos) return {};
for (auto & elem : path) { for (auto & elem : path) {
if (auto i = get(pos->inputs, elem)) { if (auto i = get(pos->inputs, elem)) {
if (auto node = std::get_if<0>(&*i)) if (auto node = std::get_if<0>(&*i))
pos = *node; pos = *node;
else if (auto follows = std::get_if<1>(&*i)) { else if (auto follows = std::get_if<1>(&*i)) {
pos = findInput(*follows); if (auto p = findInput(*follows))
if (!pos) return {}; pos = ref(p);
else
return {};
} }
} else } else
return {}; return {};
@ -72,7 +72,7 @@ LockFile::LockFile(const nlohmann::json & json, const Path & path)
if (version < 5 || version > 7) if (version < 5 || version > 7)
throw Error("lock file '%s' has unsupported version %d", path, version); throw Error("lock file '%s' has unsupported version %d", path, version);
std::unordered_map<std::string, std::shared_ptr<Node>> nodeMap; std::map<std::string, ref<Node>> nodeMap;
std::function<void(Node & node, const nlohmann::json & jsonNode)> getInputs; std::function<void(Node & node, const nlohmann::json & jsonNode)> getInputs;
@ -93,12 +93,12 @@ LockFile::LockFile(const nlohmann::json & json, const Path & path)
auto jsonNode2 = nodes.find(inputKey); auto jsonNode2 = nodes.find(inputKey);
if (jsonNode2 == nodes.end()) if (jsonNode2 == nodes.end())
throw Error("lock file references missing node '%s'", inputKey); throw Error("lock file references missing node '%s'", inputKey);
auto input = std::make_shared<LockedNode>(*jsonNode2); auto input = make_ref<LockedNode>(*jsonNode2);
k = nodeMap.insert_or_assign(inputKey, input).first; k = nodeMap.insert_or_assign(inputKey, input).first;
getInputs(*input, *jsonNode2); getInputs(*input, *jsonNode2);
} }
if (auto child = std::dynamic_pointer_cast<LockedNode>(k->second)) if (auto child = k->second.dynamic_pointer_cast<LockedNode>())
node.inputs.insert_or_assign(i.key(), child); node.inputs.insert_or_assign(i.key(), ref(child));
else else
// FIXME: replace by follows node // FIXME: replace by follows node
throw Error("lock file contains cycle to root node"); throw Error("lock file contains cycle to root node");
@ -122,9 +122,9 @@ nlohmann::json LockFile::toJSON() const
std::unordered_map<std::shared_ptr<const Node>, std::string> nodeKeys; std::unordered_map<std::shared_ptr<const Node>, std::string> nodeKeys;
std::unordered_set<std::string> keys; std::unordered_set<std::string> keys;
std::function<std::string(const std::string & key, std::shared_ptr<const Node> node)> dumpNode; std::function<std::string(const std::string & key, ref<const Node> node)> dumpNode;
dumpNode = [&](std::string key, std::shared_ptr<const Node> node) -> std::string dumpNode = [&](std::string key, ref<const Node> node) -> std::string
{ {
auto k = nodeKeys.find(node); auto k = nodeKeys.find(node);
if (k != nodeKeys.end()) if (k != nodeKeys.end())
@ -159,10 +159,11 @@ nlohmann::json LockFile::toJSON() const
n["inputs"] = std::move(inputs); n["inputs"] = std::move(inputs);
} }
if (auto lockedNode = std::dynamic_pointer_cast<const LockedNode>(node)) { if (auto lockedNode = node.dynamic_pointer_cast<const LockedNode>()) {
n["original"] = fetchers::attrsToJSON(lockedNode->originalRef.toAttrs()); n["original"] = fetchers::attrsToJSON(lockedNode->originalRef.toAttrs());
n["locked"] = fetchers::attrsToJSON(lockedNode->lockedRef.toAttrs()); n["locked"] = fetchers::attrsToJSON(lockedNode->lockedRef.toAttrs());
if (!lockedNode->isFlake) n["flake"] = false; if (!lockedNode->isFlake)
n["flake"] = false;
} }
nodes[key] = std::move(n); nodes[key] = std::move(n);
@ -201,13 +202,13 @@ void LockFile::write(const Path & path) const
writeFile(path, fmt("%s\n", *this)); writeFile(path, fmt("%s\n", *this));
} }
bool LockFile::isImmutable() const std::optional<FlakeRef> LockFile::isUnlocked() const
{ {
std::unordered_set<std::shared_ptr<const Node>> nodes; std::set<ref<const Node>> nodes;
std::function<void(std::shared_ptr<const Node> node)> visit; std::function<void(ref<const Node> node)> visit;
visit = [&](std::shared_ptr<const Node> node) visit = [&](ref<const Node> node)
{ {
if (!nodes.insert(node).second) return; if (!nodes.insert(node).second) return;
for (auto & i : node->inputs) for (auto & i : node->inputs)
@ -219,11 +220,12 @@ bool LockFile::isImmutable() const
for (auto & i : nodes) { for (auto & i : nodes) {
if (i == root) continue; if (i == root) continue;
auto lockedNode = std::dynamic_pointer_cast<const LockedNode>(i); auto node = i.dynamic_pointer_cast<const LockedNode>();
if (lockedNode && !lockedNode->lockedRef.input.isLocked()) return false; if (node && !node->lockedRef.input.isLocked())
return node->lockedRef;
} }
return true; return {};
} }
bool LockFile::operator ==(const LockFile & other) const bool LockFile::operator ==(const LockFile & other) const
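`isImmutable()` becomes `isUnlocked()`: rather than a bare yes/no answer, the traversal now reports *which* input is still unlocked so callers can warn about it. A rough sketch of that traversal over a simplified node type (the real code returns the offending `FlakeRef`; a string stands in for it here):

```cpp
#include <functional>
#include <map>
#include <memory>
#include <optional>
#include <set>
#include <string>

// Simplified node: a flag stands in for "this input's fetcher ref is locked"
// and a string for the FlakeRef that the real code would return.
struct Node
{
    bool locked = true;
    std::string flakeRef;
    std::map<std::string, std::shared_ptr<Node>> inputs;
};

// Depth-first walk over the input graph; instead of answering "is this lock
// file immutable?" with a bool, report which input is still unlocked.
std::optional<std::string> isUnlocked(const std::shared_ptr<Node> & root)
{
    std::set<const Node *> seen;

    std::function<std::optional<std::string>(const std::shared_ptr<Node> &)> visit =
        [&](const std::shared_ptr<Node> & node) -> std::optional<std::string>
    {
        if (!seen.insert(node.get()).second) return std::nullopt;   // already visited
        if (node.get() != root.get() && !node->locked) return node->flakeRef;
        for (auto & [name, child] : node->inputs)
            if (auto bad = visit(child)) return bad;
        return std::nullopt;
    };

    return visit(root);
}
```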
@ -247,12 +249,12 @@ InputPath parseInputPath(std::string_view s)
std::map<InputPath, Node::Edge> LockFile::getAllInputs() const std::map<InputPath, Node::Edge> LockFile::getAllInputs() const
{ {
std::unordered_set<std::shared_ptr<Node>> done; std::set<ref<Node>> done;
std::map<InputPath, Node::Edge> res; std::map<InputPath, Node::Edge> res;
std::function<void(const InputPath & prefix, std::shared_ptr<Node> node)> recurse; std::function<void(const InputPath & prefix, ref<Node> node)> recurse;
recurse = [&](const InputPath & prefix, std::shared_ptr<Node> node) recurse = [&](const InputPath & prefix, ref<Node> node)
{ {
if (!done.insert(node).second) return; if (!done.insert(node).second) return;

View file

@ -20,7 +20,7 @@ struct LockedNode;
type LockedNode. */ type LockedNode. */
struct Node : std::enable_shared_from_this<Node> struct Node : std::enable_shared_from_this<Node>
{ {
typedef std::variant<std::shared_ptr<LockedNode>, InputPath> Edge; typedef std::variant<ref<LockedNode>, InputPath> Edge;
std::map<FlakeId, Edge> inputs; std::map<FlakeId, Edge> inputs;
@ -47,11 +47,13 @@ struct LockedNode : Node
struct LockFile struct LockFile
{ {
std::shared_ptr<Node> root = std::make_shared<Node>(); ref<Node> root = make_ref<Node>();
LockFile() {}; LockFile() {};
LockFile(const nlohmann::json & json, const Path & path); LockFile(const nlohmann::json & json, const Path & path);
typedef std::map<ref<const Node>, std::string> KeyMap;
nlohmann::json toJSON() const; nlohmann::json toJSON() const;
std::string to_string() const; std::string to_string() const;
@ -60,7 +62,8 @@ struct LockFile
void write(const Path & path) const; void write(const Path & path) const;
bool isImmutable() const; /* Check whether this lock file has any unlocked inputs. */
std::optional<FlakeRef> isUnlocked() const;
bool operator ==(const LockFile & other) const; bool operator ==(const LockFile & other) const;
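Throughout this commit, `std::shared_ptr<Node>` gives way to Nix's `ref<T>`, a shared pointer that is guaranteed non-null, which removes a whole class of `if (!ptr)` checks from the lock-file code. A minimal sketch of the idea (not the real implementation in the Nix tree):

```cpp
#include <memory>
#include <stdexcept>
#include <utility>

// A shared pointer that can never be null: constructing it from an empty
// std::shared_ptr throws instead of handing out a null handle. Sketch only;
// the real class has more constructors and conversions.
template<typename T>
class ref
{
    std::shared_ptr<T> p;

public:
    explicit ref(std::shared_ptr<T> p_) : p(std::move(p_))
    {
        if (!p) throw std::invalid_argument("null pointer cast to ref");
    }

    T & operator *() const { return *p; }
    T * operator ->() const { return p.get(); }

    // Like std::dynamic_pointer_cast, but returns a possibly-null
    // shared_ptr, because the cast itself can fail.
    template<typename U>
    std::shared_ptr<U> dynamic_pointer_cast() const
    {
        return std::dynamic_pointer_cast<U>(p);
    }

    operator std::shared_ptr<T>() const { return p; }
};

template<typename T, typename... Args>
ref<T> make_ref(Args &&... args)
{
    return ref<T>(std::make_shared<T>(std::forward<Args>(args)...));
}
```

That is also why the hunks above first `dynamic_pointer_cast` to `LockedNode`, check the result, and only then wrap it back into a `ref(...)`.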

View file

@ -150,7 +150,7 @@ DrvInfo::Outputs DrvInfo::queryOutputs(bool withPaths, bool onlyOutputsToInstall
/* Check for `meta.outputsToInstall` and return `outputs` reduced to that. */ /* Check for `meta.outputsToInstall` and return `outputs` reduced to that. */
const Value * outTI = queryMeta("outputsToInstall"); const Value * outTI = queryMeta("outputsToInstall");
if (!outTI) return outputs; if (!outTI) return outputs;
const auto errMsg = Error("this derivation has bad 'meta.outputsToInstall'"); auto errMsg = Error("this derivation has bad 'meta.outputsToInstall'");
/* ^ this shows during `nix-env -i` right under the bad derivation */ /* ^ this shows during `nix-env -i` right under the bad derivation */
if (!outTI->isList()) throw errMsg; if (!outTI->isList()) throw errMsg;
Outputs result; Outputs result;

View file

@ -289,7 +289,6 @@ std::string showAttrPath(const SymbolTable & symbols, const AttrPath & attrPath)
} }
/* Computing levels/displacements for variables. */ /* Computing levels/displacements for variables. */
void Expr::bindVars(EvalState & es, const std::shared_ptr<const StaticEnv> & env) void Expr::bindVars(EvalState & es, const std::shared_ptr<const StaticEnv> & env)

View file

@ -645,6 +645,7 @@ formal
#include "filetransfer.hh" #include "filetransfer.hh"
#include "fetchers.hh" #include "fetchers.hh"
#include "store-api.hh" #include "store-api.hh"
#include "flake/flake.hh"
namespace nix { namespace nix {
@ -807,17 +808,28 @@ std::pair<bool, std::string> EvalState::resolveSearchPathElem(const SearchPathEl
std::pair<bool, std::string> res; std::pair<bool, std::string> res;
if (isUri(elem.second)) { if (EvalSettings::isPseudoUrl(elem.second)) {
try { try {
res = { true, store->toRealPath(fetchers::downloadTarball( auto storePath = fetchers::downloadTarball(
store, resolveUri(elem.second), "source", false).first.storePath) }; store, EvalSettings::resolvePseudoUrl(elem.second), "source", false).first.storePath;
res = { true, store->toRealPath(storePath) };
} catch (FileTransferError & e) { } catch (FileTransferError & e) {
logWarning({ logWarning({
.msg = hintfmt("Nix search path entry '%1%' cannot be downloaded, ignoring", elem.second) .msg = hintfmt("Nix search path entry '%1%' cannot be downloaded, ignoring", elem.second)
}); });
res = { false, "" }; res = { false, "" };
} }
} else { }
else if (hasPrefix(elem.second, "flake:")) {
settings.requireExperimentalFeature(Xp::Flakes);
auto flakeRef = parseFlakeRef(elem.second.substr(6), {}, true, false);
debug("fetching flake search path element '%s''", elem.second);
auto storePath = flakeRef.resolve(store).fetchTree(store).first.storePath;
res = { true, store->toRealPath(storePath) };
}
else {
auto path = absPath(elem.second); auto path = absPath(elem.second);
if (pathExists(path)) if (pathExists(path))
res = { true, path }; res = { true, path };
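`resolveSearchPathElem` now distinguishes three cases: pseudo-URLs that are downloaded as tarballs, `flake:` references resolved through the flake machinery (behind the `flakes` experimental feature), and ordinary filesystem paths. A standalone sketch of that dispatch with the fetching parts stubbed out (the helper signatures below are assumptions, not the real Nix API):

```cpp
#include <filesystem>
#include <iostream>
#include <optional>
#include <string>

// Stubs for the fetching machinery; the real downloadTarball lives in
// libfetchers and fetchFlake is a stand-in for resolve()/fetchTree().
std::string downloadTarball(const std::string &) { return "/nix/store/...-source"; }
std::string fetchFlake(const std::string &) { return "/nix/store/...-source"; }

// Does the entry look like something to download? (The real helper also
// understands the deprecated 'channel:' shorthand; see the file-transfer
// cleanup later in this diff.)
bool isPseudoUrl(const std::string & s)
{
    return s.rfind("http://", 0) == 0
        || s.rfind("https://", 0) == 0
        || s.rfind("file://", 0) == 0;
}

// Resolve one NIX_PATH entry to a local path, or nothing if it can't be found.
std::optional<std::string> resolveSearchPathElem(const std::string & elem)
{
    if (isPseudoUrl(elem))
        return downloadTarball(elem);

    if (elem.rfind("flake:", 0) == 0)
        // e.g. "flake:nixpkgs" -> fetch the flake and use its source tree
        // (requires the 'flakes' experimental feature).
        return fetchFlake(elem.substr(6));

    auto path = std::filesystem::absolute(elem).string();
    if (std::filesystem::exists(path))
        return path;

    std::cerr << "warning: Nix search path entry '" << elem << "' does not exist, ignoring\n";
    return std::nullopt;
}
```

With this in place, a search-path entry such as `nixpkgs=flake:nixpkgs` resolves through the flake registry like any other flake reference.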

View file

@ -1461,10 +1461,10 @@ static RegisterPrimOp primop_storePath({
static void prim_pathExists(EvalState & state, const PosIdx pos, Value * * args, Value & v) static void prim_pathExists(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{ {
/* We don't check the path right now, because we don't want to /* We don't check the path right now, because we don't want to
throw if the path isn't allowed, but just return false (and we throw if the path isn't allowed, but just return false (and we
can't just catch the exception here because we still want to can't just catch the exception here because we still want to
throw if something in the evaluation of `*args[0]` tries to throw if something in the evaluation of `*args[0]` tries to
access an unauthorized path). */ access an unauthorized path). */
auto path = realisePath(state, pos, *args[0], { .checkForPureEval = false }); auto path = realisePath(state, pos, *args[0], { .checkForPureEval = false });
try { try {
@ -2420,12 +2420,18 @@ static RegisterPrimOp primop_listToAttrs({
Construct a set from a list specifying the names and values of each Construct a set from a list specifying the names and values of each
attribute. Each element of the list should be a set consisting of a attribute. Each element of the list should be a set consisting of a
string-valued attribute `name` specifying the name of the attribute, string-valued attribute `name` specifying the name of the attribute,
and an attribute `value` specifying its value. Example: and an attribute `value` specifying its value.
In case of duplicate occurrences of the same name, the first
takes precedence.
Example:
```nix ```nix
builtins.listToAttrs builtins.listToAttrs
[ { name = "foo"; value = 123; } [ { name = "foo"; value = 123; }
{ name = "bar"; value = 456; } { name = "bar"; value = 456; }
{ name = "bar"; value = 420; }
] ]
``` ```
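The added sentence pins down the duplicate-name behaviour, and the extra `bar` entry in the example demonstrates it. The same first-wins rule is what `std::map::emplace` gives you in C++; a tiny self-contained illustration:

```cpp
#include <cassert>
#include <map>
#include <string>
#include <utility>
#include <vector>

int main()
{
    // Mirrors the Nix example above: two entries named "bar"; the first wins.
    std::vector<std::pair<std::string, int>> list = {
        {"foo", 123},
        {"bar", 456},
        {"bar", 420},
    };

    std::map<std::string, int> attrs;
    for (auto & [name, value] : list)
        attrs.emplace(name, value);   // no-op when 'name' is already present

    assert(attrs.at("foo") == 123);
    assert(attrs.at("bar") == 456);   // the first occurrence took precedence
    return 0;
}
```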

View file

@ -220,8 +220,6 @@ static void fetch(EvalState & state, const PosIdx pos, Value * * args, Value & v
} else } else
url = state.forceStringNoCtx(*args[0], pos); url = state.forceStringNoCtx(*args[0], pos);
url = resolveUri(*url);
state.checkURI(*url); state.checkURI(*url);
if (name == "") if (name == "")

View file

@ -71,7 +71,12 @@ struct FetchSettings : public Config
"Whether to warn about dirty Git/Mercurial trees."}; "Whether to warn about dirty Git/Mercurial trees."};
Setting<std::string> flakeRegistry{this, "https://channels.nixos.org/flake-registry.json", "flake-registry", Setting<std::string> flakeRegistry{this, "https://channels.nixos.org/flake-registry.json", "flake-registry",
"Path or URI of the global flake registry."}; R"(
Path or URI of the global flake registry.
When empty, disables the global flake registry.
)"};
Setting<bool> useRegistries{this, true, "use-registries", Setting<bool> useRegistries{this, true, "use-registries",
"Whether to use flake registries to resolve flake references."}; "Whether to use flake registries to resolve flake references."};

View file

@ -266,7 +266,7 @@ std::optional<time_t> Input::getLastModified() const
return {}; return {};
} }
ParsedURL InputScheme::toURL(const Input & input) ParsedURL InputScheme::toURL(const Input & input) const
{ {
throw Error("don't know how to convert input '%s' to a URL", attrsToJSON(input.attrs)); throw Error("don't know how to convert input '%s' to a URL", attrsToJSON(input.attrs));
} }
@ -274,7 +274,7 @@ ParsedURL InputScheme::toURL(const Input & input)
Input InputScheme::applyOverrides( Input InputScheme::applyOverrides(
const Input & input, const Input & input,
std::optional<std::string> ref, std::optional<std::string> ref,
std::optional<Hash> rev) std::optional<Hash> rev) const
{ {
if (ref) if (ref)
throw Error("don't know how to set branch/tag name of input '%s' to '%s'", input.to_string(), *ref); throw Error("don't know how to set branch/tag name of input '%s' to '%s'", input.to_string(), *ref);
@ -293,7 +293,7 @@ void InputScheme::markChangedFile(const Input & input, std::string_view file, st
assert(false); assert(false);
} }
void InputScheme::clone(const Input & input, const Path & destDir) void InputScheme::clone(const Input & input, const Path & destDir) const
{ {
throw Error("do not know how to clone input '%s'", input.to_string()); throw Error("do not know how to clone input '%s'", input.to_string());
} }

View file

@ -107,26 +107,25 @@ public:
* recognized. The Input object contains the information the fetcher * recognized. The Input object contains the information the fetcher
* needs to actually perform the "fetch()" when called. * needs to actually perform the "fetch()" when called.
*/ */
struct InputScheme struct InputScheme
{ {
virtual ~InputScheme() virtual ~InputScheme()
{ } { }
virtual std::optional<Input> inputFromURL(const ParsedURL & url) = 0; virtual std::optional<Input> inputFromURL(const ParsedURL & url) const = 0;
virtual std::optional<Input> inputFromAttrs(const Attrs & attrs) = 0; virtual std::optional<Input> inputFromAttrs(const Attrs & attrs) const = 0;
virtual ParsedURL toURL(const Input & input); virtual ParsedURL toURL(const Input & input) const;
virtual bool hasAllInfo(const Input & input) = 0; virtual bool hasAllInfo(const Input & input) const = 0;
virtual Input applyOverrides( virtual Input applyOverrides(
const Input & input, const Input & input,
std::optional<std::string> ref, std::optional<std::string> ref,
std::optional<Hash> rev); std::optional<Hash> rev) const;
virtual void clone(const Input & input, const Path & destDir); virtual void clone(const Input & input, const Path & destDir) const;
virtual std::optional<Path> getSourcePath(const Input & input); virtual std::optional<Path> getSourcePath(const Input & input);

View file

@ -18,6 +18,7 @@
using namespace std::string_literals; using namespace std::string_literals;
namespace nix::fetchers { namespace nix::fetchers {
namespace { namespace {
// Explicit initial branch of our bare repo to suppress warnings from new version of git. // Explicit initial branch of our bare repo to suppress warnings from new version of git.
@ -26,23 +27,23 @@ namespace {
// old version of git, which will ignore unrecognized `-c` options. // old version of git, which will ignore unrecognized `-c` options.
const std::string gitInitialBranch = "__nix_dummy_branch"; const std::string gitInitialBranch = "__nix_dummy_branch";
bool isCacheFileWithinTtl(const time_t now, const struct stat & st) bool isCacheFileWithinTtl(time_t now, const struct stat & st)
{ {
return st.st_mtime + settings.tarballTtl > now; return st.st_mtime + settings.tarballTtl > now;
} }
bool touchCacheFile(const Path& path, const time_t& touch_time) bool touchCacheFile(const Path & path, time_t touch_time)
{ {
struct timeval times[2]; struct timeval times[2];
times[0].tv_sec = touch_time; times[0].tv_sec = touch_time;
times[0].tv_usec = 0; times[0].tv_usec = 0;
times[1].tv_sec = touch_time; times[1].tv_sec = touch_time;
times[1].tv_usec = 0; times[1].tv_usec = 0;
return lutimes(path.c_str(), times) == 0; return lutimes(path.c_str(), times) == 0;
} }
Path getCachePath(std::string key) Path getCachePath(std::string_view key)
{ {
return getCacheDir() + "/nix/gitv3/" + return getCacheDir() + "/nix/gitv3/" +
hashString(htSHA256, key).to_string(Base32, false); hashString(htSHA256, key).to_string(Base32, false);
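These helpers implement a simple mtime-based TTL for the cached bare repositories: a cache entry is fresh while its mtime plus the TTL lies in the future, and `lutimes` bumps that mtime without following symlinks. A self-contained POSIX sketch, with `ttlSeconds` standing in for Nix's `tarball-ttl` setting:

```cpp
#include <ctime>
#include <string>
#include <sys/stat.h>
#include <sys/time.h>

// Is the file at 'path' younger than 'ttlSeconds'? A missing file counts
// as stale.
bool isCacheFileWithinTtl(const std::string & path, time_t ttlSeconds)
{
    struct stat st;
    if (lstat(path.c_str(), &st) != 0) return false;
    return st.st_mtime + ttlSeconds > time(nullptr);
}

// Refresh the file's timestamps to "now" without following symlinks, so the
// TTL check above starts counting again.
bool touchCacheFile(const std::string & path)
{
    struct timeval times[2];
    times[0].tv_sec = time(nullptr);    // atime
    times[0].tv_usec = 0;
    times[1].tv_sec = times[0].tv_sec;  // mtime
    times[1].tv_usec = 0;
    return lutimes(path.c_str(), times) == 0;
}
```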
@ -57,13 +58,12 @@ Path getCachePath(std::string key)
// ... // ...
std::optional<std::string> readHead(const Path & path) std::optional<std::string> readHead(const Path & path)
{ {
auto [exit_code, output] = runProgram(RunOptions { auto [status, output] = runProgram(RunOptions {
.program = "git", .program = "git",
// FIXME: use 'HEAD' to avoid returning all refs
.args = {"ls-remote", "--symref", path}, .args = {"ls-remote", "--symref", path},
}); });
if (exit_code != 0) { if (status != 0) return std::nullopt;
return std::nullopt;
}
std::string_view line = output; std::string_view line = output;
line = line.substr(0, line.find("\n")); line = line.substr(0, line.find("\n"));
@ -82,12 +82,11 @@ std::optional<std::string> readHead(const Path & path)
} }
// Persist the HEAD ref from the remote repo in the local cached repo. // Persist the HEAD ref from the remote repo in the local cached repo.
bool storeCachedHead(const std::string& actualUrl, const std::string& headRef) bool storeCachedHead(const std::string & actualUrl, const std::string & headRef)
{ {
Path cacheDir = getCachePath(actualUrl); Path cacheDir = getCachePath(actualUrl);
auto gitDir = ".";
try { try {
runProgram("git", true, { "-C", cacheDir, "--git-dir", gitDir, "symbolic-ref", "--", "HEAD", headRef }); runProgram("git", true, { "-C", cacheDir, "--git-dir", ".", "symbolic-ref", "--", "HEAD", headRef });
} catch (ExecError &e) { } catch (ExecError &e) {
if (!WIFEXITED(e.status)) throw; if (!WIFEXITED(e.status)) throw;
return false; return false;
@ -96,7 +95,7 @@ bool storeCachedHead(const std::string& actualUrl, const std::string& headRef)
return true; return true;
} }
std::optional<std::string> readHeadCached(const std::string& actualUrl) std::optional<std::string> readHeadCached(const std::string & actualUrl)
{ {
// Create a cache path to store the branch of the HEAD ref. Append something // Create a cache path to store the branch of the HEAD ref. Append something
// in front of the URL to prevent collision with the repository itself. // in front of the URL to prevent collision with the repository itself.
@ -110,16 +109,15 @@ std::optional<std::string> readHeadCached(const std::string& actualUrl)
cachedRef = readHead(cacheDir); cachedRef = readHead(cacheDir);
if (cachedRef != std::nullopt && if (cachedRef != std::nullopt &&
*cachedRef != gitInitialBranch && *cachedRef != gitInitialBranch &&
isCacheFileWithinTtl(now, st)) { isCacheFileWithinTtl(now, st))
{
debug("using cached HEAD ref '%s' for repo '%s'", *cachedRef, actualUrl); debug("using cached HEAD ref '%s' for repo '%s'", *cachedRef, actualUrl);
return cachedRef; return cachedRef;
} }
} }
auto ref = readHead(actualUrl); auto ref = readHead(actualUrl);
if (ref) { if (ref) return ref;
return ref;
}
if (cachedRef) { if (cachedRef) {
// If the cached git ref is expired in fetch() below, and the 'git fetch' // If the cached git ref is expired in fetch() below, and the 'git fetch'
@ -250,7 +248,7 @@ std::pair<StorePath, Input> fetchFromWorkdir(ref<Store> store, Input & input, co
struct GitInputScheme : InputScheme struct GitInputScheme : InputScheme
{ {
std::optional<Input> inputFromURL(const ParsedURL & url) override std::optional<Input> inputFromURL(const ParsedURL & url) const override
{ {
if (url.scheme != "git" && if (url.scheme != "git" &&
url.scheme != "git+http" && url.scheme != "git+http" &&
@ -266,7 +264,7 @@ struct GitInputScheme : InputScheme
Attrs attrs; Attrs attrs;
attrs.emplace("type", "git"); attrs.emplace("type", "git");
for (auto &[name, value] : url.query) { for (auto & [name, value] : url.query) {
if (name == "rev" || name == "ref") if (name == "rev" || name == "ref")
attrs.emplace(name, value); attrs.emplace(name, value);
else if (name == "shallow" || name == "submodules") else if (name == "shallow" || name == "submodules")
@ -280,7 +278,7 @@ struct GitInputScheme : InputScheme
return inputFromAttrs(attrs); return inputFromAttrs(attrs);
} }
std::optional<Input> inputFromAttrs(const Attrs & attrs) override std::optional<Input> inputFromAttrs(const Attrs & attrs) const override
{ {
if (maybeGetStrAttr(attrs, "type") != "git") return {}; if (maybeGetStrAttr(attrs, "type") != "git") return {};
@ -303,7 +301,7 @@ struct GitInputScheme : InputScheme
return input; return input;
} }
ParsedURL toURL(const Input & input) override ParsedURL toURL(const Input & input) const override
{ {
auto url = parseURL(getStrAttr(input.attrs, "url")); auto url = parseURL(getStrAttr(input.attrs, "url"));
if (url.scheme != "git") url.scheme = "git+" + url.scheme; if (url.scheme != "git") url.scheme = "git+" + url.scheme;
@ -314,7 +312,7 @@ struct GitInputScheme : InputScheme
return url; return url;
} }
bool hasAllInfo(const Input & input) override bool hasAllInfo(const Input & input) const override
{ {
bool maybeDirty = !input.getRef(); bool maybeDirty = !input.getRef();
bool shallow = maybeGetBoolAttr(input.attrs, "shallow").value_or(false); bool shallow = maybeGetBoolAttr(input.attrs, "shallow").value_or(false);
@ -326,7 +324,7 @@ struct GitInputScheme : InputScheme
Input applyOverrides( Input applyOverrides(
const Input & input, const Input & input,
std::optional<std::string> ref, std::optional<std::string> ref,
std::optional<Hash> rev) override std::optional<Hash> rev) const override
{ {
auto res(input); auto res(input);
if (rev) res.attrs.insert_or_assign("rev", rev->gitRev()); if (rev) res.attrs.insert_or_assign("rev", rev->gitRev());
@ -336,7 +334,7 @@ struct GitInputScheme : InputScheme
return res; return res;
} }
void clone(const Input & input, const Path & destDir) override void clone(const Input & input, const Path & destDir) const override
{ {
auto [isLocal, actualUrl] = getActualUrl(input); auto [isLocal, actualUrl] = getActualUrl(input);
@ -604,9 +602,9 @@ struct GitInputScheme : InputScheme
{ {
throw Error( throw Error(
"Cannot find Git revision '%s' in ref '%s' of repository '%s'! " "Cannot find Git revision '%s' in ref '%s' of repository '%s'! "
"Please make sure that the " ANSI_BOLD "rev" ANSI_NORMAL " exists on the " "Please make sure that the " ANSI_BOLD "rev" ANSI_NORMAL " exists on the "
ANSI_BOLD "ref" ANSI_NORMAL " you've specified or add " ANSI_BOLD ANSI_BOLD "ref" ANSI_NORMAL " you've specified or add " ANSI_BOLD
"allRefs = true;" ANSI_NORMAL " to " ANSI_BOLD "fetchGit" ANSI_NORMAL ".", "allRefs = true;" ANSI_NORMAL " to " ANSI_BOLD "fetchGit" ANSI_NORMAL ".",
input.getRev()->gitRev(), input.getRev()->gitRev(),
*input.getRef(), *input.getRef(),
actualUrl actualUrl

View file

@ -26,11 +26,11 @@ std::regex hostRegex(hostRegexS, std::regex::ECMAScript);
struct GitArchiveInputScheme : InputScheme struct GitArchiveInputScheme : InputScheme
{ {
virtual std::string type() = 0; virtual std::string type() const = 0;
virtual std::optional<std::pair<std::string, std::string>> accessHeaderFromToken(const std::string & token) const = 0; virtual std::optional<std::pair<std::string, std::string>> accessHeaderFromToken(const std::string & token) const = 0;
std::optional<Input> inputFromURL(const ParsedURL & url) override std::optional<Input> inputFromURL(const ParsedURL & url) const override
{ {
if (url.scheme != type()) return {}; if (url.scheme != type()) return {};
@ -100,7 +100,7 @@ struct GitArchiveInputScheme : InputScheme
return input; return input;
} }
std::optional<Input> inputFromAttrs(const Attrs & attrs) override std::optional<Input> inputFromAttrs(const Attrs & attrs) const override
{ {
if (maybeGetStrAttr(attrs, "type") != type()) return {}; if (maybeGetStrAttr(attrs, "type") != type()) return {};
@ -116,7 +116,7 @@ struct GitArchiveInputScheme : InputScheme
return input; return input;
} }
ParsedURL toURL(const Input & input) override ParsedURL toURL(const Input & input) const override
{ {
auto owner = getStrAttr(input.attrs, "owner"); auto owner = getStrAttr(input.attrs, "owner");
auto repo = getStrAttr(input.attrs, "repo"); auto repo = getStrAttr(input.attrs, "repo");
@ -132,7 +132,7 @@ struct GitArchiveInputScheme : InputScheme
}; };
} }
bool hasAllInfo(const Input & input) override bool hasAllInfo(const Input & input) const override
{ {
return input.getRev() && maybeGetIntAttr(input.attrs, "lastModified"); return input.getRev() && maybeGetIntAttr(input.attrs, "lastModified");
} }
@ -140,7 +140,7 @@ struct GitArchiveInputScheme : InputScheme
Input applyOverrides( Input applyOverrides(
const Input & _input, const Input & _input,
std::optional<std::string> ref, std::optional<std::string> ref,
std::optional<Hash> rev) override std::optional<Hash> rev) const override
{ {
auto input(_input); auto input(_input);
if (rev && ref) if (rev && ref)
@ -227,7 +227,7 @@ struct GitArchiveInputScheme : InputScheme
struct GitHubInputScheme : GitArchiveInputScheme struct GitHubInputScheme : GitArchiveInputScheme
{ {
std::string type() override { return "github"; } std::string type() const override { return "github"; }
std::optional<std::pair<std::string, std::string>> accessHeaderFromToken(const std::string & token) const override std::optional<std::pair<std::string, std::string>> accessHeaderFromToken(const std::string & token) const override
{ {
@ -240,14 +240,29 @@ struct GitHubInputScheme : GitArchiveInputScheme
return std::pair<std::string, std::string>("Authorization", fmt("token %s", token)); return std::pair<std::string, std::string>("Authorization", fmt("token %s", token));
} }
std::string getHost(const Input & input) const
{
return maybeGetStrAttr(input.attrs, "host").value_or("github.com");
}
std::string getOwner(const Input & input) const
{
return getStrAttr(input.attrs, "owner");
}
std::string getRepo(const Input & input) const
{
return getStrAttr(input.attrs, "repo");
}
Hash getRevFromRef(nix::ref<Store> store, const Input & input) const override Hash getRevFromRef(nix::ref<Store> store, const Input & input) const override
{ {
auto host = maybeGetStrAttr(input.attrs, "host").value_or("github.com"); auto host = getHost(input);
auto url = fmt( auto url = fmt(
host == "github.com" host == "github.com"
? "https://api.%s/repos/%s/%s/commits/%s" ? "https://api.%s/repos/%s/%s/commits/%s"
: "https://%s/api/v3/repos/%s/%s/commits/%s", : "https://%s/api/v3/repos/%s/%s/commits/%s",
host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), *input.getRef()); host, getOwner(input), getRepo(input), *input.getRef());
Headers headers = makeHeadersWithAuthTokens(host); Headers headers = makeHeadersWithAuthTokens(host);
@ -262,8 +277,10 @@ struct GitHubInputScheme : GitArchiveInputScheme
DownloadUrl getDownloadUrl(const Input & input) const override DownloadUrl getDownloadUrl(const Input & input) const override
{ {
auto host = maybeGetStrAttr(input.attrs, "host").value_or("github.com"); auto host = getHost(input);
Headers headers = makeHeadersWithAuthTokens(host); Headers headers = makeHeadersWithAuthTokens(host);
// If we have no auth headers then we default to the public archive // If we have no auth headers then we default to the public archive
// urls so we do not run into rate limits. // urls so we do not run into rate limits.
const auto urlFmt = const auto urlFmt =
@ -273,17 +290,17 @@ struct GitHubInputScheme : GitArchiveInputScheme
? "https://%s/%s/%s/archive/%s.tar.gz" ? "https://%s/%s/%s/archive/%s.tar.gz"
: "https://api.%s/repos/%s/%s/tarball/%s"; : "https://api.%s/repos/%s/%s/tarball/%s";
const auto url = fmt(urlFmt, host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), const auto url = fmt(urlFmt, host, getOwner(input), getRepo(input),
input.getRev()->to_string(Base16, false)); input.getRev()->to_string(Base16, false));
return DownloadUrl { url, headers }; return DownloadUrl { url, headers };
} }
void clone(const Input & input, const Path & destDir) override void clone(const Input & input, const Path & destDir) const override
{ {
auto host = maybeGetStrAttr(input.attrs, "host").value_or("github.com"); auto host = getHost(input);
Input::fromURL(fmt("git+https://%s/%s/%s.git", Input::fromURL(fmt("git+https://%s/%s/%s.git",
host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"))) host, getOwner(input), getRepo(input)))
.applyOverrides(input.getRef(), input.getRev()) .applyOverrides(input.getRef(), input.getRev())
.clone(destDir); .clone(destDir);
} }
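Factoring out `getHost`/`getOwner`/`getRepo` removes the repeated attribute lookups and keeps the `github.com` versus GitHub Enterprise URL split in one place. A sketch of just the URL construction, with plain string concatenation standing in for the real attribute and `fmt` machinery:

```cpp
#include <iostream>
#include <string>

struct GitHubInput
{
    std::string host = "github.com";   // overridable for GitHub Enterprise
    std::string owner;
    std::string repo;
    std::string rev;                   // full commit hash
};

// REST endpoint used to resolve a ref to a commit; github.com and GitHub
// Enterprise expose the API under different prefixes.
std::string commitsApiUrl(const GitHubInput & in, const std::string & ref)
{
    if (in.host == "github.com")
        return "https://api." + in.host + "/repos/" + in.owner + "/" + in.repo + "/commits/" + ref;
    return "https://" + in.host + "/api/v3/repos/" + in.owner + "/" + in.repo + "/commits/" + ref;
}

// Tarball download: without an auth token, prefer the public archive URL to
// stay clear of API rate limits. (The Enterprise branch is an assumption;
// the corresponding condition is cut off in the hunk above.)
std::string tarballUrl(const GitHubInput & in, bool haveAuthToken)
{
    if (in.host == "github.com" && !haveAuthToken)
        return "https://" + in.host + "/" + in.owner + "/" + in.repo + "/archive/" + in.rev + ".tar.gz";
    if (in.host == "github.com")
        return "https://api." + in.host + "/repos/" + in.owner + "/" + in.repo + "/tarball/" + in.rev;
    return "https://" + in.host + "/api/v3/repos/" + in.owner + "/" + in.repo + "/tarball/" + in.rev;
}

int main()
{
    GitHubInput in;
    in.owner = "NixOS";
    in.repo = "nix";
    in.rev = std::string(40, 'a');   // placeholder commit hash
    std::cout << commitsApiUrl(in, "master") << "\n" << tarballUrl(in, false) << "\n";
}
```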
@ -291,7 +308,7 @@ struct GitHubInputScheme : GitArchiveInputScheme
struct GitLabInputScheme : GitArchiveInputScheme struct GitLabInputScheme : GitArchiveInputScheme
{ {
std::string type() override { return "gitlab"; } std::string type() const override { return "gitlab"; }
std::optional<std::pair<std::string, std::string>> accessHeaderFromToken(const std::string & token) const override std::optional<std::pair<std::string, std::string>> accessHeaderFromToken(const std::string & token) const override
{ {
@ -346,7 +363,7 @@ struct GitLabInputScheme : GitArchiveInputScheme
return DownloadUrl { url, headers }; return DownloadUrl { url, headers };
} }
void clone(const Input & input, const Path & destDir) override void clone(const Input & input, const Path & destDir) const override
{ {
auto host = maybeGetStrAttr(input.attrs, "host").value_or("gitlab.com"); auto host = maybeGetStrAttr(input.attrs, "host").value_or("gitlab.com");
// FIXME: get username somewhere // FIXME: get username somewhere
@ -359,7 +376,7 @@ struct GitLabInputScheme : GitArchiveInputScheme
struct SourceHutInputScheme : GitArchiveInputScheme struct SourceHutInputScheme : GitArchiveInputScheme
{ {
std::string type() override { return "sourcehut"; } std::string type() const override { return "sourcehut"; }
std::optional<std::pair<std::string, std::string>> accessHeaderFromToken(const std::string & token) const override std::optional<std::pair<std::string, std::string>> accessHeaderFromToken(const std::string & token) const override
{ {
@ -433,7 +450,7 @@ struct SourceHutInputScheme : GitArchiveInputScheme
return DownloadUrl { url, headers }; return DownloadUrl { url, headers };
} }
void clone(const Input & input, const Path & destDir) override void clone(const Input & input, const Path & destDir) const override
{ {
auto host = maybeGetStrAttr(input.attrs, "host").value_or("git.sr.ht"); auto host = maybeGetStrAttr(input.attrs, "host").value_or("git.sr.ht");
Input::fromURL(fmt("git+https://%s/%s/%s", Input::fromURL(fmt("git+https://%s/%s/%s",

View file

@ -7,7 +7,7 @@ std::regex flakeRegex("[a-zA-Z][a-zA-Z0-9_-]*", std::regex::ECMAScript);
struct IndirectInputScheme : InputScheme struct IndirectInputScheme : InputScheme
{ {
std::optional<Input> inputFromURL(const ParsedURL & url) override std::optional<Input> inputFromURL(const ParsedURL & url) const override
{ {
if (url.scheme != "flake") return {}; if (url.scheme != "flake") return {};
@ -50,7 +50,7 @@ struct IndirectInputScheme : InputScheme
return input; return input;
} }
std::optional<Input> inputFromAttrs(const Attrs & attrs) override std::optional<Input> inputFromAttrs(const Attrs & attrs) const override
{ {
if (maybeGetStrAttr(attrs, "type") != "indirect") return {}; if (maybeGetStrAttr(attrs, "type") != "indirect") return {};
@ -68,7 +68,7 @@ struct IndirectInputScheme : InputScheme
return input; return input;
} }
ParsedURL toURL(const Input & input) override ParsedURL toURL(const Input & input) const override
{ {
ParsedURL url; ParsedURL url;
url.scheme = "flake"; url.scheme = "flake";
@ -78,7 +78,7 @@ struct IndirectInputScheme : InputScheme
return url; return url;
} }
bool hasAllInfo(const Input & input) override bool hasAllInfo(const Input & input) const override
{ {
return false; return false;
} }
@ -86,7 +86,7 @@ struct IndirectInputScheme : InputScheme
Input applyOverrides( Input applyOverrides(
const Input & _input, const Input & _input,
std::optional<std::string> ref, std::optional<std::string> ref,
std::optional<Hash> rev) override std::optional<Hash> rev) const override
{ {
auto input(_input); auto input(_input);
if (rev) input.attrs.insert_or_assign("rev", rev->gitRev()); if (rev) input.attrs.insert_or_assign("rev", rev->gitRev());

View file

@ -43,7 +43,7 @@ static std::string runHg(const Strings & args, const std::optional<std::string>
struct MercurialInputScheme : InputScheme struct MercurialInputScheme : InputScheme
{ {
std::optional<Input> inputFromURL(const ParsedURL & url) override std::optional<Input> inputFromURL(const ParsedURL & url) const override
{ {
if (url.scheme != "hg+http" && if (url.scheme != "hg+http" &&
url.scheme != "hg+https" && url.scheme != "hg+https" &&
@ -69,7 +69,7 @@ struct MercurialInputScheme : InputScheme
return inputFromAttrs(attrs); return inputFromAttrs(attrs);
} }
std::optional<Input> inputFromAttrs(const Attrs & attrs) override std::optional<Input> inputFromAttrs(const Attrs & attrs) const override
{ {
if (maybeGetStrAttr(attrs, "type") != "hg") return {}; if (maybeGetStrAttr(attrs, "type") != "hg") return {};
@ -89,7 +89,7 @@ struct MercurialInputScheme : InputScheme
return input; return input;
} }
ParsedURL toURL(const Input & input) override ParsedURL toURL(const Input & input) const override
{ {
auto url = parseURL(getStrAttr(input.attrs, "url")); auto url = parseURL(getStrAttr(input.attrs, "url"));
url.scheme = "hg+" + url.scheme; url.scheme = "hg+" + url.scheme;
@ -98,7 +98,7 @@ struct MercurialInputScheme : InputScheme
return url; return url;
} }
bool hasAllInfo(const Input & input) override bool hasAllInfo(const Input & input) const override
{ {
// FIXME: ugly, need to distinguish between dirty and clean // FIXME: ugly, need to distinguish between dirty and clean
// default trees. // default trees.
@ -108,7 +108,7 @@ struct MercurialInputScheme : InputScheme
Input applyOverrides( Input applyOverrides(
const Input & input, const Input & input,
std::optional<std::string> ref, std::optional<std::string> ref,
std::optional<Hash> rev) override std::optional<Hash> rev) const override
{ {
auto res(input); auto res(input);
if (rev) res.attrs.insert_or_assign("rev", rev->gitRev()); if (rev) res.attrs.insert_or_assign("rev", rev->gitRev());

View file

@ -6,7 +6,7 @@ namespace nix::fetchers {
struct PathInputScheme : InputScheme struct PathInputScheme : InputScheme
{ {
std::optional<Input> inputFromURL(const ParsedURL & url) override std::optional<Input> inputFromURL(const ParsedURL & url) const override
{ {
if (url.scheme != "path") return {}; if (url.scheme != "path") return {};
@ -32,7 +32,7 @@ struct PathInputScheme : InputScheme
return input; return input;
} }
std::optional<Input> inputFromAttrs(const Attrs & attrs) override std::optional<Input> inputFromAttrs(const Attrs & attrs) const override
{ {
if (maybeGetStrAttr(attrs, "type") != "path") return {}; if (maybeGetStrAttr(attrs, "type") != "path") return {};
@ -54,7 +54,7 @@ struct PathInputScheme : InputScheme
return input; return input;
} }
ParsedURL toURL(const Input & input) override ParsedURL toURL(const Input & input) const override
{ {
auto query = attrsToQuery(input.attrs); auto query = attrsToQuery(input.attrs);
query.erase("path"); query.erase("path");
@ -66,7 +66,7 @@ struct PathInputScheme : InputScheme
}; };
} }
bool hasAllInfo(const Input & input) override bool hasAllInfo(const Input & input) const override
{ {
return true; return true;
} }

View file

@ -153,6 +153,9 @@ static std::shared_ptr<Registry> getGlobalRegistry(ref<Store> store)
{ {
static auto reg = [&]() { static auto reg = [&]() {
auto path = fetchSettings.flakeRegistry.get(); auto path = fetchSettings.flakeRegistry.get();
if (path == "") {
return std::make_shared<Registry>(Registry::Global); // empty registry
}
if (!hasPrefix(path, "/")) { if (!hasPrefix(path, "/")) {
auto storePath = downloadFile(store, path, "flake-registry.json", false).storePath; auto storePath = downloadFile(store, path, "flake-registry.json", false).storePath;

View file

@ -185,7 +185,7 @@ struct CurlInputScheme : InputScheme
virtual bool isValidURL(const ParsedURL & url) const = 0; virtual bool isValidURL(const ParsedURL & url) const = 0;
std::optional<Input> inputFromURL(const ParsedURL & url) override std::optional<Input> inputFromURL(const ParsedURL & url) const override
{ {
if (!isValidURL(url)) if (!isValidURL(url))
return std::nullopt; return std::nullopt;
@ -203,7 +203,7 @@ struct CurlInputScheme : InputScheme
return input; return input;
} }
std::optional<Input> inputFromAttrs(const Attrs & attrs) override std::optional<Input> inputFromAttrs(const Attrs & attrs) const override
{ {
auto type = maybeGetStrAttr(attrs, "type"); auto type = maybeGetStrAttr(attrs, "type");
if (type != inputType()) return {}; if (type != inputType()) return {};
@ -220,16 +220,17 @@ struct CurlInputScheme : InputScheme
return input; return input;
} }
ParsedURL toURL(const Input & input) override ParsedURL toURL(const Input & input) const override
{ {
auto url = parseURL(getStrAttr(input.attrs, "url")); auto url = parseURL(getStrAttr(input.attrs, "url"));
// NAR hashes are preferred over file hashes since tar/zip files // don't have a canonical representation. // NAR hashes are preferred over file hashes since tar/zip
// files don't have a canonical representation.
if (auto narHash = input.getNarHash()) if (auto narHash = input.getNarHash())
url.query.insert_or_assign("narHash", narHash->to_string(SRI, true)); url.query.insert_or_assign("narHash", narHash->to_string(SRI, true));
return url; return url;
} }
bool hasAllInfo(const Input & input) override bool hasAllInfo(const Input & input) const override
{ {
return true; return true;
} }

View file

@ -132,7 +132,7 @@ public:
log(*state, lvl, fs.s); log(*state, lvl, fs.s);
} }
void logEI(const ErrorInfo &ei) override void logEI(const ErrorInfo & ei) override
{ {
auto state(state_.lock()); auto state(state_.lock());
@ -180,10 +180,12 @@ public:
auto machineName = getS(fields, 1); auto machineName = getS(fields, 1);
if (machineName != "") if (machineName != "")
i->s += fmt(" on " ANSI_BOLD "%s" ANSI_NORMAL, machineName); i->s += fmt(" on " ANSI_BOLD "%s" ANSI_NORMAL, machineName);
auto curRound = getI(fields, 2);
auto nrRounds = getI(fields, 3); // Used to be curRound and nrRounds, but the
if (nrRounds != 1) // implementation was broken for a long time.
i->s += fmt(" (round %d/%d)", curRound, nrRounds); if (getI(fields, 2) != 1 || getI(fields, 3) != 1) {
throw Error("log message indicated repeating builds, but this is not currently implemented");
}
i->name = DrvName(name).name; i->name = DrvName(name).name;
} }

View file

@ -346,7 +346,7 @@ void BinaryCacheStore::narFromPath(const StorePath & storePath, Sink & sink)
try { try {
getFile(info->url, *decompressor); getFile(info->url, *decompressor);
} catch (NoSuchBinaryCacheFile & e) { } catch (NoSuchBinaryCacheFile & e) {
throw SubstituteGone(e.info()); throw SubstituteGone(std::move(e.info()));
} }
decompressor->finish(); decompressor->finish();

View file

@ -134,7 +134,7 @@ void DerivationGoal::killChild()
void DerivationGoal::timedOut(Error && ex) void DerivationGoal::timedOut(Error && ex)
{ {
killChild(); killChild();
done(BuildResult::TimedOut, {}, ex); done(BuildResult::TimedOut, {}, std::move(ex));
} }
@ -501,6 +501,14 @@ void DerivationGoal::inputsRealised()
now-known results of dependencies. If so, we become a now-known results of dependencies. If so, we become a
stub goal aliasing that resolved derivation goal. */ stub goal aliasing that resolved derivation goal. */
std::optional attempt = fullDrv.tryResolve(worker.store, inputDrvOutputs); std::optional attempt = fullDrv.tryResolve(worker.store, inputDrvOutputs);
if (!attempt) {
/* TODO (impure derivations-induced tech debt) (see below):
The above attempt should have found it, but because we manage
inputDrvOutputs statefully, sometimes it gets out of sync with
the real source of truth (store). So we query the store
directly if there's a problem. */
attempt = fullDrv.tryResolve(worker.store);
}
assert(attempt); assert(attempt);
Derivation drvResolved { *std::move(attempt) }; Derivation drvResolved { *std::move(attempt) };
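This hunk, like the similar one in `resolvedFinished()` further down, applies the same workaround: consult the statefully tracked map first, and fall back to the store (the actual source of truth) when the two have drifted apart. A generic sketch of that two-step lookup, with a stub standing in for the store query:

```cpp
#include <map>
#include <optional>
#include <string>

// Stand-in for the authoritative store query (hypothetical values).
std::optional<std::string> queryStore(const std::string & key)
{
    if (key == "out") return "/nix/store/...-hello";
    return std::nullopt;
}

// Look 'key' up in the locally tracked map first; that state can lag behind
// reality, so fall back to the source of truth before giving up.
std::optional<std::string> lookupWithFallback(
    const std::map<std::string, std::string> & tracked,
    const std::string & key)
{
    if (auto i = tracked.find(key); i != tracked.end())
        return i->second;
    return queryStore(key);
}
```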
@ -563,10 +571,6 @@ void DerivationGoal::inputsRealised()
/* What type of derivation are we building? */ /* What type of derivation are we building? */
derivationType = drv->type(); derivationType = drv->type();
/* Don't repeat fixed-output derivations since they're already
verified by their output hash.*/
nrRounds = derivationType.isFixed() ? 1 : settings.buildRepeat + 1;
/* Okay, try to build. Note that here we don't wait for a build /* Okay, try to build. Note that here we don't wait for a build
slot to become available, since we don't need one if there is a slot to become available, since we don't need one if there is a
build hook. */ build hook. */
@ -581,12 +585,11 @@ void DerivationGoal::started()
auto msg = fmt( auto msg = fmt(
buildMode == bmRepair ? "repairing outputs of '%s'" : buildMode == bmRepair ? "repairing outputs of '%s'" :
buildMode == bmCheck ? "checking outputs of '%s'" : buildMode == bmCheck ? "checking outputs of '%s'" :
nrRounds > 1 ? "building '%s' (round %d/%d)" : "building '%s'", worker.store.printStorePath(drvPath));
"building '%s'", worker.store.printStorePath(drvPath), curRound, nrRounds);
fmt("building '%s'", worker.store.printStorePath(drvPath)); fmt("building '%s'", worker.store.printStorePath(drvPath));
if (hook) msg += fmt(" on '%s'", machineName); if (hook) msg += fmt(" on '%s'", machineName);
act = std::make_unique<Activity>(*logger, lvlInfo, actBuild, msg, act = std::make_unique<Activity>(*logger, lvlInfo, actBuild, msg,
Logger::Fields{worker.store.printStorePath(drvPath), hook ? machineName : "", curRound, nrRounds}); Logger::Fields{worker.store.printStorePath(drvPath), hook ? machineName : "", 1, 1});
mcRunningBuilds = std::make_unique<MaintainCount<uint64_t>>(worker.runningBuilds); mcRunningBuilds = std::make_unique<MaintainCount<uint64_t>>(worker.runningBuilds);
worker.updateProgress(); worker.updateProgress();
} }
@ -940,14 +943,6 @@ void DerivationGoal::buildDone()
cleanupPostOutputsRegisteredModeNonCheck(); cleanupPostOutputsRegisteredModeNonCheck();
/* Repeat the build if necessary. */
if (curRound++ < nrRounds) {
outputLocks.unlock();
state = &DerivationGoal::tryToBuild;
worker.wakeUp(shared_from_this());
return;
}
/* It is now safe to delete the lock files, since all future /* It is now safe to delete the lock files, since all future
lockers will see that the output paths are valid; they will lockers will see that the output paths are valid; they will
not create new lock files with the same names as the old not create new lock files with the same names as the old
@ -976,7 +971,7 @@ void DerivationGoal::buildDone()
BuildResult::PermanentFailure; BuildResult::PermanentFailure;
} }
done(st, {}, e); done(st, {}, std::move(e));
return; return;
} }
} }
@ -1008,22 +1003,34 @@ void DerivationGoal::resolvedFinished()
throw Error( throw Error(
"derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolvedFinished,resolve)", "derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolvedFinished,resolve)",
worker.store.printStorePath(drvPath), wantedOutput); worker.store.printStorePath(drvPath), wantedOutput);
auto realisation = get(resolvedResult.builtOutputs, DrvOutput { *resolvedHash, wantedOutput });
if (!realisation) auto realisation = [&]{
throw Error( auto take1 = get(resolvedResult.builtOutputs, DrvOutput { *resolvedHash, wantedOutput });
"derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolvedFinished,realisation)", if (take1) return *take1;
worker.store.printStorePath(resolvedDrvGoal->drvPath), wantedOutput);
/* The above `get` should work. But because of stateful tracking of
outputs in resolvedResult, this can get out of sync with the
store, which is our actual source of truth. For now we just
check the store directly if it fails. */
auto take2 = worker.evalStore.queryRealisation(DrvOutput { *resolvedHash, wantedOutput });
if (take2) return *take2;
throw Error(
"derivation '%s' doesn't have expected output '%s' (derivation-goal.cc/resolvedFinished,realisation)",
worker.store.printStorePath(resolvedDrvGoal->drvPath), wantedOutput);
}();
if (drv->type().isPure()) { if (drv->type().isPure()) {
auto newRealisation = *realisation; auto newRealisation = realisation;
newRealisation.id = DrvOutput { initialOutput->outputHash, wantedOutput }; newRealisation.id = DrvOutput { initialOutput->outputHash, wantedOutput };
newRealisation.signatures.clear(); newRealisation.signatures.clear();
if (!drv->type().isFixed()) if (!drv->type().isFixed())
newRealisation.dependentRealisations = drvOutputReferences(worker.store, *drv, realisation->outPath); newRealisation.dependentRealisations = drvOutputReferences(worker.store, *drv, realisation.outPath);
signRealisation(newRealisation); signRealisation(newRealisation);
worker.store.registerDrvOutput(newRealisation); worker.store.registerDrvOutput(newRealisation);
} }
outputPaths.insert(realisation->outPath); outputPaths.insert(realisation.outPath);
builtOutputs.emplace(realisation->id, *realisation); builtOutputs.emplace(realisation.id, realisation);
} }
runPostBuildHook( runPostBuildHook(
@ -1427,7 +1434,7 @@ void DerivationGoal::done(
fs << worker.store.printStorePath(drvPath) << "\t" << buildResult.toString() << std::endl; fs << worker.store.printStorePath(drvPath) << "\t" << buildResult.toString() << std::endl;
} }
amDone(buildResult.success() ? ecSuccess : ecFailed, ex); amDone(buildResult.success() ? ecSuccess : ecFailed, std::move(ex));
} }

View file

@ -115,11 +115,6 @@ struct DerivationGoal : public Goal
BuildMode buildMode; BuildMode buildMode;
/* The current round, if we're building multiple times. */
size_t curRound = 1;
size_t nrRounds;
std::unique_ptr<MaintainCount<uint64_t>> mcExpectedBuilds, mcRunningBuilds; std::unique_ptr<MaintainCount<uint64_t>> mcExpectedBuilds, mcRunningBuilds;
std::unique_ptr<Activity> act; std::unique_ptr<Activity> act;

View file

@ -30,7 +30,7 @@ void Store::buildPaths(const std::vector<DerivedPath> & reqs, BuildMode buildMod
if (ex) if (ex)
logError(i->ex->info()); logError(i->ex->info());
else else
ex = i->ex; ex = std::move(i->ex);
} }
if (i->exitCode != Goal::ecSuccess) { if (i->exitCode != Goal::ecSuccess) {
if (auto i2 = dynamic_cast<DerivationGoal *>(i.get())) failed.insert(i2->drvPath); if (auto i2 = dynamic_cast<DerivationGoal *>(i.get())) failed.insert(i2->drvPath);
@ -40,7 +40,7 @@ void Store::buildPaths(const std::vector<DerivedPath> & reqs, BuildMode buildMod
if (failed.size() == 1 && ex) { if (failed.size() == 1 && ex) {
ex->status = worker.exitStatus(); ex->status = worker.exitStatus();
throw *ex; throw std::move(*ex);
} else if (!failed.empty()) { } else if (!failed.empty()) {
if (ex) logError(ex->info()); if (ex) logError(ex->info());
throw Error(worker.exitStatus(), "build of %s failed", showPaths(failed)); throw Error(worker.exitStatus(), "build of %s failed", showPaths(failed));
@ -109,7 +109,7 @@ void Store::ensurePath(const StorePath & path)
if (goal->exitCode != Goal::ecSuccess) { if (goal->exitCode != Goal::ecSuccess) {
if (goal->ex) { if (goal->ex) {
goal->ex->status = worker.exitStatus(); goal->ex->status = worker.exitStatus();
throw *goal->ex; throw std::move(*goal->ex);
} else } else
throw Error(worker.exitStatus(), "path '%s' does not exist and cannot be created", printStorePath(path)); throw Error(worker.exitStatus(), "path '%s' does not exist and cannot be created", printStorePath(path));
} }

View file

@ -230,7 +230,7 @@ void LocalDerivationGoal::tryLocalBuild() {
outputLocks.unlock(); outputLocks.unlock();
buildUser.reset(); buildUser.reset();
worker.permanentFailure = true; worker.permanentFailure = true;
done(BuildResult::InputRejected, {}, e); done(BuildResult::InputRejected, {}, std::move(e));
return; return;
} }
@ -409,12 +409,16 @@ void LocalDerivationGoal::startBuilder()
#if __linux__ #if __linux__
settings.requireExperimentalFeature(Xp::Cgroups); settings.requireExperimentalFeature(Xp::Cgroups);
auto cgroupFS = getCgroupFS();
if (!cgroupFS)
throw Error("cannot determine the cgroups file system");
auto ourCgroups = getCgroups("/proc/self/cgroup"); auto ourCgroups = getCgroups("/proc/self/cgroup");
auto ourCgroup = ourCgroups[""]; auto ourCgroup = ourCgroups[""];
if (ourCgroup == "") if (ourCgroup == "")
throw Error("cannot determine cgroup name from /proc/self/cgroup"); throw Error("cannot determine cgroup name from /proc/self/cgroup");
auto ourCgroupPath = canonPath("/sys/fs/cgroup/" + ourCgroup); auto ourCgroupPath = canonPath(*cgroupFS + "/" + ourCgroup);
if (!pathExists(ourCgroupPath)) if (!pathExists(ourCgroupPath))
throw Error("expected cgroup directory '%s'", ourCgroupPath); throw Error("expected cgroup directory '%s'", ourCgroupPath);
@ -2256,7 +2260,6 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
InodesSeen inodesSeen; InodesSeen inodesSeen;
Path checkSuffix = ".check"; Path checkSuffix = ".check";
bool keepPreviousRound = settings.keepFailed || settings.runDiffHook;
std::exception_ptr delayedException; std::exception_ptr delayedException;
@ -2684,10 +2687,8 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
debug("unreferenced input: '%1%'", worker.store.printStorePath(i)); debug("unreferenced input: '%1%'", worker.store.printStorePath(i));
} }
if (curRound == nrRounds) { localStore.optimisePath(actualPath, NoRepair); // FIXME: combine with scanForReferences()
localStore.optimisePath(actualPath, NoRepair); // FIXME: combine with scanForReferences() worker.markContentsGood(newInfo.path);
worker.markContentsGood(newInfo.path);
}
newInfo.deriver = drvPath; newInfo.deriver = drvPath;
newInfo.ultimate = true; newInfo.ultimate = true;
@ -2716,61 +2717,6 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
/* Apply output checks. */ /* Apply output checks. */
checkOutputs(infos); checkOutputs(infos);
/* Compare the result with the previous round, and report which
path is different, if any.*/
if (curRound > 1 && prevInfos != infos) {
assert(prevInfos.size() == infos.size());
for (auto i = prevInfos.begin(), j = infos.begin(); i != prevInfos.end(); ++i, ++j)
if (!(*i == *j)) {
buildResult.isNonDeterministic = true;
Path prev = worker.store.printStorePath(i->second.path) + checkSuffix;
bool prevExists = keepPreviousRound && pathExists(prev);
hintformat hint = prevExists
? hintfmt("output '%s' of '%s' differs from '%s' from previous round",
worker.store.printStorePath(i->second.path), worker.store.printStorePath(drvPath), prev)
: hintfmt("output '%s' of '%s' differs from previous round",
worker.store.printStorePath(i->second.path), worker.store.printStorePath(drvPath));
handleDiffHook(
buildUser ? buildUser->getUID() : getuid(),
buildUser ? buildUser->getGID() : getgid(),
prev, worker.store.printStorePath(i->second.path),
worker.store.printStorePath(drvPath), tmpDir);
if (settings.enforceDeterminism)
throw NotDeterministic(hint);
printError(hint);
curRound = nrRounds; // we know enough, bail out early
}
}
/* If this is the first round of several, then move the output out of the way. */
if (nrRounds > 1 && curRound == 1 && curRound < nrRounds && keepPreviousRound) {
for (auto & [_, outputStorePath] : finalOutputs) {
auto path = worker.store.printStorePath(outputStorePath);
Path prev = path + checkSuffix;
deletePath(prev);
Path dst = path + checkSuffix;
renameFile(path, dst);
}
}
if (curRound < nrRounds) {
prevInfos = std::move(infos);
return {};
}
/* Remove the .check directories if we're done. FIXME: keep them
if the result was not deterministic? */
if (curRound == nrRounds) {
for (auto & [_, outputStorePath] : finalOutputs) {
Path prev = worker.store.printStorePath(outputStorePath) + checkSuffix;
deletePath(prev);
}
}
/* Register each output path as valid, and register the sets of /* Register each output path as valid, and register the sets of
paths referenced by each of them. If there are cycles in the paths referenced by each of them. If there are cycles in the
outputs, this will fail. */ outputs, this will fail. */

View file

@ -238,7 +238,6 @@ struct ClientSettings
} }
else if (trusted else if (trusted
|| name == settings.buildTimeout.name || name == settings.buildTimeout.name
|| name == settings.buildRepeat.name
|| name == settings.maxSilentTime.name || name == settings.maxSilentTime.name
|| name == settings.pollInterval.name || name == settings.pollInterval.name
|| name == "connect-timeout" || name == "connect-timeout"

View file

@ -448,7 +448,7 @@ std::string Derivation::unparse(const Store & store, bool maskOutputs,
// FIXME: remove // FIXME: remove
bool isDerivation(const std::string & fileName) bool isDerivation(std::string_view fileName)
{ {
return hasSuffix(fileName, drvExtension); return hasSuffix(fileName, drvExtension);
} }

View file

@ -224,7 +224,7 @@ StorePath writeDerivation(Store & store,
Derivation parseDerivation(const Store & store, std::string && s, std::string_view name); Derivation parseDerivation(const Store & store, std::string && s, std::string_view name);
// FIXME: remove // FIXME: remove
bool isDerivation(const std::string & fileName); bool isDerivation(std::string_view fileName);
/* Calculate the name that will be used for the store path for this /* Calculate the name that will be used for the store path for this
output. output.

View file

@ -20,11 +20,12 @@ nlohmann::json DerivedPath::Built::toJSON(ref<Store> store) const {
// Fallback for the input-addressed derivation case: We expect to always be // Fallback for the input-addressed derivation case: We expect to always be
// able to print the output paths, so let's do it // able to print the output paths, so let's do it
const auto knownOutputs = store->queryPartialDerivationOutputMap(drvPath); const auto knownOutputs = store->queryPartialDerivationOutputMap(drvPath);
for (const auto& output : outputs) { for (const auto & output : outputs) {
auto knownOutput = get(knownOutputs, output); auto knownOutput = get(knownOutputs, output);
res["outputs"][output] = (knownOutput && *knownOutput) if (knownOutput && *knownOutput)
? store->printStorePath(**knownOutput) res["outputs"][output] = store->printStorePath(**knownOutput);
: nullptr; else
res["outputs"][output] = nullptr;
} }
return res; return res;
} }
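The rewritten loop makes the fallback explicit: a known output maps to its printed store path, an unknown one to JSON `null`. A small self-contained example of the same shape using `nlohmann::json` and made-up paths:

```cpp
#include <iostream>
#include <map>
#include <nlohmann/json.hpp>
#include <optional>
#include <set>
#include <string>

int main()
{
    // Stand-in for queryPartialDerivationOutputMap(): output name ->
    // optionally-known store path (hypothetical values for illustration).
    std::map<std::string, std::optional<std::string>> knownOutputs = {
        {"out", "/nix/store/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-hello"},
        {"dev", std::nullopt},
    };
    std::set<std::string> wanted = {"out", "dev", "doc"};

    nlohmann::json res;
    for (auto & output : wanted) {
        auto i = knownOutputs.find(output);
        if (i != knownOutputs.end() && i->second)
            res["outputs"][output] = *i->second;   // known and resolved
        else
            res["outputs"][output] = nullptr;      // unknown -> JSON null
    }

    std::cout << res.dump(2) << std::endl;
}
```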
@ -78,15 +79,16 @@ DerivedPath::Opaque DerivedPath::Opaque::parse(const Store & store, std::string_
return {store.parseStorePath(s)}; return {store.parseStorePath(s)};
} }
DerivedPath::Built DerivedPath::Built::parse(const Store & store, std::string_view s) DerivedPath::Built DerivedPath::Built::parse(const Store & store, std::string_view drvS, std::string_view outputsS)
{ {
size_t n = s.find("!"); auto drvPath = store.parseStorePath(drvS);
assert(n != s.npos);
auto drvPath = store.parseStorePath(s.substr(0, n));
auto outputsS = s.substr(n + 1);
std::set<std::string> outputs; std::set<std::string> outputs;
if (outputsS != "*") if (outputsS != "*") {
outputs = tokenizeString<std::set<std::string>>(outputsS, ","); outputs = tokenizeString<std::set<std::string>>(outputsS, ",");
if (outputs.empty())
throw Error(
"Explicit list of wanted outputs '%s' must not be empty. Consider using '*' as a wildcard meaning all outputs if no output in particular is wanted.", outputsS);
}
return {drvPath, outputs}; return {drvPath, outputs};
} }
@ -95,7 +97,7 @@ DerivedPath DerivedPath::parse(const Store & store, std::string_view s)
size_t n = s.find("!"); size_t n = s.find("!");
return n == s.npos return n == s.npos
? (DerivedPath) DerivedPath::Opaque::parse(store, s) ? (DerivedPath) DerivedPath::Opaque::parse(store, s)
: (DerivedPath) DerivedPath::Built::parse(store, s); : (DerivedPath) DerivedPath::Built::parse(store, s.substr(0, n), s.substr(n + 1));
} }
RealisedPath::Set BuiltPath::toRealisedPaths(Store & store) const RealisedPath::Set BuiltPath::toRealisedPaths(Store & store) const

View file

@ -47,7 +47,7 @@ struct DerivedPathBuilt {
std::set<std::string> outputs; std::set<std::string> outputs;
std::string to_string(const Store & store) const; std::string to_string(const Store & store) const;
static DerivedPathBuilt parse(const Store & store, std::string_view); static DerivedPathBuilt parse(const Store & store, std::string_view, std::string_view);
nlohmann::json toJSON(ref<Store> store) const; nlohmann::json toJSON(ref<Store> store) const;
bool operator < (const DerivedPathBuilt & b) const bool operator < (const DerivedPathBuilt & b) const

View file

@ -33,14 +33,6 @@ FileTransferSettings fileTransferSettings;
static GlobalConfig::Register rFileTransferSettings(&fileTransferSettings); static GlobalConfig::Register rFileTransferSettings(&fileTransferSettings);
std::string resolveUri(std::string_view uri)
{
if (uri.compare(0, 8, "channel:") == 0)
return "https://nixos.org/channels/" + std::string(uri.substr(8)) + "/nixexprs.tar.xz";
else
return std::string(uri);
}
struct curlFileTransfer : public FileTransfer struct curlFileTransfer : public FileTransfer
{ {
CURLM * curlm = 0; CURLM * curlm = 0;
@ -142,9 +134,9 @@ struct curlFileTransfer : public FileTransfer
} }
template<class T> template<class T>
void fail(const T & e) void fail(T && e)
{ {
failEx(std::make_exception_ptr(e)); failEx(std::make_exception_ptr(std::move(e)));
} }
LambdaSink finalSink; LambdaSink finalSink;
@ -472,7 +464,7 @@ struct curlFileTransfer : public FileTransfer
fileTransfer.enqueueItem(shared_from_this()); fileTransfer.enqueueItem(shared_from_this());
} }
else else
fail(exc); fail(std::move(exc));
} }
} }
}; };
@ -873,14 +865,4 @@ FileTransferError::FileTransferError(FileTransfer::Error error, std::optional<st
err.msg = hf; err.msg = hf;
} }
bool isUri(std::string_view s)
{
if (s.compare(0, 8, "channel:") == 0) return true;
size_t pos = s.find("://");
if (pos == std::string::npos) return false;
std::string scheme(s, 0, pos);
return scheme == "http" || scheme == "https" || scheme == "file" || scheme == "channel" || scheme == "git" || scheme == "s3" || scheme == "ssh";
}
} }
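`isUri` and the deprecated-`channel:` expansion move out of the generic file-transfer layer; the evaluator-side pseudo-URL helpers referenced earlier in this diff take over. A sketch of what that resolution amounts to, grounded in the removed code above (the exact helper bodies in the eval settings are not shown in this diff):

```cpp
#include <string>
#include <string_view>

// Does the string look like something we should download rather than treat
// as a local path?
bool isPseudoUrl(std::string_view s)
{
    if (s.rfind("channel:", 0) == 0) return true;
    auto pos = s.find("://");
    if (pos == std::string_view::npos) return false;
    auto scheme = std::string(s.substr(0, pos));
    return scheme == "http" || scheme == "https" || scheme == "file"
        || scheme == "git" || scheme == "s3" || scheme == "ssh";
}

// Expand the deprecated 'channel:<name>' shorthand to the tarball URL it has
// always stood for; everything else is passed through unchanged.
std::string resolvePseudoUrl(std::string_view uri)
{
    if (uri.rfind("channel:", 0) == 0)
        return "https://nixos.org/channels/" + std::string(uri.substr(8)) + "/nixexprs.tar.xz";
    return std::string(uri);
}
```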

View file

@ -125,9 +125,4 @@ public:
FileTransferError(FileTransfer::Error error, std::optional<std::string> response, const Args & ... args); FileTransferError(FileTransfer::Error error, std::optional<std::string> response, const Args & ... args);
}; };
bool isUri(std::string_view s);
/* Resolve deprecated 'channel:<foo>' URLs. */
std::string resolveUri(std::string_view uri);
} }

View file

@ -281,10 +281,28 @@ public:
         `NIX_REMOTE` is empty, the uid under which the Nix daemon runs if
         `NIX_REMOTE` is `daemon`). Obviously, this should not be used in
         multi-user settings with untrusted users.
-      )"};
+
+        Defaults to `nixbld` when running as root, *empty* otherwise.
+      )",
+      {}, false};
 
     Setting<bool> autoAllocateUids{this, false, "auto-allocate-uids",
-        "Whether to allocate UIDs for builders automatically."};
+        R"(
+        Whether to select UIDs for builds automatically, instead of using the
+        users in `build-users-group`.
+
+        UIDs are allocated starting at 872415232 (0x34000000) on Linux and 56930 on macOS.
+
+        > **Warning**
+        > This is an experimental feature.
+
+        To enable it, add the following to [`nix.conf`](#):
+
+        ```
+        extra-experimental-features = auto-allocate-uids
+        auto-allocate-uids = true
+        ```
+        )"};
Setting<uint32_t> startId{this, Setting<uint32_t> startId{this,
#if __linux__ #if __linux__
@ -308,11 +326,22 @@ public:
     Setting<bool> useCgroups{
         this, false, "use-cgroups",
         R"(
-        Whether to execute builds inside cgroups. Cgroups are
-        enabled automatically for derivations that require the
-        `uid-range` system feature.
-        )"
-        };
+        Whether to execute builds inside cgroups.
+        This is only supported on Linux.
+
+        Cgroups are required and enabled automatically for derivations
+        that require the `uid-range` system feature.
+
+        > **Warning**
+        > This is an experimental feature.
+
+        To enable it, add the following to [`nix.conf`](#):
+
+        ```
+        extra-experimental-features = cgroups
+        use-cgroups = true
+        ```
+        )"};
#endif #endif
Setting<bool> impersonateLinux26{this, false, "impersonate-linux-26", Setting<bool> impersonateLinux26{this, false, "impersonate-linux-26",
@ -347,11 +376,6 @@ public:
)", )",
{"build-max-log-size"}}; {"build-max-log-size"}};
/* When buildRepeat > 0 and verboseBuild == true, whether to print
repeated builds (i.e. builds other than the first one) to
stderr. Hack to prevent Hydra logs from being polluted. */
bool printRepeatedBuilds = true;
Setting<unsigned int> pollInterval{this, 5, "build-poll-interval", Setting<unsigned int> pollInterval{this, 5, "build-poll-interval",
"How often (in seconds) to poll for locks."}; "How often (in seconds) to poll for locks."};
@ -475,19 +499,6 @@ public:
Setting<bool> sandboxFallback{this, true, "sandbox-fallback", Setting<bool> sandboxFallback{this, true, "sandbox-fallback",
"Whether to disable sandboxing when the kernel doesn't allow it."}; "Whether to disable sandboxing when the kernel doesn't allow it."};
Setting<size_t> buildRepeat{
this, 0, "repeat",
R"(
How many times to repeat builds to check whether they are
deterministic. The default value is 0. If the value is non-zero,
every build is repeated the specified number of times. If the
contents of any of the runs differs from the previous ones and
`enforce-determinism` is true, the build is rejected and the
resulting store paths are not registered as valid in Nix's
database.
)",
{"build-repeat"}};
#if __linux__ #if __linux__
Setting<std::string> sandboxShmSize{ Setting<std::string> sandboxShmSize{
this, "50%", "sandbox-dev-shm-size", this, "50%", "sandbox-dev-shm-size",
@ -551,10 +562,6 @@ public:
configuration file, and cannot be passed at the command line. configuration file, and cannot be passed at the command line.
)"}; )"};
Setting<bool> enforceDeterminism{
this, true, "enforce-determinism",
"Whether to fail if repeated builds produce different output. See `repeat`."};
Setting<Strings> trustedPublicKeys{ Setting<Strings> trustedPublicKeys{
this, this,
{"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="}, {"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="},


@ -255,8 +255,8 @@ private:
             << settings.maxLogSize;
         if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 3)
             conn.to
-                << settings.buildRepeat
-                << settings.enforceDeterminism;
+                << 0 // buildRepeat hasn't worked for ages anyway
+                << 0;
 
         if (GET_PROTOCOL_MINOR(conn.remoteVersion) >= 7) {
             conn.to << ((int) settings.keepFailed);


@ -185,7 +185,7 @@ std::unique_ptr<UserLock> acquireUserLock(uid_t nrIds, bool useChroot)
 bool useBuildUsers()
 {
     #if __linux__
-    static bool b = (settings.buildUsersGroup != "" || settings.startId.get() != 0) && getuid() == 0;
+    static bool b = (settings.buildUsersGroup != "" || settings.autoAllocateUids) && getuid() == 0;
     return b;
     #elif __APPLE__
     static bool b = settings.buildUsersGroup != "" && getuid() == 0;


@ -447,7 +447,7 @@ void RemoteStore::queryPathInfoUncached(const StorePath & path,
         } catch (Error & e) {
             // Ugly backwards compatibility hack.
             if (e.msg().find("is not valid") != std::string::npos)
-                throw InvalidPath(e.info());
+                throw InvalidPath(std::move(e.info()));
             throw;
         }
         if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 17) {


@ -19,21 +19,21 @@ using json = nlohmann::json;
 
 namespace nix {
 
-bool Store::isInStore(const Path & path) const
+bool Store::isInStore(PathView path) const
 {
     return isInDir(path, storeDir);
 }
 
-std::pair<StorePath, Path> Store::toStorePath(const Path & path) const
+std::pair<StorePath, Path> Store::toStorePath(PathView path) const
 {
     if (!isInStore(path))
         throw Error("path '%1%' is not in the Nix store", path);
-    Path::size_type slash = path.find('/', storeDir.size() + 1);
+    auto slash = path.find('/', storeDir.size() + 1);
     if (slash == Path::npos)
         return {parseStorePath(path), ""};
     else
-        return {parseStorePath(std::string_view(path).substr(0, slash)), path.substr(slash)};
+        return {parseStorePath(path.substr(0, slash)), (Path) path.substr(slash)};
 }
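For readers following the `PathView` change above: `toStorePath` splits an absolute path below the store into the store path component and the remainder. A minimal standalone sketch of that split, assuming a hard-coded `/nix/store` prefix instead of the configurable `storeDir` and returning plain strings:

```cpp
#include <iostream>
#include <string>
#include <string_view>
#include <utility>

// Hypothetical helper mirroring the split done by Store::toStorePath:
// "/nix/store/<hash>-<name>/<rest>" -> { "/nix/store/<hash>-<name>", "/<rest>" }.
static std::pair<std::string, std::string> splitStorePath(std::string_view path)
{
    constexpr std::string_view storeDir = "/nix/store";
    auto slash = path.find('/', storeDir.size() + 1);
    if (slash == path.npos)
        return {std::string(path), ""};
    return {std::string(path.substr(0, slash)), std::string(path.substr(slash))};
}

int main()
{
    auto [storePath, rest] = splitStorePath("/nix/store/abc123-hello-2.12/bin/hello");
    std::cout << storePath << "\n" << rest << "\n";
    // Prints:
    //   /nix/store/abc123-hello-2.12
    //   /bin/hello
}
```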


@ -179,7 +179,7 @@ public:
     /* Return true if path is in the Nix store (but not the Nix
        store itself). */
-    bool isInStore(const Path & path) const;
+    bool isInStore(PathView path) const;
 
     /* Return true if path is a store path, i.e. a direct child of
        the Nix store. */
@ -187,7 +187,7 @@ public:
     /* Split a path like /nix/store/<hash>-<name>/<bla> into
        /nix/store/<hash>-<name> and /<bla>. */
-    std::pair<StorePath, Path> toStorePath(const Path & path) const;
+    std::pair<StorePath, Path> toStorePath(PathView path) const;
 
     /* Follow symlinks until we end up with a path in the Nix store. */
     Path followLinksToStore(std::string_view path) const;


@ -35,10 +35,6 @@ static ArchiveSettings archiveSettings;
static GlobalConfig::Register rArchiveSettings(&archiveSettings); static GlobalConfig::Register rArchiveSettings(&archiveSettings);
const std::string narVersionMagic1 = "nix-archive-1";
static std::string caseHackSuffix = "~nix~case~hack~";
PathFilter defaultPathFilter = [](const Path &) { return true; }; PathFilter defaultPathFilter = [](const Path &) { return true; };


@ -103,7 +103,9 @@ void copyNAR(Source & source, Sink & sink);
 
 void copyPath(const Path & from, const Path & to);
 
-extern const std::string narVersionMagic1;
+inline constexpr std::string_view narVersionMagic1 = "nix-archive-1";
+
+inline constexpr std::string_view caseHackSuffix = "~nix~case~hack~";
 
 }
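The change above replaces an `extern` declaration (with its out-of-line definition in the `.cc` file, removed earlier in this diff) with `inline constexpr` variables defined directly in the header. Since C++17 an inline variable may be defined in every translation unit that includes it without violating the one-definition rule. A minimal sketch of the pattern, with a hypothetical constant name:

```cpp
// Sketch of a header-style definition; safe to include from many .cc files.
#include <iostream>
#include <string_view>

// One shared compile-time definition; no separate .cc definition needed,
// and no ODR violation because the variable is inline.
inline constexpr std::string_view magicHeader = "my-archive-1";

int main()
{
    std::cout << magicHeader << " (" << magicHeader.size() << " bytes)\n";
}
```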


@ -2,6 +2,7 @@
#include "cgroup.hh" #include "cgroup.hh"
#include "util.hh" #include "util.hh"
#include "finally.hh"
#include <chrono> #include <chrono>
#include <cmath> #include <cmath>
@ -10,9 +11,25 @@
#include <thread> #include <thread>
#include <dirent.h> #include <dirent.h>
#include <mntent.h>
namespace nix { namespace nix {
std::optional<Path> getCgroupFS()
{
static auto res = [&]() -> std::optional<Path> {
auto fp = fopen("/proc/mounts", "r");
if (!fp) return std::nullopt;
Finally delFP = [&]() { fclose(fp); };
while (auto ent = getmntent(fp))
if (std::string_view(ent->mnt_type) == "cgroup2")
return ent->mnt_dir;
return std::nullopt;
}();
return res;
}
// FIXME: obsolete, check for cgroup2 // FIXME: obsolete, check for cgroup2
std::map<std::string, std::string> getCgroups(const Path & cgroupFile) std::map<std::string, std::string> getCgroups(const Path & cgroupFile)
{ {
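A brief usage sketch for the `getCgroupFS` helper added in the hunk above, showing how a caller might combine the mount point with a controller file; the `/sys/fs/cgroup` return value and the root-level `cpu.max` path are illustrative assumptions about a typical cgroup v2 host, not something the real function guarantees:

```cpp
#include <fstream>
#include <iostream>
#include <optional>
#include <string>

// Hypothetical stand-in for nix::getCgroupFS(): on most cgroup-v2 hosts
// the unified hierarchy is mounted at /sys/fs/cgroup.
static std::optional<std::string> getCgroupFS()
{
    return std::string("/sys/fs/cgroup");
}

int main()
{
    if (auto fs = getCgroupFS()) {
        // E.g. try to read a CPU quota file, as getMaxCPU() does later in
        // this commit (exact path layout depends on the process's cgroup).
        std::ifstream cpuMax(*fs + "/cpu.max");
        std::string quota, period;
        if (cpuMax >> quota >> period)
            std::cout << "quota=" << quota << " period=" << period << "\n";
        else
            std::cout << "no cpu.max at the cgroup root\n";
    } else {
        std::cout << "no cgroup2 filesystem mounted\n";
    }
}
```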


@ -9,6 +9,8 @@
namespace nix { namespace nix {
std::optional<Path> getCgroupFS();
std::map<std::string, std::string> getCgroups(const Path & cgroupFile); std::map<std::string, std::string> getCgroups(const Path & cgroupFile);
struct CgroupStats struct CgroupStats


@ -262,6 +262,28 @@ std::ostream & showErrorInfo(std::ostream & out, const ErrorInfo & einfo, bool s
prefix += ":" ANSI_NORMAL " "; prefix += ":" ANSI_NORMAL " ";
std::ostringstream oss; std::ostringstream oss;
// traces
if (showTrace && !einfo.traces.empty()) {
for (const auto & trace : einfo.traces) {
oss << "\n" << "" << trace.hint.str() << "\n";
if (trace.pos.has_value() && (*trace.pos)) {
auto pos = trace.pos.value();
oss << "\n";
printAtPos(pos, oss);
auto loc = getCodeLines(pos);
if (loc.has_value()) {
oss << "\n";
printCodeLines(oss, "", pos, *loc);
oss << "\n";
}
}
}
oss << "\n" << prefix;
}
oss << einfo.msg << "\n"; oss << einfo.msg << "\n";
if (einfo.errPos.has_value() && *einfo.errPos) { if (einfo.errPos.has_value() && *einfo.errPos) {
@ -285,26 +307,6 @@ std::ostream & showErrorInfo(std::ostream & out, const ErrorInfo & einfo, bool s
"?" << std::endl; "?" << std::endl;
} }
// traces
if (showTrace && !einfo.traces.empty()) {
for (auto iter = einfo.traces.rbegin(); iter != einfo.traces.rend(); ++iter) {
oss << "\n" << "" << iter->hint.str() << "\n";
if (iter->pos.has_value() && (*iter->pos)) {
auto pos = iter->pos.value();
oss << "\n";
printAtPos(pos, oss);
auto loc = getCodeLines(pos);
if (loc.has_value()) {
oss << "\n";
printCodeLines(oss, "", pos, *loc);
oss << "\n";
}
}
}
}
out << indent(prefix, std::string(filterANSIEscapes(prefix, true).size(), ' '), chomp(oss.str())); out << indent(prefix, std::string(filterANSIEscapes(prefix, true).size(), ' '), chomp(oss.str()));
return out; return out;


@ -148,7 +148,7 @@ inline hintformat hintfmt(const std::string & fs, const Args & ... args)
     return f;
 }
 
-inline hintformat hintfmt(std::string plain_string)
+inline hintformat hintfmt(const std::string & plain_string)
 {
     // we won't be receiving any args in this case, so just print the original string
     return hintfmt("%s", normaltxt(plain_string));


@ -105,14 +105,6 @@ public:
Verbosity verbosity = lvlInfo; Verbosity verbosity = lvlInfo;
void warnOnce(bool & haveWarned, const FormatOrString & fs)
{
if (!haveWarned) {
warn(fs.s);
haveWarned = true;
}
}
void writeToStderr(std::string_view s) void writeToStderr(std::string_view s)
{ {
try { try {
@ -130,11 +122,11 @@ Logger * makeSimpleLogger(bool printBuildLogs)
     return new SimpleLogger(printBuildLogs);
 }
 
-std::atomic<uint64_t> nextId{(uint64_t) getpid() << 32};
+std::atomic<uint64_t> nextId{0};
 
 Activity::Activity(Logger & logger, Verbosity lvl, ActivityType type,
     const std::string & s, const Logger::Fields & fields, ActivityId parent)
-    : logger(logger), id(nextId++)
+    : logger(logger), id(nextId++ + (((uint64_t) getpid()) << 32))
 {
     logger.startActivity(id, lvl, type, s, fields, parent);
 }
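The change above stops baking the PID into the counter's initial value and instead mixes it in whenever an `Activity` is constructed, so the counter itself starts at zero and IDs minted later always carry the current PID (which appears to matter after a fork). A small sketch of the resulting ID layout; the 32/32-bit split is taken from the code above, the helper name is made up:

```cpp
#include <atomic>
#include <cstdint>
#include <iostream>
#include <unistd.h>

std::atomic<uint64_t> nextId{0};

// Upper 32 bits: current PID; lower 32 bits: per-process counter.
static uint64_t newActivityId()
{
    return nextId++ + (((uint64_t) getpid()) << 32);
}

int main()
{
    auto id = newActivityId();
    std::cout << "pid part: " << (id >> 32)
              << ", counter part: " << (id & 0xffffffffu) << "\n";
}
```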


@ -82,7 +82,7 @@ public:
         log(lvlInfo, fs);
     }
 
-    virtual void logEI(const ErrorInfo &ei) = 0;
+    virtual void logEI(const ErrorInfo & ei) = 0;
 
     void logEI(Verbosity lvl, ErrorInfo ei)
     {
@ -225,7 +225,11 @@ inline void warn(const std::string & fs, const Args & ... args)
     logger->warn(f.str());
 }
 
-void warnOnce(bool & haveWarned, const FormatOrString & fs);
+#define warnOnce(haveWarned, args...) \
+    if (!haveWarned) { \
+        haveWarned = true; \
+        warn(args); \
+    }
 
 void writeToStderr(std::string_view s);
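A short usage sketch for the `warnOnce` macro above: the caller supplies the `bool` flag, typically a function-local `static`, so the warning fires at most once per process. The macro body and the GNU `args...` variadic syntax come from the header being patched; the `warn` stand-in and the surrounding function are hypothetical:

```cpp
#include <cstdio>

// Stand-in for nix's variadic warn(); the real one goes through the logger.
template<typename... Args>
void warn(const char * fmt, Args... args)
{
    std::fprintf(stderr, "warning: ");
    std::fprintf(stderr, fmt, args...);
    std::fprintf(stderr, "\n");
}

// GNU named-variadic macro, matching the definition in the diff above.
#define warnOnce(haveWarned, args...) \
    if (!haveWarned) { \
        haveWarned = true; \
        warn(args); \
    }

void checkFeature(bool enabled)
{
    static bool haveWarned = false;
    if (!enabled)
        warnOnce(haveWarned, "feature %s is disabled", "cgroups");
}

int main()
{
    checkFeature(false);
    checkFeature(false); // warns only the first time
}
```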


@ -83,6 +83,11 @@ public:
         return p != other.p;
     }
 
+    bool operator < (const ref<T> & other) const
+    {
+        return p < other.p;
+    }
+
 private:
 
     template<typename T2, typename... Args>
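The `operator <` added above is what allows `ref<T>` to be used as the key of an ordered container; elsewhere in this commit `CmdFlakeMetadata` switches its `visited` set from `std::shared_ptr<Node>` to `ref<Node>`, which relies on exactly this. A minimal sketch with a simplified, hypothetical `Ref` wrapper (not the real `nix::ref`):

```cpp
#include <iostream>
#include <memory>
#include <set>

// Simplified smart-pointer wrapper, ordered by the underlying pointer.
template<typename T>
struct Ref
{
    std::shared_ptr<T> p;

    bool operator < (const Ref<T> & other) const
    {
        return p < other.p;
    }
};

int main()
{
    auto a = Ref<int>{std::make_shared<int>(1)};
    auto b = Ref<int>{std::make_shared<int>(2)};

    std::set<Ref<int>> visited;          // requires operator <
    visited.insert(a);
    visited.insert(a);                   // duplicate, ignored
    visited.insert(b);
    std::cout << visited.size() << "\n"; // prints 2
}
```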


@ -338,7 +338,7 @@ Sink & operator << (Sink & sink, const StringSet & s)
 
 Sink & operator << (Sink & sink, const Error & ex)
 {
-    auto info = ex.info();
+    auto & info = ex.info();
     sink
         << "Error"
         << info.level


@ -331,17 +331,9 @@ T readNum(Source & source)
     unsigned char buf[8];
     source((char *) buf, sizeof(buf));
 
-    uint64_t n =
-        ((uint64_t) buf[0]) |
-        ((uint64_t) buf[1] << 8) |
-        ((uint64_t) buf[2] << 16) |
-        ((uint64_t) buf[3] << 24) |
-        ((uint64_t) buf[4] << 32) |
-        ((uint64_t) buf[5] << 40) |
-        ((uint64_t) buf[6] << 48) |
-        ((uint64_t) buf[7] << 56);
+    auto n = readLittleEndian<uint64_t>(buf);
 
-    if (n > (uint64_t)std::numeric_limits<T>::max())
+    if (n > (uint64_t) std::numeric_limits<T>::max())
         throw SerialisationError("serialised integer %d is too large for type '%s'", n, typeid(T).name());
 
     return (T) n;


@ -2,6 +2,7 @@
#include "sync.hh" #include "sync.hh"
#include "finally.hh" #include "finally.hh"
#include "serialise.hh" #include "serialise.hh"
#include "cgroup.hh"
#include <array> #include <array>
#include <cctype> #include <cctype>
@ -36,7 +37,6 @@
#include <sys/prctl.h> #include <sys/prctl.h>
#include <sys/resource.h> #include <sys/resource.h>
#include <mntent.h>
#include <cmath> #include <cmath>
#endif #endif
@ -727,45 +727,22 @@ unsigned int getMaxCPU()
 {
     #if __linux__
     try {
-        FILE *fp = fopen("/proc/mounts", "r");
-        if (!fp)
-            return 0;
-
-        Strings cgPathParts;
-
-        struct mntent *ent;
-        while ((ent = getmntent(fp))) {
-            std::string mountType, mountPath;
-
-            mountType = ent->mnt_type;
-            mountPath = ent->mnt_dir;
-
-            if (mountType == "cgroup2") {
-                cgPathParts.push_back(mountPath);
-                break;
-            }
-        }
-
-        fclose(fp);
-
-        if (cgPathParts.size() > 0 && pathExists("/proc/self/cgroup")) {
-            std::string currentCgroup = readFile("/proc/self/cgroup");
-            Strings cgValues = tokenizeString<Strings>(currentCgroup, ":");
-            cgPathParts.push_back(trim(cgValues.back(), "\n"));
-            cgPathParts.push_back("cpu.max");
-            std::string fullCgPath = canonPath(concatStringsSep("/", cgPathParts));
-
-            if (pathExists(fullCgPath)) {
-                std::string cpuMax = readFile(fullCgPath);
-                std::vector<std::string> cpuMaxParts = tokenizeString<std::vector<std::string>>(cpuMax, " ");
-                std::string quota = cpuMaxParts[0];
-                std::string period = trim(cpuMaxParts[1], "\n");
-                if (quota != "max")
-                    return std::ceil(std::stoi(quota) / std::stof(period));
-            }
-        }
-    } catch (Error &) { ignoreException(); }
+        auto cgroupFS = getCgroupFS();
+        if (!cgroupFS) return 0;
+
+        auto cgroups = getCgroups("/proc/self/cgroup");
+        auto cgroup = cgroups[""];
+        if (cgroup == "") return 0;
+
+        auto cpuFile = *cgroupFS + "/" + cgroup + "/cpu.max";
+
+        auto cpuMax = readFile(cpuFile);
+        auto cpuMaxParts = tokenizeString<std::vector<std::string>>(cpuMax, " \n");
+        auto quota = cpuMaxParts[0];
+        auto period = cpuMaxParts[1];
+        if (quota != "max")
+            return std::ceil(std::stoi(quota) / std::stof(period));
+    } catch (Error &) { ignoreException(lvlDebug); }
 #endif
 
     return 0;
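To make the quota arithmetic at the end of the rewritten `getMaxCPU` concrete: cgroup v2 exposes the CPU limit as `"<quota> <period>"` in microseconds, and the effective CPU count is `ceil(quota / period)`. A small sketch of just that parsing step, with a hard-coded sample value standing in for a real `cpu.max` file:

```cpp
#include <cmath>
#include <iostream>
#include <string>

int main()
{
    // Sample contents of a cgroup v2 cpu.max file: 250 ms of CPU time
    // per 100 ms period, i.e. 2.5 CPUs worth of bandwidth.
    std::string cpuMax = "250000 100000\n";

    auto space = cpuMax.find(' ');
    auto quota = cpuMax.substr(0, space);
    auto period = cpuMax.substr(space + 1);

    unsigned int cpus = 0;
    if (quota != "max")
        cpus = std::ceil(std::stoi(quota) / std::stof(period));

    std::cout << cpus << "\n"; // prints 3 (2.5 rounded up)
}
```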
@ -1427,7 +1404,7 @@ std::string shellEscape(const std::string_view s)
 }
 
-void ignoreException()
+void ignoreException(Verbosity lvl)
 {
     /* Make sure no exceptions leave this function.
        printError() also throws when remote is closed. */
@ -1435,7 +1412,7 @@ void ignoreException()
     try {
         throw;
     } catch (std::exception & e) {
-        printError("error (ignored): %1%", e.what());
+        printMsg(lvl, "error (ignored): %1%", e.what());
     }
     } catch (...) { }
 }
@ -1617,6 +1594,21 @@ std::string stripIndentation(std::string_view s)
} }
std::pair<std::string_view, std::string_view> getLine(std::string_view s)
{
auto newline = s.find('\n');
if (newline == s.npos) {
return {s, ""};
} else {
auto line = s.substr(0, newline);
if (!line.empty() && line[line.size() - 1] == '\r')
line = line.substr(0, line.size() - 1);
return {line, s.substr(newline + 1)};
}
}
////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////
static Sync<std::pair<unsigned short, unsigned short>> windowSize{{0, 0}}; static Sync<std::pair<unsigned short, unsigned short>> windowSize{{0, 0}};
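A short usage sketch for the `getLine` helper added in the hunk above: it returns the first line (without the LF, and with a trailing CR stripped) together with the remainder after the line break, so a caller can walk a buffer line by line. The loop and the sample text are illustrative; the splitting logic mirrors the function above:

```cpp
#include <iostream>
#include <string_view>
#include <utility>

// Same splitting behaviour as the getLine helper added above.
static std::pair<std::string_view, std::string_view> getLine(std::string_view s)
{
    auto newline = s.find('\n');
    if (newline == s.npos)
        return {s, ""};
    auto line = s.substr(0, newline);
    if (!line.empty() && line.back() == '\r')
        line = line.substr(0, line.size() - 1);
    return {line, s.substr(newline + 1)};
}

int main()
{
    std::string_view text = "first\r\nsecond\nthird";
    while (!text.empty()) {
        auto [line, rest] = getLine(text);
        std::cout << "[" << line << "]\n";
        text = rest;
    }
    // Prints: [first] [second] [third]
}
```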


@ -510,6 +510,18 @@ std::optional<N> string2Float(const std::string_view s)
} }
/* Convert a little-endian integer to host order. */
template<typename T>
T readLittleEndian(unsigned char * p)
{
T x = 0;
for (size_t i = 0; i < sizeof(x); ++i, ++p) {
x |= ((T) *p) << (i * 8);
}
return x;
}
/* Return true iff `s' starts with `prefix'. */ /* Return true iff `s' starts with `prefix'. */
bool hasPrefix(std::string_view s, std::string_view prefix); bool hasPrefix(std::string_view s, std::string_view prefix);
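A worked example for the `readLittleEndian` template declared above: the least-significant byte comes first, so the byte sequence `0x39 0x05 0x00 …` decodes to 0x0539 = 1337. A minimal sketch reusing the same decoding loop (the `const` on the pointer is a small liberty taken for the example):

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>

// Same decoding loop as the readLittleEndian helper above.
template<typename T>
T readLittleEndian(const unsigned char * p)
{
    T x = 0;
    for (size_t i = 0; i < sizeof(x); ++i, ++p)
        x |= ((T) *p) << (i * 8);
    return x;
}

int main()
{
    // 1337 = 0x0539, stored least-significant byte first.
    unsigned char buf[8] = {0x39, 0x05, 0, 0, 0, 0, 0, 0};
    std::cout << readLittleEndian<uint64_t>(buf) << "\n"; // prints 1337
}
```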
@ -528,7 +540,7 @@ std::string shellEscape(const std::string_view s);
/* Exception handling in destructors: print an error message, then /* Exception handling in destructors: print an error message, then
ignore the exception. */ ignore the exception. */
-void ignoreException();
+void ignoreException(Verbosity lvl = lvlError);
@ -563,6 +575,12 @@ std::string base64Decode(std::string_view s);
std::string stripIndentation(std::string_view s); std::string stripIndentation(std::string_view s);
/* Get the prefix of 's' up to and excluding the next line break (LF
optionally preceded by CR), and the remainder following the line
break. */
std::pair<std::string_view, std::string_view> getLine(std::string_view s);
/* Get a value for the specified key from an associate container. */ /* Get a value for the specified key from an associate container. */
template <class T> template <class T>
const typename T::mapped_type * get(const T & map, const typename T::key_type & key) const typename T::mapped_type * get(const T & map, const typename T::key_type & key)
@ -737,4 +755,13 @@ inline std::string operator + (std::string && s, std::string_view s2)
return std::move(s); return std::move(s);
} }
inline std::string operator + (std::string_view s1, const char * s2)
{
std::string s;
s.reserve(s1.size() + strlen(s2));
s.append(s1);
s.append(s2);
return s;
}
} }
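The new overload added just above lets a `std::string_view` on the left-hand side be concatenated with a C string, reserving the exact buffer size before appending. A quick usage sketch with an illustrative path value:

```cpp
#include <cstring>
#include <iostream>
#include <string>
#include <string_view>

// Same shape as the operator + overload added above.
inline std::string operator + (std::string_view s1, const char * s2)
{
    std::string s;
    s.reserve(s1.size() + std::strlen(s2));
    s.append(s1);
    s.append(s2);
    return s;
}

int main()
{
    std::string_view dir = "/sys/fs/cgroup";
    std::cout << (dir + "/cpu.max") << "\n"; // prints /sys/fs/cgroup/cpu.max
}
```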


@ -808,14 +808,23 @@ static void opServe(Strings opFlags, Strings opArgs)
         if (GET_PROTOCOL_MINOR(clientVersion) >= 2)
             settings.maxLogSize = readNum<unsigned long>(in);
         if (GET_PROTOCOL_MINOR(clientVersion) >= 3) {
-            settings.buildRepeat = readInt(in);
-            settings.enforceDeterminism = readInt(in);
+            auto nrRepeats = readInt(in);
+            if (nrRepeats != 0) {
+                throw Error("client requested repeating builds, but this is not currently implemented");
+            }
+            // Ignore 'enforceDeterminism'. It used to be true by
+            // default, but also only never had any effect when
+            // `nrRepeats == 0`. We have already asserted that
+            // `nrRepeats` in fact is 0, so we can safely ignore this
+            // without doing something other than what the client
+            // asked for.
+            readInt(in);
             settings.runDiffHook = true;
         }
         if (GET_PROTOCOL_MINOR(clientVersion) >= 7) {
             settings.keepFailed = (bool) readInt(in);
         }
-        settings.printRepeatedBuilds = false;
     };
while (true) { while (true) {
@ -926,7 +935,6 @@ static void opServe(Strings opFlags, Strings opArgs)
worker_proto::write(*store, out, status.builtOutputs); worker_proto::write(*store, out, status.builtOutputs);
} }
break; break;
} }


@ -257,7 +257,7 @@ static void daemonLoop()
         } catch (Interrupted & e) {
             return;
         } catch (Error & error) {
-            ErrorInfo ei = error.info();
+            auto ei = error.info();
             // FIXME: add to trace?
             ei.msg = hintfmt("error processing connection: %1%", ei.msg.str());
             logError(ei);


@ -16,7 +16,7 @@ R""(
 # Description
 
 This command recreates the lock file of a flake (`flake.lock`), thus
-updating the lock for every mutable input (like `nixpkgs`) to its
+updating the lock for every unlocked input (like `nixpkgs`) to its
 current version. This is equivalent to passing `--recreate-lock-file`
 to any command that operates on a flake. That is,


@ -215,7 +215,7 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON
         if (!lockedFlake.lockFile.root->inputs.empty())
             logger->cout(ANSI_BOLD "Inputs:" ANSI_NORMAL);
 
-        std::unordered_set<std::shared_ptr<Node>> visited;
+        std::set<ref<Node>> visited;
 
         std::function<void(const Node & node, const std::string & prefix)> recurse;
@ -227,7 +227,7 @@ struct CmdFlakeMetadata : FlakeCommand, MixJSON
                 if (auto lockedNode = std::get_if<0>(&input.second)) {
                     logger->cout("%s" ANSI_BOLD "%s" ANSI_NORMAL ": %s",
                         prefix + (last ? treeLast : treeConn), input.first,
-                        *lockedNode ? (*lockedNode)->lockedRef : flake.lockedRef);
+                        (*lockedNode)->lockedRef);
 
                     bool firstVisit = visited.insert(*lockedNode).second;


@ -18,51 +18,56 @@ values such as packages or NixOS modules provided by the flake).
 Flake references (*flakerefs*) are a way to specify the location of a
 flake. These have two different forms:
 
-* An attribute set representation, e.g.
-
-  ```nix
-  {
-    type = "github";
-    owner = "NixOS";
-    repo = "nixpkgs";
-  }
-  ```
-
-  The only required attribute is `type`. The supported types are
-  listed below.
-
-* A URL-like syntax, e.g.
-
-  ```
-  github:NixOS/nixpkgs
-  ```
-
-  These are used on the command line as a more convenient alternative
-  to the attribute set representation. For instance, in the command
-
-  ```console
-  # nix build github:NixOS/nixpkgs#hello
-  ```
-
-  `github:NixOS/nixpkgs` is a flake reference (while `hello` is an
-  output attribute). They are also allowed in the `inputs` attribute
-  of a flake, e.g.
-
-  ```nix
-  inputs.nixpkgs.url = github:NixOS/nixpkgs;
-  ```
-
-  is equivalent to
-
-  ```nix
-  inputs.nixpkgs = {
-    type = "github";
-    owner = "NixOS";
-    repo = "nixpkgs";
-  };
-  ```
-
-## Examples
+## Attribute set representation
+
+Example:
+
+```nix
+{
+  type = "github";
+  owner = "NixOS";
+  repo = "nixpkgs";
+}
+```
+
+The only required attribute is `type`. The supported types are
+listed below.
+
+## URL-like syntax
+
+Example:
+
+```
+github:NixOS/nixpkgs
+```
+
+These are used on the command line as a more convenient alternative
+to the attribute set representation. For instance, in the command
+
+```console
+# nix build github:NixOS/nixpkgs#hello
+```
+
+`github:NixOS/nixpkgs` is a flake reference (while `hello` is an
+output attribute). They are also allowed in the `inputs` attribute
+of a flake, e.g.
+
+```nix
+inputs.nixpkgs.url = github:NixOS/nixpkgs;
+```
+
+is equivalent to
+
+```nix
+inputs.nixpkgs = {
+  type = "github";
+  owner = "NixOS";
+  repo = "nixpkgs";
+};
+```
+
+### Examples
 
 Here are some examples of flake references in their URL-like representation:


@ -164,6 +164,13 @@ operate are determined as follows:
``` ```
and likewise, using a store path to a "drv" file to specify the derivation:
```console
# nix build '/nix/store/gzaflydcr6sb3567hap9q6srzx8ggdgg-glibc-2.33-78.drv^dev,static'
```
* You can also specify that *all* outputs should be used using the * You can also specify that *all* outputs should be used using the
syntax *installable*`^*`. For example, the following shows the size syntax *installable*`^*`. For example, the following shows the size
of all outputs of the `glibc` package in the binary cache: of all outputs of the `glibc` package in the binary cache:
@ -177,6 +184,12 @@ operate are determined as follows:
/nix/store/q6580lr01jpcsqs4r5arlh4ki2c1m9rv-glibc-2.33-123-dev 44200560 /nix/store/q6580lr01jpcsqs4r5arlh4ki2c1m9rv-glibc-2.33-123-dev 44200560
``` ```
and likewise, using a store path to a "drv" file to specify the derivation:
```console
# nix path-info -S '/nix/store/gzaflydcr6sb3567hap9q6srzx8ggdgg-glibc-2.33-78.drv^*'
```
* If you didn't specify the desired outputs, but the derivation has an * If you didn't specify the desired outputs, but the derivation has an
attribute `meta.outputsToInstall`, Nix will use those outputs. For attribute `meta.outputsToInstall`, Nix will use those outputs. For
example, since the package `nixpkgs#libxml2` has this attribute: example, since the package `nixpkgs#libxml2` has this attribute:
@ -189,6 +202,9 @@ operate are determined as follows:
a command like `nix shell nixpkgs#libxml2` will provide only those a command like `nix shell nixpkgs#libxml2` will provide only those
two outputs by default. two outputs by default.
Note that a store derivation (given by `.drv` file store path) doesn't have
any attributes like `meta`, and thus this case doesn't apply to it.
* Otherwise, Nix will use all outputs of the derivation. * Otherwise, Nix will use all outputs of the derivation.
# Nix stores # Nix stores


@ -20,11 +20,11 @@ following fields:
 * An integer that can be used to unambiguously identify the package in
   invocations of `nix profile remove` and `nix profile upgrade`.
 
-* The original ("mutable") flake reference and output attribute path
+* The original ("unlocked") flake reference and output attribute path
   used at installation time.
 
-* The immutable flake reference to which the mutable flake reference
-  was resolved.
+* The locked flake reference to which the unlocked flake reference was
+  resolved.
 
 * The store path(s) of the package.


@ -2,7 +2,7 @@ R""(
 # Examples
 
-* Upgrade all packages that were installed using a mutable flake
+* Upgrade all packages that were installed using an unlocked flake
   reference:
 
   ```console
@ -32,9 +32,9 @@ the package was installed.
 > **Warning**
 >
-> This only works if you used a *mutable* flake reference at
+> This only works if you used an *unlocked* flake reference at
 > installation time, e.g. `nixpkgs#hello`. It does not work if you
-> used an *immutable* flake reference
+> used a *locked* flake reference
 > (e.g. `github:NixOS/nixpkgs/13d0c311e3ae923a00f734b43fd1d35b47d8943a#hello`),
 > since in that case the "latest version" is always the same.


@ -88,8 +88,7 @@ has the following fields:
   the user at the time of installation (e.g. `nixpkgs`). This is also
   the flake reference that will be used by `nix profile upgrade`.
 
-* `uri`: The immutable flake reference to which `originalUrl`
-  resolved.
+* `uri`: The locked flake reference to which `originalUrl` resolved.
 
 * `attrPath`: The flake output attribute that provided this
   package. Note that this is not necessarily the attribute that the


@ -183,14 +183,12 @@ struct CmdRegistryPin : RegistryCommand, EvalCommand
     void run(nix::ref<nix::Store> store) override
     {
-        if (locked.empty()) {
-            locked = url;
-        }
+        if (locked.empty()) locked = url;
         auto registry = getRegistry();
         auto ref = parseFlakeRef(url);
-        auto locked_ref = parseFlakeRef(locked);
+        auto lockedRef = parseFlakeRef(locked);
         registry->remove(ref.input);
-        auto [tree, resolved] = locked_ref.resolve(store).input.fetch(store);
+        auto [tree, resolved] = lockedRef.resolve(store).input.fetch(store);
         fetchers::Attrs extraAttrs;
         if (ref.subdir != "") extraAttrs["dir"] = ref.subdir;
         registry->add(ref.input, resolved, extraAttrs);


@ -8,13 +8,15 @@ set -o pipefail
 nix build -f multiple-outputs.nix --json a b --no-link | jq --exit-status '
   (.[0] |
     (.drvPath | match(".*multiple-outputs-a.drv")) and
-    (.outputs | keys | length == 2) and
-    (.outputs.first | match(".*multiple-outputs-a-first")) and
-    (.outputs.second | match(".*multiple-outputs-a-second")))
+    (.outputs |
+      (keys | length == 2) and
+      (.first | match(".*multiple-outputs-a-first")) and
+      (.second | match(".*multiple-outputs-a-second"))))
   and (.[1] |
     (.drvPath | match(".*multiple-outputs-b.drv")) and
-    (.outputs | keys | length == 1) and
-    (.outputs.out | match(".*multiple-outputs-b")))
+    (.outputs |
+      (keys | length == 1) and
+      (.out | match(".*multiple-outputs-b"))))
 '
 
 # Test output selection using the '^' syntax.
@ -56,6 +58,48 @@ nix build -f multiple-outputs.nix --json 'e^*' --no-link | jq --exit-status '
(.outputs | keys == ["a", "b", "c"])) (.outputs | keys == ["a", "b", "c"]))
' '
# Test building from raw store path to drv not expression.
drv=$(nix eval -f multiple-outputs.nix --raw a.drvPath)
if nix build "$drv^not-an-output" --no-link --json; then
fail "'not-an-output' should fail to build"
fi
if nix build "$drv^" --no-link --json; then
fail "'empty outputs list' should fail to build"
fi
if nix build "$drv^*nope" --no-link --json; then
fail "'* must be entire string' should fail to build"
fi
nix build "$drv^first" --no-link --json | jq --exit-status '
(.[0] |
(.drvPath | match(".*multiple-outputs-a.drv")) and
(.outputs |
(keys | length == 1) and
(.first | match(".*multiple-outputs-a-first")) and
(has("second") | not)))
'
nix build "$drv^first,second" --no-link --json | jq --exit-status '
(.[0] |
(.drvPath | match(".*multiple-outputs-a.drv")) and
(.outputs |
(keys | length == 2) and
(.first | match(".*multiple-outputs-a-first")) and
(.second | match(".*multiple-outputs-a-second"))))
'
nix build "$drv^*" --no-link --json | jq --exit-status '
(.[0] |
(.drvPath | match(".*multiple-outputs-a.drv")) and
(.outputs |
(keys | length == 2) and
(.first | match(".*multiple-outputs-a-first")) and
(.second | match(".*multiple-outputs-a-second"))))
'
# Make sure that `--impure` works (regression test for https://github.com/NixOS/nix/issues/6488) # Make sure that `--impure` works (regression test for https://github.com/NixOS/nix/issues/6488)
nix build --impure -f multiple-outputs.nix --json e --no-link | jq --exit-status ' nix build --impure -f multiple-outputs.nix --json e --no-link | jq --exit-status '
(.[0] | (.[0] |


@ -44,7 +44,7 @@ with import ./config.nix;
   };
 
   hashmismatch = import <nix/fetchurl.nix> {
-    url = "file://" + builtins.getEnv "TMPDIR" + "/dummy";
+    url = "file://" + builtins.getEnv "TEST_ROOT" + "/dummy";
     sha256 = "0mdqa9w1p6cmli6976v4wi0sw9r4p5prkj7lzfd1877wk11c9c73";
   };


@ -40,14 +40,6 @@ nix-build check.nix -A deterministic --argstr checkBuildId $checkBuildId \
if grep -q 'may not be deterministic' $TEST_ROOT/log; then false; fi if grep -q 'may not be deterministic' $TEST_ROOT/log; then false; fi
checkBuildTempDirRemoved $TEST_ROOT/log checkBuildTempDirRemoved $TEST_ROOT/log
nix build -f check.nix deterministic --rebuild --repeat 1 \
--argstr checkBuildId $checkBuildId --keep-failed --no-link \
2> $TEST_ROOT/log
if grep -q 'checking is not possible' $TEST_ROOT/log; then false; fi
# Repeat is set to 1, ie. nix should build deterministic twice.
if [ "$(grep "checking outputs" $TEST_ROOT/log | wc -l)" -ne 2 ]; then false; fi
checkBuildTempDirRemoved $TEST_ROOT/log
nix-build check.nix -A nondeterministic --argstr checkBuildId $checkBuildId \ nix-build check.nix -A nondeterministic --argstr checkBuildId $checkBuildId \
--no-out-link 2> $TEST_ROOT/log --no-out-link 2> $TEST_ROOT/log
checkBuildTempDirRemoved $TEST_ROOT/log checkBuildTempDirRemoved $TEST_ROOT/log
@ -58,12 +50,6 @@ grep 'may not be deterministic' $TEST_ROOT/log
[ "$status" = "104" ] [ "$status" = "104" ]
checkBuildTempDirRemoved $TEST_ROOT/log checkBuildTempDirRemoved $TEST_ROOT/log
nix build -f check.nix nondeterministic --rebuild --repeat 1 \
--argstr checkBuildId $checkBuildId --keep-failed --no-link \
2> $TEST_ROOT/log || status=$?
grep 'may not be deterministic' $TEST_ROOT/log
checkBuildTempDirRemoved $TEST_ROOT/log
nix-build check.nix -A nondeterministic --argstr checkBuildId $checkBuildId \ nix-build check.nix -A nondeterministic --argstr checkBuildId $checkBuildId \
--no-out-link --check --keep-failed 2> $TEST_ROOT/log || status=$? --no-out-link --check --keep-failed 2> $TEST_ROOT/log || status=$?
grep 'may not be deterministic' $TEST_ROOT/log grep 'may not be deterministic' $TEST_ROOT/log
@ -72,12 +58,6 @@ if checkBuildTempDirRemoved $TEST_ROOT/log; then false; fi
clearStore clearStore
nix-build dependencies.nix --no-out-link --repeat 3
nix-build check.nix -A nondeterministic --no-out-link --repeat 1 2> $TEST_ROOT/log || status=$?
[ "$status" = "1" ]
grep 'differs from previous round' $TEST_ROOT/log
path=$(nix-build check.nix -A fetchurl --no-out-link) path=$(nix-build check.nix -A fetchurl --no-out-link)
chmod +w $path chmod +w $path
@ -91,13 +71,13 @@ nix-build check.nix -A fetchurl --no-out-link --check
 nix-build check.nix -A fetchurl --no-out-link --repair
 [[ $(cat $path) != foo ]]
 
-echo 'Hello World' > $TMPDIR/dummy
+echo 'Hello World' > $TEST_ROOT/dummy
 nix-build check.nix -A hashmismatch --no-out-link || status=$?
 [ "$status" = "102" ]
 
-echo -n > $TMPDIR/dummy
+echo -n > $TEST_ROOT/dummy
 nix-build check.nix -A hashmismatch --no-out-link
 
-echo 'Hello World' > $TMPDIR/dummy
+echo 'Hello World' > $TEST_ROOT/dummy
 nix-build check.nix -A hashmismatch --no-out-link --check || status=$?
 [ "$status" = "102" ]


@ -29,3 +29,7 @@ nix-instantiate --eval -E 'assert 1 + 2 == 3; true'
[[ $(nix-instantiate -A attr --eval "./eval.nix") == '{ foo = "bar"; }' ]] [[ $(nix-instantiate -A attr --eval "./eval.nix") == '{ foo = "bar"; }' ]]
[[ $(nix-instantiate -A attr --eval --json "./eval.nix") == '{"foo":"bar"}' ]] [[ $(nix-instantiate -A attr --eval --json "./eval.nix") == '{"foo":"bar"}' ]]
[[ $(nix-instantiate -A int --eval - < "./eval.nix") == 123 ]] [[ $(nix-instantiate -A int --eval - < "./eval.nix") == 123 ]]
# Check that symlink cycles don't cause a hang.
ln -sfn cycle.nix $TEST_ROOT/cycle.nix
(! nix eval --file $TEST_ROOT/cycle.nix)


@ -122,6 +122,7 @@ git -C $repo commit -m 'Bla3' -a
path4=$(nix eval --impure --refresh --raw --expr "(builtins.fetchGit file://$repo).outPath") path4=$(nix eval --impure --refresh --raw --expr "(builtins.fetchGit file://$repo).outPath")
[[ $path2 = $path4 ]] [[ $path2 = $path4 ]]
status=0
nix eval --impure --raw --expr "(builtins.fetchGit { url = $repo; rev = \"$rev2\"; narHash = \"sha256-B5yIPHhEm0eysJKEsO7nqxprh9vcblFxpJG11gXJus1=\"; }).outPath" || status=$? nix eval --impure --raw --expr "(builtins.fetchGit { url = $repo; rev = \"$rev2\"; narHash = \"sha256-B5yIPHhEm0eysJKEsO7nqxprh9vcblFxpJG11gXJus1=\"; }).outPath" || status=$?
[[ "$status" = "102" ]] [[ "$status" = "102" ]]

Some files were not shown because too many files have changed in this diff.