Merge remote-tracking branch 'nixos/master'

This commit is contained in:
Max Headroom 2022-12-01 17:52:54 +01:00
commit e95428e16a
102 changed files with 2714 additions and 1615 deletions

5
.github/assign-by-files.yml vendored Normal file
View file

@ -0,0 +1,5 @@
---
# This file is used by https://github.com/marketplace/actions/auto-assign-reviewer-by-files
# to assign maintainers as reviewers on pull requests.
# Keys are glob patterns matched against the files changed by a PR;
# the list under each key names the GitHub users to assign on a match.
"doc/**/*":
- fricklerhandwerk

12
.github/workflows/assign-reviewer.yml vendored Normal file
View file

@ -0,0 +1,12 @@
# Request reviewers automatically on pull requests.
# The file-pattern -> reviewer mapping lives in .github/assign-by-files.yml.
name: "Auto Assign"
on:
  - pull_request
jobs:
  assign_reviewer:
    runs-on: ubuntu-latest
    steps:
      # Match the PR's changed files against the config and assign the
      # reviewers listed for each matching pattern.
      - uses: shufo/auto-assign-reviewer-by-files@v1.1.4
        with:
          config: ".github/assign-by-files.yml"
          token: ${{ secrets.GITHUB_TOKEN }}

View file

@ -21,7 +21,7 @@ jobs:
fetch-depth: 0 fetch-depth: 0
- name: Create backport PRs - name: Create backport PRs
# should be kept in sync with `version` # should be kept in sync with `version`
uses: zeebe-io/backport-action@v0.0.8 uses: zeebe-io/backport-action@v0.0.9
with: with:
# Config README: https://github.com/zeebe-io/backport-action#backport-action # Config README: https://github.com/zeebe-io/backport-action#backport-action
github_token: ${{ secrets.GITHUB_TOKEN }} github_token: ${{ secrets.GITHUB_TOKEN }}

View file

@ -21,7 +21,7 @@ jobs:
fetch-depth: 0 fetch-depth: 0
- uses: cachix/install-nix-action@v18 - uses: cachix/install-nix-action@v18
- run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
- uses: cachix/cachix-action@v10 - uses: cachix/cachix-action@v12
if: needs.check_secrets.outputs.cachix == 'true' if: needs.check_secrets.outputs.cachix == 'true'
with: with:
name: '${{ env.CACHIX_NAME }}' name: '${{ env.CACHIX_NAME }}'
@ -59,7 +59,7 @@ jobs:
fetch-depth: 0 fetch-depth: 0
- run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
- uses: cachix/install-nix-action@v18 - uses: cachix/install-nix-action@v18
- uses: cachix/cachix-action@v10 - uses: cachix/cachix-action@v12
with: with:
name: '${{ env.CACHIX_NAME }}' name: '${{ env.CACHIX_NAME }}'
signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}' signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}'
@ -105,7 +105,7 @@ jobs:
- uses: cachix/install-nix-action@v18 - uses: cachix/install-nix-action@v18
- run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
- run: echo NIX_VERSION="$(nix --experimental-features 'nix-command flakes' eval .\#default.version | tr -d \")" >> $GITHUB_ENV - run: echo NIX_VERSION="$(nix --experimental-features 'nix-command flakes' eval .\#default.version | tr -d \")" >> $GITHUB_ENV
- uses: cachix/cachix-action@v10 - uses: cachix/cachix-action@v12
if: needs.check_secrets.outputs.cachix == 'true' if: needs.check_secrets.outputs.cachix == 'true'
with: with:
name: '${{ env.CACHIX_NAME }}' name: '${{ env.CACHIX_NAME }}'

View file

@ -1,16 +1,20 @@
with builtins; builtinsDump:
with import ./utils.nix; let
showBuiltin = name:
let
inherit (builtinsDump.${name}) doc args;
in
''
<dt id="builtins-${name}">
<a href="#builtins-${name}"><code>${name} ${listArgs args}</code></a>
</dt>
<dd>
builtins: ${doc}
</dd>
'';
listArgs = args: builtins.concatStringsSep " " (map (s: "<var>${s}</var>") args);
in
with builtins; concatStringsSep "\n" (map showBuiltin (attrNames builtinsDump))
concatStrings (map
(name:
let builtin = builtins.${name}; in
"<dt id=\"builtins-${name}\"><a href=\"#builtins-${name}\"><code>${name} "
+ concatStringsSep " " (map (s: "<var>${s}</var>") builtin.args)
+ "</code></a></dt>"
+ "<dd>\n\n"
+ builtin.doc
+ "\n\n</dd>"
)
(attrNames builtins))

View file

@ -99,6 +99,7 @@ let
in [ cmd ] ++ concatMap subcommand (attrNames details.commands or {}); in [ cmd ] ++ concatMap subcommand (attrNames details.commands or {});
parsedToplevel = builtins.fromJSON toplevel; parsedToplevel = builtins.fromJSON toplevel;
manpages = processCommand { manpages = processCommand {
command = "nix"; command = "nix";
details = parsedToplevel; details = parsedToplevel;

View file

@ -11,16 +11,16 @@ concatStrings (map
+ concatStrings (map (s: " ${s}\n") (splitLines option.description)) + "\n\n" + concatStrings (map (s: " ${s}\n") (splitLines option.description)) + "\n\n"
+ (if option.documentDefault + (if option.documentDefault
then " **Default:** " + ( then " **Default:** " + (
if option.value == "" || option.value == [] if option.defaultValue == "" || option.defaultValue == []
then "*empty*" then "*empty*"
else if isBool option.value else if isBool option.defaultValue
then (if option.value then "`true`" else "`false`") then (if option.defaultValue then "`true`" else "`false`")
else else
# n.b. a StringMap value type is specified as a string, but # n.b. a StringMap value type is specified as a string, but
# this shows the value type. The empty stringmap is "null" in # this shows the value type. The empty stringmap is "null" in
# JSON, but that converts to "{ }" here. # JSON, but that converts to "{ }" here.
(if isAttrs option.value then "`\"\"`" (if isAttrs option.defaultValue then "`\"\"`"
else "`" + toString option.value + "`")) + "\n\n" else "`" + toString option.defaultValue + "`")) + "\n\n"
else " **Default:** *machine-specific*\n") else " **Default:** *machine-specific*\n")
+ (if option.aliases != [] + (if option.aliases != []
then " **Deprecated alias:** " + (concatStringsSep ", " (map (s: "`${s}`") option.aliases)) + "\n\n" then " **Deprecated alias:** " + (concatStringsSep ", " (map (s: "`${s}`") option.aliases)) + "\n\n"

View file

@ -1,330 +1,424 @@
// Redirects from old DocBook manual. // redirect rules for anchors ensure backwards compatibility of URLs.
var redirects = { // this must be done on the client side, as web servers do not see the anchor part of the URL.
"#part-advanced-topics": "advanced-topics/advanced-topics.html",
"#chap-tuning-cores-and-jobs": "advanced-topics/cores-vs-jobs.html", // redirections are declared as follows:
"#chap-diff-hook": "advanced-topics/diff-hook.html", // each entry has as its key a path matching the requested URL path, relative to the mdBook document root.
"#check-dirs-are-unregistered": "advanced-topics/diff-hook.html#check-dirs-are-unregistered", //
"#chap-distributed-builds": "advanced-topics/distributed-builds.html", // IMPORTANT: it must specify the full path with file name and suffix
"#chap-post-build-hook": "advanced-topics/post-build-hook.html", //
"#chap-post-build-hook-caveats": "advanced-topics/post-build-hook.html#implementation-caveats", // each entry is itself a set of key-value pairs, where
"#part-command-ref": "command-ref/command-ref.html", // - keys are anchors on the matched path.
"#conf-allow-import-from-derivation": "command-ref/conf-file.html#conf-allow-import-from-derivation", // - values are redirection targets relative to the current path.
"#conf-allow-new-privileges": "command-ref/conf-file.html#conf-allow-new-privileges",
"#conf-allowed-uris": "command-ref/conf-file.html#conf-allowed-uris", const redirects = {
"#conf-allowed-users": "command-ref/conf-file.html#conf-allowed-users", "index.html": {
"#conf-auto-optimise-store": "command-ref/conf-file.html#conf-auto-optimise-store", "part-advanced-topics": "advanced-topics/advanced-topics.html",
"#conf-binary-cache-public-keys": "command-ref/conf-file.html#conf-binary-cache-public-keys", "chap-tuning-cores-and-jobs": "advanced-topics/cores-vs-jobs.html",
"#conf-binary-caches": "command-ref/conf-file.html#conf-binary-caches", "chap-diff-hook": "advanced-topics/diff-hook.html",
"#conf-build-compress-log": "command-ref/conf-file.html#conf-build-compress-log", "check-dirs-are-unregistered": "advanced-topics/diff-hook.html#check-dirs-are-unregistered",
"#conf-build-cores": "command-ref/conf-file.html#conf-build-cores", "chap-distributed-builds": "advanced-topics/distributed-builds.html",
"#conf-build-extra-chroot-dirs": "command-ref/conf-file.html#conf-build-extra-chroot-dirs", "chap-post-build-hook": "advanced-topics/post-build-hook.html",
"#conf-build-extra-sandbox-paths": "command-ref/conf-file.html#conf-build-extra-sandbox-paths", "chap-post-build-hook-caveats": "advanced-topics/post-build-hook.html#implementation-caveats",
"#conf-build-fallback": "command-ref/conf-file.html#conf-build-fallback", "part-command-ref": "command-ref/command-ref.html",
"#conf-build-max-jobs": "command-ref/conf-file.html#conf-build-max-jobs", "conf-allow-import-from-derivation": "command-ref/conf-file.html#conf-allow-import-from-derivation",
"#conf-build-max-log-size": "command-ref/conf-file.html#conf-build-max-log-size", "conf-allow-new-privileges": "command-ref/conf-file.html#conf-allow-new-privileges",
"#conf-build-max-silent-time": "command-ref/conf-file.html#conf-build-max-silent-time", "conf-allowed-uris": "command-ref/conf-file.html#conf-allowed-uris",
"#conf-build-repeat": "command-ref/conf-file.html#conf-build-repeat", "conf-allowed-users": "command-ref/conf-file.html#conf-allowed-users",
"#conf-build-timeout": "command-ref/conf-file.html#conf-build-timeout", "conf-auto-optimise-store": "command-ref/conf-file.html#conf-auto-optimise-store",
"#conf-build-use-chroot": "command-ref/conf-file.html#conf-build-use-chroot", "conf-binary-cache-public-keys": "command-ref/conf-file.html#conf-binary-cache-public-keys",
"#conf-build-use-sandbox": "command-ref/conf-file.html#conf-build-use-sandbox", "conf-binary-caches": "command-ref/conf-file.html#conf-binary-caches",
"#conf-build-use-substitutes": "command-ref/conf-file.html#conf-build-use-substitutes", "conf-build-compress-log": "command-ref/conf-file.html#conf-build-compress-log",
"#conf-build-users-group": "command-ref/conf-file.html#conf-build-users-group", "conf-build-cores": "command-ref/conf-file.html#conf-build-cores",
"#conf-builders": "command-ref/conf-file.html#conf-builders", "conf-build-extra-chroot-dirs": "command-ref/conf-file.html#conf-build-extra-chroot-dirs",
"#conf-builders-use-substitutes": "command-ref/conf-file.html#conf-builders-use-substitutes", "conf-build-extra-sandbox-paths": "command-ref/conf-file.html#conf-build-extra-sandbox-paths",
"#conf-compress-build-log": "command-ref/conf-file.html#conf-compress-build-log", "conf-build-fallback": "command-ref/conf-file.html#conf-build-fallback",
"#conf-connect-timeout": "command-ref/conf-file.html#conf-connect-timeout", "conf-build-max-jobs": "command-ref/conf-file.html#conf-build-max-jobs",
"#conf-cores": "command-ref/conf-file.html#conf-cores", "conf-build-max-log-size": "command-ref/conf-file.html#conf-build-max-log-size",
"#conf-diff-hook": "command-ref/conf-file.html#conf-diff-hook", "conf-build-max-silent-time": "command-ref/conf-file.html#conf-build-max-silent-time",
"#conf-enforce-determinism": "command-ref/conf-file.html#conf-enforce-determinism", "conf-build-repeat": "command-ref/conf-file.html#conf-build-repeat",
"#conf-env-keep-derivations": "command-ref/conf-file.html#conf-env-keep-derivations", "conf-build-timeout": "command-ref/conf-file.html#conf-build-timeout",
"#conf-extra-binary-caches": "command-ref/conf-file.html#conf-extra-binary-caches", "conf-build-use-chroot": "command-ref/conf-file.html#conf-build-use-chroot",
"#conf-extra-platforms": "command-ref/conf-file.html#conf-extra-platforms", "conf-build-use-sandbox": "command-ref/conf-file.html#conf-build-use-sandbox",
"#conf-extra-sandbox-paths": "command-ref/conf-file.html#conf-extra-sandbox-paths", "conf-build-use-substitutes": "command-ref/conf-file.html#conf-build-use-substitutes",
"#conf-extra-substituters": "command-ref/conf-file.html#conf-extra-substituters", "conf-build-users-group": "command-ref/conf-file.html#conf-build-users-group",
"#conf-fallback": "command-ref/conf-file.html#conf-fallback", "conf-builders": "command-ref/conf-file.html#conf-builders",
"#conf-fsync-metadata": "command-ref/conf-file.html#conf-fsync-metadata", "conf-builders-use-substitutes": "command-ref/conf-file.html#conf-builders-use-substitutes",
"#conf-gc-keep-derivations": "command-ref/conf-file.html#conf-gc-keep-derivations", "conf-compress-build-log": "command-ref/conf-file.html#conf-compress-build-log",
"#conf-gc-keep-outputs": "command-ref/conf-file.html#conf-gc-keep-outputs", "conf-connect-timeout": "command-ref/conf-file.html#conf-connect-timeout",
"#conf-hashed-mirrors": "command-ref/conf-file.html#conf-hashed-mirrors", "conf-cores": "command-ref/conf-file.html#conf-cores",
"#conf-http-connections": "command-ref/conf-file.html#conf-http-connections", "conf-diff-hook": "command-ref/conf-file.html#conf-diff-hook",
"#conf-keep-build-log": "command-ref/conf-file.html#conf-keep-build-log", "conf-enforce-determinism": "command-ref/conf-file.html#conf-enforce-determinism",
"#conf-keep-derivations": "command-ref/conf-file.html#conf-keep-derivations", "conf-env-keep-derivations": "command-ref/conf-file.html#conf-env-keep-derivations",
"#conf-keep-env-derivations": "command-ref/conf-file.html#conf-keep-env-derivations", "conf-extra-binary-caches": "command-ref/conf-file.html#conf-extra-binary-caches",
"#conf-keep-outputs": "command-ref/conf-file.html#conf-keep-outputs", "conf-extra-platforms": "command-ref/conf-file.html#conf-extra-platforms",
"#conf-max-build-log-size": "command-ref/conf-file.html#conf-max-build-log-size", "conf-extra-sandbox-paths": "command-ref/conf-file.html#conf-extra-sandbox-paths",
"#conf-max-free": "command-ref/conf-file.html#conf-max-free", "conf-extra-substituters": "command-ref/conf-file.html#conf-extra-substituters",
"#conf-max-jobs": "command-ref/conf-file.html#conf-max-jobs", "conf-fallback": "command-ref/conf-file.html#conf-fallback",
"#conf-max-silent-time": "command-ref/conf-file.html#conf-max-silent-time", "conf-fsync-metadata": "command-ref/conf-file.html#conf-fsync-metadata",
"#conf-min-free": "command-ref/conf-file.html#conf-min-free", "conf-gc-keep-derivations": "command-ref/conf-file.html#conf-gc-keep-derivations",
"#conf-narinfo-cache-negative-ttl": "command-ref/conf-file.html#conf-narinfo-cache-negative-ttl", "conf-gc-keep-outputs": "command-ref/conf-file.html#conf-gc-keep-outputs",
"#conf-narinfo-cache-positive-ttl": "command-ref/conf-file.html#conf-narinfo-cache-positive-ttl", "conf-hashed-mirrors": "command-ref/conf-file.html#conf-hashed-mirrors",
"#conf-netrc-file": "command-ref/conf-file.html#conf-netrc-file", "conf-http-connections": "command-ref/conf-file.html#conf-http-connections",
"#conf-plugin-files": "command-ref/conf-file.html#conf-plugin-files", "conf-keep-build-log": "command-ref/conf-file.html#conf-keep-build-log",
"#conf-post-build-hook": "command-ref/conf-file.html#conf-post-build-hook", "conf-keep-derivations": "command-ref/conf-file.html#conf-keep-derivations",
"#conf-pre-build-hook": "command-ref/conf-file.html#conf-pre-build-hook", "conf-keep-env-derivations": "command-ref/conf-file.html#conf-keep-env-derivations",
"#conf-repeat": "command-ref/conf-file.html#conf-repeat", "conf-keep-outputs": "command-ref/conf-file.html#conf-keep-outputs",
"#conf-require-sigs": "command-ref/conf-file.html#conf-require-sigs", "conf-max-build-log-size": "command-ref/conf-file.html#conf-max-build-log-size",
"#conf-restrict-eval": "command-ref/conf-file.html#conf-restrict-eval", "conf-max-free": "command-ref/conf-file.html#conf-max-free",
"#conf-run-diff-hook": "command-ref/conf-file.html#conf-run-diff-hook", "conf-max-jobs": "command-ref/conf-file.html#conf-max-jobs",
"#conf-sandbox": "command-ref/conf-file.html#conf-sandbox", "conf-max-silent-time": "command-ref/conf-file.html#conf-max-silent-time",
"#conf-sandbox-dev-shm-size": "command-ref/conf-file.html#conf-sandbox-dev-shm-size", "conf-min-free": "command-ref/conf-file.html#conf-min-free",
"#conf-sandbox-paths": "command-ref/conf-file.html#conf-sandbox-paths", "conf-narinfo-cache-negative-ttl": "command-ref/conf-file.html#conf-narinfo-cache-negative-ttl",
"#conf-secret-key-files": "command-ref/conf-file.html#conf-secret-key-files", "conf-narinfo-cache-positive-ttl": "command-ref/conf-file.html#conf-narinfo-cache-positive-ttl",
"#conf-show-trace": "command-ref/conf-file.html#conf-show-trace", "conf-netrc-file": "command-ref/conf-file.html#conf-netrc-file",
"#conf-stalled-download-timeout": "command-ref/conf-file.html#conf-stalled-download-timeout", "conf-plugin-files": "command-ref/conf-file.html#conf-plugin-files",
"#conf-substitute": "command-ref/conf-file.html#conf-substitute", "conf-post-build-hook": "command-ref/conf-file.html#conf-post-build-hook",
"#conf-substituters": "command-ref/conf-file.html#conf-substituters", "conf-pre-build-hook": "command-ref/conf-file.html#conf-pre-build-hook",
"#conf-system": "command-ref/conf-file.html#conf-system", "conf-repeat": "command-ref/conf-file.html#conf-repeat",
"#conf-system-features": "command-ref/conf-file.html#conf-system-features", "conf-require-sigs": "command-ref/conf-file.html#conf-require-sigs",
"#conf-tarball-ttl": "command-ref/conf-file.html#conf-tarball-ttl", "conf-restrict-eval": "command-ref/conf-file.html#conf-restrict-eval",
"#conf-timeout": "command-ref/conf-file.html#conf-timeout", "conf-run-diff-hook": "command-ref/conf-file.html#conf-run-diff-hook",
"#conf-trace-function-calls": "command-ref/conf-file.html#conf-trace-function-calls", "conf-sandbox": "command-ref/conf-file.html#conf-sandbox",
"#conf-trusted-binary-caches": "command-ref/conf-file.html#conf-trusted-binary-caches", "conf-sandbox-dev-shm-size": "command-ref/conf-file.html#conf-sandbox-dev-shm-size",
"#conf-trusted-public-keys": "command-ref/conf-file.html#conf-trusted-public-keys", "conf-sandbox-paths": "command-ref/conf-file.html#conf-sandbox-paths",
"#conf-trusted-substituters": "command-ref/conf-file.html#conf-trusted-substituters", "conf-secret-key-files": "command-ref/conf-file.html#conf-secret-key-files",
"#conf-trusted-users": "command-ref/conf-file.html#conf-trusted-users", "conf-show-trace": "command-ref/conf-file.html#conf-show-trace",
"#extra-sandbox-paths": "command-ref/conf-file.html#extra-sandbox-paths", "conf-stalled-download-timeout": "command-ref/conf-file.html#conf-stalled-download-timeout",
"#sec-conf-file": "command-ref/conf-file.html", "conf-substitute": "command-ref/conf-file.html#conf-substitute",
"#env-NIX_PATH": "command-ref/env-common.html#env-NIX_PATH", "conf-substituters": "command-ref/conf-file.html#conf-substituters",
"#env-common": "command-ref/env-common.html", "conf-system": "command-ref/conf-file.html#conf-system",
"#envar-remote": "command-ref/env-common.html#env-NIX_REMOTE", "conf-system-features": "command-ref/conf-file.html#conf-system-features",
"#sec-common-env": "command-ref/env-common.html", "conf-tarball-ttl": "command-ref/conf-file.html#conf-tarball-ttl",
"#ch-files": "command-ref/files.html", "conf-timeout": "command-ref/conf-file.html#conf-timeout",
"#ch-main-commands": "command-ref/main-commands.html", "conf-trace-function-calls": "command-ref/conf-file.html#conf-trace-function-calls",
"#opt-out-link": "command-ref/nix-build.html#opt-out-link", "conf-trusted-binary-caches": "command-ref/conf-file.html#conf-trusted-binary-caches",
"#sec-nix-build": "command-ref/nix-build.html", "conf-trusted-public-keys": "command-ref/conf-file.html#conf-trusted-public-keys",
"#sec-nix-channel": "command-ref/nix-channel.html", "conf-trusted-substituters": "command-ref/conf-file.html#conf-trusted-substituters",
"#sec-nix-collect-garbage": "command-ref/nix-collect-garbage.html", "conf-trusted-users": "command-ref/conf-file.html#conf-trusted-users",
"#sec-nix-copy-closure": "command-ref/nix-copy-closure.html", "extra-sandbox-paths": "command-ref/conf-file.html#extra-sandbox-paths",
"#sec-nix-daemon": "command-ref/nix-daemon.html", "sec-conf-file": "command-ref/conf-file.html",
"#refsec-nix-env-install-examples": "command-ref/nix-env.html#examples", "env-NIX_PATH": "command-ref/env-common.html#env-NIX_PATH",
"#rsec-nix-env-install": "command-ref/nix-env.html#operation---install", "env-common": "command-ref/env-common.html",
"#rsec-nix-env-set": "command-ref/nix-env.html#operation---set", "envar-remote": "command-ref/env-common.html#env-NIX_REMOTE",
"#rsec-nix-env-set-flag": "command-ref/nix-env.html#operation---set-flag", "sec-common-env": "command-ref/env-common.html",
"#rsec-nix-env-upgrade": "command-ref/nix-env.html#operation---upgrade", "ch-files": "command-ref/files.html",
"#sec-nix-env": "command-ref/nix-env.html", "ch-main-commands": "command-ref/main-commands.html",
"#ssec-version-comparisons": "command-ref/nix-env.html#versions", "opt-out-link": "command-ref/nix-build.html#opt-out-link",
"#sec-nix-hash": "command-ref/nix-hash.html", "sec-nix-build": "command-ref/nix-build.html",
"#sec-nix-instantiate": "command-ref/nix-instantiate.html", "sec-nix-channel": "command-ref/nix-channel.html",
"#sec-nix-prefetch-url": "command-ref/nix-prefetch-url.html", "sec-nix-collect-garbage": "command-ref/nix-collect-garbage.html",
"#sec-nix-shell": "command-ref/nix-shell.html", "sec-nix-copy-closure": "command-ref/nix-copy-closure.html",
"#ssec-nix-shell-shebang": "command-ref/nix-shell.html#use-as-a--interpreter", "sec-nix-daemon": "command-ref/nix-daemon.html",
"#nixref-queries": "command-ref/nix-store.html#queries", "refsec-nix-env-install-examples": "command-ref/nix-env.html#examples",
"#opt-add-root": "command-ref/nix-store.html#opt-add-root", "rsec-nix-env-install": "command-ref/nix-env.html#operation---install",
"#refsec-nix-store-dump": "command-ref/nix-store.html#operation---dump", "rsec-nix-env-set": "command-ref/nix-env.html#operation---set",
"#refsec-nix-store-export": "command-ref/nix-store.html#operation---export", "rsec-nix-env-set-flag": "command-ref/nix-env.html#operation---set-flag",
"#refsec-nix-store-import": "command-ref/nix-store.html#operation---import", "rsec-nix-env-upgrade": "command-ref/nix-env.html#operation---upgrade",
"#refsec-nix-store-query": "command-ref/nix-store.html#operation---query", "sec-nix-env": "command-ref/nix-env.html",
"#refsec-nix-store-verify": "command-ref/nix-store.html#operation---verify", "ssec-version-comparisons": "command-ref/nix-env.html#versions",
"#rsec-nix-store-gc": "command-ref/nix-store.html#operation---gc", "sec-nix-hash": "command-ref/nix-hash.html",
"#rsec-nix-store-generate-binary-cache-key": "command-ref/nix-store.html#operation---generate-binary-cache-key", "sec-nix-instantiate": "command-ref/nix-instantiate.html",
"#rsec-nix-store-realise": "command-ref/nix-store.html#operation---realise", "sec-nix-prefetch-url": "command-ref/nix-prefetch-url.html",
"#rsec-nix-store-serve": "command-ref/nix-store.html#operation---serve", "sec-nix-shell": "command-ref/nix-shell.html",
"#sec-nix-store": "command-ref/nix-store.html", "ssec-nix-shell-shebang": "command-ref/nix-shell.html#use-as-a--interpreter",
"#opt-I": "command-ref/opt-common.html#opt-I", "nixref-queries": "command-ref/nix-store.html#queries",
"#opt-attr": "command-ref/opt-common.html#opt-attr", "opt-add-root": "command-ref/nix-store.html#opt-add-root",
"#opt-common": "command-ref/opt-common.html", "refsec-nix-store-dump": "command-ref/nix-store.html#operation---dump",
"#opt-cores": "command-ref/opt-common.html#opt-cores", "refsec-nix-store-export": "command-ref/nix-store.html#operation---export",
"#opt-log-format": "command-ref/opt-common.html#opt-log-format", "refsec-nix-store-import": "command-ref/nix-store.html#operation---import",
"#opt-max-jobs": "command-ref/opt-common.html#opt-max-jobs", "refsec-nix-store-query": "command-ref/nix-store.html#operation---query",
"#opt-max-silent-time": "command-ref/opt-common.html#opt-max-silent-time", "refsec-nix-store-verify": "command-ref/nix-store.html#operation---verify",
"#opt-timeout": "command-ref/opt-common.html#opt-timeout", "rsec-nix-store-gc": "command-ref/nix-store.html#operation---gc",
"#sec-common-options": "command-ref/opt-common.html", "rsec-nix-store-generate-binary-cache-key": "command-ref/nix-store.html#operation---generate-binary-cache-key",
"#ch-utilities": "command-ref/utilities.html", "rsec-nix-store-realise": "command-ref/nix-store.html#operation---realise",
"#chap-hacking": "contributing/hacking.html", "rsec-nix-store-serve": "command-ref/nix-store.html#operation---serve",
"#adv-attr-allowSubstitutes": "language/advanced-attributes.html#adv-attr-allowSubstitutes", "sec-nix-store": "command-ref/nix-store.html",
"#adv-attr-allowedReferences": "language/advanced-attributes.html#adv-attr-allowedReferences", "opt-I": "command-ref/opt-common.html#opt-I",
"#adv-attr-allowedRequisites": "language/advanced-attributes.html#adv-attr-allowedRequisites", "opt-attr": "command-ref/opt-common.html#opt-attr",
"#adv-attr-disallowedReferences": "language/advanced-attributes.html#adv-attr-disallowedReferences", "opt-common": "command-ref/opt-common.html",
"#adv-attr-disallowedRequisites": "language/advanced-attributes.html#adv-attr-disallowedRequisites", "opt-cores": "command-ref/opt-common.html#opt-cores",
"#adv-attr-exportReferencesGraph": "language/advanced-attributes.html#adv-attr-exportReferencesGraph", "opt-log-format": "command-ref/opt-common.html#opt-log-format",
"#adv-attr-impureEnvVars": "language/advanced-attributes.html#adv-attr-impureEnvVars", "opt-max-jobs": "command-ref/opt-common.html#opt-max-jobs",
"#adv-attr-outputHash": "language/advanced-attributes.html#adv-attr-outputHash", "opt-max-silent-time": "command-ref/opt-common.html#opt-max-silent-time",
"#adv-attr-outputHashAlgo": "language/advanced-attributes.html#adv-attr-outputHashAlgo", "opt-timeout": "command-ref/opt-common.html#opt-timeout",
"#adv-attr-outputHashMode": "language/advanced-attributes.html#adv-attr-outputHashMode", "sec-common-options": "command-ref/opt-common.html",
"#adv-attr-passAsFile": "language/advanced-attributes.html#adv-attr-passAsFile", "ch-utilities": "command-ref/utilities.html",
"#adv-attr-preferLocalBuild": "language/advanced-attributes.html#adv-attr-preferLocalBuild", "chap-hacking": "contributing/hacking.html",
"#fixed-output-drvs": "language/advanced-attributes.html#adv-attr-outputHash", "adv-attr-allowSubstitutes": "language/advanced-attributes.html#adv-attr-allowSubstitutes",
"#sec-advanced-attributes": "language/advanced-attributes.html", "adv-attr-allowedReferences": "language/advanced-attributes.html#adv-attr-allowedReferences",
"#builtin-abort": "language/builtins.html#builtins-abort", "adv-attr-allowedRequisites": "language/advanced-attributes.html#adv-attr-allowedRequisites",
"#builtin-add": "language/builtins.html#builtins-add", "adv-attr-disallowedReferences": "language/advanced-attributes.html#adv-attr-disallowedReferences",
"#builtin-all": "language/builtins.html#builtins-all", "adv-attr-disallowedRequisites": "language/advanced-attributes.html#adv-attr-disallowedRequisites",
"#builtin-any": "language/builtins.html#builtins-any", "adv-attr-exportReferencesGraph": "language/advanced-attributes.html#adv-attr-exportReferencesGraph",
"#builtin-attrNames": "language/builtins.html#builtins-attrNames", "adv-attr-impureEnvVars": "language/advanced-attributes.html#adv-attr-impureEnvVars",
"#builtin-attrValues": "language/builtins.html#builtins-attrValues", "adv-attr-outputHash": "language/advanced-attributes.html#adv-attr-outputHash",
"#builtin-baseNameOf": "language/builtins.html#builtins-baseNameOf", "adv-attr-outputHashAlgo": "language/advanced-attributes.html#adv-attr-outputHashAlgo",
"#builtin-bitAnd": "language/builtins.html#builtins-bitAnd", "adv-attr-outputHashMode": "language/advanced-attributes.html#adv-attr-outputHashMode",
"#builtin-bitOr": "language/builtins.html#builtins-bitOr", "adv-attr-passAsFile": "language/advanced-attributes.html#adv-attr-passAsFile",
"#builtin-bitXor": "language/builtins.html#builtins-bitXor", "adv-attr-preferLocalBuild": "language/advanced-attributes.html#adv-attr-preferLocalBuild",
"#builtin-builtins": "language/builtins.html#builtins-builtins", "fixed-output-drvs": "language/advanced-attributes.html#adv-attr-outputHash",
"#builtin-compareVersions": "language/builtins.html#builtins-compareVersions", "sec-advanced-attributes": "language/advanced-attributes.html",
"#builtin-concatLists": "language/builtins.html#builtins-concatLists", "builtin-abort": "language/builtins.html#builtins-abort",
"#builtin-concatStringsSep": "language/builtins.html#builtins-concatStringsSep", "builtin-add": "language/builtins.html#builtins-add",
"#builtin-currentSystem": "language/builtins.html#builtins-currentSystem", "builtin-all": "language/builtins.html#builtins-all",
"#builtin-deepSeq": "language/builtins.html#builtins-deepSeq", "builtin-any": "language/builtins.html#builtins-any",
"#builtin-derivation": "language/builtins.html#builtins-derivation", "builtin-attrNames": "language/builtins.html#builtins-attrNames",
"#builtin-dirOf": "language/builtins.html#builtins-dirOf", "builtin-attrValues": "language/builtins.html#builtins-attrValues",
"#builtin-div": "language/builtins.html#builtins-div", "builtin-baseNameOf": "language/builtins.html#builtins-baseNameOf",
"#builtin-elem": "language/builtins.html#builtins-elem", "builtin-bitAnd": "language/builtins.html#builtins-bitAnd",
"#builtin-elemAt": "language/builtins.html#builtins-elemAt", "builtin-bitOr": "language/builtins.html#builtins-bitOr",
"#builtin-fetchGit": "language/builtins.html#builtins-fetchGit", "builtin-bitXor": "language/builtins.html#builtins-bitXor",
"#builtin-fetchTarball": "language/builtins.html#builtins-fetchTarball", "builtin-builtins": "language/builtins.html#builtins-builtins",
"#builtin-fetchurl": "language/builtins.html#builtins-fetchurl", "builtin-compareVersions": "language/builtins.html#builtins-compareVersions",
"#builtin-filterSource": "language/builtins.html#builtins-filterSource", "builtin-concatLists": "language/builtins.html#builtins-concatLists",
"#builtin-foldl-prime": "language/builtins.html#builtins-foldl-prime", "builtin-concatStringsSep": "language/builtins.html#builtins-concatStringsSep",
"#builtin-fromJSON": "language/builtins.html#builtins-fromJSON", "builtin-currentSystem": "language/builtins.html#builtins-currentSystem",
"#builtin-functionArgs": "language/builtins.html#builtins-functionArgs", "builtin-deepSeq": "language/builtins.html#builtins-deepSeq",
"#builtin-genList": "language/builtins.html#builtins-genList", "builtin-derivation": "language/builtins.html#builtins-derivation",
"#builtin-getAttr": "language/builtins.html#builtins-getAttr", "builtin-dirOf": "language/builtins.html#builtins-dirOf",
"#builtin-getEnv": "language/builtins.html#builtins-getEnv", "builtin-div": "language/builtins.html#builtins-div",
"#builtin-hasAttr": "language/builtins.html#builtins-hasAttr", "builtin-elem": "language/builtins.html#builtins-elem",
"#builtin-hashFile": "language/builtins.html#builtins-hashFile", "builtin-elemAt": "language/builtins.html#builtins-elemAt",
"#builtin-hashString": "language/builtins.html#builtins-hashString", "builtin-fetchGit": "language/builtins.html#builtins-fetchGit",
"#builtin-head": "language/builtins.html#builtins-head", "builtin-fetchTarball": "language/builtins.html#builtins-fetchTarball",
"#builtin-import": "language/builtins.html#builtins-import", "builtin-fetchurl": "language/builtins.html#builtins-fetchurl",
"#builtin-intersectAttrs": "language/builtins.html#builtins-intersectAttrs", "builtin-filterSource": "language/builtins.html#builtins-filterSource",
"#builtin-isAttrs": "language/builtins.html#builtins-isAttrs", "builtin-foldl-prime": "language/builtins.html#builtins-foldl-prime",
"#builtin-isBool": "language/builtins.html#builtins-isBool", "builtin-fromJSON": "language/builtins.html#builtins-fromJSON",
"#builtin-isFloat": "language/builtins.html#builtins-isFloat", "builtin-functionArgs": "language/builtins.html#builtins-functionArgs",
"#builtin-isFunction": "language/builtins.html#builtins-isFunction", "builtin-genList": "language/builtins.html#builtins-genList",
"#builtin-isInt": "language/builtins.html#builtins-isInt", "builtin-getAttr": "language/builtins.html#builtins-getAttr",
"#builtin-isList": "language/builtins.html#builtins-isList", "builtin-getEnv": "language/builtins.html#builtins-getEnv",
"#builtin-isNull": "language/builtins.html#builtins-isNull", "builtin-hasAttr": "language/builtins.html#builtins-hasAttr",
"#builtin-isString": "language/builtins.html#builtins-isString", "builtin-hashFile": "language/builtins.html#builtins-hashFile",
"#builtin-length": "language/builtins.html#builtins-length", "builtin-hashString": "language/builtins.html#builtins-hashString",
"#builtin-lessThan": "language/builtins.html#builtins-lessThan", "builtin-head": "language/builtins.html#builtins-head",
"#builtin-listToAttrs": "language/builtins.html#builtins-listToAttrs", "builtin-import": "language/builtins.html#builtins-import",
"#builtin-map": "language/builtins.html#builtins-map", "builtin-intersectAttrs": "language/builtins.html#builtins-intersectAttrs",
"#builtin-match": "language/builtins.html#builtins-match", "builtin-isAttrs": "language/builtins.html#builtins-isAttrs",
"#builtin-mul": "language/builtins.html#builtins-mul", "builtin-isBool": "language/builtins.html#builtins-isBool",
"#builtin-parseDrvName": "language/builtins.html#builtins-parseDrvName", "builtin-isFloat": "language/builtins.html#builtins-isFloat",
"#builtin-path": "language/builtins.html#builtins-path", "builtin-isFunction": "language/builtins.html#builtins-isFunction",
"#builtin-pathExists": "language/builtins.html#builtins-pathExists", "builtin-isInt": "language/builtins.html#builtins-isInt",
"#builtin-placeholder": "language/builtins.html#builtins-placeholder", "builtin-isList": "language/builtins.html#builtins-isList",
"#builtin-readDir": "language/builtins.html#builtins-readDir", "builtin-isNull": "language/builtins.html#builtins-isNull",
"#builtin-readFile": "language/builtins.html#builtins-readFile", "builtin-isString": "language/builtins.html#builtins-isString",
"#builtin-removeAttrs": "language/builtins.html#builtins-removeAttrs", "builtin-length": "language/builtins.html#builtins-length",
"#builtin-replaceStrings": "language/builtins.html#builtins-replaceStrings", "builtin-lessThan": "language/builtins.html#builtins-lessThan",
"#builtin-seq": "language/builtins.html#builtins-seq", "builtin-listToAttrs": "language/builtins.html#builtins-listToAttrs",
"#builtin-sort": "language/builtins.html#builtins-sort", "builtin-map": "language/builtins.html#builtins-map",
"#builtin-split": "language/builtins.html#builtins-split", "builtin-match": "language/builtins.html#builtins-match",
"#builtin-splitVersion": "language/builtins.html#builtins-splitVersion", "builtin-mul": "language/builtins.html#builtins-mul",
"#builtin-stringLength": "language/builtins.html#builtins-stringLength", "builtin-parseDrvName": "language/builtins.html#builtins-parseDrvName",
"#builtin-sub": "language/builtins.html#builtins-sub", "builtin-path": "language/builtins.html#builtins-path",
"#builtin-substring": "language/builtins.html#builtins-substring", "builtin-pathExists": "language/builtins.html#builtins-pathExists",
"#builtin-tail": "language/builtins.html#builtins-tail", "builtin-placeholder": "language/builtins.html#builtins-placeholder",
"#builtin-throw": "language/builtins.html#builtins-throw", "builtin-readDir": "language/builtins.html#builtins-readDir",
"#builtin-toFile": "language/builtins.html#builtins-toFile", "builtin-readFile": "language/builtins.html#builtins-readFile",
"#builtin-toJSON": "language/builtins.html#builtins-toJSON", "builtin-removeAttrs": "language/builtins.html#builtins-removeAttrs",
"#builtin-toPath": "language/builtins.html#builtins-toPath", "builtin-replaceStrings": "language/builtins.html#builtins-replaceStrings",
"#builtin-toString": "language/builtins.html#builtins-toString", "builtin-seq": "language/builtins.html#builtins-seq",
"#builtin-toXML": "language/builtins.html#builtins-toXML", "builtin-sort": "language/builtins.html#builtins-sort",
"#builtin-trace": "language/builtins.html#builtins-trace", "builtin-split": "language/builtins.html#builtins-split",
"#builtin-tryEval": "language/builtins.html#builtins-tryEval", "builtin-splitVersion": "language/builtins.html#builtins-splitVersion",
"#builtin-typeOf": "language/builtins.html#builtins-typeOf", "builtin-stringLength": "language/builtins.html#builtins-stringLength",
"#ssec-builtins": "language/builtins.html", "builtin-sub": "language/builtins.html#builtins-sub",
"#attr-system": "language/derivations.html#attr-system", "builtin-substring": "language/builtins.html#builtins-substring",
"#ssec-derivation": "language/derivations.html", "builtin-tail": "language/builtins.html#builtins-tail",
"#ch-expression-language": "language/index.html", "builtin-throw": "language/builtins.html#builtins-throw",
"#sec-constructs": "language/constructs.html", "builtin-toFile": "language/builtins.html#builtins-toFile",
"#sect-let-language": "language/constructs.html#let-language", "builtin-toJSON": "language/builtins.html#builtins-toJSON",
"#ss-functions": "language/constructs.html#functions", "builtin-toPath": "language/builtins.html#builtins-toPath",
"#sec-language-operators": "language/operators.html", "builtin-toString": "language/builtins.html#builtins-toString",
"#table-operators": "language/operators.html", "builtin-toXML": "language/builtins.html#builtins-toXML",
"#ssec-values": "language/values.html", "builtin-trace": "language/builtins.html#builtins-trace",
"#gloss-closure": "glossary.html#gloss-closure", "builtin-tryEval": "language/builtins.html#builtins-tryEval",
"#gloss-derivation": "glossary.html#gloss-derivation", "builtin-typeOf": "language/builtins.html#builtins-typeOf",
"#gloss-deriver": "glossary.html#gloss-deriver", "ssec-builtins": "language/builtins.html",
"#gloss-nar": "glossary.html#gloss-nar", "attr-system": "language/derivations.html#attr-system",
"#gloss-output-path": "glossary.html#gloss-output-path", "ssec-derivation": "language/derivations.html",
"#gloss-profile": "glossary.html#gloss-profile", "ch-expression-language": "language/index.html",
"#gloss-reachable": "glossary.html#gloss-reachable", "sec-constructs": "language/constructs.html",
"#gloss-reference": "glossary.html#gloss-reference", "sect-let-language": "language/constructs.html#let-language",
"#gloss-substitute": "glossary.html#gloss-substitute", "ss-functions": "language/constructs.html#functions",
"#gloss-user-env": "glossary.html#gloss-user-env", "sec-language-operators": "language/operators.html",
"#gloss-validity": "glossary.html#gloss-validity", "table-operators": "language/operators.html",
"#part-glossary": "glossary.html", "ssec-values": "language/values.html",
"#sec-building-source": "installation/building-source.html", "gloss-closure": "glossary.html#gloss-closure",
"#ch-env-variables": "installation/env-variables.html", "gloss-derivation": "glossary.html#gloss-derivation",
"#sec-installer-proxy-settings": "installation/env-variables.html#proxy-environment-variables", "gloss-deriver": "glossary.html#gloss-deriver",
"#sec-nix-ssl-cert-file": "installation/env-variables.html#nix_ssl_cert_file", "gloss-nar": "glossary.html#gloss-nar",
"#sec-nix-ssl-cert-file-with-nix-daemon-and-macos": "installation/env-variables.html#nix_ssl_cert_file-with-macos-and-the-nix-daemon", "gloss-output-path": "glossary.html#gloss-output-path",
"#chap-installation": "installation/installation.html", "gloss-profile": "glossary.html#gloss-profile",
"#ch-installing-binary": "installation/installing-binary.html", "gloss-reachable": "glossary.html#gloss-reachable",
"#sect-macos-installation": "installation/installing-binary.html#macos-installation", "gloss-reference": "glossary.html#gloss-reference",
"#sect-macos-installation-change-store-prefix": "installation/installing-binary.html#macos-installation", "gloss-substitute": "glossary.html#gloss-substitute",
"#sect-macos-installation-encrypted-volume": "installation/installing-binary.html#macos-installation", "gloss-user-env": "glossary.html#gloss-user-env",
"#sect-macos-installation-recommended-notes": "installation/installing-binary.html#macos-installation", "gloss-validity": "glossary.html#gloss-validity",
"#sect-macos-installation-symlink": "installation/installing-binary.html#macos-installation", "part-glossary": "glossary.html",
"#sect-multi-user-installation": "installation/installing-binary.html#multi-user-installation", "sec-building-source": "installation/building-source.html",
"#sect-nix-install-binary-tarball": "installation/installing-binary.html#installing-from-a-binary-tarball", "ch-env-variables": "installation/env-variables.html",
"#sect-nix-install-pinned-version-url": "installation/installing-binary.html#installing-a-pinned-nix-version-from-a-url", "sec-installer-proxy-settings": "installation/env-variables.html#proxy-environment-variables",
"#sect-single-user-installation": "installation/installing-binary.html#single-user-installation", "sec-nix-ssl-cert-file": "installation/env-variables.html#nix_ssl_cert_file",
"#ch-installing-source": "installation/installing-source.html", "sec-nix-ssl-cert-file-with-nix-daemon-and-macos": "installation/env-variables.html#nix_ssl_cert_file-with-macos-and-the-nix-daemon",
"#ssec-multi-user": "installation/multi-user.html", "chap-installation": "installation/installation.html",
"#ch-nix-security": "installation/nix-security.html", "ch-installing-binary": "installation/installing-binary.html",
"#sec-obtaining-source": "installation/obtaining-source.html", "sect-macos-installation": "installation/installing-binary.html#macos-installation",
"#sec-prerequisites-source": "installation/prerequisites-source.html", "sect-macos-installation-change-store-prefix": "installation/installing-binary.html#macos-installation",
"#sec-single-user": "installation/single-user.html", "sect-macos-installation-encrypted-volume": "installation/installing-binary.html#macos-installation",
"#ch-supported-platforms": "installation/supported-platforms.html", "sect-macos-installation-recommended-notes": "installation/installing-binary.html#macos-installation",
"#ch-upgrading-nix": "installation/upgrading.html", "sect-macos-installation-symlink": "installation/installing-binary.html#macos-installation",
"#ch-about-nix": "introduction.html", "sect-multi-user-installation": "installation/installing-binary.html#multi-user-installation",
"#chap-introduction": "introduction.html", "sect-nix-install-binary-tarball": "installation/installing-binary.html#installing-from-a-binary-tarball",
"#ch-basic-package-mgmt": "package-management/basic-package-mgmt.html", "sect-nix-install-pinned-version-url": "installation/installing-binary.html#installing-a-pinned-nix-version-from-a-url",
"#ssec-binary-cache-substituter": "package-management/binary-cache-substituter.html", "sect-single-user-installation": "installation/installing-binary.html#single-user-installation",
"#sec-channels": "package-management/channels.html", "ch-installing-source": "installation/installing-source.html",
"#ssec-copy-closure": "package-management/copy-closure.html", "ssec-multi-user": "installation/multi-user.html",
"#sec-garbage-collection": "package-management/garbage-collection.html", "ch-nix-security": "installation/nix-security.html",
"#ssec-gc-roots": "package-management/garbage-collector-roots.html", "sec-obtaining-source": "installation/obtaining-source.html",
"#chap-package-management": "package-management/package-management.html", "sec-prerequisites-source": "installation/prerequisites-source.html",
"#sec-profiles": "package-management/profiles.html", "sec-single-user": "installation/single-user.html",
"#ssec-s3-substituter": "package-management/s3-substituter.html", "ch-supported-platforms": "installation/supported-platforms.html",
"#ssec-s3-substituter-anonymous-reads": "package-management/s3-substituter.html#anonymous-reads-to-your-s3-compatible-binary-cache", "ch-upgrading-nix": "installation/upgrading.html",
"#ssec-s3-substituter-authenticated-reads": "package-management/s3-substituter.html#authenticated-reads-to-your-s3-binary-cache", "ch-about-nix": "introduction.html",
"#ssec-s3-substituter-authenticated-writes": "package-management/s3-substituter.html#authenticated-writes-to-your-s3-compatible-binary-cache", "chap-introduction": "introduction.html",
"#sec-sharing-packages": "package-management/sharing-packages.html", "ch-basic-package-mgmt": "package-management/basic-package-mgmt.html",
"#ssec-ssh-substituter": "package-management/ssh-substituter.html", "ssec-binary-cache-substituter": "package-management/binary-cache-substituter.html",
"#chap-quick-start": "quick-start.html", "sec-channels": "package-management/channels.html",
"#sec-relnotes": "release-notes/release-notes.html", "ssec-copy-closure": "package-management/copy-closure.html",
"#ch-relnotes-0.10.1": "release-notes/rl-0.10.1.html", "sec-garbage-collection": "package-management/garbage-collection.html",
"#ch-relnotes-0.10": "release-notes/rl-0.10.html", "ssec-gc-roots": "package-management/garbage-collector-roots.html",
"#ssec-relnotes-0.11": "release-notes/rl-0.11.html", "chap-package-management": "package-management/package-management.html",
"#ssec-relnotes-0.12": "release-notes/rl-0.12.html", "sec-profiles": "package-management/profiles.html",
"#ssec-relnotes-0.13": "release-notes/rl-0.13.html", "ssec-s3-substituter": "package-management/s3-substituter.html",
"#ssec-relnotes-0.14": "release-notes/rl-0.14.html", "ssec-s3-substituter-anonymous-reads": "package-management/s3-substituter.html#anonymous-reads-to-your-s3-compatible-binary-cache",
"#ssec-relnotes-0.15": "release-notes/rl-0.15.html", "ssec-s3-substituter-authenticated-reads": "package-management/s3-substituter.html#authenticated-reads-to-your-s3-binary-cache",
"#ssec-relnotes-0.16": "release-notes/rl-0.16.html", "ssec-s3-substituter-authenticated-writes": "package-management/s3-substituter.html#authenticated-writes-to-your-s3-compatible-binary-cache",
"#ch-relnotes-0.5": "release-notes/rl-0.5.html", "sec-sharing-packages": "package-management/sharing-packages.html",
"#ch-relnotes-0.6": "release-notes/rl-0.6.html", "ssec-ssh-substituter": "package-management/ssh-substituter.html",
"#ch-relnotes-0.7": "release-notes/rl-0.7.html", "chap-quick-start": "quick-start.html",
"#ch-relnotes-0.8.1": "release-notes/rl-0.8.1.html", "sec-relnotes": "release-notes/release-notes.html",
"#ch-relnotes-0.8": "release-notes/rl-0.8.html", "ch-relnotes-0.10.1": "release-notes/rl-0.10.1.html",
"#ch-relnotes-0.9.1": "release-notes/rl-0.9.1.html", "ch-relnotes-0.10": "release-notes/rl-0.10.html",
"#ch-relnotes-0.9.2": "release-notes/rl-0.9.2.html", "ssec-relnotes-0.11": "release-notes/rl-0.11.html",
"#ch-relnotes-0.9": "release-notes/rl-0.9.html", "ssec-relnotes-0.12": "release-notes/rl-0.12.html",
"#ssec-relnotes-1.0": "release-notes/rl-1.0.html", "ssec-relnotes-0.13": "release-notes/rl-0.13.html",
"#ssec-relnotes-1.1": "release-notes/rl-1.1.html", "ssec-relnotes-0.14": "release-notes/rl-0.14.html",
"#ssec-relnotes-1.10": "release-notes/rl-1.10.html", "ssec-relnotes-0.15": "release-notes/rl-0.15.html",
"#ssec-relnotes-1.11.10": "release-notes/rl-1.11.10.html", "ssec-relnotes-0.16": "release-notes/rl-0.16.html",
"#ssec-relnotes-1.11": "release-notes/rl-1.11.html", "ch-relnotes-0.5": "release-notes/rl-0.5.html",
"#ssec-relnotes-1.2": "release-notes/rl-1.2.html", "ch-relnotes-0.6": "release-notes/rl-0.6.html",
"#ssec-relnotes-1.3": "release-notes/rl-1.3.html", "ch-relnotes-0.7": "release-notes/rl-0.7.html",
"#ssec-relnotes-1.4": "release-notes/rl-1.4.html", "ch-relnotes-0.8.1": "release-notes/rl-0.8.1.html",
"#ssec-relnotes-1.5.1": "release-notes/rl-1.5.1.html", "ch-relnotes-0.8": "release-notes/rl-0.8.html",
"#ssec-relnotes-1.5.2": "release-notes/rl-1.5.2.html", "ch-relnotes-0.9.1": "release-notes/rl-0.9.1.html",
"#ssec-relnotes-1.5": "release-notes/rl-1.5.html", "ch-relnotes-0.9.2": "release-notes/rl-0.9.2.html",
"#ssec-relnotes-1.6.1": "release-notes/rl-1.6.1.html", "ch-relnotes-0.9": "release-notes/rl-0.9.html",
"#ssec-relnotes-1.6.0": "release-notes/rl-1.6.html", "ssec-relnotes-1.0": "release-notes/rl-1.0.html",
"#ssec-relnotes-1.7": "release-notes/rl-1.7.html", "ssec-relnotes-1.1": "release-notes/rl-1.1.html",
"#ssec-relnotes-1.8": "release-notes/rl-1.8.html", "ssec-relnotes-1.10": "release-notes/rl-1.10.html",
"#ssec-relnotes-1.9": "release-notes/rl-1.9.html", "ssec-relnotes-1.11.10": "release-notes/rl-1.11.10.html",
"#ssec-relnotes-2.0": "release-notes/rl-2.0.html", "ssec-relnotes-1.11": "release-notes/rl-1.11.html",
"#ssec-relnotes-2.1": "release-notes/rl-2.1.html", "ssec-relnotes-1.2": "release-notes/rl-1.2.html",
"#ssec-relnotes-2.2": "release-notes/rl-2.2.html", "ssec-relnotes-1.3": "release-notes/rl-1.3.html",
"#ssec-relnotes-2.3": "release-notes/rl-2.3.html" "ssec-relnotes-1.4": "release-notes/rl-1.4.html",
"ssec-relnotes-1.5.1": "release-notes/rl-1.5.1.html",
"ssec-relnotes-1.5.2": "release-notes/rl-1.5.2.html",
"ssec-relnotes-1.5": "release-notes/rl-1.5.html",
"ssec-relnotes-1.6.1": "release-notes/rl-1.6.1.html",
"ssec-relnotes-1.6.0": "release-notes/rl-1.6.html",
"ssec-relnotes-1.7": "release-notes/rl-1.7.html",
"ssec-relnotes-1.8": "release-notes/rl-1.8.html",
"ssec-relnotes-1.9": "release-notes/rl-1.9.html",
"ssec-relnotes-2.0": "release-notes/rl-2.0.html",
"ssec-relnotes-2.1": "release-notes/rl-2.1.html",
"ssec-relnotes-2.2": "release-notes/rl-2.2.html",
"ssec-relnotes-2.3": "release-notes/rl-2.3.html"
},
"language/values.html": {
"simple-values": "#primitives",
"lists": "#list",
"strings": "#string",
"lists": "#list",
"attribute-sets": "#attribute-set"
}
}; };
var isRoot = (document.location.pathname.endsWith('/') || document.location.pathname.endsWith('/index.html')) && path_to_root === ''; // the following code matches the current page's URL against the set of redirects.
if (isRoot && redirects[document.location.hash]) { //
document.location.href = path_to_root + redirects[document.location.hash]; // it is written to minimize the latency between page load and redirect.
// therefore we avoid function calls, copying data, and unnecessary loops.
// IMPORTANT: we use stateful array operations and their order matters!
//
// matching URLs is more involved than it should be:
//
// 1. `document.location.pathname` can have an arbitrary prefix.
//
// 2. `path_to_root` is set by mdBook. it consists only of `../`s and
// determines the depth of `<path>` relative to the prefix:
//
// `document.location.pathname`
// |------------------------------|
// /<prefix>/<path>/[<file>[.html]][#<anchor>]
// |----|
// `path_to_root` has same number of path segments
//
// source: https://phaiax.github.io/mdBook/format/theme/index-hbs.html#data
//
// 3. the following paths are equivalent:
//
// /foo/bar/
// /foo/bar/index.html
// /foo/bar/index
//
// 4. the following paths are also equivalent:
//
// /foo/bar/baz
// /foo/bar/baz.html
//
let segments = document.location.pathname.split('/');
let file = segments.pop();
// normalize file name
if (file === '') { file = "index.html"; }
else if (!file.endsWith('.html')) { file = file + '.html'; }
segments.push(file);
// use `path_to_root` to discern prefix from path.
const depth = path_to_root.split('/').length;
// remove segments containing prefix. the following works because
// 1. the original `document.location.pathname` is absolute,
// hence first element of `segments` is always empty.
// 2. last element of splitting `path_to_root` is also always empty.
// 3. last element of `segments` is the file name.
//
// visual example:
//
// '/foo/bar/baz.html'.split('/') -> [ '', 'foo', 'bar', 'baz.html' ]
// '../'.split('/') -> [ '..', '' ]
//
// the following operations will then result in
//
// path = 'bar/baz.html'
//
segments.splice(0, segments.length - depth);
const path = segments.join('/');
// anchor starts with the hash character (`#`),
// but our redirect declarations don't, so we strip it.
// example:
// document.location.hash -> '#foo'
// document.location.hash.substring(1) -> 'foo'
const anchor = document.location.hash.substring(1);
const redirect = redirects[path];
if (redirect) {
const target = redirect[anchor];
if (target) {
document.location.href = target;
}
} }

View file

@ -8,6 +8,6 @@
# Description # Description
The Nix daemon is necessary in multi-user Nix installations. It performs The Nix daemon is necessary in multi-user Nix installations. It runs
build actions and other operations on the Nix store on behalf of build tasks and other operations on the Nix store on behalf of
unprivileged users. unprivileged users.

View file

@ -71,7 +71,7 @@ paths. Realisation is a somewhat overloaded term:
outputs are already valid, in which case we are done outputs are already valid, in which case we are done
immediately. Otherwise, there may be [substitutes](../glossary.md) immediately. Otherwise, there may be [substitutes](../glossary.md)
that produce the outputs (e.g., by downloading them). Finally, the that produce the outputs (e.g., by downloading them). Finally, the
outputs can be produced by performing the build action described outputs can be produced by running the build task described
by the derivation. by the derivation.
- If the store path is not a derivation, realisation ensures that the - If the store path is not a derivation, realisation ensures that the

View file

@ -1,7 +1,7 @@
# Glossary # Glossary
- [derivation]{#gloss-derivation}\ - [derivation]{#gloss-derivation}\
A description of a build action. The result of a derivation is a A description of a build task. The result of a derivation is a
store object. Derivations are typically specified in Nix expressions store object. Derivations are typically specified in Nix expressions
using the [`derivation` primitive](language/derivations.md). These are using the [`derivation` primitive](language/derivations.md). These are
translated into low-level *store derivations* (implicitly by translated into low-level *store derivations* (implicitly by
@ -53,8 +53,8 @@
A file that is an immediate child of the Nix store directory. These A file that is an immediate child of the Nix store directory. These
can be regular files, but also entire directory trees. Store objects can be regular files, but also entire directory trees. Store objects
can be sources (objects copied from outside of the store), can be sources (objects copied from outside of the store),
derivation outputs (objects produced by running a build action), or derivation outputs (objects produced by running a build task), or
derivations (files describing a build action). derivations (files describing a build task).
- [input-addressed store object]{#gloss-input-addressed-store-object}\ - [input-addressed store object]{#gloss-input-addressed-store-object}\
A store object produced by building a A store object produced by building a

View file

@ -88,10 +88,24 @@ extension. The installer will also create `/etc/profile.d/nix.sh`.
### Linux ### Linux
```console Remove files created by Nix:
sudo rm -rf /etc/profile/nix.sh /etc/nix /nix ~root/.nix-profile ~root/.nix-defexpr ~root/.nix-channels ~/.nix-profile ~/.nix-defexpr ~/.nix-channels
# If you are on Linux with systemd, you will need to run: ```console
sudo rm -rf /nix /etc/nix /etc/profile/nix.sh ~root/.nix-profile ~root/.nix-defexpr ~root/.nix-channels ~/.nix-profile ~/.nix-defexpr ~/.nix-channels
```
Remove build users and their group:
```console
for i in $(seq 30001 30032); do
sudo userdel $i
done
sudo groupdel 30000
```
If you are on Linux with systemd, remove the Nix daemon service:
```console
sudo systemctl stop nix-daemon.socket sudo systemctl stop nix-daemon.socket
sudo systemctl stop nix-daemon.service sudo systemctl stop nix-daemon.service
sudo systemctl disable nix-daemon.socket sudo systemctl disable nix-daemon.socket
@ -99,8 +113,13 @@ sudo systemctl disable nix-daemon.service
sudo systemctl daemon-reload sudo systemctl daemon-reload
``` ```
There may also be references to Nix in `/etc/profile`, `/etc/bashrc`, There may also be references to Nix in
and `/etc/zshrc` which you may remove.
- `/etc/profile`
- `/etc/bashrc`
- `/etc/zshrc`
which you may remove.
### macOS ### macOS

View file

@ -104,7 +104,7 @@ a currently running program.
Packages are built from _Nix expressions_, which is a simple Packages are built from _Nix expressions_, which is a simple
functional language. A Nix expression describes everything that goes functional language. A Nix expression describes everything that goes
into a package build action (a “derivation”): other packages, sources, into a package build task (a “derivation”): other packages, sources,
the build script, environment variables for the build script, etc. the build script, environment variables for the build script, etc.
Nix tries very hard to ensure that Nix expressions are Nix tries very hard to ensure that Nix expressions are
_deterministic_: building a Nix expression twice should yield the same _deterministic_: building a Nix expression twice should yield the same

View file

@ -1,7 +1,7 @@
# Derivations # Derivations
The most important built-in function is `derivation`, which is used to The most important built-in function is `derivation`, which is used to
describe a single derivation (a build action). It takes as input a set, describe a single derivation (a build task). It takes as input a set,
the attributes of which specify the inputs of the build. the attributes of which specify the inputs of the build.
- There must be an attribute named [`system`]{#attr-system} whose value must be a - There must be an attribute named [`system`]{#attr-system} whose value must be a

View file

@ -31,3 +31,551 @@ The Nix language is
Type errors are only detected when expressions are evaluated. Type errors are only detected when expressions are evaluated.
# Overview
This is an incomplete overview of language features, by example.
<table>
<tr>
<th>
Example
</th>
<th>
Description
</th>
</tr>
<tr>
<td>
*Basic values*
</td>
<td>
</td>
</tr>
<tr>
<td>
`"hello world"`
</td>
<td>
A string
</td>
</tr>
<tr>
<td>
```
''
multi
line
string
''
```
</td>
<td>
A multi-line string. Strips common prefixed whitespace. Evaluates to `"multi\n line\n string"`.
</td>
</tr>
<tr>
<td>
`"hello ${ { a = "world" }.a }"`
`"1 2 ${toString 3}"`
`"${pkgs.bash}/bin/sh"`
</td>
<td>
String interpolation (expands to `"hello world"`, `"1 2 3"`, `"/nix/store/<hash>-bash-<version>/bin/sh"`)
</td>
</tr>
<tr>
<td>
`true`, `false`
</td>
<td>
Booleans
</td>
</tr>
<tr>
<td>
`null`
</td>
<td>
Null value
</td>
</tr>
<tr>
<td>
`123`
</td>
<td>
An integer
</td>
</tr>
<tr>
<td>
`3.141`
</td>
<td>
A floating point number
</td>
</tr>
<tr>
<td>
`/etc`
</td>
<td>
An absolute path
</td>
</tr>
<tr>
<td>
`./foo.png`
</td>
<td>
A path relative to the file containing this Nix expression
</td>
</tr>
<tr>
<td>
`~/.config`
</td>
<td>
A home path. Evaluates to `"<user's home directory>/.config"`.
</td>
</tr>
<tr>
<td>
<nixpkgs>
</td>
<td>
Search path. Value determined by [`$NIX_PATH` environment variable](../command-ref/env-common.md#env-NIX_PATH).
</td>
</tr>
<tr>
<td>
*Compound values*
</td>
<td>
</td>
</tr>
<tr>
<td>
`{ x = 1; y = 2; }`
</td>
<td>
A set with attributes named `x` and `y`
</td>
</tr>
<tr>
<td>
`{ foo.bar = 1; }`
</td>
<td>
A nested set, equivalent to `{ foo = { bar = 1; }; }`
</td>
</tr>
<tr>
<td>
`rec { x = "foo"; y = x + "bar"; }`
</td>
<td>
A recursive set, equivalent to `{ x = "foo"; y = "foobar"; }`
</td>
</tr>
<tr>
<td>
`[ "foo" "bar" "baz" ]`
`[ 1 2 3 ]`
`[ (f 1) { a = 1; b = 2; } [ "c" ] ]`
</td>
<td>
Lists with three elements.
</td>
</tr>
<tr>
<td>
*Operators*
</td>
<td>
</td>
</tr>
<tr>
<td>
`"foo" + "bar"`
</td>
<td>
String concatenation
</td>
</tr>
<tr>
<td>
`1 + 2`
</td>
<td>
Integer addition
</td>
</tr>
<tr>
<td>
`"foo" == "f" + "oo"`
</td>
<td>
Equality test (evaluates to `true`)
</td>
</tr>
<tr>
<td>
`"foo" != "bar"`
</td>
<td>
Inequality test (evaluates to `true`)
</td>
</tr>
<tr>
<td>
`!true`
</td>
<td>
Boolean negation
</td>
</tr>
<tr>
<td>
`{ x = 1; y = 2; }.x`
</td>
<td>
Attribute selection (evaluates to `1`)
</td>
</tr>
<tr>
<td>
`{ x = 1; y = 2; }.z or 3`
</td>
<td>
Attribute selection with default (evaluates to `3`)
</td>
</tr>
<tr>
<td>
`{ x = 1; y = 2; } // { z = 3; }`
</td>
<td>
Merge two sets (attributes in the right-hand set taking precedence)
</td>
</tr>
<tr>
<td>
*Control structures*
</td>
<td>
</td>
</tr>
<tr>
<td>
`if 1 + 1 == 2 then "yes!" else "no!"`
</td>
<td>
Conditional expression
</td>
</tr>
<tr>
<td>
`assert 1 + 1 == 2; "yes!"`
</td>
<td>
Assertion check (evaluates to `"yes!"`).
</td>
</tr>
<tr>
<td>
`let x = "foo"; y = "bar"; in x + y`
</td>
<td>
Variable definition
</td>
</tr>
<tr>
<td>
`with builtins; head [ 1 2 3 ]`
</td>
<td>
Add all attributes from the given set to the scope (evaluates to `1`)
</td>
</tr>
<tr>
<td>
*Functions (lambdas)*
</td>
<td>
</td>
</tr>
<tr>
<td>
`x: x + 1`
</td>
<td>
A function that expects an integer and returns it increased by 1
</td>
</tr>
<tr>
<td>
`x: y: x + y`
</td>
<td>
Curried function, equivalent to `x: (y: x + y)`. Can be used like a function that takes two arguments and returns their sum.
</td>
</tr>
<tr>
<td>
`(x: x + 1) 100`
</td>
<td>
A function call (evaluates to 101)
</td>
</tr>
<tr>
<td>
`let inc = x: x + 1; in inc (inc (inc 100))`
</td>
<td>
A function bound to a variable and subsequently called by name (evaluates to 103)
</td>
</tr>
<tr>
<td>
`{ x, y }: x + y`
</td>
<td>
A function that expects a set with required attributes `x` and `y` and concatenates them
</td>
</tr>
<tr>
<td>
`{ x, y ? "bar" }: x + y`
</td>
<td>
A function that expects a set with required attribute `x` and optional `y`, using `"bar"` as default value for `y`
</td>
</tr>
<tr>
<td>
`{ x, y, ... }: x + y`
</td>
<td>
A function that expects a set with required attributes `x` and `y` and ignores any other attributes
</td>
</tr>
<tr>
<td>
`{ x, y } @ args: x + y`
`args @ { x, y }: x + y`
</td>
<td>
A function that expects a set with required attributes `x` and `y`, and binds the whole set to `args`
</td>
</tr>
<tr>
<td>
*Built-in functions*
</td>
<td>
</td>
</tr>
<tr>
<td>
`import ./foo.nix`
</td>
<td>
Load and return Nix expression in given file
</td>
</tr>
<tr>
<td>
`map (x: x + x) [ 1 2 3 ]`
</td>
<td>
Apply a function to every element of a list (evaluates to `[ 2 4 6 ]`)
</td>
</tr>
</table>

View file

@ -150,6 +150,20 @@
recognized as a path. `a.${foo}/b.${bar}` is a syntactically valid division recognized as a path. `a.${foo}/b.${bar}` is a syntactically valid division
operation. `./a.${foo}/b.${bar}` is a path. operation. `./a.${foo}/b.${bar}` is a path.
When a path appears in an antiquotation, and is thus coerced into a string,
the path is first copied into the Nix store and the resulting string is
the Nix store path. For instance `"${./foo.txt}"` will cause `foo.txt` in
the current directory to be copied into the Nix store and result in the
string `"/nix/store/<HASH>-foo.txt"`.
Note that the Nix language assumes that all input files will remain
_unchanged_ during the course of the Nix expression evaluation.
If you for example antiquote a file path during a `nix repl` session, and
then later in the same session, after having changed the file contents,
evaluate the antiquotation with the file path again, then Nix will still
return the first store path. It will _not_ reread the file contents to
produce a different Nix store path.
- <a id="type-boolean" href="#type-boolean">Boolean</a> - <a id="type-boolean" href="#type-boolean">Boolean</a>
*Booleans* with values `true` and `false`. *Booleans* with values `true` and `false`.

View file

@ -5,3 +5,50 @@
arguments will be ignored and the resulting derivation will have arguments will be ignored and the resulting derivation will have
`__impure` set to `true`, making it an impure derivation. `__impure` set to `true`, making it an impure derivation.
* If `builtins.readFile` is called on a file with context, then only the parts
of that context that appear in the content of the file are retained.
This avoids a lot of spurious errors where some benign strings end-up having
a context just because they are read from a store path
([#7260](https://github.com/NixOS/nix/pull/7260)).
* Nix can now automatically pick UIDs for builds, removing the need to
create `nixbld*` user accounts. These UIDs are allocated starting at
872415232 (0x34000000) on Linux and 56930 on macOS.
This is an experimental feature. To enable it, add the following to
`nix.conf`:
```
extra-experimental-features = auto-allocate-uids
auto-allocate-uids = true
```
* On Linux, Nix can now run builds in a user namespace where the build
runs as root (UID 0) and has 65,536 UIDs available. This is
primarily useful for running containers such as `systemd-nspawn`
inside a Nix build. For an example, see
https://github.com/NixOS/nix/blob/67bcb99700a0da1395fa063d7c6586740b304598/tests/systemd-nspawn.nix.
A build can enable this by requiring the `uid-range` system feature,
i.e. by setting the derivation attribute
```
requiredSystemFeatures = [ "uid-range" ];
```
The `uid-range` system feature requires the `auto-allocate-uids`
setting to be enabled (see above).
* On Linux, Nix has experimental support for running builds inside a
cgroup. It can be enabled by adding
```
extra-experimental-features = cgroups
use-cgroups = true
```
to `nix.conf`. Cgroups are required for derivations that require the
`uid-range` system feature.
* `nix build --json` now prints some statistics about top-level
derivations, such as CPU statistics when cgroups are enabled.

View file

@ -36,6 +36,17 @@ let
shell = "${pkgs.bashInteractive}/bin/bash"; shell = "${pkgs.bashInteractive}/bin/bash";
home = "/root"; home = "/root";
gid = 0; gid = 0;
groups = [ "root" ];
description = "System administrator";
};
nobody = {
uid = 65534;
shell = "${pkgs.shadow}/bin/nologin";
home = "/var/empty";
gid = 65534;
groups = [ "nobody" ];
description = "Unprivileged account (don't use!)";
}; };
} // lib.listToAttrs ( } // lib.listToAttrs (
@ -57,6 +68,7 @@ let
groups = { groups = {
root.gid = 0; root.gid = 0;
nixbld.gid = 30000; nixbld.gid = 30000;
nobody.gid = 65534;
}; };
userToPasswd = ( userToPasswd = (

View file

@ -506,6 +506,12 @@
overlay = self.overlays.default; overlay = self.overlays.default;
}); });
tests.containers = (import ./tests/containers.nix rec {
system = "x86_64-linux";
inherit nixpkgs;
overlay = self.overlays.default;
});
tests.setuid = nixpkgs.lib.genAttrs tests.setuid = nixpkgs.lib.genAttrs
["i686-linux" "x86_64-linux"] ["i686-linux" "x86_64-linux"]
(system: (system:

View file

@ -58,7 +58,7 @@ readonly EXTRACTED_NIX_PATH="$(dirname "$0")"
readonly ROOT_HOME=~root readonly ROOT_HOME=~root
if [ -t 0 ]; then if [ -t 0 ] && [ -z "${NIX_INSTALLER_YES:-}" ]; then
readonly IS_HEADLESS='no' readonly IS_HEADLESS='no'
else else
readonly IS_HEADLESS='yes' readonly IS_HEADLESS='yes'

View file

@ -71,6 +71,8 @@ while [ $# -gt 0 ]; do
# # intentional tail space # # intentional tail space
# ACTIONS="${ACTIONS}uninstall " # ACTIONS="${ACTIONS}uninstall "
# ;; # ;;
--yes)
export NIX_INSTALLER_YES=1;;
--no-channel-add) --no-channel-add)
export NIX_INSTALLER_NO_CHANNEL_ADD=1;; export NIX_INSTALLER_NO_CHANNEL_ADD=1;;
--daemon-user-count) --daemon-user-count)
@ -90,7 +92,7 @@ while [ $# -gt 0 ]; do
shift;; shift;;
*) *)
{ {
echo "Nix Installer [--daemon|--no-daemon] [--daemon-user-count INT] [--no-channel-add] [--no-modify-profile] [--nix-extra-conf-file FILE]" echo "Nix Installer [--daemon|--no-daemon] [--daemon-user-count INT] [--yes] [--no-channel-add] [--no-modify-profile] [--nix-extra-conf-file FILE]"
echo "Choose installation method." echo "Choose installation method."
echo "" echo ""
@ -104,6 +106,8 @@ while [ $# -gt 0 ]; do
echo " trivial to uninstall." echo " trivial to uninstall."
echo " (default)" echo " (default)"
echo "" echo ""
echo " --yes: Run the script non-interactively, accepting all prompts."
echo ""
echo " --no-channel-add: Don't add any channels. nixpkgs-unstable is installed by default." echo " --no-channel-add: Don't add any channels. nixpkgs-unstable is installed by default."
echo "" echo ""
echo " --no-modify-profile: Don't modify the user profile to automatically load nix." echo " --no-modify-profile: Don't modify the user profile to automatically load nix."

View file

@ -186,12 +186,12 @@ static int main_build_remote(int argc, char * * argv)
// build the hint template. // build the hint template.
std::string errorText = std::string errorText =
"Failed to find a machine for remote build!\n" "Failed to find a machine for remote build!\n"
"derivation: %s\nrequired (system, features): (%s, %s)"; "derivation: %s\nrequired (system, features): (%s, [%s])";
errorText += "\n%s available machines:"; errorText += "\n%s available machines:";
errorText += "\n(systems, maxjobs, supportedFeatures, mandatoryFeatures)"; errorText += "\n(systems, maxjobs, supportedFeatures, mandatoryFeatures)";
for (unsigned int i = 0; i < machines.size(); ++i) for (unsigned int i = 0; i < machines.size(); ++i)
errorText += "\n(%s, %s, %s, %s)"; errorText += "\n([%s], %s, [%s], [%s])";
// add the template values. // add the template values.
std::string drvstr; std::string drvstr;

View file

@ -226,7 +226,7 @@ MixProfile::MixProfile()
{ {
addFlag({ addFlag({
.longName = "profile", .longName = "profile",
.description = "The profile to update.", .description = "The profile to operate on.",
.labels = {"path"}, .labels = {"path"},
.handler = {&profile}, .handler = {&profile},
.completer = completePath .completer = completePath

View file

@ -270,55 +270,59 @@ Strings SourceExprCommand::getDefaultFlakeAttrPathPrefixes()
void SourceExprCommand::completeInstallable(std::string_view prefix) void SourceExprCommand::completeInstallable(std::string_view prefix)
{ {
if (file) { try {
completionType = ctAttrs; if (file) {
completionType = ctAttrs;
evalSettings.pureEval = false; evalSettings.pureEval = false;
auto state = getEvalState(); auto state = getEvalState();
Expr *e = state->parseExprFromFile( Expr *e = state->parseExprFromFile(
resolveExprPath(state->checkSourcePath(lookupFileArg(*state, *file))) resolveExprPath(state->checkSourcePath(lookupFileArg(*state, *file)))
); );
Value root; Value root;
state->eval(e, root); state->eval(e, root);
auto autoArgs = getAutoArgs(*state); auto autoArgs = getAutoArgs(*state);
std::string prefix_ = std::string(prefix); std::string prefix_ = std::string(prefix);
auto sep = prefix_.rfind('.'); auto sep = prefix_.rfind('.');
std::string searchWord; std::string searchWord;
if (sep != std::string::npos) { if (sep != std::string::npos) {
searchWord = prefix_.substr(sep + 1, std::string::npos); searchWord = prefix_.substr(sep + 1, std::string::npos);
prefix_ = prefix_.substr(0, sep); prefix_ = prefix_.substr(0, sep);
} else { } else {
searchWord = prefix_; searchWord = prefix_;
prefix_ = ""; prefix_ = "";
} }
auto [v, pos] = findAlongAttrPath(*state, prefix_, *autoArgs, root); auto [v, pos] = findAlongAttrPath(*state, prefix_, *autoArgs, root);
Value &v1(*v); Value &v1(*v);
state->forceValue(v1, pos); state->forceValue(v1, pos);
Value v2; Value v2;
state->autoCallFunction(*autoArgs, v1, v2); state->autoCallFunction(*autoArgs, v1, v2);
if (v2.type() == nAttrs) { if (v2.type() == nAttrs) {
for (auto & i : *v2.attrs) { for (auto & i : *v2.attrs) {
std::string name = state->symbols[i.name]; std::string name = state->symbols[i.name];
if (name.find(searchWord) == 0) { if (name.find(searchWord) == 0) {
if (prefix_ == "") if (prefix_ == "")
completions->add(name); completions->add(name);
else else
completions->add(prefix_ + "." + name); completions->add(prefix_ + "." + name);
}
} }
} }
} else {
completeFlakeRefWithFragment(
getEvalState(),
lockFlags,
getDefaultFlakeAttrPathPrefixes(),
getDefaultFlakeAttrPaths(),
prefix);
} }
} else { } catch (EvalError&) {
completeFlakeRefWithFragment( // Don't want eval errors to mess-up with the completion engine, so let's just swallow them
getEvalState(),
lockFlags,
getDefaultFlakeAttrPathPrefixes(),
getDefaultFlakeAttrPaths(),
prefix);
} }
} }
@ -1040,20 +1044,20 @@ std::shared_ptr<Installable> SourceExprCommand::parseInstallable(
return installables.front(); return installables.front();
} }
BuiltPaths Installable::build( std::vector<BuiltPathWithResult> Installable::build(
ref<Store> evalStore, ref<Store> evalStore,
ref<Store> store, ref<Store> store,
Realise mode, Realise mode,
const std::vector<std::shared_ptr<Installable>> & installables, const std::vector<std::shared_ptr<Installable>> & installables,
BuildMode bMode) BuildMode bMode)
{ {
BuiltPaths res; std::vector<BuiltPathWithResult> res;
for (auto & [_, builtPath] : build2(evalStore, store, mode, installables, bMode)) for (auto & [_, builtPathWithResult] : build2(evalStore, store, mode, installables, bMode))
res.push_back(builtPath); res.push_back(builtPathWithResult);
return res; return res;
} }
std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> Installable::build2( std::vector<std::pair<std::shared_ptr<Installable>, BuiltPathWithResult>> Installable::build2(
ref<Store> evalStore, ref<Store> evalStore,
ref<Store> store, ref<Store> store,
Realise mode, Realise mode,
@ -1073,7 +1077,7 @@ std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> Installable::bui
} }
} }
std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> res; std::vector<std::pair<std::shared_ptr<Installable>, BuiltPathWithResult>> res;
switch (mode) { switch (mode) {
@ -1114,10 +1118,10 @@ std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> Installable::bui
output, *drvOutput->second); output, *drvOutput->second);
} }
} }
res.push_back({installable, BuiltPath::Built { bfd.drvPath, outputs }}); res.push_back({installable, {.path = BuiltPath::Built { bfd.drvPath, outputs }}});
}, },
[&](const DerivedPath::Opaque & bo) { [&](const DerivedPath::Opaque & bo) {
res.push_back({installable, BuiltPath::Opaque { bo.path }}); res.push_back({installable, {.path = BuiltPath::Opaque { bo.path }}});
}, },
}, path.raw()); }, path.raw());
} }
@ -1127,7 +1131,7 @@ std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> Installable::bui
case Realise::Outputs: { case Realise::Outputs: {
if (settings.printMissing) if (settings.printMissing)
printMissing(store, pathsToBuild, lvlInfo); printMissing(store, pathsToBuild, lvlInfo);
for (auto & buildResult : store->buildPathsWithResults(pathsToBuild, bMode, evalStore)) { for (auto & buildResult : store->buildPathsWithResults(pathsToBuild, bMode, evalStore)) {
if (!buildResult.success()) if (!buildResult.success())
@ -1139,10 +1143,10 @@ std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> Installable::bui
std::map<std::string, StorePath> outputs; std::map<std::string, StorePath> outputs;
for (auto & path : buildResult.builtOutputs) for (auto & path : buildResult.builtOutputs)
outputs.emplace(path.first.outputName, path.second.outPath); outputs.emplace(path.first.outputName, path.second.outPath);
res.push_back({installable, BuiltPath::Built { bfd.drvPath, outputs }}); res.push_back({installable, {.path = BuiltPath::Built { bfd.drvPath, outputs }, .result = buildResult}});
}, },
[&](const DerivedPath::Opaque & bo) { [&](const DerivedPath::Opaque & bo) {
res.push_back({installable, BuiltPath::Opaque { bo.path }}); res.push_back({installable, {.path = BuiltPath::Opaque { bo.path }, .result = buildResult}});
}, },
}, buildResult.path.raw()); }, buildResult.path.raw());
} }
@ -1165,9 +1169,12 @@ BuiltPaths Installable::toBuiltPaths(
OperateOn operateOn, OperateOn operateOn,
const std::vector<std::shared_ptr<Installable>> & installables) const std::vector<std::shared_ptr<Installable>> & installables)
{ {
if (operateOn == OperateOn::Output) if (operateOn == OperateOn::Output) {
return Installable::build(evalStore, store, mode, installables); BuiltPaths res;
else { for (auto & p : Installable::build(evalStore, store, mode, installables))
res.push_back(p.path);
return res;
} else {
if (mode == Realise::Nothing) if (mode == Realise::Nothing)
settings.readOnlyMode = true; settings.readOnlyMode = true;

View file

@ -7,6 +7,7 @@
#include "eval.hh" #include "eval.hh"
#include "store-api.hh" #include "store-api.hh"
#include "flake/flake.hh" #include "flake/flake.hh"
#include "build-result.hh"
#include <optional> #include <optional>
@ -51,6 +52,12 @@ enum class OperateOn {
Derivation Derivation
}; };
struct BuiltPathWithResult
{
BuiltPath path;
std::optional<BuildResult> result;
};
struct Installable struct Installable
{ {
virtual ~Installable() { } virtual ~Installable() { }
@ -91,14 +98,14 @@ struct Installable
return FlakeRef::fromAttrs({{"type","indirect"}, {"id", "nixpkgs"}}); return FlakeRef::fromAttrs({{"type","indirect"}, {"id", "nixpkgs"}});
} }
static BuiltPaths build( static std::vector<BuiltPathWithResult> build(
ref<Store> evalStore, ref<Store> evalStore,
ref<Store> store, ref<Store> store,
Realise mode, Realise mode,
const std::vector<std::shared_ptr<Installable>> & installables, const std::vector<std::shared_ptr<Installable>> & installables,
BuildMode bMode = bmNormal); BuildMode bMode = bmNormal);
static std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> build2( static std::vector<std::pair<std::shared_ptr<Installable>, BuiltPathWithResult>> build2(
ref<Store> evalStore, ref<Store> evalStore,
ref<Store> store, ref<Store> store,
Realise mode, Realise mode,

View file

@ -8,7 +8,7 @@ libcmd_SOURCES := $(wildcard $(d)/*.cc)
libcmd_CXXFLAGS += -I src/libutil -I src/libstore -I src/libexpr -I src/libmain -I src/libfetchers -I src/nix libcmd_CXXFLAGS += -I src/libutil -I src/libstore -I src/libexpr -I src/libmain -I src/libfetchers -I src/nix
libcmd_LDFLAGS = $(EDITLINE_LIBS) -llowdown -pthread libcmd_LDFLAGS = $(EDITLINE_LIBS) $(LOWDOWN_LIBS) -pthread
libcmd_LIBS = libstore libutil libexpr libmain libfetchers libcmd_LIBS = libstore libutil libexpr libmain libfetchers

View file

@ -270,6 +270,7 @@ void NixRepl::mainLoop()
// ctrl-D should exit the debugger. // ctrl-D should exit the debugger.
state->debugStop = false; state->debugStop = false;
state->debugQuit = true; state->debugQuit = true;
logger->cout("");
break; break;
} }
try { try {
@ -384,6 +385,10 @@ StringSet NixRepl::completePrefix(const std::string & prefix)
i++; i++;
} }
} else { } else {
/* Temporarily disable the debugger, to avoid re-entering readline. */
auto debug_repl = state->debugRepl;
state->debugRepl = nullptr;
Finally restoreDebug([&]() { state->debugRepl = debug_repl; });
try { try {
/* This is an expression that should evaluate to an /* This is an expression that should evaluate to an
attribute set. Evaluate it to get the names of the attribute set. Evaluate it to get the names of the

View file

@ -7,7 +7,6 @@
#include "globals.hh" #include "globals.hh"
#include "eval-inline.hh" #include "eval-inline.hh"
#include "filetransfer.hh" #include "filetransfer.hh"
#include "json.hh"
#include "function-trace.hh" #include "function-trace.hh"
#include <algorithm> #include <algorithm>
@ -21,6 +20,7 @@
#include <functional> #include <functional>
#include <sys/resource.h> #include <sys/resource.h>
#include <nlohmann/json.hpp>
#if HAVE_BOEHMGC #if HAVE_BOEHMGC
@ -35,6 +35,8 @@
#endif #endif
using json = nlohmann::json;
namespace nix { namespace nix {
static char * allocString(size_t size) static char * allocString(size_t size)
@ -69,15 +71,11 @@ static char * dupString(const char * s)
// empty string. // empty string.
static const char * makeImmutableStringWithLen(const char * s, size_t size) static const char * makeImmutableStringWithLen(const char * s, size_t size)
{ {
char * t;
if (size == 0) if (size == 0)
return ""; return "";
#if HAVE_BOEHMGC auto t = allocString(size + 1);
t = GC_STRNDUP(s, size); memcpy(t, s, size);
#else t[size] = 0;
t = strndup(s, size);
#endif
if (!t) throw std::bad_alloc();
return t; return t;
} }
@ -904,7 +902,7 @@ void EvalState::throwEvalError(const char * s, const std::string & s2,
const std::string & s3) const std::string & s3)
{ {
debugThrowLastTrace(EvalError({ debugThrowLastTrace(EvalError({
.msg = hintfmt(s, s2), .msg = hintfmt(s, s2, s3),
.errPos = positions[noPos] .errPos = positions[noPos]
})); }));
} }
@ -913,7 +911,7 @@ void EvalState::throwEvalError(const PosIdx pos, const char * s, const std::stri
const std::string & s3) const std::string & s3)
{ {
debugThrowLastTrace(EvalError({ debugThrowLastTrace(EvalError({
.msg = hintfmt(s, s2), .msg = hintfmt(s, s2, s3),
.errPos = positions[pos] .errPos = positions[pos]
})); }));
} }
@ -922,7 +920,7 @@ void EvalState::throwEvalError(const PosIdx pos, const char * s, const std::stri
const std::string & s3, Env & env, Expr & expr) const std::string & s3, Env & env, Expr & expr)
{ {
debugThrow(EvalError({ debugThrow(EvalError({
.msg = hintfmt(s, s2), .msg = hintfmt(s, s2, s3),
.errPos = positions[pos] .errPos = positions[pos]
}), env, expr); }), env, expr);
} }
@ -2441,97 +2439,97 @@ void EvalState::printStats()
std::fstream fs; std::fstream fs;
if (outPath != "-") if (outPath != "-")
fs.open(outPath, std::fstream::out); fs.open(outPath, std::fstream::out);
JSONObject topObj(outPath == "-" ? std::cerr : fs, true); json topObj = json::object();
topObj.attr("cpuTime",cpuTime); topObj["cpuTime"] = cpuTime;
{ topObj["envs"] = {
auto envs = topObj.object("envs"); {"number", nrEnvs},
envs.attr("number", nrEnvs); {"elements", nrValuesInEnvs},
envs.attr("elements", nrValuesInEnvs); {"bytes", bEnvs},
envs.attr("bytes", bEnvs); };
} topObj["list"] = {
{ {"elements", nrListElems},
auto lists = topObj.object("list"); {"bytes", bLists},
lists.attr("elements", nrListElems); {"concats", nrListConcats},
lists.attr("bytes", bLists); };
lists.attr("concats", nrListConcats); topObj["values"] = {
} {"number", nrValues},
{ {"bytes", bValues},
auto values = topObj.object("values"); };
values.attr("number", nrValues); topObj["symbols"] = {
values.attr("bytes", bValues); {"number", symbols.size()},
} {"bytes", symbols.totalSize()},
{ };
auto syms = topObj.object("symbols"); topObj["sets"] = {
syms.attr("number", symbols.size()); {"number", nrAttrsets},
syms.attr("bytes", symbols.totalSize()); {"bytes", bAttrsets},
} {"elements", nrAttrsInAttrsets},
{ };
auto sets = topObj.object("sets"); topObj["sizes"] = {
sets.attr("number", nrAttrsets); {"Env", sizeof(Env)},
sets.attr("bytes", bAttrsets); {"Value", sizeof(Value)},
sets.attr("elements", nrAttrsInAttrsets); {"Bindings", sizeof(Bindings)},
} {"Attr", sizeof(Attr)},
{ };
auto sizes = topObj.object("sizes"); topObj["nrOpUpdates"] = nrOpUpdates;
sizes.attr("Env", sizeof(Env)); topObj["nrOpUpdateValuesCopied"] = nrOpUpdateValuesCopied;
sizes.attr("Value", sizeof(Value)); topObj["nrThunks"] = nrThunks;
sizes.attr("Bindings", sizeof(Bindings)); topObj["nrAvoided"] = nrAvoided;
sizes.attr("Attr", sizeof(Attr)); topObj["nrLookups"] = nrLookups;
} topObj["nrPrimOpCalls"] = nrPrimOpCalls;
topObj.attr("nrOpUpdates", nrOpUpdates); topObj["nrFunctionCalls"] = nrFunctionCalls;
topObj.attr("nrOpUpdateValuesCopied", nrOpUpdateValuesCopied);
topObj.attr("nrThunks", nrThunks);
topObj.attr("nrAvoided", nrAvoided);
topObj.attr("nrLookups", nrLookups);
topObj.attr("nrPrimOpCalls", nrPrimOpCalls);
topObj.attr("nrFunctionCalls", nrFunctionCalls);
#if HAVE_BOEHMGC #if HAVE_BOEHMGC
{ topObj["gc"] = {
auto gc = topObj.object("gc"); {"heapSize", heapSize},
gc.attr("heapSize", heapSize); {"totalBytes", totalBytes},
gc.attr("totalBytes", totalBytes); };
}
#endif #endif
if (countCalls) { if (countCalls) {
topObj["primops"] = primOpCalls;
{ {
auto obj = topObj.object("primops"); auto& list = topObj["functions"];
for (auto & i : primOpCalls) list = json::array();
obj.attr(i.first, i.second);
}
{
auto list = topObj.list("functions");
for (auto & [fun, count] : functionCalls) { for (auto & [fun, count] : functionCalls) {
auto obj = list.object(); json obj = json::object();
if (fun->name) if (fun->name)
obj.attr("name", (std::string_view) symbols[fun->name]); obj["name"] = (std::string_view) symbols[fun->name];
else else
obj.attr("name", nullptr); obj["name"] = nullptr;
if (auto pos = positions[fun->pos]) { if (auto pos = positions[fun->pos]) {
obj.attr("file", (std::string_view) pos.file); obj["file"] = (std::string_view) pos.file;
obj.attr("line", pos.line); obj["line"] = pos.line;
obj.attr("column", pos.column); obj["column"] = pos.column;
} }
obj.attr("count", count); obj["count"] = count;
list.push_back(obj);
} }
} }
{ {
auto list = topObj.list("attributes"); auto list = topObj["attributes"];
list = json::array();
for (auto & i : attrSelects) { for (auto & i : attrSelects) {
auto obj = list.object(); json obj = json::object();
if (auto pos = positions[i.first]) { if (auto pos = positions[i.first]) {
obj.attr("file", (const std::string &) pos.file); obj["file"] = (const std::string &) pos.file;
obj.attr("line", pos.line); obj["line"] = pos.line;
obj.attr("column", pos.column); obj["column"] = pos.column;
} }
obj.attr("count", i.second); obj["count"] = i.second;
list.push_back(obj);
} }
} }
} }
if (getEnv("NIX_SHOW_SYMBOLS").value_or("0") != "0") { if (getEnv("NIX_SHOW_SYMBOLS").value_or("0") != "0") {
auto list = topObj.list("symbols"); // XXX: overrides earlier assignment
symbols.dump([&](const std::string & s) { list.elem(s); }); topObj["symbols"] = json::array();
auto &list = topObj["symbols"];
symbols.dump([&](const std::string & s) { list.emplace_back(s); });
}
if (outPath == "-") {
std::cerr << topObj.dump(2) << std::endl;
} else {
fs << topObj.dump(2) << std::endl;
} }
} }
} }

View file

@ -43,7 +43,7 @@ let
outputs = flake.outputs (inputs // { self = result; }); outputs = flake.outputs (inputs // { self = result; });
result = outputs // sourceInfo // { inherit inputs; inherit outputs; inherit sourceInfo; }; result = outputs // sourceInfo // { inherit inputs; inherit outputs; inherit sourceInfo; _type = "flake"; };
in in
if node.flake or true then if node.flake or true then
assert builtins.isFunction flake.outputs; assert builtins.isFunction flake.outputs;

View file

@ -5,14 +5,15 @@
#include "globals.hh" #include "globals.hh"
#include "json-to-value.hh" #include "json-to-value.hh"
#include "names.hh" #include "names.hh"
#include "references.hh"
#include "store-api.hh" #include "store-api.hh"
#include "util.hh" #include "util.hh"
#include "json.hh"
#include "value-to-json.hh" #include "value-to-json.hh"
#include "value-to-xml.hh" #include "value-to-xml.hh"
#include "primops.hh" #include "primops.hh"
#include <boost/container/small_vector.hpp> #include <boost/container/small_vector.hpp>
#include <nlohmann/json.hpp>
#include <sys/types.h> #include <sys/types.h>
#include <sys/stat.h> #include <sys/stat.h>
@ -1010,6 +1011,7 @@ static void prim_second(EvalState & state, const PosIdx pos, Value * * args, Val
derivation. */ derivation. */
static void prim_derivationStrict(EvalState & state, const PosIdx pos, Value * * args, Value & v) static void prim_derivationStrict(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{ {
using nlohmann::json;
state.forceAttrs(*args[0], pos); state.forceAttrs(*args[0], pos);
/* Figure out the name first (for stack backtraces). */ /* Figure out the name first (for stack backtraces). */
@ -1031,11 +1033,10 @@ static void prim_derivationStrict(EvalState & state, const PosIdx pos, Value * *
} }
/* Check whether attributes should be passed as a JSON file. */ /* Check whether attributes should be passed as a JSON file. */
std::ostringstream jsonBuf; std::optional<json> jsonObject;
std::unique_ptr<JSONObject> jsonObject;
attr = args[0]->attrs->find(state.sStructuredAttrs); attr = args[0]->attrs->find(state.sStructuredAttrs);
if (attr != args[0]->attrs->end() && state.forceBool(*attr->value, pos)) if (attr != args[0]->attrs->end() && state.forceBool(*attr->value, pos))
jsonObject = std::make_unique<JSONObject>(jsonBuf); jsonObject = json::object();
/* Check whether null attributes should be ignored. */ /* Check whether null attributes should be ignored. */
bool ignoreNulls = false; bool ignoreNulls = false;
@ -1137,8 +1138,7 @@ static void prim_derivationStrict(EvalState & state, const PosIdx pos, Value * *
if (i->name == state.sStructuredAttrs) continue; if (i->name == state.sStructuredAttrs) continue;
auto placeholder(jsonObject->placeholder(key)); (*jsonObject)[key] = printValueAsJSON(state, true, *i->value, pos, context);
printValueAsJSON(state, true, *i->value, pos, placeholder, context);
if (i->name == state.sBuilder) if (i->name == state.sBuilder)
drv.builder = state.forceString(*i->value, context, posDrvName); drv.builder = state.forceString(*i->value, context, posDrvName);
@ -1182,8 +1182,8 @@ static void prim_derivationStrict(EvalState & state, const PosIdx pos, Value * *
} }
if (jsonObject) { if (jsonObject) {
drv.env.emplace("__json", jsonObject->dump());
jsonObject.reset(); jsonObject.reset();
drv.env.emplace("__json", jsonBuf.str());
} }
/* Everything in the context of the strings in the derivation /* Everything in the context of the strings in the derivation
@ -1542,6 +1542,10 @@ static void prim_readFile(EvalState & state, const PosIdx pos, Value * * args, V
refs = state.store->queryPathInfo(state.store->toStorePath(path).first)->references; refs = state.store->queryPathInfo(state.store->toStorePath(path).first)->references;
} catch (Error &) { // FIXME: should be InvalidPathError } catch (Error &) { // FIXME: should be InvalidPathError
} }
// Re-scan references to filter down to just the ones that actually occur in the file.
auto refsSink = PathRefScanSink::fromPaths(refs);
refsSink << s;
refs = refsSink.getResultPaths();
} }
auto context = state.store->printStorePathSet(refs); auto context = state.store->printStorePathSet(refs);
v.mkString(s, context); v.mkString(s, context);

View file

@ -1,84 +1,82 @@
#include "value-to-json.hh" #include "value-to-json.hh"
#include "json.hh"
#include "eval-inline.hh" #include "eval-inline.hh"
#include "util.hh" #include "util.hh"
#include <cstdlib> #include <cstdlib>
#include <iomanip> #include <iomanip>
#include <nlohmann/json.hpp>
namespace nix { namespace nix {
using json = nlohmann::json;
void printValueAsJSON(EvalState & state, bool strict, json printValueAsJSON(EvalState & state, bool strict,
Value & v, const PosIdx pos, JSONPlaceholder & out, PathSet & context, bool copyToStore) Value & v, const PosIdx pos, PathSet & context, bool copyToStore)
{ {
checkInterrupt(); checkInterrupt();
if (strict) state.forceValue(v, pos); if (strict) state.forceValue(v, pos);
json out;
switch (v.type()) { switch (v.type()) {
case nInt: case nInt:
out.write(v.integer); out = v.integer;
break; break;
case nBool: case nBool:
out.write(v.boolean); out = v.boolean;
break; break;
case nString: case nString:
copyContext(v, context); copyContext(v, context);
out.write(v.string.s); out = v.string.s;
break; break;
case nPath: case nPath:
if (copyToStore) if (copyToStore)
out.write(state.copyPathToStore(context, v.path)); out = state.copyPathToStore(context, v.path);
else else
out.write(v.path); out = v.path;
break; break;
case nNull: case nNull:
out.write(nullptr);
break; break;
case nAttrs: { case nAttrs: {
auto maybeString = state.tryAttrsToString(pos, v, context, false, false); auto maybeString = state.tryAttrsToString(pos, v, context, false, false);
if (maybeString) { if (maybeString) {
out.write(*maybeString); out = *maybeString;
break; break;
} }
auto i = v.attrs->find(state.sOutPath); auto i = v.attrs->find(state.sOutPath);
if (i == v.attrs->end()) { if (i == v.attrs->end()) {
auto obj(out.object()); out = json::object();
StringSet names; StringSet names;
for (auto & j : *v.attrs) for (auto & j : *v.attrs)
names.emplace(state.symbols[j.name]); names.emplace(state.symbols[j.name]);
for (auto & j : names) { for (auto & j : names) {
Attr & a(*v.attrs->find(state.symbols.create(j))); Attr & a(*v.attrs->find(state.symbols.create(j)));
auto placeholder(obj.placeholder(j)); out[j] = printValueAsJSON(state, strict, *a.value, a.pos, context, copyToStore);
printValueAsJSON(state, strict, *a.value, a.pos, placeholder, context, copyToStore);
} }
} else } else
printValueAsJSON(state, strict, *i->value, i->pos, out, context, copyToStore); return printValueAsJSON(state, strict, *i->value, i->pos, context, copyToStore);
break; break;
} }
case nList: { case nList: {
auto list(out.list()); out = json::array();
for (auto elem : v.listItems()) { for (auto elem : v.listItems())
auto placeholder(list.placeholder()); out.push_back(printValueAsJSON(state, strict, *elem, pos, context, copyToStore));
printValueAsJSON(state, strict, *elem, pos, placeholder, context, copyToStore);
}
break; break;
} }
case nExternal: case nExternal:
v.external->printValueAsJSON(state, strict, out, context, copyToStore); return v.external->printValueAsJSON(state, strict, context, copyToStore);
break; break;
case nFloat: case nFloat:
out.write(v.fpoint); out = v.fpoint;
break; break;
case nThunk: case nThunk:
@ -91,17 +89,17 @@ void printValueAsJSON(EvalState & state, bool strict,
state.debugThrowLastTrace(e); state.debugThrowLastTrace(e);
throw e; throw e;
} }
return out;
} }
void printValueAsJSON(EvalState & state, bool strict, void printValueAsJSON(EvalState & state, bool strict,
Value & v, const PosIdx pos, std::ostream & str, PathSet & context, bool copyToStore) Value & v, const PosIdx pos, std::ostream & str, PathSet & context, bool copyToStore)
{ {
JSONPlaceholder out(str); str << printValueAsJSON(state, strict, v, pos, context, copyToStore);
printValueAsJSON(state, strict, v, pos, out, context, copyToStore);
} }
void ExternalValueBase::printValueAsJSON(EvalState & state, bool strict, json ExternalValueBase::printValueAsJSON(EvalState & state, bool strict,
JSONPlaceholder & out, PathSet & context, bool copyToStore) const PathSet & context, bool copyToStore) const
{ {
state.debugThrowLastTrace(TypeError("cannot convert %1% to JSON", showType())); state.debugThrowLastTrace(TypeError("cannot convert %1% to JSON", showType()));
} }

View file

@ -5,13 +5,12 @@
#include <string> #include <string>
#include <map> #include <map>
#include <nlohmann/json_fwd.hpp>
namespace nix { namespace nix {
class JSONPlaceholder; nlohmann::json printValueAsJSON(EvalState & state, bool strict,
Value & v, const PosIdx pos, PathSet & context, bool copyToStore = true);
void printValueAsJSON(EvalState & state, bool strict,
Value & v, const PosIdx pos, JSONPlaceholder & out, PathSet & context, bool copyToStore = true);
void printValueAsJSON(EvalState & state, bool strict, void printValueAsJSON(EvalState & state, bool strict,
Value & v, const PosIdx pos, std::ostream & str, PathSet & context, bool copyToStore = true); Value & v, const PosIdx pos, std::ostream & str, PathSet & context, bool copyToStore = true);

View file

@ -7,6 +7,7 @@
#if HAVE_BOEHMGC #if HAVE_BOEHMGC
#include <gc/gc_allocator.h> #include <gc/gc_allocator.h>
#endif #endif
#include <nlohmann/json_fwd.hpp>
namespace nix { namespace nix {
@ -62,7 +63,6 @@ class StorePath;
class Store; class Store;
class EvalState; class EvalState;
class XMLWriter; class XMLWriter;
class JSONPlaceholder;
typedef int64_t NixInt; typedef int64_t NixInt;
@ -98,8 +98,8 @@ class ExternalValueBase
virtual bool operator ==(const ExternalValueBase & b) const; virtual bool operator ==(const ExternalValueBase & b) const;
/* Print the value as JSON. Defaults to unconvertable, i.e. throws an error */ /* Print the value as JSON. Defaults to unconvertable, i.e. throws an error */
virtual void printValueAsJSON(EvalState & state, bool strict, virtual nlohmann::json printValueAsJSON(EvalState & state, bool strict,
JSONPlaceholder & out, PathSet & context, bool copyToStore = true) const; PathSet & context, bool copyToStore = true) const;
/* Print the value as XML. Defaults to unevaluated */ /* Print the value as XML. Defaults to unevaluated */
virtual void printValueAsXML(EvalState & state, bool strict, bool location, virtual void printValueAsXML(EvalState & state, bool strict, bool location,

View file

@ -486,6 +486,10 @@ struct GitInputScheme : InputScheme
} }
input.attrs.insert_or_assign("ref", *head); input.attrs.insert_or_assign("ref", *head);
unlockedAttrs.insert_or_assign("ref", *head); unlockedAttrs.insert_or_assign("ref", *head);
} else {
if (!input.getRev()) {
unlockedAttrs.insert_or_assign("ref", input.getRef().value());
}
} }
if (auto res = getCache()->lookup(store, unlockedAttrs)) { if (auto res = getCache()->lookup(store, unlockedAttrs)) {

View file

@ -262,17 +262,20 @@ struct GitHubInputScheme : GitArchiveInputScheme
DownloadUrl getDownloadUrl(const Input & input) const override DownloadUrl getDownloadUrl(const Input & input) const override
{ {
// FIXME: use regular /archive URLs instead? api.github.com
// might have stricter rate limits.
auto host = maybeGetStrAttr(input.attrs, "host").value_or("github.com"); auto host = maybeGetStrAttr(input.attrs, "host").value_or("github.com");
auto url = fmt( Headers headers = makeHeadersWithAuthTokens(host);
host == "github.com" // If we have no auth headers then we default to the public archive
? "https://api.%s/repos/%s/%s/tarball/%s" // urls so we do not run into rate limits.
: "https://%s/api/v3/repos/%s/%s/tarball/%s", const auto urlFmt =
host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"), host != "github.com"
? "https://%s/api/v3/repos/%s/%s/tarball/%s"
: headers.empty()
? "https://%s/%s/%s/archive/%s.tar.gz"
: "https://api.%s/repos/%s/%s/tarball/%s";
const auto url = fmt(urlFmt, host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"),
input.getRev()->to_string(Base16, false)); input.getRev()->to_string(Base16, false));
Headers headers = makeHeadersWithAuthTokens(host);
return DownloadUrl { url, headers }; return DownloadUrl { url, headers };
} }

View file

@ -33,6 +33,7 @@
namespace nix { namespace nix {
char * * savedArgv;
static bool gcWarning = true; static bool gcWarning = true;

View file

@ -9,7 +9,6 @@
#include "remote-fs-accessor.hh" #include "remote-fs-accessor.hh"
#include "nar-info-disk-cache.hh" #include "nar-info-disk-cache.hh"
#include "nar-accessor.hh" #include "nar-accessor.hh"
#include "json.hh"
#include "thread-pool.hh" #include "thread-pool.hh"
#include "callback.hh" #include "callback.hh"
@ -194,19 +193,12 @@ ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
/* Optionally write a JSON file containing a listing of the /* Optionally write a JSON file containing a listing of the
contents of the NAR. */ contents of the NAR. */
if (writeNARListing) { if (writeNARListing) {
std::ostringstream jsonOut; nlohmann::json j = {
{"version", 1},
{"root", listNar(ref<FSAccessor>(narAccessor), "", true)},
};
{ upsertFile(std::string(info.path.hashPart()) + ".ls", j.dump(), "application/json");
JSONObject jsonRoot(jsonOut);
jsonRoot.attr("version", 1);
{
auto res = jsonRoot.placeholder("root");
listNar(res, ref<FSAccessor>(narAccessor), "", true);
}
}
upsertFile(std::string(info.path.hashPart()) + ".ls", jsonOut.str(), "application/json");
} }
/* Optionally maintain an index of DWARF debug info files /* Optionally maintain an index of DWARF debug info files

View file

@ -5,7 +5,7 @@
#include <string> #include <string>
#include <chrono> #include <chrono>
#include <optional>
namespace nix { namespace nix {
@ -78,6 +78,9 @@ struct BuildResult
was repeated). */ was repeated). */
time_t startTime = 0, stopTime = 0; time_t startTime = 0, stopTime = 0;
/* User and system CPU time the build took. */
std::optional<std::chrono::microseconds> cpuUser, cpuSystem;
bool success() bool success()
{ {
return status == Built || status == Substituted || status == AlreadyValid || status == ResolvesToAlreadyValid; return status == Built || status == Substituted || status == AlreadyValid || status == ResolvesToAlreadyValid;

View file

@ -7,7 +7,6 @@
#include "finally.hh" #include "finally.hh"
#include "util.hh" #include "util.hh"
#include "archive.hh" #include "archive.hh"
#include "json.hh"
#include "compression.hh" #include "compression.hh"
#include "worker-protocol.hh" #include "worker-protocol.hh"
#include "topo-sort.hh" #include "topo-sort.hh"
@ -528,13 +527,31 @@ void DerivationGoal::inputsRealised()
/* Add the relevant output closures of the input derivation /* Add the relevant output closures of the input derivation
`i' as input paths. Only add the closures of output paths `i' as input paths. Only add the closures of output paths
that are specified as inputs. */ that are specified as inputs. */
for (auto & j : wantedDepOutputs) for (auto & j : wantedDepOutputs) {
if (auto outPath = get(inputDrvOutputs, { depDrvPath, j })) /* TODO (impure derivations-induced tech debt):
Tracking input derivation outputs statefully through the
goals is error prone and has led to bugs.
For a robust nix, we need to move towards the `else` branch,
which does not rely on goal state to match up with the
reality of the store, which is our real source of truth.
However, the impure derivations feature still relies on this
fragile way of doing things, because its builds do not have
a representation in the store, which is a usability problem
in itself */
if (auto outPath = get(inputDrvOutputs, { depDrvPath, j })) {
worker.store.computeFSClosure(*outPath, inputPaths); worker.store.computeFSClosure(*outPath, inputPaths);
else }
throw Error( else {
"derivation '%s' requires non-existent output '%s' from input derivation '%s'", auto outMap = worker.evalStore.queryDerivationOutputMap(depDrvPath);
worker.store.printStorePath(drvPath), j, worker.store.printStorePath(depDrvPath)); auto outMapPath = outMap.find(j);
if (outMapPath == outMap.end()) {
throw Error(
"derivation '%s' requires non-existent output '%s' from input derivation '%s'",
worker.store.printStorePath(drvPath), j, worker.store.printStorePath(depDrvPath));
}
worker.store.computeFSClosure(outMapPath->second, inputPaths);
}
}
} }
} }
@ -869,6 +886,14 @@ void DerivationGoal::buildDone()
cleanupPostChildKill(); cleanupPostChildKill();
if (buildResult.cpuUser && buildResult.cpuSystem) {
debug("builder for '%s' terminated with status %d, user CPU %.3fs, system CPU %.3fs",
worker.store.printStorePath(drvPath),
status,
((double) buildResult.cpuUser->count()) / 1000000,
((double) buildResult.cpuSystem->count()) / 1000000);
}
bool diskFull = false; bool diskFull = false;
try { try {

View file

@ -16,11 +16,11 @@ HookInstance::HookInstance()
buildHookArgs.pop_front(); buildHookArgs.pop_front();
Strings args; Strings args;
args.push_back(std::string(baseNameOf(buildHook)));
for (auto & arg : buildHookArgs) for (auto & arg : buildHookArgs)
args.push_back(arg); args.push_back(arg);
args.push_back(std::string(baseNameOf(settings.buildHook.get())));
args.push_back(std::to_string(verbosity)); args.push_back(std::to_string(verbosity));
/* Create a pipe to get the output of the child. */ /* Create a pipe to get the output of the child. */

View file

@ -8,13 +8,13 @@
#include "finally.hh" #include "finally.hh"
#include "util.hh" #include "util.hh"
#include "archive.hh" #include "archive.hh"
#include "json.hh"
#include "compression.hh" #include "compression.hh"
#include "daemon.hh" #include "daemon.hh"
#include "worker-protocol.hh" #include "worker-protocol.hh"
#include "topo-sort.hh" #include "topo-sort.hh"
#include "callback.hh" #include "callback.hh"
#include "json-utils.hh" #include "json-utils.hh"
#include "cgroup.hh"
#include <regex> #include <regex>
#include <queue> #include <queue>
@ -56,6 +56,7 @@
#include <pwd.h> #include <pwd.h>
#include <grp.h> #include <grp.h>
#include <iostream>
namespace nix { namespace nix {
@ -129,26 +130,44 @@ void LocalDerivationGoal::killChild()
if (pid != -1) { if (pid != -1) {
worker.childTerminated(this); worker.childTerminated(this);
if (buildUser) { /* If we're using a build user, then there is a tricky race
/* If we're using a build user, then there is a tricky condition: if we kill the build user before the child has
race condition: if we kill the build user before the done its setuid() to the build user uid, then it won't be
child has done its setuid() to the build user uid, then killed, and we'll potentially lock up in pid.wait(). So
it won't be killed, and we'll potentially lock up in also send a conventional kill to the child. */
pid.wait(). So also send a conventional kill to the ::kill(-pid, SIGKILL); /* ignore the result */
child. */
::kill(-pid, SIGKILL); /* ignore the result */
buildUser->kill();
pid.wait();
} else
pid.kill();
assert(pid == -1); killSandbox(true);
pid.wait();
} }
DerivationGoal::killChild(); DerivationGoal::killChild();
} }
void LocalDerivationGoal::killSandbox(bool getStats)
{
if (cgroup) {
#if __linux__
auto stats = destroyCgroup(*cgroup);
if (getStats) {
buildResult.cpuUser = stats.cpuUser;
buildResult.cpuSystem = stats.cpuSystem;
}
#else
abort();
#endif
}
else if (buildUser) {
auto uid = buildUser->getUID();
assert(uid != 0);
killUser(uid);
}
}
void LocalDerivationGoal::tryLocalBuild() { void LocalDerivationGoal::tryLocalBuild() {
unsigned int curBuilds = worker.getNrLocalBuilds(); unsigned int curBuilds = worker.getNrLocalBuilds();
if (curBuilds >= settings.maxBuildJobs) { if (curBuilds >= settings.maxBuildJobs) {
@ -158,28 +177,46 @@ void LocalDerivationGoal::tryLocalBuild() {
return; return;
} }
/* If `build-users-group' is not empty, then we have to build as /* Are we doing a chroot build? */
one of the members of that group. */ {
if (settings.buildUsersGroup != "" && getuid() == 0) { auto noChroot = parsedDrv->getBoolAttr("__noChroot");
#if defined(__linux__) || defined(__APPLE__) if (settings.sandboxMode == smEnabled) {
if (!buildUser) buildUser = std::make_unique<UserLock>(); if (noChroot)
throw Error("derivation '%s' has '__noChroot' set, "
"but that's not allowed when 'sandbox' is 'true'", worker.store.printStorePath(drvPath));
#if __APPLE__
if (additionalSandboxProfile != "")
throw Error("derivation '%s' specifies a sandbox profile, "
"but this is only allowed when 'sandbox' is 'relaxed'", worker.store.printStorePath(drvPath));
#endif
useChroot = true;
}
else if (settings.sandboxMode == smDisabled)
useChroot = false;
else if (settings.sandboxMode == smRelaxed)
useChroot = derivationType.isSandboxed() && !noChroot;
}
if (buildUser->findFreeUser()) { auto & localStore = getLocalStore();
/* Make sure that no other processes are executing under this if (localStore.storeDir != localStore.realStoreDir.get()) {
uid. */ #if __linux__
buildUser->kill(); useChroot = true;
} else { #else
throw Error("building using a diverted store is not supported on this platform");
#endif
}
if (useBuildUsers()) {
if (!buildUser)
buildUser = acquireUserLock(parsedDrv->useUidRange() ? 65536 : 1, useChroot);
if (!buildUser) {
if (!actLock) if (!actLock)
actLock = std::make_unique<Activity>(*logger, lvlWarn, actBuildWaiting, actLock = std::make_unique<Activity>(*logger, lvlWarn, actBuildWaiting,
fmt("waiting for UID to build '%s'", yellowtxt(worker.store.printStorePath(drvPath)))); fmt("waiting for UID to build '%s'", yellowtxt(worker.store.printStorePath(drvPath))));
worker.waitForAWhile(shared_from_this()); worker.waitForAWhile(shared_from_this());
return; return;
} }
#else
/* Don't know how to block the creation of setuid/setgid
binaries on this platform. */
throw Error("build users are not supported on this platform for security reasons");
#endif
} }
actLock.reset(); actLock.reset();
@ -270,7 +307,7 @@ void LocalDerivationGoal::cleanupPostChildKill()
malicious user from leaving behind a process that keeps files malicious user from leaving behind a process that keeps files
open and modifies them after they have been chown'ed to open and modifies them after they have been chown'ed to
root. */ root. */
if (buildUser) buildUser->kill(); killSandbox(true);
/* Terminate the recursive Nix daemon. */ /* Terminate the recursive Nix daemon. */
stopDaemon(); stopDaemon();
@ -363,6 +400,60 @@ static void linkOrCopy(const Path & from, const Path & to)
void LocalDerivationGoal::startBuilder() void LocalDerivationGoal::startBuilder()
{ {
if ((buildUser && buildUser->getUIDCount() != 1)
#if __linux__
|| settings.useCgroups
#endif
)
{
#if __linux__
settings.requireExperimentalFeature(Xp::Cgroups);
auto ourCgroups = getCgroups("/proc/self/cgroup");
auto ourCgroup = ourCgroups[""];
if (ourCgroup == "")
throw Error("cannot determine cgroup name from /proc/self/cgroup");
auto ourCgroupPath = canonPath("/sys/fs/cgroup/" + ourCgroup);
if (!pathExists(ourCgroupPath))
throw Error("expected cgroup directory '%s'", ourCgroupPath);
static std::atomic<unsigned int> counter{0};
cgroup = buildUser
? fmt("%s/nix-build-uid-%d", ourCgroupPath, buildUser->getUID())
: fmt("%s/nix-build-pid-%d-%d", ourCgroupPath, getpid(), counter++);
debug("using cgroup '%s'", *cgroup);
/* When using a build user, record the cgroup we used for that
user so that if we got interrupted previously, we can kill
any left-over cgroup first. */
if (buildUser) {
auto cgroupsDir = settings.nixStateDir + "/cgroups";
createDirs(cgroupsDir);
auto cgroupFile = fmt("%s/%d", cgroupsDir, buildUser->getUID());
if (pathExists(cgroupFile)) {
auto prevCgroup = readFile(cgroupFile);
destroyCgroup(prevCgroup);
}
writeFile(cgroupFile, *cgroup);
}
#else
throw Error("cgroups are not supported on this platform");
#endif
}
/* Make sure that no other processes are executing under the
sandbox uids. This must be done before any chownToBuilder()
calls. */
killSandbox(false);
/* Right platform? */ /* Right platform? */
if (!parsedDrv->canBuildLocally(worker.store)) if (!parsedDrv->canBuildLocally(worker.store))
throw Error("a '%s' with features {%s} is required to build '%s', but I am a '%s' with features {%s}", throw Error("a '%s' with features {%s} is required to build '%s', but I am a '%s' with features {%s}",
@ -376,35 +467,6 @@ void LocalDerivationGoal::startBuilder()
additionalSandboxProfile = parsedDrv->getStringAttr("__sandboxProfile").value_or(""); additionalSandboxProfile = parsedDrv->getStringAttr("__sandboxProfile").value_or("");
#endif #endif
/* Are we doing a chroot build? */
{
auto noChroot = parsedDrv->getBoolAttr("__noChroot");
if (settings.sandboxMode == smEnabled) {
if (noChroot)
throw Error("derivation '%s' has '__noChroot' set, "
"but that's not allowed when 'sandbox' is 'true'", worker.store.printStorePath(drvPath));
#if __APPLE__
if (additionalSandboxProfile != "")
throw Error("derivation '%s' specifies a sandbox profile, "
"but this is only allowed when 'sandbox' is 'relaxed'", worker.store.printStorePath(drvPath));
#endif
useChroot = true;
}
else if (settings.sandboxMode == smDisabled)
useChroot = false;
else if (settings.sandboxMode == smRelaxed)
useChroot = derivationType.isSandboxed() && !noChroot;
}
auto & localStore = getLocalStore();
if (localStore.storeDir != localStore.realStoreDir.get()) {
#if __linux__
useChroot = true;
#else
throw Error("building using a diverted store is not supported on this platform");
#endif
}
/* Create a temporary directory where the build will take /* Create a temporary directory where the build will take
place. */ place. */
tmpDir = createTempDir("", "nix-build-" + std::string(drvPath.name()), false, false, 0700); tmpDir = createTempDir("", "nix-build-" + std::string(drvPath.name()), false, false, 0700);
@ -580,10 +642,11 @@ void LocalDerivationGoal::startBuilder()
printMsg(lvlChatty, format("setting up chroot environment in '%1%'") % chrootRootDir); printMsg(lvlChatty, format("setting up chroot environment in '%1%'") % chrootRootDir);
if (mkdir(chrootRootDir.c_str(), 0750) == -1) // FIXME: make this 0700
if (mkdir(chrootRootDir.c_str(), buildUser && buildUser->getUIDCount() != 1 ? 0755 : 0750) == -1)
throw SysError("cannot create '%1%'", chrootRootDir); throw SysError("cannot create '%1%'", chrootRootDir);
if (buildUser && chown(chrootRootDir.c_str(), 0, buildUser->getGID()) == -1) if (buildUser && chown(chrootRootDir.c_str(), buildUser->getUIDCount() != 1 ? buildUser->getUID() : 0, buildUser->getGID()) == -1)
throw SysError("cannot change ownership of '%1%'", chrootRootDir); throw SysError("cannot change ownership of '%1%'", chrootRootDir);
/* Create a writable /tmp in the chroot. Many builders need /* Create a writable /tmp in the chroot. Many builders need
@ -597,6 +660,10 @@ void LocalDerivationGoal::startBuilder()
nobody account. The latter is kind of a hack to support nobody account. The latter is kind of a hack to support
Samba-in-QEMU. */ Samba-in-QEMU. */
createDirs(chrootRootDir + "/etc"); createDirs(chrootRootDir + "/etc");
chownToBuilder(chrootRootDir + "/etc");
if (parsedDrv->useUidRange() && (!buildUser || buildUser->getUIDCount() < 65536))
throw Error("feature 'uid-range' requires the setting '%s' to be enabled", settings.autoAllocateUids.name);
/* Declare the build user's group so that programs get a consistent /* Declare the build user's group so that programs get a consistent
view of the system (e.g., "id -gn"). */ view of the system (e.g., "id -gn"). */
@ -647,12 +714,28 @@ void LocalDerivationGoal::startBuilder()
dirsInChroot.erase(worker.store.printStorePath(*i.second.second)); dirsInChroot.erase(worker.store.printStorePath(*i.second.second));
} }
#elif __APPLE__ if (cgroup) {
/* We don't really have any parent prep work to do (yet?) if (mkdir(cgroup->c_str(), 0755) != 0)
All work happens in the child, instead. */ throw SysError("creating cgroup '%s'", *cgroup);
chownToBuilder(*cgroup);
chownToBuilder(*cgroup + "/cgroup.procs");
chownToBuilder(*cgroup + "/cgroup.threads");
//chownToBuilder(*cgroup + "/cgroup.subtree_control");
}
#else #else
throw Error("sandboxing builds is not supported on this platform"); if (parsedDrv->useUidRange())
throw Error("feature 'uid-range' is not supported on this platform");
#if __APPLE__
/* We don't really have any parent prep work to do (yet?)
All work happens in the child, instead. */
#else
throw Error("sandboxing builds is not supported on this platform");
#endif
#endif #endif
} else {
if (parsedDrv->useUidRange())
throw Error("feature 'uid-range' is only supported in sandboxed builds");
} }
if (needsHashRewrite() && pathExists(homeDir)) if (needsHashRewrite() && pathExists(homeDir))
@ -913,14 +996,16 @@ void LocalDerivationGoal::startBuilder()
the calling user (if build users are disabled). */ the calling user (if build users are disabled). */
uid_t hostUid = buildUser ? buildUser->getUID() : getuid(); uid_t hostUid = buildUser ? buildUser->getUID() : getuid();
uid_t hostGid = buildUser ? buildUser->getGID() : getgid(); uid_t hostGid = buildUser ? buildUser->getGID() : getgid();
uid_t nrIds = buildUser ? buildUser->getUIDCount() : 1;
writeFile("/proc/" + std::to_string(pid) + "/uid_map", writeFile("/proc/" + std::to_string(pid) + "/uid_map",
fmt("%d %d 1", sandboxUid(), hostUid)); fmt("%d %d %d", sandboxUid(), hostUid, nrIds));
writeFile("/proc/" + std::to_string(pid) + "/setgroups", "deny"); if (!buildUser || buildUser->getUIDCount() == 1)
writeFile("/proc/" + std::to_string(pid) + "/setgroups", "deny");
writeFile("/proc/" + std::to_string(pid) + "/gid_map", writeFile("/proc/" + std::to_string(pid) + "/gid_map",
fmt("%d %d 1", sandboxGid(), hostGid)); fmt("%d %d %d", sandboxGid(), hostGid, nrIds));
} else { } else {
debug("note: not using a user namespace"); debug("note: not using a user namespace");
if (!buildUser) if (!buildUser)
@ -947,6 +1032,10 @@ void LocalDerivationGoal::startBuilder()
throw SysError("getting sandbox user namespace"); throw SysError("getting sandbox user namespace");
} }
/* Move the child into its own cgroup. */
if (cgroup)
writeFile(*cgroup + "/cgroup.procs", fmt("%d", (pid_t) pid));
/* Signal the builder that we've updated its user namespace. */ /* Signal the builder that we've updated its user namespace. */
writeFull(userNamespaceSync.writeSide.get(), "1"); writeFull(userNamespaceSync.writeSide.get(), "1");
@ -1552,6 +1641,22 @@ void setupSeccomp()
seccomp_arch_add(ctx, SCMP_ARCH_ARM) != 0) seccomp_arch_add(ctx, SCMP_ARCH_ARM) != 0)
printError("unable to add ARM seccomp architecture; this may result in spurious build failures if running 32-bit ARM processes"); printError("unable to add ARM seccomp architecture; this may result in spurious build failures if running 32-bit ARM processes");
if (nativeSystem == "mips64-linux" &&
seccomp_arch_add(ctx, SCMP_ARCH_MIPS) != 0)
printError("unable to add mips seccomp architecture");
if (nativeSystem == "mips64-linux" &&
seccomp_arch_add(ctx, SCMP_ARCH_MIPS64N32) != 0)
printError("unable to add mips64-*abin32 seccomp architecture");
if (nativeSystem == "mips64el-linux" &&
seccomp_arch_add(ctx, SCMP_ARCH_MIPSEL) != 0)
printError("unable to add mipsel seccomp architecture");
if (nativeSystem == "mips64el-linux" &&
seccomp_arch_add(ctx, SCMP_ARCH_MIPSEL64N32) != 0)
printError("unable to add mips64el-*abin32 seccomp architecture");
/* Prevent builders from creating setuid/setgid binaries. */ /* Prevent builders from creating setuid/setgid binaries. */
for (int perm : { S_ISUID, S_ISGID }) { for (int perm : { S_ISUID, S_ISGID }) {
if (seccomp_rule_add(ctx, SCMP_ACT_ERRNO(EPERM), SCMP_SYS(chmod), 1, if (seccomp_rule_add(ctx, SCMP_ACT_ERRNO(EPERM), SCMP_SYS(chmod), 1,
@ -1763,6 +1868,13 @@ void LocalDerivationGoal::runChild()
if (mount("none", (chrootRootDir + "/proc").c_str(), "proc", 0, 0) == -1) if (mount("none", (chrootRootDir + "/proc").c_str(), "proc", 0, 0) == -1)
throw SysError("mounting /proc"); throw SysError("mounting /proc");
/* Mount sysfs on /sys. */
if (buildUser && buildUser->getUIDCount() != 1) {
createDirs(chrootRootDir + "/sys");
if (mount("none", (chrootRootDir + "/sys").c_str(), "sysfs", 0, 0) == -1)
throw SysError("mounting /sys");
}
/* Mount a new tmpfs on /dev/shm to ensure that whatever /* Mount a new tmpfs on /dev/shm to ensure that whatever
the builder puts in /dev/shm is cleaned up automatically. */ the builder puts in /dev/shm is cleaned up automatically. */
if (pathExists("/dev/shm") && mount("none", (chrootRootDir + "/dev/shm").c_str(), "tmpfs", 0, if (pathExists("/dev/shm") && mount("none", (chrootRootDir + "/dev/shm").c_str(), "tmpfs", 0,
@ -1805,6 +1917,12 @@ void LocalDerivationGoal::runChild()
if (unshare(CLONE_NEWNS) == -1) if (unshare(CLONE_NEWNS) == -1)
throw SysError("unsharing mount namespace"); throw SysError("unsharing mount namespace");
/* Unshare the cgroup namespace. This means
/proc/self/cgroup will show the child's cgroup as '/'
rather than whatever it is in the parent. */
if (cgroup && unshare(CLONE_NEWCGROUP) == -1)
throw SysError("unsharing cgroup namespace");
/* Do the chroot(). */ /* Do the chroot(). */
if (chdir(chrootRootDir.c_str()) == -1) if (chdir(chrootRootDir.c_str()) == -1)
throw SysError("cannot change directory to '%1%'", chrootRootDir); throw SysError("cannot change directory to '%1%'", chrootRootDir);
@ -1890,9 +2008,8 @@ void LocalDerivationGoal::runChild()
if (setUser && buildUser) { if (setUser && buildUser) {
/* Preserve supplementary groups of the build user, to allow /* Preserve supplementary groups of the build user, to allow
admins to specify groups such as "kvm". */ admins to specify groups such as "kvm". */
if (!buildUser->getSupplementaryGIDs().empty() && auto gids = buildUser->getSupplementaryGIDs();
setgroups(buildUser->getSupplementaryGIDs().size(), if (setgroups(gids.size(), gids.data()) == -1)
buildUser->getSupplementaryGIDs().data()) == -1)
throw SysError("cannot set supplementary groups of build user"); throw SysError("cannot set supplementary groups of build user");
if (setgid(buildUser->getGID()) == -1 || if (setgid(buildUser->getGID()) == -1 ||
@ -2221,7 +2338,10 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
/* Canonicalise first. This ensures that the path we're /* Canonicalise first. This ensures that the path we're
rewriting doesn't contain a hard link to /etc/shadow or rewriting doesn't contain a hard link to /etc/shadow or
something like that. */ something like that. */
canonicalisePathMetaData(actualPath, buildUser ? buildUser->getUID() : -1, inodesSeen); canonicalisePathMetaData(
actualPath,
buildUser ? std::optional(buildUser->getUIDRange()) : std::nullopt,
inodesSeen);
debug("scanning for references for output '%s' in temp location '%s'", outputName, actualPath); debug("scanning for references for output '%s' in temp location '%s'", outputName, actualPath);
@ -2314,6 +2434,10 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
sink.s = rewriteStrings(sink.s, outputRewrites); sink.s = rewriteStrings(sink.s, outputRewrites);
StringSource source(sink.s); StringSource source(sink.s);
restorePath(actualPath, source); restorePath(actualPath, source);
/* FIXME: set proper permissions in restorePath() so
we don't have to do another traversal. */
canonicalisePathMetaData(actualPath, {}, inodesSeen);
} }
}; };
@ -2476,7 +2600,7 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
/* FIXME: set proper permissions in restorePath() so /* FIXME: set proper permissions in restorePath() so
we don't have to do another traversal. */ we don't have to do another traversal. */
canonicalisePathMetaData(actualPath, -1, inodesSeen); canonicalisePathMetaData(actualPath, {}, inodesSeen);
/* Calculate where we'll move the output files. In the checking case we /* Calculate where we'll move the output files. In the checking case we
will leave leave them where they are, for now, rather than move to will leave leave them where they are, for now, rather than move to

View file

@ -15,6 +15,9 @@ struct LocalDerivationGoal : public DerivationGoal
/* The process ID of the builder. */ /* The process ID of the builder. */
Pid pid; Pid pid;
/* The cgroup of the builder, if any. */
std::optional<Path> cgroup;
/* The temporary directory. */ /* The temporary directory. */
Path tmpDir; Path tmpDir;
@ -92,8 +95,8 @@ struct LocalDerivationGoal : public DerivationGoal
result. */ result. */
std::map<Path, ValidPathInfo> prevInfos; std::map<Path, ValidPathInfo> prevInfos;
uid_t sandboxUid() { return usingUserNamespace ? 1000 : buildUser->getUID(); } uid_t sandboxUid() { return usingUserNamespace ? (!buildUser || buildUser->getUIDCount() == 1 ? 1000 : 0) : buildUser->getUID(); }
gid_t sandboxGid() { return usingUserNamespace ? 100 : buildUser->getGID(); } gid_t sandboxGid() { return usingUserNamespace ? (!buildUser || buildUser->getUIDCount() == 1 ? 100 : 0) : buildUser->getGID(); }
const static Path homeDir; const static Path homeDir;
@ -197,6 +200,10 @@ struct LocalDerivationGoal : public DerivationGoal
/* Forcibly kill the child process, if any. */ /* Forcibly kill the child process, if any. */
void killChild() override; void killChild() override;
/* Kill any processes running under the build user UID or in the
cgroup of the build. */
void killSandbox(bool getStats);
/* Create alternative path calculated from but distinct from the /* Create alternative path calculated from but distinct from the
input, so we can avoid overwriting outputs (or other store paths) input, so we can avoid overwriting outputs (or other store paths)
that already exist. */ that already exist. */

131
src/libstore/cgroup.cc Normal file
View file

@ -0,0 +1,131 @@
#if __linux__
#include "cgroup.hh"
#include "util.hh"
#include <chrono>
#include <cmath>
#include <regex>
#include <unordered_set>
#include <thread>
#include <dirent.h>
namespace nix {
// FIXME: obsolete, check for cgroup2
std::map<std::string, std::string> getCgroups(const Path & cgroupFile)
{
std::map<std::string, std::string> cgroups;
for (auto & line : tokenizeString<std::vector<std::string>>(readFile(cgroupFile), "\n")) {
static std::regex regex("([0-9]+):([^:]*):(.*)");
std::smatch match;
if (!std::regex_match(line, match, regex))
throw Error("invalid line '%s' in '%s'", line, cgroupFile);
std::string name = hasPrefix(std::string(match[2]), "name=") ? std::string(match[2], 5) : match[2];
cgroups.insert_or_assign(name, match[3]);
}
return cgroups;
}
static CgroupStats destroyCgroup(const Path & cgroup, bool returnStats)
{
if (!pathExists(cgroup)) return {};
auto procsFile = cgroup + "/cgroup.procs";
if (!pathExists(procsFile))
throw Error("'%s' is not a cgroup", cgroup);
/* Use the fast way to kill every process in a cgroup, if
available. */
auto killFile = cgroup + "/cgroup.kill";
if (pathExists(killFile))
writeFile(killFile, "1");
/* Otherwise, manually kill every process in the subcgroups and
this cgroup. */
for (auto & entry : readDirectory(cgroup)) {
if (entry.type != DT_DIR) continue;
destroyCgroup(cgroup + "/" + entry.name, false);
}
int round = 1;
std::unordered_set<pid_t> pidsShown;
while (true) {
auto pids = tokenizeString<std::vector<std::string>>(readFile(procsFile));
if (pids.empty()) break;
if (round > 20)
throw Error("cannot kill cgroup '%s'", cgroup);
for (auto & pid_s : pids) {
pid_t pid;
if (auto o = string2Int<pid_t>(pid_s))
pid = *o;
else
throw Error("invalid pid '%s'", pid);
if (pidsShown.insert(pid).second) {
try {
auto cmdline = readFile(fmt("/proc/%d/cmdline", pid));
using namespace std::string_literals;
warn("killing stray builder process %d (%s)...",
pid, trim(replaceStrings(cmdline, "\0"s, " ")));
} catch (SysError &) {
}
}
// FIXME: pid wraparound
if (kill(pid, SIGKILL) == -1 && errno != ESRCH)
throw SysError("killing member %d of cgroup '%s'", pid, cgroup);
}
auto sleep = std::chrono::milliseconds((int) std::pow(2.0, std::min(round, 10)));
if (sleep.count() > 100)
printError("waiting for %d ms for cgroup '%s' to become empty", sleep.count(), cgroup);
std::this_thread::sleep_for(sleep);
round++;
}
CgroupStats stats;
if (returnStats) {
auto cpustatPath = cgroup + "/cpu.stat";
if (pathExists(cpustatPath)) {
for (auto & line : tokenizeString<std::vector<std::string>>(readFile(cpustatPath), "\n")) {
std::string_view userPrefix = "user_usec ";
if (hasPrefix(line, userPrefix)) {
auto n = string2Int<uint64_t>(line.substr(userPrefix.size()));
if (n) stats.cpuUser = std::chrono::microseconds(*n);
}
std::string_view systemPrefix = "system_usec ";
if (hasPrefix(line, systemPrefix)) {
auto n = string2Int<uint64_t>(line.substr(systemPrefix.size()));
if (n) stats.cpuSystem = std::chrono::microseconds(*n);
}
}
}
}
if (rmdir(cgroup.c_str()) == -1)
throw SysError("deleting cgroup '%s'", cgroup);
return stats;
}
CgroupStats destroyCgroup(const Path & cgroup)
{
return destroyCgroup(cgroup, true);
}
}
#endif

27
src/libstore/cgroup.hh Normal file
View file

@ -0,0 +1,27 @@
#pragma once
#if __linux__
#include <chrono>
#include <optional>
#include "types.hh"
namespace nix {
std::map<std::string, std::string> getCgroups(const Path & cgroupFile);
struct CgroupStats
{
std::optional<std::chrono::microseconds> cpuUser, cpuSystem;
};
/* Destroy the cgroup denoted by 'path'. The postcondition is that
'path' does not exist, and thus any processes in the cgroup have
been killed. Also return statistics from the cgroup just before
destruction. */
CgroupStats destroyCgroup(const Path & cgroup);
}
#endif

View file

@ -53,28 +53,13 @@ StorePathSet BuiltPath::outPaths() const
); );
} }
template<typename T> std::string DerivedPath::Opaque::to_string(const Store & store) const
nlohmann::json stuffToJSON(const std::vector<T> & ts, ref<Store> store) { {
auto res = nlohmann::json::array();
for (const T & t : ts) {
std::visit([&res, store](const auto & t) {
res.push_back(t.toJSON(store));
}, t.raw());
}
return res;
}
nlohmann::json derivedPathsWithHintsToJSON(const BuiltPaths & buildables, ref<Store> store)
{ return stuffToJSON<BuiltPath>(buildables, store); }
nlohmann::json derivedPathsToJSON(const DerivedPaths & paths, ref<Store> store)
{ return stuffToJSON<DerivedPath>(paths, store); }
std::string DerivedPath::Opaque::to_string(const Store & store) const {
return store.printStorePath(path); return store.printStorePath(path);
} }
std::string DerivedPath::Built::to_string(const Store & store) const { std::string DerivedPath::Built::to_string(const Store & store) const
{
return store.printStorePath(drvPath) return store.printStorePath(drvPath)
+ "!" + "!"
+ (outputs.empty() ? std::string { "*" } : concatStringsSep(",", outputs)); + (outputs.empty() ? std::string { "*" } : concatStringsSep(",", outputs));

View file

@ -125,7 +125,4 @@ struct BuiltPath : _BuiltPathRaw {
typedef std::vector<DerivedPath> DerivedPaths; typedef std::vector<DerivedPath> DerivedPaths;
typedef std::vector<BuiltPath> BuiltPaths; typedef std::vector<BuiltPath> BuiltPaths;
nlohmann::json derivedPathsWithHintsToJSON(const BuiltPaths & buildables, ref<Store> store);
nlohmann::json derivedPathsToJSON(const DerivedPaths & , ref<Store> store);
} }

View file

@ -147,7 +147,7 @@ void LocalStore::addTempRoot(const StorePath & path)
} catch (SysError & e) { } catch (SysError & e) {
/* The garbage collector may have exited, so we need to /* The garbage collector may have exited, so we need to
restart. */ restart. */
if (e.errNo == EPIPE) { if (e.errNo == EPIPE || e.errNo == ECONNRESET) {
debug("GC socket disconnected"); debug("GC socket disconnected");
state->fdRootsSocket.close(); state->fdRootsSocket.close();
goto restart; goto restart;
@ -506,6 +506,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
Finally cleanup([&]() { Finally cleanup([&]() {
debug("GC roots server shutting down"); debug("GC roots server shutting down");
fdServer.close();
while (true) { while (true) {
auto item = remove_begin(*connections.lock()); auto item = remove_begin(*connections.lock());
if (!item) break; if (!item) break;

View file

@ -130,6 +130,10 @@ StringSet Settings::getDefaultSystemFeatures()
actually require anything special on the machines. */ actually require anything special on the machines. */
StringSet features{"nixos-test", "benchmark", "big-parallel"}; StringSet features{"nixos-test", "benchmark", "big-parallel"};
#if __linux__
features.insert("uid-range");
#endif
#if __linux__ #if __linux__
if (access("/dev/kvm", R_OK | W_OK) == 0) if (access("/dev/kvm", R_OK | W_OK) == 0)
features.insert("kvm"); features.insert("kvm");

View file

@ -46,6 +46,14 @@ struct PluginFilesSetting : public BaseSetting<Paths>
void set(const std::string & str, bool append = false) override; void set(const std::string & str, bool append = false) override;
}; };
const uint32_t maxIdsPerBuild =
#if __linux__
1 << 16
#else
1
#endif
;
class Settings : public Config { class Settings : public Config {
unsigned int getDefaultCores(); unsigned int getDefaultCores();
@ -275,6 +283,38 @@ public:
multi-user settings with untrusted users. multi-user settings with untrusted users.
)"}; )"};
Setting<bool> autoAllocateUids{this, false, "auto-allocate-uids",
"Whether to allocate UIDs for builders automatically."};
Setting<uint32_t> startId{this,
#if __linux__
0x34000000,
#else
56930,
#endif
"start-id",
"The first UID and GID to use for dynamic ID allocation."};
Setting<uint32_t> uidCount{this,
#if __linux__
maxIdsPerBuild * 128,
#else
128,
#endif
"id-count",
"The number of UIDs/GIDs to use for dynamic ID allocation."};
#if __linux__
Setting<bool> useCgroups{
this, false, "use-cgroups",
R"(
Whether to execute builds inside cgroups. Cgroups are
enabled automatically for derivations that require the
`uid-range` system feature.
)"
};
#endif
Setting<bool> impersonateLinux26{this, false, "impersonate-linux-26", Setting<bool> impersonateLinux26{this, false, "impersonate-linux-26",
"Whether to impersonate a Linux 2.6 machine on newer kernels.", "Whether to impersonate a Linux 2.6 machine on newer kernels.",
{"build-impersonate-linux-26"}}; {"build-impersonate-linux-26"}};
@ -563,10 +603,10 @@ public:
cache) must have a signature by a trusted key. A trusted key is one cache) must have a signature by a trusted key. A trusted key is one
listed in `trusted-public-keys`, or a public key counterpart to a listed in `trusted-public-keys`, or a public key counterpart to a
private key stored in a file listed in `secret-key-files`. private key stored in a file listed in `secret-key-files`.
Set to `false` to disable signature checking and trust all Set to `false` to disable signature checking and trust all
non-content-addressed paths unconditionally. non-content-addressed paths unconditionally.
(Content-addressed paths are inherently trustworthy and thus (Content-addressed paths are inherently trustworthy and thus
unaffected by this configuration option.) unaffected by this configuration option.)
)"}; )"};

View file

@ -583,7 +583,10 @@ void canonicaliseTimestampAndPermissions(const Path & path)
} }
static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSeen & inodesSeen) static void canonicalisePathMetaData_(
const Path & path,
std::optional<std::pair<uid_t, uid_t>> uidRange,
InodesSeen & inodesSeen)
{ {
checkInterrupt(); checkInterrupt();
@ -630,7 +633,7 @@ static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSe
However, ignore files that we chown'ed ourselves previously to However, ignore files that we chown'ed ourselves previously to
ensure that we don't fail on hard links within the same build ensure that we don't fail on hard links within the same build
(i.e. "touch $out/foo; ln $out/foo $out/bar"). */ (i.e. "touch $out/foo; ln $out/foo $out/bar"). */
if (fromUid != (uid_t) -1 && st.st_uid != fromUid) { if (uidRange && (st.st_uid < uidRange->first || st.st_uid > uidRange->second)) {
if (S_ISDIR(st.st_mode) || !inodesSeen.count(Inode(st.st_dev, st.st_ino))) if (S_ISDIR(st.st_mode) || !inodesSeen.count(Inode(st.st_dev, st.st_ino)))
throw BuildError("invalid ownership on file '%1%'", path); throw BuildError("invalid ownership on file '%1%'", path);
mode_t mode = st.st_mode & ~S_IFMT; mode_t mode = st.st_mode & ~S_IFMT;
@ -663,14 +666,17 @@ static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSe
if (S_ISDIR(st.st_mode)) { if (S_ISDIR(st.st_mode)) {
DirEntries entries = readDirectory(path); DirEntries entries = readDirectory(path);
for (auto & i : entries) for (auto & i : entries)
canonicalisePathMetaData_(path + "/" + i.name, fromUid, inodesSeen); canonicalisePathMetaData_(path + "/" + i.name, uidRange, inodesSeen);
} }
} }
void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & inodesSeen) void canonicalisePathMetaData(
const Path & path,
std::optional<std::pair<uid_t, uid_t>> uidRange,
InodesSeen & inodesSeen)
{ {
canonicalisePathMetaData_(path, fromUid, inodesSeen); canonicalisePathMetaData_(path, uidRange, inodesSeen);
/* On platforms that don't have lchown(), the top-level path can't /* On platforms that don't have lchown(), the top-level path can't
be a symlink, since we can't change its ownership. */ be a symlink, since we can't change its ownership. */
@ -683,10 +689,11 @@ void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & ino
} }
void canonicalisePathMetaData(const Path & path, uid_t fromUid) void canonicalisePathMetaData(const Path & path,
std::optional<std::pair<uid_t, uid_t>> uidRange)
{ {
InodesSeen inodesSeen; InodesSeen inodesSeen;
canonicalisePathMetaData(path, fromUid, inodesSeen); canonicalisePathMetaData(path, uidRange, inodesSeen);
} }
@ -1331,7 +1338,7 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
autoGC(); autoGC();
canonicalisePathMetaData(realPath, -1); canonicalisePathMetaData(realPath, {});
optimisePath(realPath, repair); // FIXME: combine with hashPath() optimisePath(realPath, repair); // FIXME: combine with hashPath()
@ -1444,7 +1451,7 @@ StorePath LocalStore::addToStoreFromDump(Source & source0, std::string_view name
narHash = narSink.finish(); narHash = narSink.finish();
} }
canonicalisePathMetaData(realPath, -1); // FIXME: merge into restorePath canonicalisePathMetaData(realPath, {}); // FIXME: merge into restorePath
optimisePath(realPath, repair); optimisePath(realPath, repair);
@ -1486,7 +1493,7 @@ StorePath LocalStore::addTextToStore(
writeFile(realPath, s); writeFile(realPath, s);
canonicalisePathMetaData(realPath, -1); canonicalisePathMetaData(realPath, {});
StringSink sink; StringSink sink;
dumpString(s, sink); dumpString(s, sink);

View file

@ -310,9 +310,18 @@ typedef std::set<Inode> InodesSeen;
- the permissions are set of 444 or 555 (i.e., read-only with or - the permissions are set of 444 or 555 (i.e., read-only with or
without execute permission; setuid bits etc. are cleared) without execute permission; setuid bits etc. are cleared)
- the owner and group are set to the Nix user and group, if we're - the owner and group are set to the Nix user and group, if we're
running as root. */ running as root.
void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & inodesSeen); If uidRange is not empty, this function will throw an error if it
void canonicalisePathMetaData(const Path & path, uid_t fromUid); encounters files owned by a user outside of the closed interval
[uidRange->first, uidRange->second].
*/
void canonicalisePathMetaData(
const Path & path,
std::optional<std::pair<uid_t, uid_t>> uidRange,
InodesSeen & inodesSeen);
void canonicalisePathMetaData(
const Path & path,
std::optional<std::pair<uid_t, uid_t>> uidRange);
void canonicaliseTimestampAndPermissions(const Path & path); void canonicaliseTimestampAndPermissions(const Path & path);

View file

@ -20,7 +20,7 @@ endif
$(foreach file,$(libstore_FILES),$(eval $(call install-data-in,$(d)/$(file),$(datadir)/nix/sandbox))) $(foreach file,$(libstore_FILES),$(eval $(call install-data-in,$(d)/$(file),$(datadir)/nix/sandbox)))
ifeq ($(ENABLE_S3), 1) ifeq ($(ENABLE_S3), 1)
libstore_LDFLAGS += -laws-cpp-sdk-transfer -laws-cpp-sdk-s3 -laws-cpp-sdk-core libstore_LDFLAGS += -laws-cpp-sdk-transfer -laws-cpp-sdk-s3 -laws-cpp-sdk-core -laws-crt-cpp
endif endif
ifdef HOST_SOLARIS ifdef HOST_SOLARIS

View file

@ -2,105 +2,197 @@
#include "globals.hh" #include "globals.hh"
#include "pathlocks.hh" #include "pathlocks.hh"
#include <grp.h>
#include <pwd.h> #include <pwd.h>
#include <grp.h>
#include <fcntl.h>
#include <unistd.h>
namespace nix { namespace nix {
UserLock::UserLock() struct SimpleUserLock : UserLock
{ {
assert(settings.buildUsersGroup != ""); AutoCloseFD fdUserLock;
createDirs(settings.nixStateDir + "/userpool"); uid_t uid;
} gid_t gid;
std::vector<gid_t> supplementaryGIDs;
bool UserLock::findFreeUser() { uid_t getUID() override { assert(uid); return uid; }
if (enabled()) return true; uid_t getUIDCount() override { return 1; }
gid_t getGID() override { assert(gid); return gid; }
/* Get the members of the build-users-group. */ std::vector<gid_t> getSupplementaryGIDs() override { return supplementaryGIDs; }
struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str());
if (!gr)
throw Error("the group '%1%' specified in 'build-users-group' does not exist",
settings.buildUsersGroup);
gid = gr->gr_gid;
/* Copy the result of getgrnam. */ static std::unique_ptr<UserLock> acquire()
Strings users; {
for (char * * p = gr->gr_mem; *p; ++p) { assert(settings.buildUsersGroup != "");
debug("found build user '%1%'", *p); createDirs(settings.nixStateDir + "/userpool");
users.push_back(*p);
}
if (users.empty()) /* Get the members of the build-users-group. */
throw Error("the build users group '%1%' has no members", struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str());
settings.buildUsersGroup); if (!gr)
throw Error("the group '%s' specified in 'build-users-group' does not exist", settings.buildUsersGroup);
/* Find a user account that isn't currently in use for another /* Copy the result of getgrnam. */
build. */ Strings users;
for (auto & i : users) { for (char * * p = gr->gr_mem; *p; ++p) {
debug("trying user '%1%'", i); debug("found build user '%s'", *p);
users.push_back(*p);
struct passwd * pw = getpwnam(i.c_str());
if (!pw)
throw Error("the user '%1%' in the group '%2%' does not exist",
i, settings.buildUsersGroup);
fnUserLock = (format("%1%/userpool/%2%") % settings.nixStateDir % pw->pw_uid).str();
AutoCloseFD fd = open(fnUserLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
if (!fd)
throw SysError("opening user lock '%1%'", fnUserLock);
if (lockFile(fd.get(), ltWrite, false)) {
fdUserLock = std::move(fd);
user = i;
uid = pw->pw_uid;
/* Sanity check... */
if (uid == getuid() || uid == geteuid())
throw Error("the Nix user should not be a member of '%1%'",
settings.buildUsersGroup);
#if __linux__
/* Get the list of supplementary groups of this build user. This
is usually either empty or contains a group such as "kvm". */
int ngroups = 32; // arbitrary initial guess
supplementaryGIDs.resize(ngroups);
int err = getgrouplist(pw->pw_name, pw->pw_gid, supplementaryGIDs.data(),
&ngroups);
// Our initial size of 32 wasn't sufficient, the correct size has
// been stored in ngroups, so we try again.
if (err == -1) {
supplementaryGIDs.resize(ngroups);
err = getgrouplist(pw->pw_name, pw->pw_gid, supplementaryGIDs.data(),
&ngroups);
}
// If it failed once more, then something must be broken.
if (err == -1)
throw Error("failed to get list of supplementary groups for '%1%'",
pw->pw_name);
// Finally, trim back the GID list to its real size
supplementaryGIDs.resize(ngroups);
#endif
isEnabled = true;
return true;
} }
if (users.empty())
throw Error("the build users group '%s' has no members", settings.buildUsersGroup);
/* Find a user account that isn't currently in use for another
build. */
for (auto & i : users) {
debug("trying user '%s'", i);
struct passwd * pw = getpwnam(i.c_str());
if (!pw)
throw Error("the user '%s' in the group '%s' does not exist", i, settings.buildUsersGroup);
auto fnUserLock = fmt("%s/userpool/%s", settings.nixStateDir,pw->pw_uid);
AutoCloseFD fd = open(fnUserLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
if (!fd)
throw SysError("opening user lock '%s'", fnUserLock);
if (lockFile(fd.get(), ltWrite, false)) {
auto lock = std::make_unique<SimpleUserLock>();
lock->fdUserLock = std::move(fd);
lock->uid = pw->pw_uid;
lock->gid = gr->gr_gid;
/* Sanity check... */
if (lock->uid == getuid() || lock->uid == geteuid())
throw Error("the Nix user should not be a member of '%s'", settings.buildUsersGroup);
#if __linux__
/* Get the list of supplementary groups of this build
user. This is usually either empty or contains a
group such as "kvm". */
int ngroups = 32; // arbitrary initial guess
std::vector<gid_t> gids;
gids.resize(ngroups);
int err = getgrouplist(
pw->pw_name, pw->pw_gid,
gids.data(),
&ngroups);
/* Our initial size of 32 wasn't sufficient, the
correct size has been stored in ngroups, so we try
again. */
if (err == -1) {
gids.resize(ngroups);
err = getgrouplist(
pw->pw_name, pw->pw_gid,
gids.data(),
&ngroups);
}
// If it failed once more, then something must be broken.
if (err == -1)
throw Error("failed to get list of supplementary groups for '%s'", pw->pw_name);
// Finally, trim back the GID list to its real size.
for (auto i = 0; i < ngroups; i++)
if (gids[i] != lock->gid)
lock->supplementaryGIDs.push_back(gids[i]);
#endif
return lock;
}
}
return nullptr;
} }
};
return false; struct AutoUserLock : UserLock
}
void UserLock::kill()
{ {
killUser(uid); AutoCloseFD fdUserLock;
uid_t firstUid = 0;
gid_t firstGid = 0;
uid_t nrIds = 1;
uid_t getUID() override { assert(firstUid); return firstUid; }
gid_t getUIDCount() override { return nrIds; }
gid_t getGID() override { assert(firstGid); return firstGid; }
std::vector<gid_t> getSupplementaryGIDs() override { return {}; }
static std::unique_ptr<UserLock> acquire(uid_t nrIds, bool useChroot)
{
settings.requireExperimentalFeature(Xp::AutoAllocateUids);
assert(settings.startId > 0);
assert(settings.uidCount % maxIdsPerBuild == 0);
assert((uint64_t) settings.startId + (uint64_t) settings.uidCount <= std::numeric_limits<uid_t>::max());
assert(nrIds <= maxIdsPerBuild);
createDirs(settings.nixStateDir + "/userpool2");
size_t nrSlots = settings.uidCount / maxIdsPerBuild;
for (size_t i = 0; i < nrSlots; i++) {
debug("trying user slot '%d'", i);
createDirs(settings.nixStateDir + "/userpool2");
auto fnUserLock = fmt("%s/userpool2/slot-%d", settings.nixStateDir, i);
AutoCloseFD fd = open(fnUserLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
if (!fd)
throw SysError("opening user lock '%s'", fnUserLock);
if (lockFile(fd.get(), ltWrite, false)) {
auto firstUid = settings.startId + i * maxIdsPerBuild;
auto pw = getpwuid(firstUid);
if (pw)
throw Error("auto-allocated UID %d clashes with existing user account '%s'", firstUid, pw->pw_name);
auto lock = std::make_unique<AutoUserLock>();
lock->fdUserLock = std::move(fd);
lock->firstUid = firstUid;
if (useChroot)
lock->firstGid = firstUid;
else {
struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str());
if (!gr)
throw Error("the group '%s' specified in 'build-users-group' does not exist", settings.buildUsersGroup);
lock->firstGid = gr->gr_gid;
}
lock->nrIds = nrIds;
return lock;
}
}
return nullptr;
}
};
std::unique_ptr<UserLock> acquireUserLock(uid_t nrIds, bool useChroot)
{
if (settings.autoAllocateUids)
return AutoUserLock::acquire(nrIds, useChroot);
else
return SimpleUserLock::acquire();
}
bool useBuildUsers()
{
#if __linux__
static bool b = (settings.buildUsersGroup != "" || settings.startId.get() != 0) && getuid() == 0;
return b;
#elif __APPLE__
static bool b = settings.buildUsersGroup != "" && getuid() == 0;
return b;
#else
return false;
#endif
} }
} }

View file

@ -1,37 +1,38 @@
#pragma once #pragma once
#include "sync.hh"
#include "types.hh" #include "types.hh"
#include "util.hh"
#include <optional>
#include <sys/types.h>
namespace nix { namespace nix {
class UserLock struct UserLock
{ {
private: virtual ~UserLock() { }
Path fnUserLock;
AutoCloseFD fdUserLock;
bool isEnabled = false; /* Get the first and last UID. */
std::string user; std::pair<uid_t, uid_t> getUIDRange()
uid_t uid = 0; {
gid_t gid = 0; auto first = getUID();
std::vector<gid_t> supplementaryGIDs; return {first, first + getUIDCount() - 1};
}
public: /* Get the first UID. */
UserLock(); virtual uid_t getUID() = 0;
void kill(); virtual uid_t getUIDCount() = 0;
std::string getUser() { return user; } virtual gid_t getGID() = 0;
uid_t getUID() { assert(uid); return uid; }
uid_t getGID() { assert(gid); return gid; }
std::vector<gid_t> getSupplementaryGIDs() { return supplementaryGIDs; }
bool findFreeUser();
bool enabled() { return isEnabled; }
virtual std::vector<gid_t> getSupplementaryGIDs() = 0;
}; };
/* Acquire a user lock for a UID range of size `nrIds`. Note that this
may return nullptr if no user is available. */
std::unique_ptr<UserLock> acquireUserLock(uid_t nrIds, bool useChroot);
bool useBuildUsers();
} }

View file

@ -1,6 +1,5 @@
#include "nar-accessor.hh" #include "nar-accessor.hh"
#include "archive.hh" #include "archive.hh"
#include "json.hh"
#include <map> #include <map>
#include <stack> #include <stack>
@ -243,42 +242,43 @@ ref<FSAccessor> makeLazyNarAccessor(const std::string & listing,
return make_ref<NarAccessor>(listing, getNarBytes); return make_ref<NarAccessor>(listing, getNarBytes);
} }
void listNar(JSONPlaceholder & res, ref<FSAccessor> accessor, using nlohmann::json;
const Path & path, bool recurse) json listNar(ref<FSAccessor> accessor, const Path & path, bool recurse)
{ {
auto st = accessor->stat(path); auto st = accessor->stat(path);
auto obj = res.object(); json obj = json::object();
switch (st.type) { switch (st.type) {
case FSAccessor::Type::tRegular: case FSAccessor::Type::tRegular:
obj.attr("type", "regular"); obj["type"] = "regular";
obj.attr("size", st.fileSize); obj["size"] = st.fileSize;
if (st.isExecutable) if (st.isExecutable)
obj.attr("executable", true); obj["executable"] = true;
if (st.narOffset) if (st.narOffset)
obj.attr("narOffset", st.narOffset); obj["narOffset"] = st.narOffset;
break; break;
case FSAccessor::Type::tDirectory: case FSAccessor::Type::tDirectory:
obj.attr("type", "directory"); obj["type"] = "directory";
{ {
auto res2 = obj.object("entries"); obj["entries"] = json::object();
json &res2 = obj["entries"];
for (auto & name : accessor->readDirectory(path)) { for (auto & name : accessor->readDirectory(path)) {
if (recurse) { if (recurse) {
auto res3 = res2.placeholder(name); res2[name] = listNar(accessor, path + "/" + name, true);
listNar(res3, accessor, path + "/" + name, true);
} else } else
res2.object(name); res2[name] = json::object();
} }
} }
break; break;
case FSAccessor::Type::tSymlink: case FSAccessor::Type::tSymlink:
obj.attr("type", "symlink"); obj["type"] = "symlink";
obj.attr("target", accessor->readLink(path)); obj["target"] = accessor->readLink(path);
break; break;
default: default:
throw Error("path '%s' does not exist in NAR", path); throw Error("path '%s' does not exist in NAR", path);
} }
return obj;
} }
} }

View file

@ -2,6 +2,7 @@
#include <functional> #include <functional>
#include <nlohmann/json_fwd.hpp>
#include "fs-accessor.hh" #include "fs-accessor.hh"
namespace nix { namespace nix {
@ -24,11 +25,8 @@ ref<FSAccessor> makeLazyNarAccessor(
const std::string & listing, const std::string & listing,
GetNarBytes getNarBytes); GetNarBytes getNarBytes);
class JSONPlaceholder;
/* Write a JSON representation of the contents of a NAR (except file /* Write a JSON representation of the contents of a NAR (except file
contents). */ contents). */
void listNar(JSONPlaceholder & res, ref<FSAccessor> accessor, nlohmann::json listNar(ref<FSAccessor> accessor, const Path & path, bool recurse);
const Path & path, bool recurse);
} }

View file

@ -2,7 +2,6 @@
#include <nlohmann/json.hpp> #include <nlohmann/json.hpp>
#include <regex> #include <regex>
#include "json.hh"
namespace nix { namespace nix {
@ -90,6 +89,7 @@ std::optional<Strings> ParsedDerivation::getStringsAttr(const std::string & name
StringSet ParsedDerivation::getRequiredSystemFeatures() const StringSet ParsedDerivation::getRequiredSystemFeatures() const
{ {
// FIXME: cache this?
StringSet res; StringSet res;
for (auto & i : getStringsAttr("requiredSystemFeatures").value_or(Strings())) for (auto & i : getStringsAttr("requiredSystemFeatures").value_or(Strings()))
res.insert(i); res.insert(i);
@ -125,6 +125,11 @@ bool ParsedDerivation::substitutesAllowed() const
return getBoolAttr("allowSubstitutes", true); return getBoolAttr("allowSubstitutes", true);
} }
bool ParsedDerivation::useUidRange() const
{
return getRequiredSystemFeatures().count("uid-range");
}
static std::regex shVarName("[A-Za-z_][A-Za-z0-9_]*"); static std::regex shVarName("[A-Za-z_][A-Za-z0-9_]*");
std::optional<nlohmann::json> ParsedDerivation::prepareStructuredAttrs(Store & store, const StorePathSet & inputPaths) std::optional<nlohmann::json> ParsedDerivation::prepareStructuredAttrs(Store & store, const StorePathSet & inputPaths)
@ -144,16 +149,11 @@ std::optional<nlohmann::json> ParsedDerivation::prepareStructuredAttrs(Store & s
auto e = json.find("exportReferencesGraph"); auto e = json.find("exportReferencesGraph");
if (e != json.end() && e->is_object()) { if (e != json.end() && e->is_object()) {
for (auto i = e->begin(); i != e->end(); ++i) { for (auto i = e->begin(); i != e->end(); ++i) {
std::ostringstream str; StorePathSet storePaths;
{ for (auto & p : *i)
JSONPlaceholder jsonRoot(str, true); storePaths.insert(store.parseStorePath(p.get<std::string>()));
StorePathSet storePaths; json[i.key()] = store.pathInfoToJSON(
for (auto & p : *i) store.exportReferences(storePaths, inputPaths), false, true);
storePaths.insert(store.parseStorePath(p.get<std::string>()));
store.pathInfoToJSON(jsonRoot,
store.exportReferences(storePaths, inputPaths), false, true);
}
json[i.key()] = nlohmann::json::parse(str.str()); // urgh
} }
} }

View file

@ -38,6 +38,8 @@ public:
bool substitutesAllowed() const; bool substitutesAllowed() const;
bool useUidRange() const;
std::optional<nlohmann::json> prepareStructuredAttrs(Store & store, const StorePathSet & inputPaths); std::optional<nlohmann::json> prepareStructuredAttrs(Store & store, const StorePathSet & inputPaths);
}; };

View file

@ -67,6 +67,40 @@ void RefScanSink::operator () (std::string_view data)
} }
PathRefScanSink::PathRefScanSink(StringSet && hashes, std::map<std::string, StorePath> && backMap)
: RefScanSink(std::move(hashes))
, backMap(std::move(backMap))
{ }
PathRefScanSink PathRefScanSink::fromPaths(const StorePathSet & refs)
{
StringSet hashes;
std::map<std::string, StorePath> backMap;
for (auto & i : refs) {
std::string hashPart(i.hashPart());
auto inserted = backMap.emplace(hashPart, i).second;
assert(inserted);
hashes.insert(hashPart);
}
return PathRefScanSink(std::move(hashes), std::move(backMap));
}
StorePathSet PathRefScanSink::getResultPaths()
{
/* Map the hashes found back to their store paths. */
StorePathSet found;
for (auto & i : getResult()) {
auto j = backMap.find(i);
assert(j != backMap.end());
found.insert(j->second);
}
return found;
}
std::pair<StorePathSet, HashResult> scanForReferences( std::pair<StorePathSet, HashResult> scanForReferences(
const std::string & path, const std::string & path,
const StorePathSet & refs) const StorePathSet & refs)
@ -82,30 +116,13 @@ StorePathSet scanForReferences(
const Path & path, const Path & path,
const StorePathSet & refs) const StorePathSet & refs)
{ {
StringSet hashes; PathRefScanSink refsSink = PathRefScanSink::fromPaths(refs);
std::map<std::string, StorePath> backMap; TeeSink sink { refsSink, toTee };
for (auto & i : refs) {
std::string hashPart(i.hashPart());
auto inserted = backMap.emplace(hashPart, i).second;
assert(inserted);
hashes.insert(hashPart);
}
/* Look for the hashes in the NAR dump of the path. */ /* Look for the hashes in the NAR dump of the path. */
RefScanSink refsSink(std::move(hashes));
TeeSink sink { refsSink, toTee };
dumpPath(path, sink); dumpPath(path, sink);
/* Map the hashes found back to their store paths. */ return refsSink.getResultPaths();
StorePathSet found;
for (auto & i : refsSink.getResult()) {
auto j = backMap.find(i);
assert(j != backMap.end());
found.insert(j->second);
}
return found;
} }

View file

@ -27,6 +27,19 @@ public:
void operator () (std::string_view data) override; void operator () (std::string_view data) override;
}; };
class PathRefScanSink : public RefScanSink
{
std::map<std::string, StorePath> backMap;
PathRefScanSink(StringSet && hashes, std::map<std::string, StorePath> && backMap);
public:
static PathRefScanSink fromPaths(const StorePathSet & refs);
StorePathSet getResultPaths();
};
struct RewritingSink : Sink struct RewritingSink : Sink
{ {
std::string from, to, prev; std::string from, to, prev;

View file

@ -1,6 +1,6 @@
#include <nlohmann/json.hpp>
#include "remote-fs-accessor.hh" #include "remote-fs-accessor.hh"
#include "nar-accessor.hh" #include "nar-accessor.hh"
#include "json.hh"
#include <sys/types.h> #include <sys/types.h>
#include <sys/stat.h> #include <sys/stat.h>
@ -38,10 +38,8 @@ ref<FSAccessor> RemoteFSAccessor::addToCache(std::string_view hashPart, std::str
if (cacheDir != "") { if (cacheDir != "") {
try { try {
std::ostringstream str; nlohmann::json j = listNar(narAccessor, "", true);
JSONPlaceholder jsonRoot(str); writeFile(makeCacheFile(hashPart, "ls"), j.dump());
listNar(jsonRoot, narAccessor, "", true);
writeFile(makeCacheFile(hashPart, "ls"), str.str());
} catch (...) { } catch (...) {
ignoreException(); ignoreException();
} }

View file

@ -67,7 +67,7 @@ std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(const std::string
if (fakeSSH) { if (fakeSSH) {
args = { "bash", "-c" }; args = { "bash", "-c" };
} else { } else {
args = { "ssh", host.c_str(), "-x", "-a" }; args = { "ssh", host.c_str(), "-x" };
addCommonSSHOpts(args); addCommonSSHOpts(args);
if (socketPath != "") if (socketPath != "")
args.insert(args.end(), {"-S", socketPath}); args.insert(args.end(), {"-S", socketPath});

View file

@ -6,14 +6,16 @@
#include "util.hh" #include "util.hh"
#include "nar-info-disk-cache.hh" #include "nar-info-disk-cache.hh"
#include "thread-pool.hh" #include "thread-pool.hh"
#include "json.hh"
#include "url.hh" #include "url.hh"
#include "archive.hh" #include "archive.hh"
#include "callback.hh" #include "callback.hh"
#include "remote-store.hh" #include "remote-store.hh"
#include <nlohmann/json.hpp>
#include <regex> #include <regex>
using json = nlohmann::json;
namespace nix { namespace nix {
@ -838,56 +840,53 @@ StorePathSet Store::exportReferences(const StorePathSet & storePaths, const Stor
return paths; return paths;
} }
json Store::pathInfoToJSON(const StorePathSet & storePaths,
void Store::pathInfoToJSON(JSONPlaceholder & jsonOut, const StorePathSet & storePaths,
bool includeImpureInfo, bool showClosureSize, bool includeImpureInfo, bool showClosureSize,
Base hashBase, Base hashBase,
AllowInvalidFlag allowInvalid) AllowInvalidFlag allowInvalid)
{ {
auto jsonList = jsonOut.list(); json::array_t jsonList = json::array();
for (auto & storePath : storePaths) { for (auto & storePath : storePaths) {
auto jsonPath = jsonList.object(); auto& jsonPath = jsonList.emplace_back(json::object());
try { try {
auto info = queryPathInfo(storePath); auto info = queryPathInfo(storePath);
jsonPath.attr("path", printStorePath(info->path)); jsonPath["path"] = printStorePath(info->path);
jsonPath jsonPath["narHash"] = info->narHash.to_string(hashBase, true);
.attr("narHash", info->narHash.to_string(hashBase, true)) jsonPath["narSize"] = info->narSize;
.attr("narSize", info->narSize);
{ {
auto jsonRefs = jsonPath.list("references"); auto& jsonRefs = (jsonPath["references"] = json::array());
for (auto & ref : info->references) for (auto & ref : info->references)
jsonRefs.elem(printStorePath(ref)); jsonRefs.emplace_back(printStorePath(ref));
} }
if (info->ca) if (info->ca)
jsonPath.attr("ca", renderContentAddress(info->ca)); jsonPath["ca"] = renderContentAddress(info->ca);
std::pair<uint64_t, uint64_t> closureSizes; std::pair<uint64_t, uint64_t> closureSizes;
if (showClosureSize) { if (showClosureSize) {
closureSizes = getClosureSize(info->path); closureSizes = getClosureSize(info->path);
jsonPath.attr("closureSize", closureSizes.first); jsonPath["closureSize"] = closureSizes.first;
} }
if (includeImpureInfo) { if (includeImpureInfo) {
if (info->deriver) if (info->deriver)
jsonPath.attr("deriver", printStorePath(*info->deriver)); jsonPath["deriver"] = printStorePath(*info->deriver);
if (info->registrationTime) if (info->registrationTime)
jsonPath.attr("registrationTime", info->registrationTime); jsonPath["registrationTime"] = info->registrationTime;
if (info->ultimate) if (info->ultimate)
jsonPath.attr("ultimate", info->ultimate); jsonPath["ultimate"] = info->ultimate;
if (!info->sigs.empty()) { if (!info->sigs.empty()) {
auto jsonSigs = jsonPath.list("signatures");
for (auto & sig : info->sigs) for (auto & sig : info->sigs)
jsonSigs.elem(sig); jsonPath["signatures"].push_back(sig);
} }
auto narInfo = std::dynamic_pointer_cast<const NarInfo>( auto narInfo = std::dynamic_pointer_cast<const NarInfo>(
@ -895,21 +894,22 @@ void Store::pathInfoToJSON(JSONPlaceholder & jsonOut, const StorePathSet & store
if (narInfo) { if (narInfo) {
if (!narInfo->url.empty()) if (!narInfo->url.empty())
jsonPath.attr("url", narInfo->url); jsonPath["url"] = narInfo->url;
if (narInfo->fileHash) if (narInfo->fileHash)
jsonPath.attr("downloadHash", narInfo->fileHash->to_string(hashBase, true)); jsonPath["downloadHash"] = narInfo->fileHash->to_string(hashBase, true);
if (narInfo->fileSize) if (narInfo->fileSize)
jsonPath.attr("downloadSize", narInfo->fileSize); jsonPath["downloadSize"] = narInfo->fileSize;
if (showClosureSize) if (showClosureSize)
jsonPath.attr("closureDownloadSize", closureSizes.second); jsonPath["closureDownloadSize"] = closureSizes.second;
} }
} }
} catch (InvalidPath &) { } catch (InvalidPath &) {
jsonPath.attr("path", printStorePath(storePath)); jsonPath["path"] = printStorePath(storePath);
jsonPath.attr("valid", false); jsonPath["valid"] = false;
} }
} }
return jsonList;
} }

View file

@ -14,6 +14,7 @@
#include "path-info.hh" #include "path-info.hh"
#include "repair-flag.hh" #include "repair-flag.hh"
#include <nlohmann/json_fwd.hpp>
#include <atomic> #include <atomic>
#include <limits> #include <limits>
#include <map> #include <map>
@ -68,7 +69,6 @@ struct Derivation;
class FSAccessor; class FSAccessor;
class NarInfoDiskCache; class NarInfoDiskCache;
class Store; class Store;
class JSONPlaceholder;
enum CheckSigsFlag : bool { NoCheckSigs = false, CheckSigs = true }; enum CheckSigsFlag : bool { NoCheckSigs = false, CheckSigs = true };
@ -512,7 +512,7 @@ public:
variable elements such as the registration time are variable elements such as the registration time are
included. If showClosureSize is true, the closure size of included. If showClosureSize is true, the closure size of
each path is included. */ each path is included. */
void pathInfoToJSON(JSONPlaceholder & jsonOut, const StorePathSet & storePaths, nlohmann::json pathInfoToJSON(const StorePathSet & storePaths,
bool includeImpureInfo, bool showClosureSize, bool includeImpureInfo, bool showClosureSize,
Base hashBase = Base32, Base hashBase = Base32,
AllowInvalidFlag allowInvalid = DisallowInvalid); AllowInvalidFlag allowInvalid = DisallowInvalid);

View file

@ -14,6 +14,8 @@ std::map<ExperimentalFeature, std::string> stringifiedXpFeatures = {
{ Xp::NoUrlLiterals, "no-url-literals" }, { Xp::NoUrlLiterals, "no-url-literals" },
{ Xp::FetchClosure, "fetch-closure" }, { Xp::FetchClosure, "fetch-closure" },
{ Xp::ReplFlake, "repl-flake" }, { Xp::ReplFlake, "repl-flake" },
{ Xp::AutoAllocateUids, "auto-allocate-uids" },
{ Xp::Cgroups, "cgroups" },
}; };
const std::optional<ExperimentalFeature> parseExperimentalFeature(const std::string_view & name) const std::optional<ExperimentalFeature> parseExperimentalFeature(const std::string_view & name)

View file

@ -23,6 +23,8 @@ enum struct ExperimentalFeature
NoUrlLiterals, NoUrlLiterals,
FetchClosure, FetchClosure,
ReplFlake, ReplFlake,
AutoAllocateUids,
Cgroups,
}; };
/** /**

View file

@ -1,5 +1,6 @@
#include <sys/time.h> #include <sys/time.h>
#include <filesystem> #include <filesystem>
#include <atomic>
#include "finally.hh" #include "finally.hh"
#include "util.hh" #include "util.hh"
@ -10,7 +11,7 @@ namespace fs = std::filesystem;
namespace nix { namespace nix {
static Path tempName(Path tmpRoot, const Path & prefix, bool includePid, static Path tempName(Path tmpRoot, const Path & prefix, bool includePid,
int & counter) std::atomic<unsigned int> & counter)
{ {
tmpRoot = canonPath(tmpRoot.empty() ? getEnv("TMPDIR").value_or("/tmp") : tmpRoot, true); tmpRoot = canonPath(tmpRoot.empty() ? getEnv("TMPDIR").value_or("/tmp") : tmpRoot, true);
if (includePid) if (includePid)
@ -22,9 +23,9 @@ static Path tempName(Path tmpRoot, const Path & prefix, bool includePid,
Path createTempDir(const Path & tmpRoot, const Path & prefix, Path createTempDir(const Path & tmpRoot, const Path & prefix,
bool includePid, bool useGlobalCounter, mode_t mode) bool includePid, bool useGlobalCounter, mode_t mode)
{ {
static int globalCounter = 0; static std::atomic<unsigned int> globalCounter = 0;
int localCounter = 0; std::atomic<unsigned int> localCounter = 0;
int & counter(useGlobalCounter ? globalCounter : localCounter); auto & counter(useGlobalCounter ? globalCounter : localCounter);
while (1) { while (1) {
checkInterrupt(); checkInterrupt();

View file

@ -1,203 +0,0 @@
#include "json.hh"
#include <iomanip>
#include <cstdint>
#include <cstring>
namespace nix {
template<>
void toJSON<std::string_view>(std::ostream & str, const std::string_view & s)
{
constexpr size_t BUF_SIZE = 4096;
char buf[BUF_SIZE + 7]; // BUF_SIZE + largest single sequence of puts
size_t bufPos = 0;
const auto flush = [&] {
str.write(buf, bufPos);
bufPos = 0;
};
const auto put = [&] (char c) {
buf[bufPos++] = c;
};
put('"');
for (auto i = s.begin(); i != s.end(); i++) {
if (bufPos >= BUF_SIZE) flush();
if (*i == '\"' || *i == '\\') { put('\\'); put(*i); }
else if (*i == '\n') { put('\\'); put('n'); }
else if (*i == '\r') { put('\\'); put('r'); }
else if (*i == '\t') { put('\\'); put('t'); }
else if (*i >= 0 && *i < 32) {
const char hex[17] = "0123456789abcdef";
put('\\');
put('u');
put(hex[(uint16_t(*i) >> 12) & 0xf]);
put(hex[(uint16_t(*i) >> 8) & 0xf]);
put(hex[(uint16_t(*i) >> 4) & 0xf]);
put(hex[(uint16_t(*i) >> 0) & 0xf]);
}
else put(*i);
}
put('"');
flush();
}
void toJSON(std::ostream & str, const char * s)
{
if (!s) str << "null"; else toJSON(str, std::string_view(s));
}
template<> void toJSON<int>(std::ostream & str, const int & n) { str << n; }
template<> void toJSON<unsigned int>(std::ostream & str, const unsigned int & n) { str << n; }
template<> void toJSON<long>(std::ostream & str, const long & n) { str << n; }
template<> void toJSON<unsigned long>(std::ostream & str, const unsigned long & n) { str << n; }
template<> void toJSON<long long>(std::ostream & str, const long long & n) { str << n; }
template<> void toJSON<unsigned long long>(std::ostream & str, const unsigned long long & n) { str << n; }
template<> void toJSON<float>(std::ostream & str, const float & n) { str << n; }
template<> void toJSON<double>(std::ostream & str, const double & n) { str << n; }
template<> void toJSON<std::string>(std::ostream & str, const std::string & s) { toJSON(str, (std::string_view) s); }
template<> void toJSON<bool>(std::ostream & str, const bool & b)
{
str << (b ? "true" : "false");
}
template<> void toJSON<std::nullptr_t>(std::ostream & str, const std::nullptr_t & b)
{
str << "null";
}
JSONWriter::JSONWriter(std::ostream & str, bool indent)
: state(new JSONState(str, indent))
{
state->stack++;
}
JSONWriter::JSONWriter(JSONState * state)
: state(state)
{
state->stack++;
}
JSONWriter::~JSONWriter()
{
if (state) {
assertActive();
state->stack--;
if (state->stack == 0) delete state;
}
}
void JSONWriter::comma()
{
assertActive();
if (first) {
first = false;
} else {
state->str << ',';
}
if (state->indent) indent();
}
void JSONWriter::indent()
{
state->str << '\n' << std::string(state->depth * 2, ' ');
}
void JSONList::open()
{
state->depth++;
state->str << '[';
}
JSONList::~JSONList()
{
state->depth--;
if (state->indent && !first) indent();
state->str << "]";
}
JSONList JSONList::list()
{
comma();
return JSONList(state);
}
JSONObject JSONList::object()
{
comma();
return JSONObject(state);
}
JSONPlaceholder JSONList::placeholder()
{
comma();
return JSONPlaceholder(state);
}
void JSONObject::open()
{
state->depth++;
state->str << '{';
}
JSONObject::~JSONObject()
{
if (state) {
state->depth--;
if (state->indent && !first) indent();
state->str << "}";
}
}
void JSONObject::attr(std::string_view s)
{
comma();
toJSON(state->str, s);
state->str << ':';
if (state->indent) state->str << ' ';
}
JSONList JSONObject::list(std::string_view name)
{
attr(name);
return JSONList(state);
}
JSONObject JSONObject::object(std::string_view name)
{
attr(name);
return JSONObject(state);
}
JSONPlaceholder JSONObject::placeholder(std::string_view name)
{
attr(name);
return JSONPlaceholder(state);
}
JSONList JSONPlaceholder::list()
{
assertValid();
first = false;
return JSONList(state);
}
JSONObject JSONPlaceholder::object()
{
assertValid();
first = false;
return JSONObject(state);
}
JSONPlaceholder::~JSONPlaceholder()
{
if (first) {
assert(std::uncaught_exceptions());
if (state->stack != 0)
write(nullptr);
}
}
}

View file

@ -1,185 +0,0 @@
#pragma once
#include <iostream>
#include <vector>
#include <cassert>
namespace nix {
void toJSON(std::ostream & str, const char * s);
template<typename T>
void toJSON(std::ostream & str, const T & n);
class JSONWriter
{
protected:
struct JSONState
{
std::ostream & str;
bool indent;
size_t depth = 0;
size_t stack = 0;
JSONState(std::ostream & str, bool indent) : str(str), indent(indent) { }
~JSONState()
{
assert(stack == 0);
}
};
JSONState * state;
bool first = true;
JSONWriter(std::ostream & str, bool indent);
JSONWriter(JSONState * state);
~JSONWriter();
void assertActive()
{
assert(state->stack != 0);
}
void comma();
void indent();
};
class JSONObject;
class JSONPlaceholder;
class JSONList : JSONWriter
{
private:
friend class JSONObject;
friend class JSONPlaceholder;
void open();
JSONList(JSONState * state)
: JSONWriter(state)
{
open();
}
public:
JSONList(std::ostream & str, bool indent = false)
: JSONWriter(str, indent)
{
open();
}
~JSONList();
template<typename T>
JSONList & elem(const T & v)
{
comma();
toJSON(state->str, v);
return *this;
}
JSONList list();
JSONObject object();
JSONPlaceholder placeholder();
};
class JSONObject : JSONWriter
{
private:
friend class JSONList;
friend class JSONPlaceholder;
void open();
JSONObject(JSONState * state)
: JSONWriter(state)
{
open();
}
void attr(std::string_view s);
public:
JSONObject(std::ostream & str, bool indent = false)
: JSONWriter(str, indent)
{
open();
}
JSONObject(const JSONObject & obj) = delete;
JSONObject(JSONObject && obj)
: JSONWriter(obj.state)
{
obj.state = 0;
}
~JSONObject();
template<typename T>
JSONObject & attr(std::string_view name, const T & v)
{
attr(name);
toJSON(state->str, v);
return *this;
}
JSONList list(std::string_view name);
JSONObject object(std::string_view name);
JSONPlaceholder placeholder(std::string_view name);
};
class JSONPlaceholder : JSONWriter
{
private:
friend class JSONList;
friend class JSONObject;
JSONPlaceholder(JSONState * state)
: JSONWriter(state)
{
}
void assertValid()
{
assertActive();
assert(first);
}
public:
JSONPlaceholder(std::ostream & str, bool indent = false)
: JSONWriter(str, indent)
{
}
~JSONPlaceholder();
template<typename T>
void write(const T & v)
{
assertValid();
first = false;
toJSON(state->str, v);
}
JSONList list();
JSONObject object();
};
}

View file

@ -77,9 +77,7 @@ TarArchive::~TarArchive()
static void extract_archive(TarArchive & archive, const Path & destDir) static void extract_archive(TarArchive & archive, const Path & destDir)
{ {
int flags = ARCHIVE_EXTRACT_FFLAGS int flags = ARCHIVE_EXTRACT_TIME
| ARCHIVE_EXTRACT_PERM
| ARCHIVE_EXTRACT_TIME
| ARCHIVE_EXTRACT_SECURE_SYMLINKS | ARCHIVE_EXTRACT_SECURE_SYMLINKS
| ARCHIVE_EXTRACT_SECURE_NODOTDOT; | ARCHIVE_EXTRACT_SECURE_NODOTDOT;
@ -98,6 +96,10 @@ static void extract_archive(TarArchive & archive, const Path & destDir)
archive_entry_copy_pathname(entry, archive_entry_copy_pathname(entry,
(destDir + "/" + name).c_str()); (destDir + "/" + name).c_str());
// sources can and do contain dirs with no rx bits
if (archive_entry_filetype(entry) == AE_IFDIR && (archive_entry_mode(entry) & 0500) != 0500)
archive_entry_set_mode(entry, archive_entry_mode(entry) | 0500);
// Patch hardlink path // Patch hardlink path
const char *original_hardlink = archive_entry_hardlink(entry); const char *original_hardlink = archive_entry_hardlink(entry);
if (original_hardlink) { if (original_hardlink) {

View file

@ -1,193 +0,0 @@
#include "json.hh"
#include <gtest/gtest.h>
#include <sstream>
namespace nix {
/* ----------------------------------------------------------------------------
* toJSON
* --------------------------------------------------------------------------*/
TEST(toJSON, quotesCharPtr) {
const char* input = "test";
std::stringstream out;
toJSON(out, input);
ASSERT_EQ(out.str(), "\"test\"");
}
TEST(toJSON, quotesStdString) {
std::string input = "test";
std::stringstream out;
toJSON(out, input);
ASSERT_EQ(out.str(), "\"test\"");
}
TEST(toJSON, convertsNullptrtoNull) {
auto input = nullptr;
std::stringstream out;
toJSON(out, input);
ASSERT_EQ(out.str(), "null");
}
TEST(toJSON, convertsNullToNull) {
const char* input = 0;
std::stringstream out;
toJSON(out, input);
ASSERT_EQ(out.str(), "null");
}
TEST(toJSON, convertsFloat) {
auto input = 1.024f;
std::stringstream out;
toJSON(out, input);
ASSERT_EQ(out.str(), "1.024");
}
TEST(toJSON, convertsDouble) {
const double input = 1.024;
std::stringstream out;
toJSON(out, input);
ASSERT_EQ(out.str(), "1.024");
}
TEST(toJSON, convertsBool) {
auto input = false;
std::stringstream out;
toJSON(out, input);
ASSERT_EQ(out.str(), "false");
}
TEST(toJSON, quotesTab) {
std::stringstream out;
toJSON(out, "\t");
ASSERT_EQ(out.str(), "\"\\t\"");
}
TEST(toJSON, quotesNewline) {
std::stringstream out;
toJSON(out, "\n");
ASSERT_EQ(out.str(), "\"\\n\"");
}
TEST(toJSON, quotesCreturn) {
std::stringstream out;
toJSON(out, "\r");
ASSERT_EQ(out.str(), "\"\\r\"");
}
TEST(toJSON, quotesCreturnNewLine) {
std::stringstream out;
toJSON(out, "\r\n");
ASSERT_EQ(out.str(), "\"\\r\\n\"");
}
TEST(toJSON, quotesDoublequotes) {
std::stringstream out;
toJSON(out, "\"");
ASSERT_EQ(out.str(), "\"\\\"\"");
}
TEST(toJSON, substringEscape) {
std::stringstream out;
std::string_view s = "foo\t";
toJSON(out, s.substr(3));
ASSERT_EQ(out.str(), "\"\\t\"");
}
/* ----------------------------------------------------------------------------
* JSONObject
* --------------------------------------------------------------------------*/
TEST(JSONObject, emptyObject) {
std::stringstream out;
{
JSONObject t(out);
}
ASSERT_EQ(out.str(), "{}");
}
TEST(JSONObject, objectWithList) {
std::stringstream out;
{
JSONObject t(out);
auto l = t.list("list");
l.elem("element");
}
ASSERT_EQ(out.str(), R"#({"list":["element"]})#");
}
TEST(JSONObject, objectWithListIndent) {
std::stringstream out;
{
JSONObject t(out, true);
auto l = t.list("list");
l.elem("element");
}
ASSERT_EQ(out.str(),
R"#({
"list": [
"element"
]
})#");
}
TEST(JSONObject, objectWithPlaceholderAndList) {
std::stringstream out;
{
JSONObject t(out);
auto l = t.placeholder("list");
l.list().elem("element");
}
ASSERT_EQ(out.str(), R"#({"list":["element"]})#");
}
TEST(JSONObject, objectWithPlaceholderAndObject) {
std::stringstream out;
{
JSONObject t(out);
auto l = t.placeholder("object");
l.object().attr("key", "value");
}
ASSERT_EQ(out.str(), R"#({"object":{"key":"value"}})#");
}
/* ----------------------------------------------------------------------------
* JSONList
* --------------------------------------------------------------------------*/
TEST(JSONList, empty) {
std::stringstream out;
{
JSONList l(out);
}
ASSERT_EQ(out.str(), R"#([])#");
}
TEST(JSONList, withElements) {
std::stringstream out;
{
JSONList l(out);
l.elem("one");
l.object();
l.placeholder().write("three");
}
ASSERT_EQ(out.str(), R"#(["one",{},"three"])#");
}
}

View file

@ -12,7 +12,6 @@
#include "local-fs-store.hh" #include "local-fs-store.hh"
#include "user-env.hh" #include "user-env.hh"
#include "util.hh" #include "util.hh"
#include "json.hh"
#include "value-to-json.hh" #include "value-to-json.hh"
#include "xml-writer.hh" #include "xml-writer.hh"
#include "legacy.hh" #include "legacy.hh"
@ -26,6 +25,7 @@
#include <sys/types.h> #include <sys/types.h>
#include <sys/stat.h> #include <sys/stat.h>
#include <unistd.h> #include <unistd.h>
#include <nlohmann/json.hpp>
using namespace nix; using namespace nix;
using std::cout; using std::cout;
@ -911,43 +911,47 @@ static VersionDiff compareVersionAgainstSet(
static void queryJSON(Globals & globals, std::vector<DrvInfo> & elems, bool printOutPath, bool printMeta) static void queryJSON(Globals & globals, std::vector<DrvInfo> & elems, bool printOutPath, bool printMeta)
{ {
JSONObject topObj(cout, true); using nlohmann::json;
json topObj = json::object();
for (auto & i : elems) { for (auto & i : elems) {
try { try {
if (i.hasFailed()) continue; if (i.hasFailed()) continue;
JSONObject pkgObj = topObj.object(i.attrPath);
auto drvName = DrvName(i.queryName()); auto drvName = DrvName(i.queryName());
pkgObj.attr("name", drvName.fullName); json &pkgObj = topObj[i.attrPath];
pkgObj.attr("pname", drvName.name); pkgObj = {
pkgObj.attr("version", drvName.version); {"name", drvName.fullName},
pkgObj.attr("system", i.querySystem()); {"pname", drvName.name},
pkgObj.attr("outputName", i.queryOutputName()); {"version", drvName.version},
{"system", i.querySystem()},
{"outputName", i.queryOutputName()},
};
{ {
DrvInfo::Outputs outputs = i.queryOutputs(printOutPath); DrvInfo::Outputs outputs = i.queryOutputs(printOutPath);
JSONObject outputObj = pkgObj.object("outputs"); json &outputObj = pkgObj["outputs"];
outputObj = json::object();
for (auto & j : outputs) { for (auto & j : outputs) {
if (j.second) if (j.second)
outputObj.attr(j.first, globals.state->store->printStorePath(*j.second)); outputObj[j.first] = globals.state->store->printStorePath(*j.second);
else else
outputObj.attr(j.first, nullptr); outputObj[j.first] = nullptr;
} }
} }
if (printMeta) { if (printMeta) {
JSONObject metaObj = pkgObj.object("meta"); json &metaObj = pkgObj["meta"];
metaObj = json::object();
StringSet metaNames = i.queryMetaNames(); StringSet metaNames = i.queryMetaNames();
for (auto & j : metaNames) { for (auto & j : metaNames) {
Value * v = i.queryMeta(j); Value * v = i.queryMeta(j);
if (!v) { if (!v) {
printError("derivation '%s' has invalid meta attribute '%s'", i.queryName(), j); printError("derivation '%s' has invalid meta attribute '%s'", i.queryName(), j);
metaObj.attr(j, nullptr); metaObj[j] = nullptr;
} else { } else {
auto placeholder = metaObj.placeholder(j);
PathSet context; PathSet context;
printValueAsJSON(*globals.state, true, *v, noPos, placeholder, context); metaObj[j] = printValueAsJSON(*globals.state, true, *v, noPos, context);
} }
} }
} }
@ -958,6 +962,7 @@ static void queryJSON(Globals & globals, std::vector<DrvInfo> & elems, bool prin
throw; throw;
} }
} }
std::cout << topObj.dump(2);
} }

View file

@ -516,7 +516,7 @@ static void registerValidity(bool reregister, bool hashGiven, bool canonicalise)
if (!store->isValidPath(info->path) || reregister) { if (!store->isValidPath(info->path) || reregister) {
/* !!! races */ /* !!! races */
if (canonicalise) if (canonicalise)
canonicalisePathMetaData(store->printStorePath(info->path), -1); canonicalisePathMetaData(store->printStorePath(info->path), {});
if (!hashGiven) { if (!hashGiven) {
HashResult hash = hashPath(htSHA256, store->printStorePath(info->path)); HashResult hash = hashPath(htSHA256, store->printStorePath(info->path));
info->narHash = hash.first; info->narHash = hash.first;

View file

@ -37,11 +37,13 @@ struct InstallableDerivedPath : Installable
* Return the rewrites that are needed to resolve a string whose context is * Return the rewrites that are needed to resolve a string whose context is
* included in `dependencies`. * included in `dependencies`.
*/ */
StringPairs resolveRewrites(Store & store, const BuiltPaths dependencies) StringPairs resolveRewrites(
Store & store,
const std::vector<BuiltPathWithResult> & dependencies)
{ {
StringPairs res; StringPairs res;
for (auto & dep : dependencies) for (auto & dep : dependencies)
if (auto drvDep = std::get_if<BuiltPathBuilt>(&dep)) if (auto drvDep = std::get_if<BuiltPathBuilt>(&dep.path))
for (auto & [ outputName, outputPath ] : drvDep->outputs) for (auto & [ outputName, outputPath ] : drvDep->outputs)
res.emplace( res.emplace(
downstreamPlaceholder(store, drvDep->drvPath, outputName), downstreamPlaceholder(store, drvDep->drvPath, outputName),
@ -53,7 +55,10 @@ StringPairs resolveRewrites(Store & store, const BuiltPaths dependencies)
/** /**
* Resolve the given string assuming the given context. * Resolve the given string assuming the given context.
*/ */
std::string resolveString(Store & store, const std::string & toResolve, const BuiltPaths dependencies) std::string resolveString(
Store & store,
const std::string & toResolve,
const std::vector<BuiltPathWithResult> & dependencies)
{ {
auto rewrites = resolveRewrites(store, dependencies); auto rewrites = resolveRewrites(store, dependencies);
return rewriteStrings(toResolve, rewrites); return rewriteStrings(toResolve, rewrites);
@ -66,7 +71,9 @@ UnresolvedApp Installable::toApp(EvalState & state)
auto type = cursor->getAttr("type")->getString(); auto type = cursor->getAttr("type")->getString();
std::string expected = !attrPath.empty() && state.symbols[attrPath[0]] == "apps" ? "app" : "derivation"; std::string expected = !attrPath.empty() &&
(state.symbols[attrPath[0]] == "apps" || state.symbols[attrPath[0]] == "defaultApp")
? "app" : "derivation";
if (type != expected) if (type != expected)
throw Error("attribute '%s' should have type '%s'", cursor->getAttrPathStr(), expected); throw Error("attribute '%s' should have type '%s'", cursor->getAttrPathStr(), expected);

View file

@ -10,6 +10,37 @@
using namespace nix; using namespace nix;
nlohmann::json derivedPathsToJSON(const DerivedPaths & paths, ref<Store> store)
{
auto res = nlohmann::json::array();
for (auto & t : paths) {
std::visit([&res, store](const auto & t) {
res.push_back(t.toJSON(store));
}, t.raw());
}
return res;
}
nlohmann::json builtPathsWithResultToJSON(const std::vector<BuiltPathWithResult> & buildables, ref<Store> store)
{
auto res = nlohmann::json::array();
for (auto & b : buildables) {
std::visit([&](const auto & t) {
auto j = t.toJSON(store);
if (b.result) {
j["startTime"] = b.result->startTime;
j["stopTime"] = b.result->stopTime;
if (b.result->cpuUser)
j["cpuUser"] = ((double) b.result->cpuUser->count()) / 1000000;
if (b.result->cpuSystem)
j["cpuSystem"] = ((double) b.result->cpuSystem->count()) / 1000000;
}
res.push_back(j);
}, b.path.raw());
}
return res;
}
struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile
{ {
Path outLink = "result"; Path outLink = "result";
@ -78,7 +109,7 @@ struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile
Realise::Outputs, Realise::Outputs,
installables, buildMode); installables, buildMode);
if (json) logger->cout("%s", derivedPathsWithHintsToJSON(buildables, store).dump()); if (json) logger->cout("%s", builtPathsWithResultToJSON(buildables, store).dump());
if (outLink != "") if (outLink != "")
if (auto store2 = store.dynamic_pointer_cast<LocalFSStore>()) if (auto store2 = store.dynamic_pointer_cast<LocalFSStore>())
@ -98,7 +129,7 @@ struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile
store2->addPermRoot(output.second, absPath(symlink)); store2->addPermRoot(output.second, absPath(symlink));
} }
}, },
}, buildable.raw()); }, buildable.path.raw());
} }
if (printOutputPaths) { if (printOutputPaths) {
@ -113,11 +144,14 @@ struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile
std::cout << store->printStorePath(output.second) << std::endl; std::cout << store->printStorePath(output.second) << std::endl;
} }
}, },
}, buildable.raw()); }, buildable.path.raw());
} }
} }
updateProfile(buildables); BuiltPaths buildables2;
for (auto & b : buildables)
buildables2.push_back(b.path);
updateProfile(buildables2);
} }
}; };

View file

@ -11,7 +11,7 @@ R""(
# Description # Description
This command runs the Nix daemon, which is a required component in This command runs the Nix daemon, which is a required component in
multi-user Nix installations. It performs build actions and other multi-user Nix installations. It runs build tasks and other
operations on the Nix store on behalf of non-root users. Usually you operations on the Nix store on behalf of non-root users. Usually you
don't run the daemon directly; instead it's managed by a service don't run the daemon directly; instead it's managed by a service
management framework such as `systemd`. management framework such as `systemd`.

View file

@ -4,10 +4,11 @@
#include "store-api.hh" #include "store-api.hh"
#include "eval.hh" #include "eval.hh"
#include "eval-inline.hh" #include "eval-inline.hh"
#include "json.hh"
#include "value-to-json.hh" #include "value-to-json.hh"
#include "progress-bar.hh" #include "progress-bar.hh"
#include <nlohmann/json.hpp>
using namespace nix; using namespace nix;
struct CmdEval : MixJSON, InstallableCommand struct CmdEval : MixJSON, InstallableCommand
@ -115,9 +116,7 @@ struct CmdEval : MixJSON, InstallableCommand
} }
else if (json) { else if (json) {
JSONPlaceholder jsonOut(std::cout); std::cout << printValueAsJSON(*state, true, *v, pos, context, false).dump() << std::endl;
printValueAsJSON(*state, true, *v, pos, jsonOut, context, false);
std::cout << std::endl;
} }
else { else {

View file

@ -11,7 +11,6 @@
#include "attr-path.hh" #include "attr-path.hh"
#include "fetchers.hh" #include "fetchers.hh"
#include "registry.hh" #include "registry.hh"
#include "json.hh"
#include "eval-cache.hh" #include "eval-cache.hh"
#include "markdown.hh" #include "markdown.hh"
@ -21,6 +20,7 @@
using namespace nix; using namespace nix;
using namespace nix::flake; using namespace nix::flake;
using json = nlohmann::json;
class FlakeCommand : virtual Args, public MixFlakeOptions class FlakeCommand : virtual Args, public MixFlakeOptions
{ {
@ -915,35 +915,44 @@ struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun
{ {
auto flake = lockFlake(); auto flake = lockFlake();
auto jsonRoot = json ? std::optional<JSONObject>(std::cout) : std::nullopt;
StorePathSet sources; StorePathSet sources;
sources.insert(flake.flake.sourceInfo->storePath); sources.insert(flake.flake.sourceInfo->storePath);
if (jsonRoot)
jsonRoot->attr("path", store->printStorePath(flake.flake.sourceInfo->storePath));
// FIXME: use graph output, handle cycles. // FIXME: use graph output, handle cycles.
std::function<void(const Node & node, std::optional<JSONObject> & jsonObj)> traverse; std::function<nlohmann::json(const Node & node)> traverse;
traverse = [&](const Node & node, std::optional<JSONObject> & jsonObj) traverse = [&](const Node & node)
{ {
auto jsonObj2 = jsonObj ? jsonObj->object("inputs") : std::optional<JSONObject>(); nlohmann::json jsonObj2 = json ? json::object() : nlohmann::json(nullptr);
for (auto & [inputName, input] : node.inputs) { for (auto & [inputName, input] : node.inputs) {
if (auto inputNode = std::get_if<0>(&input)) { if (auto inputNode = std::get_if<0>(&input)) {
auto jsonObj3 = jsonObj2 ? jsonObj2->object(inputName) : std::optional<JSONObject>();
auto storePath = auto storePath =
dryRun dryRun
? (*inputNode)->lockedRef.input.computeStorePath(*store) ? (*inputNode)->lockedRef.input.computeStorePath(*store)
: (*inputNode)->lockedRef.input.fetch(store).first.storePath; : (*inputNode)->lockedRef.input.fetch(store).first.storePath;
if (jsonObj3) if (json) {
jsonObj3->attr("path", store->printStorePath(storePath)); auto& jsonObj3 = jsonObj2[inputName];
sources.insert(std::move(storePath)); jsonObj3["path"] = store->printStorePath(storePath);
traverse(**inputNode, jsonObj3); sources.insert(std::move(storePath));
jsonObj3["inputs"] = traverse(**inputNode);
} else {
sources.insert(std::move(storePath));
traverse(**inputNode);
}
} }
} }
return jsonObj2;
}; };
traverse(*flake.lockFile.root, jsonRoot); if (json) {
nlohmann::json jsonRoot = {
{"path", store->printStorePath(flake.flake.sourceInfo->storePath)},
{"inputs", traverse(*flake.lockFile.root)},
};
std::cout << jsonRoot.dump() << std::endl;
} else {
traverse(*flake.lockFile.root);
}
if (!dryRun && !dstUri.empty()) { if (!dryRun && !dstUri.empty()) {
ref<Store> dstStore = dstUri.empty() ? openStore() : openStore(dstUri); ref<Store> dstStore = dstUri.empty() ? openStore() : openStore(dstUri);

View file

@ -18,7 +18,7 @@ nix_CXXFLAGS += -I src/libutil -I src/libstore -I src/libfetchers -I src/libexpr
nix_LIBS = libexpr libmain libfetchers libstore libutil libcmd nix_LIBS = libexpr libmain libfetchers libstore libutil libcmd
nix_LDFLAGS = -pthread $(SODIUM_LIBS) $(EDITLINE_LIBS) $(BOOST_LDFLAGS) -llowdown nix_LDFLAGS = -pthread $(SODIUM_LIBS) $(EDITLINE_LIBS) $(BOOST_LDFLAGS) $(LOWDOWN_LIBS)
$(foreach name, \ $(foreach name, \
nix-build nix-channel nix-collect-garbage nix-copy-closure nix-daemon nix-env nix-hash nix-instantiate nix-prefetch-url nix-shell nix-store, \ nix-build nix-channel nix-collect-garbage nix-copy-closure nix-daemon nix-env nix-hash nix-instantiate nix-prefetch-url nix-shell nix-store, \

View file

@ -3,7 +3,7 @@
#include "fs-accessor.hh" #include "fs-accessor.hh"
#include "nar-accessor.hh" #include "nar-accessor.hh"
#include "common-args.hh" #include "common-args.hh"
#include "json.hh" #include <nlohmann/json.hpp>
using namespace nix; using namespace nix;
@ -91,10 +91,9 @@ struct MixLs : virtual Args, MixJSON
if (path == "/") path = ""; if (path == "/") path = "";
if (json) { if (json) {
JSONPlaceholder jsonRoot(std::cout);
if (showDirectory) if (showDirectory)
throw UsageError("'--directory' is useless with '--json'"); throw UsageError("'--directory' is useless with '--json'");
listNar(jsonRoot, accessor, path, recursive); std::cout << listNar(accessor, path, recursive);
} else } else
listText(accessor); listText(accessor);
} }

View file

@ -53,7 +53,6 @@ static bool haveInternet()
} }
std::string programPath; std::string programPath;
char * * savedArgv;
struct HelpRequested { }; struct HelpRequested { };
@ -270,7 +269,7 @@ void mainWrapped(int argc, char * * argv)
programPath = argv[0]; programPath = argv[0];
auto programName = std::string(baseNameOf(programPath)); auto programName = std::string(baseNameOf(programPath));
if (argc > 0 && std::string_view(argv[0]) == "__build-remote") { if (argc > 1 && std::string_view(argv[1]) == "__build-remote") {
programName = "build-remote"; programName = "build-remote";
argv++; argc--; argv++; argc--;
} }

View file

@ -2,10 +2,13 @@
#include "store-api.hh" #include "store-api.hh"
#include "make-content-addressed.hh" #include "make-content-addressed.hh"
#include "common-args.hh" #include "common-args.hh"
#include "json.hh"
#include <nlohmann/json.hpp>
using namespace nix; using namespace nix;
using nlohmann::json;
struct CmdMakeContentAddressed : virtual CopyCommand, virtual StorePathsCommand, MixJSON struct CmdMakeContentAddressed : virtual CopyCommand, virtual StorePathsCommand, MixJSON
{ {
CmdMakeContentAddressed() CmdMakeContentAddressed()
@ -25,6 +28,7 @@ struct CmdMakeContentAddressed : virtual CopyCommand, virtual StorePathsCommand,
; ;
} }
using StorePathsCommand::run;
void run(ref<Store> srcStore, StorePaths && storePaths) override void run(ref<Store> srcStore, StorePaths && storePaths) override
{ {
auto dstStore = dstUri.empty() ? openStore() : openStore(dstUri); auto dstStore = dstUri.empty() ? openStore() : openStore(dstUri);
@ -33,13 +37,15 @@ struct CmdMakeContentAddressed : virtual CopyCommand, virtual StorePathsCommand,
StorePathSet(storePaths.begin(), storePaths.end())); StorePathSet(storePaths.begin(), storePaths.end()));
if (json) { if (json) {
JSONObject jsonRoot(std::cout); auto jsonRewrites = json::object();
JSONObject jsonRewrites(jsonRoot.object("rewrites"));
for (auto & path : storePaths) { for (auto & path : storePaths) {
auto i = remappings.find(path); auto i = remappings.find(path);
assert(i != remappings.end()); assert(i != remappings.end());
jsonRewrites.attr(srcStore->printStorePath(path), srcStore->printStorePath(i->second)); jsonRewrites[srcStore->printStorePath(path)] = srcStore->printStorePath(i->second);
} }
auto json = json::object();
json["rewrites"] = jsonRewrites;
std::cout << json.dump();
} else { } else {
for (auto & path : storePaths) { for (auto & path : storePaths) {
auto i = remappings.find(path); auto i = remappings.find(path);

View file

@ -1,12 +1,13 @@
#include "command.hh" #include "command.hh"
#include "shared.hh" #include "shared.hh"
#include "store-api.hh" #include "store-api.hh"
#include "json.hh"
#include "common-args.hh" #include "common-args.hh"
#include <algorithm> #include <algorithm>
#include <array> #include <array>
#include <nlohmann/json.hpp>
using namespace nix; using namespace nix;
struct CmdPathInfo : StorePathsCommand, MixJSON struct CmdPathInfo : StorePathsCommand, MixJSON
@ -86,11 +87,10 @@ struct CmdPathInfo : StorePathsCommand, MixJSON
pathLen = std::max(pathLen, store->printStorePath(storePath).size()); pathLen = std::max(pathLen, store->printStorePath(storePath).size());
if (json) { if (json) {
JSONPlaceholder jsonRoot(std::cout); std::cout << store->pathInfoToJSON(
store->pathInfoToJSON(jsonRoot,
// FIXME: preserve order? // FIXME: preserve order?
StorePathSet(storePaths.begin(), storePaths.end()), StorePathSet(storePaths.begin(), storePaths.end()),
true, showClosureSize, SRI, AllowInvalid); true, showClosureSize, SRI, AllowInvalid).dump();
} }
else { else {

View file

@ -253,11 +253,11 @@ struct ProfileManifest
static std::map<Installable *, BuiltPaths> static std::map<Installable *, BuiltPaths>
builtPathsPerInstallable( builtPathsPerInstallable(
const std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> & builtPaths) const std::vector<std::pair<std::shared_ptr<Installable>, BuiltPathWithResult>> & builtPaths)
{ {
std::map<Installable *, BuiltPaths> res; std::map<Installable *, BuiltPaths> res;
for (auto & [installable, builtPath] : builtPaths) for (auto & [installable, builtPath] : builtPaths)
res[installable.get()].push_back(builtPath); res[installable.get()].push_back(builtPath.path);
return res; return res;
} }

View file

@ -5,7 +5,6 @@
#include "names.hh" #include "names.hh"
#include "get-drvs.hh" #include "get-drvs.hh"
#include "common-args.hh" #include "common-args.hh"
#include "json.hh"
#include "shared.hh" #include "shared.hh"
#include "eval-cache.hh" #include "eval-cache.hh"
#include "attr-path.hh" #include "attr-path.hh"
@ -13,8 +12,10 @@
#include <regex> #include <regex>
#include <fstream> #include <fstream>
#include <nlohmann/json.hpp>
using namespace nix; using namespace nix;
using json = nlohmann::json;
std::string wrap(std::string prefix, std::string s) std::string wrap(std::string prefix, std::string s)
{ {
@ -106,7 +107,8 @@ struct CmdSearch : SourceExprCommand, MixJSON
auto state = getEvalState(); auto state = getEvalState();
auto jsonOut = json ? std::make_unique<JSONObject>(std::cout) : nullptr; std::optional<nlohmann::json> jsonOut;
if (json) jsonOut = json::object();
uint64_t results = 0; uint64_t results = 0;
@ -173,10 +175,11 @@ struct CmdSearch : SourceExprCommand, MixJSON
{ {
results++; results++;
if (json) { if (json) {
auto jsonElem = jsonOut->object(attrPath2); (*jsonOut)[attrPath2] = {
jsonElem.attr("pname", name.name); {"pname", name.name},
jsonElem.attr("version", name.version); {"version", name.version},
jsonElem.attr("description", description); {"description", description},
};
} else { } else {
auto name2 = hiliteMatches(name.name, nameMatches, ANSI_GREEN, "\e[0;2m"); auto name2 = hiliteMatches(name.name, nameMatches, ANSI_GREEN, "\e[0;2m");
if (results > 1) logger->cout(""); if (results > 1) logger->cout("");
@ -215,6 +218,10 @@ struct CmdSearch : SourceExprCommand, MixJSON
for (auto & cursor : installable->getCursors(*state)) for (auto & cursor : installable->getCursors(*state))
visit(*cursor, cursor->getAttrPath(), true); visit(*cursor, cursor->getAttrPath(), true);
if (json) {
std::cout << jsonOut->dump() << std::endl;
}
if (!json && !results) if (!json && !results)
throw Error("no results for the given search term(s)!"); throw Error("no results for the given search term(s)!");
} }

View file

@ -5,10 +5,11 @@
#include "common-args.hh" #include "common-args.hh"
#include "store-api.hh" #include "store-api.hh"
#include "archive.hh" #include "archive.hh"
#include "json.hh"
#include "derivations.hh" #include "derivations.hh"
#include <nlohmann/json.hpp>
using namespace nix; using namespace nix;
using json = nlohmann::json;
struct CmdShowDerivation : InstallablesCommand struct CmdShowDerivation : InstallablesCommand
{ {
@ -48,77 +49,63 @@ struct CmdShowDerivation : InstallablesCommand
drvPaths = std::move(closure); drvPaths = std::move(closure);
} }
{ json jsonRoot = json::object();
JSONObject jsonRoot(std::cout, true);
for (auto & drvPath : drvPaths) { for (auto & drvPath : drvPaths) {
if (!drvPath.isDerivation()) continue; if (!drvPath.isDerivation()) continue;
auto drvObj(jsonRoot.object(store->printStorePath(drvPath))); json& drvObj = jsonRoot[store->printStorePath(drvPath)];
auto drv = store->readDerivation(drvPath); auto drv = store->readDerivation(drvPath);
{ {
auto outputsObj(drvObj.object("outputs")); json& outputsObj = drvObj["outputs"];
outputsObj = json::object();
for (auto & [_outputName, output] : drv.outputs) { for (auto & [_outputName, output] : drv.outputs) {
auto & outputName = _outputName; // work around clang bug auto & outputName = _outputName; // work around clang bug
auto outputObj { outputsObj.object(outputName) }; auto& outputObj = outputsObj[outputName];
outputObj = json::object();
std::visit(overloaded { std::visit(overloaded {
[&](const DerivationOutput::InputAddressed & doi) { [&](const DerivationOutput::InputAddressed & doi) {
outputObj.attr("path", store->printStorePath(doi.path)); outputObj["path"] = store->printStorePath(doi.path);
}, },
[&](const DerivationOutput::CAFixed & dof) { [&](const DerivationOutput::CAFixed & dof) {
outputObj.attr("path", store->printStorePath(dof.path(*store, drv.name, outputName))); outputObj["path"] = store->printStorePath(dof.path(*store, drv.name, outputName));
outputObj.attr("hashAlgo", dof.hash.printMethodAlgo()); outputObj["hashAlgo"] = dof.hash.printMethodAlgo();
outputObj.attr("hash", dof.hash.hash.to_string(Base16, false)); outputObj["hash"] = dof.hash.hash.to_string(Base16, false);
}, },
[&](const DerivationOutput::CAFloating & dof) { [&](const DerivationOutput::CAFloating & dof) {
outputObj.attr("hashAlgo", makeFileIngestionPrefix(dof.method) + printHashType(dof.hashType)); outputObj["hashAlgo"] = makeFileIngestionPrefix(dof.method) + printHashType(dof.hashType);
}, },
[&](const DerivationOutput::Deferred &) {}, [&](const DerivationOutput::Deferred &) {},
[&](const DerivationOutput::Impure & doi) { [&](const DerivationOutput::Impure & doi) {
outputObj.attr("hashAlgo", makeFileIngestionPrefix(doi.method) + printHashType(doi.hashType)); outputObj["hashAlgo"] = makeFileIngestionPrefix(doi.method) + printHashType(doi.hashType);
outputObj.attr("impure", true); outputObj["impure"] = true;
}, },
}, output.raw()); }, output.raw());
} }
} }
{ {
auto inputsList(drvObj.list("inputSrcs")); auto& inputsList = drvObj["inputSrcs"];
inputsList = json::array();
for (auto & input : drv.inputSrcs) for (auto & input : drv.inputSrcs)
inputsList.elem(store->printStorePath(input)); inputsList.emplace_back(store->printStorePath(input));
} }
{ {
auto inputDrvsObj(drvObj.object("inputDrvs")); auto& inputDrvsObj = drvObj["inputDrvs"];
for (auto & input : drv.inputDrvs) { inputDrvsObj = json::object();
auto inputList(inputDrvsObj.list(store->printStorePath(input.first))); for (auto & input : drv.inputDrvs)
for (auto & outputId : input.second) inputDrvsObj[store->printStorePath(input.first)] = input.second;
inputList.elem(outputId);
}
} }
drvObj.attr("system", drv.platform); drvObj["system"] = drv.platform;
drvObj.attr("builder", drv.builder); drvObj["builder"] = drv.builder;
drvObj["args"] = drv.args;
{ drvObj["env"] = drv.env;
auto argsList(drvObj.list("args"));
for (auto & arg : drv.args)
argsList.elem(arg);
}
{
auto envObj(drvObj.object("env"));
for (auto & var : drv.env)
envObj.attr(var.first, var.second);
}
} }
std::cout << jsonRoot.dump(2) << std::endl;
}
std::cout << "\n";
} }
}; };

View file

@ -83,20 +83,47 @@ struct CmdWhyDepends : SourceExprCommand
{ {
auto package = parseInstallable(store, _package); auto package = parseInstallable(store, _package);
auto packagePath = Installable::toStorePath(getEvalStore(), store, Realise::Outputs, operateOn, package); auto packagePath = Installable::toStorePath(getEvalStore(), store, Realise::Outputs, operateOn, package);
/* We don't need to build `dependency`. We try to get the store
* path if it's already known, and if not, then it's not a dependency.
*
* Why? If `package` does depends on `dependency`, then getting the
* store path of `package` above necessitated having the store path
* of `dependency`. The contrapositive is, if the store path of
* `dependency` is not already known at this point (i.e. it's a CA
* derivation which hasn't been built), then `package` did not need it
* to build.
*/
auto dependency = parseInstallable(store, _dependency); auto dependency = parseInstallable(store, _dependency);
auto dependencyPath = Installable::toStorePath(getEvalStore(), store, Realise::Derivation, operateOn, dependency); auto derivedDependency = dependency->toDerivedPath();
auto dependencyPathHash = dependencyPath.hashPart(); auto optDependencyPath = std::visit(overloaded {
[](const DerivedPath::Opaque & nodrv) -> std::optional<StorePath> {
return { nodrv.path };
},
[&](const DerivedPath::Built & hasdrv) -> std::optional<StorePath> {
if (hasdrv.outputs.size() != 1) {
throw Error("argument '%s' should evaluate to one store path", dependency->what());
}
auto outputMap = store->queryPartialDerivationOutputMap(hasdrv.drvPath);
auto maybePath = outputMap.find(*hasdrv.outputs.begin());
if (maybePath == outputMap.end()) {
throw Error("unexpected end of iterator");
}
return maybePath->second;
},
}, derivedDependency.raw());
StorePathSet closure; StorePathSet closure;
store->computeFSClosure({packagePath}, closure, false, false); store->computeFSClosure({packagePath}, closure, false, false);
if (!closure.count(dependencyPath)) { if (!optDependencyPath.has_value() || !closure.count(*optDependencyPath)) {
printError("'%s' does not depend on '%s'", printError("'%s' does not depend on '%s'", package->what(), dependency->what());
store->printStorePath(packagePath),
store->printStorePath(dependencyPath));
return; return;
} }
auto dependencyPath = *optDependencyPath;
auto dependencyPathHash = dependencyPath.hashPart();
stopProgressBar(); // FIXME stopProgressBar(); // FIXME
auto accessor = store->getFSAccessor(); auto accessor = store->getFSAccessor();

View file

@ -70,3 +70,54 @@ testNormalization () {
} }
testNormalization testNormalization
# https://github.com/NixOS/nix/issues/6572
issue_6572_independent_outputs() {
nix build -f multiple-outputs.nix --json independent --no-link > $TEST_ROOT/independent.json
# Make sure that 'nix build' can build a derivation that depends on both outputs of another derivation.
p=$(nix build -f multiple-outputs.nix use-independent --no-link --print-out-paths)
nix-store --delete "$p" # Clean up for next test
# Make sure that 'nix build' tracks input-outputs correctly when a single output is already present.
nix-store --delete "$(jq -r <$TEST_ROOT/independent.json .[0].outputs.first)"
p=$(nix build -f multiple-outputs.nix use-independent --no-link --print-out-paths)
cmp $p <<EOF
first
second
EOF
nix-store --delete "$p" # Clean up for next test
# Make sure that 'nix build' tracks input-outputs correctly when a single output is already present.
nix-store --delete "$(jq -r <$TEST_ROOT/independent.json .[0].outputs.second)"
p=$(nix build -f multiple-outputs.nix use-independent --no-link --print-out-paths)
cmp $p <<EOF
first
second
EOF
nix-store --delete "$p" # Clean up for next test
}
issue_6572_independent_outputs
# https://github.com/NixOS/nix/issues/6572
issue_6572_dependent_outputs() {
nix build -f multiple-outputs.nix --json a --no-link > $TEST_ROOT/a.json
# # Make sure that 'nix build' can build a derivation that depends on both outputs of another derivation.
p=$(nix build -f multiple-outputs.nix use-a --no-link --print-out-paths)
nix-store --delete "$p" # Clean up for next test
# Make sure that 'nix build' tracks input-outputs correctly when a single output is already present.
nix-store --delete "$(jq -r <$TEST_ROOT/a.json .[0].outputs.second)"
p=$(nix build -f multiple-outputs.nix use-a --no-link --print-out-paths)
cmp $p <<EOF
first
second
EOF
nix-store --delete "$p" # Clean up for next test
}
if isDaemonNewer "2.12pre0"; then
issue_6572_dependent_outputs
fi

5
tests/ca/why-depends.sh Normal file
View file

@ -0,0 +1,5 @@
source common.sh
export NIX_TESTS_CA_BY_DEFAULT=1
cd .. && source why-depends.sh

View file

@ -28,6 +28,10 @@ cat <<EOF > bar/flake.nix
}; };
} }
EOF EOF
mkdir -p err
cat <<EOF > err/flake.nix
throw "error"
EOF
# Test the completion of a subcommand # Test the completion of a subcommand
[[ "$(NIX_GET_COMPLETIONS=1 nix buil)" == $'normal\nbuild\t' ]] [[ "$(NIX_GET_COMPLETIONS=1 nix buil)" == $'normal\nbuild\t' ]]
@ -60,3 +64,5 @@ NIX_GET_COMPLETIONS=3 nix build --option allow-import-from | grep -- "allow-impo
# Attr path completions # Attr path completions
[[ "$(NIX_GET_COMPLETIONS=2 nix eval ./foo\#sam)" == $'attrs\n./foo#sampleOutput\t' ]] [[ "$(NIX_GET_COMPLETIONS=2 nix eval ./foo\#sam)" == $'attrs\n./foo#sampleOutput\t' ]]
[[ "$(NIX_GET_COMPLETIONS=4 nix eval --file ./foo/flake.nix outp)" == $'attrs\noutputs\t' ]] [[ "$(NIX_GET_COMPLETIONS=4 nix eval --file ./foo/flake.nix outp)" == $'attrs\noutputs\t' ]]
[[ "$(NIX_GET_COMPLETIONS=4 nix eval --file ./err/flake.nix outp 2>&1)" == $'attrs' ]]
[[ "$(NIX_GET_COMPLETIONS=2 nix eval ./err\# 2>&1)" == $'attrs' ]]

68
tests/containers.nix Normal file
View file

@ -0,0 +1,68 @@
# Test whether we can run a NixOS container inside a Nix build using systemd-nspawn.
{ nixpkgs, system, overlay }:
with import (nixpkgs + "/nixos/lib/testing-python.nix") {
inherit system;
extraConfigurations = [ { nixpkgs.overlays = [ overlay ]; } ];
};
makeTest ({
name = "containers";
nodes =
{
host =
{ config, lib, pkgs, nodes, ... }:
{ virtualisation.writableStore = true;
virtualisation.diskSize = 2048;
virtualisation.additionalPaths =
[ pkgs.stdenv
(import ./systemd-nspawn.nix { inherit nixpkgs; }).toplevel
];
virtualisation.memorySize = 4096;
nix.binaryCaches = lib.mkForce [ ];
nix.extraOptions =
''
extra-experimental-features = nix-command auto-allocate-uids cgroups
extra-system-features = uid-range
'';
nix.nixPath = [ "nixpkgs=${nixpkgs}" ];
};
};
testScript = { nodes }: ''
start_all()
host.succeed("nix --version >&2")
# Test that 'id' gives the expected result in various configurations.
# Existing UIDs, sandbox.
host.succeed("nix build --no-auto-allocate-uids --sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-1")
host.succeed("[[ $(cat ./result) = 'uid=1000(nixbld) gid=100(nixbld) groups=100(nixbld)' ]]")
# Existing UIDs, no sandbox.
host.succeed("nix build --no-auto-allocate-uids --no-sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-2")
host.succeed("[[ $(cat ./result) = 'uid=30001(nixbld1) gid=30000(nixbld) groups=30000(nixbld)' ]]")
# Auto-allocated UIDs, sandbox.
host.succeed("nix build --auto-allocate-uids --sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-3")
host.succeed("[[ $(cat ./result) = 'uid=1000(nixbld) gid=100(nixbld) groups=100(nixbld)' ]]")
# Auto-allocated UIDs, no sandbox.
host.succeed("nix build --auto-allocate-uids --no-sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-4")
host.succeed("[[ $(cat ./result) = 'uid=872415232 gid=30000(nixbld) groups=30000(nixbld)' ]]")
# Auto-allocated UIDs, UID range, sandbox.
host.succeed("nix build --auto-allocate-uids --sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-5 --arg uidRange true")
host.succeed("[[ $(cat ./result) = 'uid=0(root) gid=0(root) groups=0(root)' ]]")
# Auto-allocated UIDs, UID range, no sandbox.
host.fail("nix build --auto-allocate-uids --no-sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-6 --arg uidRange true")
# Run systemd-nspawn in a Nix build.
host.succeed("nix build --auto-allocate-uids --sandbox -L --offline --impure --file ${./systemd-nspawn.nix} --argstr nixpkgs ${nixpkgs}")
host.succeed("[[ $(cat ./result/msg) = 'Hello World' ]]")
'';
})

View file

@ -1,7 +1,6 @@
source common.sh source common.sh
enableFeatures "fetch-closure" enableFeatures "fetch-closure"
needLocalStore "'--no-require-sigs' cant be used with the daemon"
clearStore clearStore
clearCacheCache clearCacheCache
@ -28,15 +27,19 @@ clearStore
[ ! -e $nonCaPath ] [ ! -e $nonCaPath ]
[ -e $caPath ] [ -e $caPath ]
# In impure mode, we can use non-CA paths. if [[ "$NIX_REMOTE" != "daemon" ]]; then
[[ $(nix eval --raw --no-require-sigs --impure --expr "
builtins.fetchClosure {
fromStore = \"file://$cacheDir\";
fromPath = $nonCaPath;
}
") = $nonCaPath ]]
[ -e $nonCaPath ] # In impure mode, we can use non-CA paths.
[[ $(nix eval --raw --no-require-sigs --impure --expr "
builtins.fetchClosure {
fromStore = \"file://$cacheDir\";
fromPath = $nonCaPath;
}
") = $nonCaPath ]]
[ -e $nonCaPath ]
fi
# 'toPath' set to empty string should fail but print the expected path. # 'toPath' set to empty string should fail but print the expected path.
nix eval -v --json --expr " nix eval -v --json --expr "

View file

@ -24,12 +24,14 @@ touch $repo/.gitignore
git -C $repo add hello .gitignore git -C $repo add hello .gitignore
git -C $repo commit -m 'Bla1' git -C $repo commit -m 'Bla1'
rev1=$(git -C $repo rev-parse HEAD) rev1=$(git -C $repo rev-parse HEAD)
git -C $repo tag -a tag1 -m tag1
echo world > $repo/hello echo world > $repo/hello
git -C $repo commit -m 'Bla2' -a git -C $repo commit -m 'Bla2' -a
git -C $repo worktree add $TEST_ROOT/worktree git -C $repo worktree add $TEST_ROOT/worktree
echo hello >> $TEST_ROOT/worktree/hello echo hello >> $TEST_ROOT/worktree/hello
rev2=$(git -C $repo rev-parse HEAD) rev2=$(git -C $repo rev-parse HEAD)
git -C $repo tag -a tag2 -m tag2
# Fetch a worktree # Fetch a worktree
unset _NIX_FORCE_HTTP unset _NIX_FORCE_HTTP
@ -217,6 +219,16 @@ rev4_nix=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = \"file://$
path9=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = \"file://$repo\"; ref = \"HEAD\"; name = \"foo\"; }).outPath") path9=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = \"file://$repo\"; ref = \"HEAD\"; name = \"foo\"; }).outPath")
[[ $path9 =~ -foo$ ]] [[ $path9 =~ -foo$ ]]
# Specifying a ref without a rev shouldn't pick a cached rev for a different ref
export _NIX_FORCE_HTTP=1
rev_tag1_nix=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = \"file://$repo\"; ref = \"refs/tags/tag1\"; }).rev")
rev_tag1=$(git -C $repo rev-parse refs/tags/tag1)
[[ $rev_tag1_nix = $rev_tag1 ]]
rev_tag2_nix=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = \"file://$repo\"; ref = \"refs/tags/tag2\"; }).rev")
rev_tag2=$(git -C $repo rev-parse refs/tags/tag2)
[[ $rev_tag2_nix = $rev_tag2 ]]
unset _NIX_FORCE_HTTP
# should fail if there is no repo # should fail if there is no repo
rm -rf $repo/.git rm -rf $repo/.git
(! nix eval --impure --raw --expr "(builtins.fetchGit \"file://$repo\").outPath") (! nix eval --impure --raw --expr "(builtins.fetchGit \"file://$repo\").outPath")

View file

@ -7,7 +7,7 @@ with import (nixpkgs + "/nixos/lib/testing-python.nix") {
let let
# Generate a fake root CA and a fake api.github.com / channels.nixos.org certificate. # Generate a fake root CA and a fake api.github.com / github.com / channels.nixos.org certificate.
cert = pkgs.runCommand "cert" { buildInputs = [ pkgs.openssl ]; } cert = pkgs.runCommand "cert" { buildInputs = [ pkgs.openssl ]; }
'' ''
mkdir -p $out mkdir -p $out
@ -18,7 +18,7 @@ let
openssl req -newkey rsa:2048 -nodes -keyout $out/server.key \ openssl req -newkey rsa:2048 -nodes -keyout $out/server.key \
-subj "/C=CN/ST=Denial/L=Springfield/O=Dis/CN=github.com" -out server.csr -subj "/C=CN/ST=Denial/L=Springfield/O=Dis/CN=github.com" -out server.csr
openssl x509 -req -extfile <(printf "subjectAltName=DNS:api.github.com,DNS:channels.nixos.org") \ openssl x509 -req -extfile <(printf "subjectAltName=DNS:api.github.com,DNS:github.com,DNS:channels.nixos.org") \
-days 36500 -in server.csr -CA $out/ca.crt -CAkey ca.key -CAcreateserial -out $out/server.crt -days 36500 -in server.csr -CA $out/ca.crt -CAkey ca.key -CAcreateserial -out $out/server.crt
''; '';
@ -37,6 +37,17 @@ let
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs" "repo": "nixpkgs"
} }
},
{
"from": {
"type": "indirect",
"id": "private-flake"
},
"to": {
"type": "github",
"owner": "fancy-enterprise",
"repo": "private-flake"
}
} }
], ],
"version": 2 "version": 2
@ -45,20 +56,40 @@ let
destination = "/flake-registry.json"; destination = "/flake-registry.json";
}; };
api = pkgs.runCommand "nixpkgs-flake" {} private-flake-rev = "9f1dd0df5b54a7dc75b618034482ed42ce34383d";
private-flake-api = pkgs.runCommand "private-flake" {}
'' ''
mkdir -p $out/tarball mkdir -p $out/{commits,tarball}
# Setup https://docs.github.com/en/rest/commits/commits#get-a-commit
echo '{"sha": "${private-flake-rev}"}' > $out/commits/HEAD
# Setup tarball download via API
dir=private-flake
mkdir $dir
echo '{ outputs = {...}: {}; }' > $dir/flake.nix
tar cfz $out/tarball/${private-flake-rev} $dir --hard-dereference
'';
nixpkgs-api = pkgs.runCommand "nixpkgs-flake" {}
''
mkdir -p $out/commits
# Setup https://docs.github.com/en/rest/commits/commits#get-a-commit
echo '{"sha": "${nixpkgs.rev}"}' > $out/commits/HEAD
'';
archive = pkgs.runCommand "nixpkgs-flake" {}
''
mkdir -p $out/archive
dir=NixOS-nixpkgs-${nixpkgs.shortRev} dir=NixOS-nixpkgs-${nixpkgs.shortRev}
cp -prd ${nixpkgs} $dir cp -prd ${nixpkgs} $dir
# Set the correct timestamp in the tarball. # Set the correct timestamp in the tarball.
find $dir -print0 | xargs -0 touch -t ${builtins.substring 0 12 nixpkgs.lastModifiedDate}.${builtins.substring 12 2 nixpkgs.lastModifiedDate} -- find $dir -print0 | xargs -0 touch -t ${builtins.substring 0 12 nixpkgs.lastModifiedDate}.${builtins.substring 12 2 nixpkgs.lastModifiedDate} --
tar cfz $out/tarball/${nixpkgs.rev} $dir --hard-dereference tar cfz $out/archive/${nixpkgs.rev}.tar.gz $dir --hard-dereference
mkdir -p $out/commits
echo '{"sha": "${nixpkgs.rev}"}' > $out/commits/HEAD
''; '';
in in
makeTest ( makeTest (
@ -93,7 +124,20 @@ makeTest (
sslServerCert = "${cert}/server.crt"; sslServerCert = "${cert}/server.crt";
servedDirs = servedDirs =
[ { urlPath = "/repos/NixOS/nixpkgs"; [ { urlPath = "/repos/NixOS/nixpkgs";
dir = api; dir = nixpkgs-api;
}
{ urlPath = "/repos/fancy-enterprise/private-flake";
dir = private-flake-api;
}
];
};
services.httpd.virtualHosts."github.com" =
{ forceSSL = true;
sslServerKey = "${cert}/server.key";
sslServerCert = "${cert}/server.crt";
servedDirs =
[ { urlPath = "/NixOS/nixpkgs";
dir = archive;
} }
]; ];
}; };
@ -107,9 +151,8 @@ makeTest (
virtualisation.memorySize = 4096; virtualisation.memorySize = 4096;
nix.binaryCaches = lib.mkForce [ ]; nix.binaryCaches = lib.mkForce [ ];
nix.extraOptions = "experimental-features = nix-command flakes"; nix.extraOptions = "experimental-features = nix-command flakes";
environment.systemPackages = [ pkgs.jq ];
networking.hosts.${(builtins.head nodes.github.config.networking.interfaces.eth1.ipv4.addresses).address} = networking.hosts.${(builtins.head nodes.github.config.networking.interfaces.eth1.ipv4.addresses).address} =
[ "channels.nixos.org" "api.github.com" ]; [ "channels.nixos.org" "api.github.com" "github.com" ];
security.pki.certificateFiles = [ "${cert}/ca.crt" ]; security.pki.certificateFiles = [ "${cert}/ca.crt" ];
}; };
}; };
@ -121,22 +164,39 @@ makeTest (
start_all() start_all()
def cat_log():
github.succeed("cat /var/log/httpd/*.log >&2")
github.wait_for_unit("httpd.service") github.wait_for_unit("httpd.service")
client.succeed("curl -v https://api.github.com/ >&2") client.succeed("curl -v https://github.com/ >&2")
client.succeed("nix registry list | grep nixpkgs") out = client.succeed("nix registry list")
print(out)
assert "github:NixOS/nixpkgs" in out, "nixpkgs flake not found"
assert "github:fancy-enterprise/private-flake" in out, "private flake not found"
cat_log()
rev = client.succeed("nix flake info nixpkgs --json | jq -r .revision") # If no github access token is provided, nix should use the public archive url...
assert rev.strip() == "${nixpkgs.rev}", "revision mismatch" out = client.succeed("nix flake metadata nixpkgs --json")
print(out)
info = json.loads(out)
assert info["revision"] == "${nixpkgs.rev}", f"revision mismatch: {info['revision']} != ${nixpkgs.rev}"
cat_log()
# ... otherwise it should use the API
out = client.succeed("nix flake metadata private-flake --json --access-tokens github.com=ghp_000000000000000000000000000000000000 --tarball-ttl 0")
print(out)
info = json.loads(out)
assert info["revision"] == "${private-flake-rev}", f"revision mismatch: {info['revision']} != ${private-flake-rev}"
cat_log()
client.succeed("nix registry pin nixpkgs") client.succeed("nix registry pin nixpkgs")
client.succeed("nix flake metadata nixpkgs --tarball-ttl 0 >&2")
client.succeed("nix flake info nixpkgs --tarball-ttl 0 >&2")
# Shut down the web server. The flake should be cached on the client. # Shut down the web server. The flake should be cached on the client.
github.succeed("systemctl stop httpd.service") github.succeed("systemctl stop httpd.service")
info = json.loads(client.succeed("nix flake info nixpkgs --json")) info = json.loads(client.succeed("nix flake metadata nixpkgs --json"))
date = time.strftime("%Y%m%d%H%M%S", time.gmtime(info['lastModified'])) date = time.strftime("%Y%m%d%H%M%S", time.gmtime(info['lastModified']))
assert date == "${nixpkgs.lastModifiedDate}", "time mismatch" assert date == "${nixpkgs.lastModifiedDate}", "time mismatch"

8
tests/id-test.nix Normal file
View file

@ -0,0 +1,8 @@
{ name, uidRange ? false }:
with import <nixpkgs> {};
runCommand name
{ requiredSystemFeatures = if uidRange then ["uid-range"] else [];
}
"id; id > $out"

View file

@ -2,7 +2,7 @@ source common.sh
requireDaemonNewerThan "2.8pre20220311" requireDaemonNewerThan "2.8pre20220311"
enableFeatures "ca-derivations ca-references impure-derivations" enableFeatures "ca-derivations impure-derivations"
restartDaemon restartDaemon
set -o pipefail set -o pipefail

View file

@ -0,0 +1 @@
true

View file

@ -1 +0,0 @@
Bool(True)

View file

@ -31,6 +31,15 @@ rec {
helloString = "Hello, world!"; helloString = "Hello, world!";
}; };
use-a = mkDerivation {
name = "use-a";
inherit (a) first second;
builder = builtins.toFile "builder.sh"
''
cat $first/file $second/file >$out
'';
};
b = mkDerivation { b = mkDerivation {
defaultOutput = assert a.second.helloString == "Hello, world!"; a; defaultOutput = assert a.second.helloString == "Hello, world!"; a;
firstOutput = assert a.outputName == "first"; a.first.first; firstOutput = assert a.outputName == "first"; a.first.first;
@ -87,4 +96,25 @@ rec {
buildCommand = "mkdir $a $b $c"; buildCommand = "mkdir $a $b $c";
}; };
independent = mkDerivation {
name = "multiple-outputs-independent";
outputs = [ "first" "second" ];
builder = builtins.toFile "builder.sh"
''
mkdir $first $second
test -z $all
echo "first" > $first/file
echo "second" > $second/file
'';
};
use-independent = mkDerivation {
name = "use-independent";
inherit (a) first second;
builder = builtins.toFile "builder.sh"
''
cat $first/file $second/file >$out
'';
};
} }

View file

@ -1 +0,0 @@
echo "$input" > $out

Some files were not shown because too many files have changed in this diff Show more