Merge remote-tracking branch 'nixos/master'

Max Headroom 2022-12-01 17:52:54 +01:00
commit e95428e16a
102 changed files with 2714 additions and 1615 deletions

.github/assign-by-files.yml (vendored, new file, 5 lines)

@@ -0,0 +1,5 @@
---
# This file is used by https://github.com/marketplace/actions/auto-assign-reviewer-by-files
# to assign maintainers
"doc/**/*":
- fricklerhandwerk

.github/workflows/assign-reviewer.yml (vendored, new file, 12 lines)

@@ -0,0 +1,12 @@
name: "Auto Assign"
on:
- pull_request
jobs:
assign_reviewer:
runs-on: ubuntu-latest
steps:
- uses: shufo/auto-assign-reviewer-by-files@v1.1.4
with:
config: ".github/assign-by-files.yml"
token: ${{ secrets.GITHUB_TOKEN }}


@@ -21,7 +21,7 @@ jobs:
fetch-depth: 0
- name: Create backport PRs
# should be kept in sync with `version`
uses: zeebe-io/backport-action@v0.0.8
uses: zeebe-io/backport-action@v0.0.9
with:
# Config README: https://github.com/zeebe-io/backport-action#backport-action
github_token: ${{ secrets.GITHUB_TOKEN }}


@@ -21,7 +21,7 @@ jobs:
fetch-depth: 0
- uses: cachix/install-nix-action@v18
- run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
- uses: cachix/cachix-action@v10
- uses: cachix/cachix-action@v12
if: needs.check_secrets.outputs.cachix == 'true'
with:
name: '${{ env.CACHIX_NAME }}'
@@ -59,7 +59,7 @@ jobs:
fetch-depth: 0
- run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
- uses: cachix/install-nix-action@v18
- uses: cachix/cachix-action@v10
- uses: cachix/cachix-action@v12
with:
name: '${{ env.CACHIX_NAME }}'
signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}'
@@ -105,7 +105,7 @@ jobs:
- uses: cachix/install-nix-action@v18
- run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
- run: echo NIX_VERSION="$(nix --experimental-features 'nix-command flakes' eval .\#default.version | tr -d \")" >> $GITHUB_ENV
- uses: cachix/cachix-action@v10
- uses: cachix/cachix-action@v12
if: needs.check_secrets.outputs.cachix == 'true'
with:
name: '${{ env.CACHIX_NAME }}'


@@ -1,16 +1,20 @@
with builtins;
with import ./utils.nix;
builtinsDump:
let
showBuiltin = name:
let
inherit (builtinsDump.${name}) doc args;
in
''
<dt id="builtins-${name}">
<a href="#builtins-${name}"><code>${name} ${listArgs args}</code></a>
</dt>
<dd>
builtins:
${doc}
</dd>
'';
listArgs = args: builtins.concatStringsSep " " (map (s: "<var>${s}</var>") args);
in
with builtins; concatStringsSep "\n" (map showBuiltin (attrNames builtinsDump))
concatStrings (map
(name:
let builtin = builtins.${name}; in
"<dt id=\"builtins-${name}\"><a href=\"#builtins-${name}\"><code>${name} "
+ concatStringsSep " " (map (s: "<var>${s}</var>") builtin.args)
+ "</code></a></dt>"
+ "<dd>\n\n"
+ builtin.doc
+ "\n\n</dd>"
)
(attrNames builtins))
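
For orientation, a rough `nix repl` sketch (the argument names are made up for this illustration) of what the `listArgs` helper introduced above produces for a builtin with two arguments:

```nix
# the body of the listArgs helper above, applied to a hypothetical argument list
builtins.concatStringsSep " " (map (s: "<var>${s}</var>") [ "e1" "e2" ])
# => "<var>e1</var> <var>e2</var>"
```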


@@ -99,6 +99,7 @@ let
in [ cmd ] ++ concatMap subcommand (attrNames details.commands or {});
parsedToplevel = builtins.fromJSON toplevel;
manpages = processCommand {
command = "nix";
details = parsedToplevel;


@@ -11,16 +11,16 @@ concatStrings (map
+ concatStrings (map (s: " ${s}\n") (splitLines option.description)) + "\n\n"
+ (if option.documentDefault
then " **Default:** " + (
if option.value == "" || option.value == []
if option.defaultValue == "" || option.defaultValue == []
then "*empty*"
else if isBool option.value
then (if option.value then "`true`" else "`false`")
else if isBool option.defaultValue
then (if option.defaultValue then "`true`" else "`false`")
else
# n.b. a StringMap value type is specified as a string, but
# this shows the value type. The empty stringmap is "null" in
# JSON, but that converts to "{ }" here.
(if isAttrs option.value then "`\"\"`"
else "`" + toString option.value + "`")) + "\n\n"
(if isAttrs option.defaultValue then "`\"\"`"
else "`" + toString option.defaultValue + "`")) + "\n\n"
else " **Default:** *machine-specific*\n")
+ (if option.aliases != []
then " **Deprecated alias:** " + (concatStringsSep ", " (map (s: "`${s}`") option.aliases)) + "\n\n"


@@ -1,330 +1,424 @@
// Redirects from old DocBook manual.
var redirects = {
"#part-advanced-topics": "advanced-topics/advanced-topics.html",
"#chap-tuning-cores-and-jobs": "advanced-topics/cores-vs-jobs.html",
"#chap-diff-hook": "advanced-topics/diff-hook.html",
"#check-dirs-are-unregistered": "advanced-topics/diff-hook.html#check-dirs-are-unregistered",
"#chap-distributed-builds": "advanced-topics/distributed-builds.html",
"#chap-post-build-hook": "advanced-topics/post-build-hook.html",
"#chap-post-build-hook-caveats": "advanced-topics/post-build-hook.html#implementation-caveats",
"#part-command-ref": "command-ref/command-ref.html",
"#conf-allow-import-from-derivation": "command-ref/conf-file.html#conf-allow-import-from-derivation",
"#conf-allow-new-privileges": "command-ref/conf-file.html#conf-allow-new-privileges",
"#conf-allowed-uris": "command-ref/conf-file.html#conf-allowed-uris",
"#conf-allowed-users": "command-ref/conf-file.html#conf-allowed-users",
"#conf-auto-optimise-store": "command-ref/conf-file.html#conf-auto-optimise-store",
"#conf-binary-cache-public-keys": "command-ref/conf-file.html#conf-binary-cache-public-keys",
"#conf-binary-caches": "command-ref/conf-file.html#conf-binary-caches",
"#conf-build-compress-log": "command-ref/conf-file.html#conf-build-compress-log",
"#conf-build-cores": "command-ref/conf-file.html#conf-build-cores",
"#conf-build-extra-chroot-dirs": "command-ref/conf-file.html#conf-build-extra-chroot-dirs",
"#conf-build-extra-sandbox-paths": "command-ref/conf-file.html#conf-build-extra-sandbox-paths",
"#conf-build-fallback": "command-ref/conf-file.html#conf-build-fallback",
"#conf-build-max-jobs": "command-ref/conf-file.html#conf-build-max-jobs",
"#conf-build-max-log-size": "command-ref/conf-file.html#conf-build-max-log-size",
"#conf-build-max-silent-time": "command-ref/conf-file.html#conf-build-max-silent-time",
"#conf-build-repeat": "command-ref/conf-file.html#conf-build-repeat",
"#conf-build-timeout": "command-ref/conf-file.html#conf-build-timeout",
"#conf-build-use-chroot": "command-ref/conf-file.html#conf-build-use-chroot",
"#conf-build-use-sandbox": "command-ref/conf-file.html#conf-build-use-sandbox",
"#conf-build-use-substitutes": "command-ref/conf-file.html#conf-build-use-substitutes",
"#conf-build-users-group": "command-ref/conf-file.html#conf-build-users-group",
"#conf-builders": "command-ref/conf-file.html#conf-builders",
"#conf-builders-use-substitutes": "command-ref/conf-file.html#conf-builders-use-substitutes",
"#conf-compress-build-log": "command-ref/conf-file.html#conf-compress-build-log",
"#conf-connect-timeout": "command-ref/conf-file.html#conf-connect-timeout",
"#conf-cores": "command-ref/conf-file.html#conf-cores",
"#conf-diff-hook": "command-ref/conf-file.html#conf-diff-hook",
"#conf-enforce-determinism": "command-ref/conf-file.html#conf-enforce-determinism",
"#conf-env-keep-derivations": "command-ref/conf-file.html#conf-env-keep-derivations",
"#conf-extra-binary-caches": "command-ref/conf-file.html#conf-extra-binary-caches",
"#conf-extra-platforms": "command-ref/conf-file.html#conf-extra-platforms",
"#conf-extra-sandbox-paths": "command-ref/conf-file.html#conf-extra-sandbox-paths",
"#conf-extra-substituters": "command-ref/conf-file.html#conf-extra-substituters",
"#conf-fallback": "command-ref/conf-file.html#conf-fallback",
"#conf-fsync-metadata": "command-ref/conf-file.html#conf-fsync-metadata",
"#conf-gc-keep-derivations": "command-ref/conf-file.html#conf-gc-keep-derivations",
"#conf-gc-keep-outputs": "command-ref/conf-file.html#conf-gc-keep-outputs",
"#conf-hashed-mirrors": "command-ref/conf-file.html#conf-hashed-mirrors",
"#conf-http-connections": "command-ref/conf-file.html#conf-http-connections",
"#conf-keep-build-log": "command-ref/conf-file.html#conf-keep-build-log",
"#conf-keep-derivations": "command-ref/conf-file.html#conf-keep-derivations",
"#conf-keep-env-derivations": "command-ref/conf-file.html#conf-keep-env-derivations",
"#conf-keep-outputs": "command-ref/conf-file.html#conf-keep-outputs",
"#conf-max-build-log-size": "command-ref/conf-file.html#conf-max-build-log-size",
"#conf-max-free": "command-ref/conf-file.html#conf-max-free",
"#conf-max-jobs": "command-ref/conf-file.html#conf-max-jobs",
"#conf-max-silent-time": "command-ref/conf-file.html#conf-max-silent-time",
"#conf-min-free": "command-ref/conf-file.html#conf-min-free",
"#conf-narinfo-cache-negative-ttl": "command-ref/conf-file.html#conf-narinfo-cache-negative-ttl",
"#conf-narinfo-cache-positive-ttl": "command-ref/conf-file.html#conf-narinfo-cache-positive-ttl",
"#conf-netrc-file": "command-ref/conf-file.html#conf-netrc-file",
"#conf-plugin-files": "command-ref/conf-file.html#conf-plugin-files",
"#conf-post-build-hook": "command-ref/conf-file.html#conf-post-build-hook",
"#conf-pre-build-hook": "command-ref/conf-file.html#conf-pre-build-hook",
"#conf-repeat": "command-ref/conf-file.html#conf-repeat",
"#conf-require-sigs": "command-ref/conf-file.html#conf-require-sigs",
"#conf-restrict-eval": "command-ref/conf-file.html#conf-restrict-eval",
"#conf-run-diff-hook": "command-ref/conf-file.html#conf-run-diff-hook",
"#conf-sandbox": "command-ref/conf-file.html#conf-sandbox",
"#conf-sandbox-dev-shm-size": "command-ref/conf-file.html#conf-sandbox-dev-shm-size",
"#conf-sandbox-paths": "command-ref/conf-file.html#conf-sandbox-paths",
"#conf-secret-key-files": "command-ref/conf-file.html#conf-secret-key-files",
"#conf-show-trace": "command-ref/conf-file.html#conf-show-trace",
"#conf-stalled-download-timeout": "command-ref/conf-file.html#conf-stalled-download-timeout",
"#conf-substitute": "command-ref/conf-file.html#conf-substitute",
"#conf-substituters": "command-ref/conf-file.html#conf-substituters",
"#conf-system": "command-ref/conf-file.html#conf-system",
"#conf-system-features": "command-ref/conf-file.html#conf-system-features",
"#conf-tarball-ttl": "command-ref/conf-file.html#conf-tarball-ttl",
"#conf-timeout": "command-ref/conf-file.html#conf-timeout",
"#conf-trace-function-calls": "command-ref/conf-file.html#conf-trace-function-calls",
"#conf-trusted-binary-caches": "command-ref/conf-file.html#conf-trusted-binary-caches",
"#conf-trusted-public-keys": "command-ref/conf-file.html#conf-trusted-public-keys",
"#conf-trusted-substituters": "command-ref/conf-file.html#conf-trusted-substituters",
"#conf-trusted-users": "command-ref/conf-file.html#conf-trusted-users",
"#extra-sandbox-paths": "command-ref/conf-file.html#extra-sandbox-paths",
"#sec-conf-file": "command-ref/conf-file.html",
"#env-NIX_PATH": "command-ref/env-common.html#env-NIX_PATH",
"#env-common": "command-ref/env-common.html",
"#envar-remote": "command-ref/env-common.html#env-NIX_REMOTE",
"#sec-common-env": "command-ref/env-common.html",
"#ch-files": "command-ref/files.html",
"#ch-main-commands": "command-ref/main-commands.html",
"#opt-out-link": "command-ref/nix-build.html#opt-out-link",
"#sec-nix-build": "command-ref/nix-build.html",
"#sec-nix-channel": "command-ref/nix-channel.html",
"#sec-nix-collect-garbage": "command-ref/nix-collect-garbage.html",
"#sec-nix-copy-closure": "command-ref/nix-copy-closure.html",
"#sec-nix-daemon": "command-ref/nix-daemon.html",
"#refsec-nix-env-install-examples": "command-ref/nix-env.html#examples",
"#rsec-nix-env-install": "command-ref/nix-env.html#operation---install",
"#rsec-nix-env-set": "command-ref/nix-env.html#operation---set",
"#rsec-nix-env-set-flag": "command-ref/nix-env.html#operation---set-flag",
"#rsec-nix-env-upgrade": "command-ref/nix-env.html#operation---upgrade",
"#sec-nix-env": "command-ref/nix-env.html",
"#ssec-version-comparisons": "command-ref/nix-env.html#versions",
"#sec-nix-hash": "command-ref/nix-hash.html",
"#sec-nix-instantiate": "command-ref/nix-instantiate.html",
"#sec-nix-prefetch-url": "command-ref/nix-prefetch-url.html",
"#sec-nix-shell": "command-ref/nix-shell.html",
"#ssec-nix-shell-shebang": "command-ref/nix-shell.html#use-as-a--interpreter",
"#nixref-queries": "command-ref/nix-store.html#queries",
"#opt-add-root": "command-ref/nix-store.html#opt-add-root",
"#refsec-nix-store-dump": "command-ref/nix-store.html#operation---dump",
"#refsec-nix-store-export": "command-ref/nix-store.html#operation---export",
"#refsec-nix-store-import": "command-ref/nix-store.html#operation---import",
"#refsec-nix-store-query": "command-ref/nix-store.html#operation---query",
"#refsec-nix-store-verify": "command-ref/nix-store.html#operation---verify",
"#rsec-nix-store-gc": "command-ref/nix-store.html#operation---gc",
"#rsec-nix-store-generate-binary-cache-key": "command-ref/nix-store.html#operation---generate-binary-cache-key",
"#rsec-nix-store-realise": "command-ref/nix-store.html#operation---realise",
"#rsec-nix-store-serve": "command-ref/nix-store.html#operation---serve",
"#sec-nix-store": "command-ref/nix-store.html",
"#opt-I": "command-ref/opt-common.html#opt-I",
"#opt-attr": "command-ref/opt-common.html#opt-attr",
"#opt-common": "command-ref/opt-common.html",
"#opt-cores": "command-ref/opt-common.html#opt-cores",
"#opt-log-format": "command-ref/opt-common.html#opt-log-format",
"#opt-max-jobs": "command-ref/opt-common.html#opt-max-jobs",
"#opt-max-silent-time": "command-ref/opt-common.html#opt-max-silent-time",
"#opt-timeout": "command-ref/opt-common.html#opt-timeout",
"#sec-common-options": "command-ref/opt-common.html",
"#ch-utilities": "command-ref/utilities.html",
"#chap-hacking": "contributing/hacking.html",
"#adv-attr-allowSubstitutes": "language/advanced-attributes.html#adv-attr-allowSubstitutes",
"#adv-attr-allowedReferences": "language/advanced-attributes.html#adv-attr-allowedReferences",
"#adv-attr-allowedRequisites": "language/advanced-attributes.html#adv-attr-allowedRequisites",
"#adv-attr-disallowedReferences": "language/advanced-attributes.html#adv-attr-disallowedReferences",
"#adv-attr-disallowedRequisites": "language/advanced-attributes.html#adv-attr-disallowedRequisites",
"#adv-attr-exportReferencesGraph": "language/advanced-attributes.html#adv-attr-exportReferencesGraph",
"#adv-attr-impureEnvVars": "language/advanced-attributes.html#adv-attr-impureEnvVars",
"#adv-attr-outputHash": "language/advanced-attributes.html#adv-attr-outputHash",
"#adv-attr-outputHashAlgo": "language/advanced-attributes.html#adv-attr-outputHashAlgo",
"#adv-attr-outputHashMode": "language/advanced-attributes.html#adv-attr-outputHashMode",
"#adv-attr-passAsFile": "language/advanced-attributes.html#adv-attr-passAsFile",
"#adv-attr-preferLocalBuild": "language/advanced-attributes.html#adv-attr-preferLocalBuild",
"#fixed-output-drvs": "language/advanced-attributes.html#adv-attr-outputHash",
"#sec-advanced-attributes": "language/advanced-attributes.html",
"#builtin-abort": "language/builtins.html#builtins-abort",
"#builtin-add": "language/builtins.html#builtins-add",
"#builtin-all": "language/builtins.html#builtins-all",
"#builtin-any": "language/builtins.html#builtins-any",
"#builtin-attrNames": "language/builtins.html#builtins-attrNames",
"#builtin-attrValues": "language/builtins.html#builtins-attrValues",
"#builtin-baseNameOf": "language/builtins.html#builtins-baseNameOf",
"#builtin-bitAnd": "language/builtins.html#builtins-bitAnd",
"#builtin-bitOr": "language/builtins.html#builtins-bitOr",
"#builtin-bitXor": "language/builtins.html#builtins-bitXor",
"#builtin-builtins": "language/builtins.html#builtins-builtins",
"#builtin-compareVersions": "language/builtins.html#builtins-compareVersions",
"#builtin-concatLists": "language/builtins.html#builtins-concatLists",
"#builtin-concatStringsSep": "language/builtins.html#builtins-concatStringsSep",
"#builtin-currentSystem": "language/builtins.html#builtins-currentSystem",
"#builtin-deepSeq": "language/builtins.html#builtins-deepSeq",
"#builtin-derivation": "language/builtins.html#builtins-derivation",
"#builtin-dirOf": "language/builtins.html#builtins-dirOf",
"#builtin-div": "language/builtins.html#builtins-div",
"#builtin-elem": "language/builtins.html#builtins-elem",
"#builtin-elemAt": "language/builtins.html#builtins-elemAt",
"#builtin-fetchGit": "language/builtins.html#builtins-fetchGit",
"#builtin-fetchTarball": "language/builtins.html#builtins-fetchTarball",
"#builtin-fetchurl": "language/builtins.html#builtins-fetchurl",
"#builtin-filterSource": "language/builtins.html#builtins-filterSource",
"#builtin-foldl-prime": "language/builtins.html#builtins-foldl-prime",
"#builtin-fromJSON": "language/builtins.html#builtins-fromJSON",
"#builtin-functionArgs": "language/builtins.html#builtins-functionArgs",
"#builtin-genList": "language/builtins.html#builtins-genList",
"#builtin-getAttr": "language/builtins.html#builtins-getAttr",
"#builtin-getEnv": "language/builtins.html#builtins-getEnv",
"#builtin-hasAttr": "language/builtins.html#builtins-hasAttr",
"#builtin-hashFile": "language/builtins.html#builtins-hashFile",
"#builtin-hashString": "language/builtins.html#builtins-hashString",
"#builtin-head": "language/builtins.html#builtins-head",
"#builtin-import": "language/builtins.html#builtins-import",
"#builtin-intersectAttrs": "language/builtins.html#builtins-intersectAttrs",
"#builtin-isAttrs": "language/builtins.html#builtins-isAttrs",
"#builtin-isBool": "language/builtins.html#builtins-isBool",
"#builtin-isFloat": "language/builtins.html#builtins-isFloat",
"#builtin-isFunction": "language/builtins.html#builtins-isFunction",
"#builtin-isInt": "language/builtins.html#builtins-isInt",
"#builtin-isList": "language/builtins.html#builtins-isList",
"#builtin-isNull": "language/builtins.html#builtins-isNull",
"#builtin-isString": "language/builtins.html#builtins-isString",
"#builtin-length": "language/builtins.html#builtins-length",
"#builtin-lessThan": "language/builtins.html#builtins-lessThan",
"#builtin-listToAttrs": "language/builtins.html#builtins-listToAttrs",
"#builtin-map": "language/builtins.html#builtins-map",
"#builtin-match": "language/builtins.html#builtins-match",
"#builtin-mul": "language/builtins.html#builtins-mul",
"#builtin-parseDrvName": "language/builtins.html#builtins-parseDrvName",
"#builtin-path": "language/builtins.html#builtins-path",
"#builtin-pathExists": "language/builtins.html#builtins-pathExists",
"#builtin-placeholder": "language/builtins.html#builtins-placeholder",
"#builtin-readDir": "language/builtins.html#builtins-readDir",
"#builtin-readFile": "language/builtins.html#builtins-readFile",
"#builtin-removeAttrs": "language/builtins.html#builtins-removeAttrs",
"#builtin-replaceStrings": "language/builtins.html#builtins-replaceStrings",
"#builtin-seq": "language/builtins.html#builtins-seq",
"#builtin-sort": "language/builtins.html#builtins-sort",
"#builtin-split": "language/builtins.html#builtins-split",
"#builtin-splitVersion": "language/builtins.html#builtins-splitVersion",
"#builtin-stringLength": "language/builtins.html#builtins-stringLength",
"#builtin-sub": "language/builtins.html#builtins-sub",
"#builtin-substring": "language/builtins.html#builtins-substring",
"#builtin-tail": "language/builtins.html#builtins-tail",
"#builtin-throw": "language/builtins.html#builtins-throw",
"#builtin-toFile": "language/builtins.html#builtins-toFile",
"#builtin-toJSON": "language/builtins.html#builtins-toJSON",
"#builtin-toPath": "language/builtins.html#builtins-toPath",
"#builtin-toString": "language/builtins.html#builtins-toString",
"#builtin-toXML": "language/builtins.html#builtins-toXML",
"#builtin-trace": "language/builtins.html#builtins-trace",
"#builtin-tryEval": "language/builtins.html#builtins-tryEval",
"#builtin-typeOf": "language/builtins.html#builtins-typeOf",
"#ssec-builtins": "language/builtins.html",
"#attr-system": "language/derivations.html#attr-system",
"#ssec-derivation": "language/derivations.html",
"#ch-expression-language": "language/index.html",
"#sec-constructs": "language/constructs.html",
"#sect-let-language": "language/constructs.html#let-language",
"#ss-functions": "language/constructs.html#functions",
"#sec-language-operators": "language/operators.html",
"#table-operators": "language/operators.html",
"#ssec-values": "language/values.html",
"#gloss-closure": "glossary.html#gloss-closure",
"#gloss-derivation": "glossary.html#gloss-derivation",
"#gloss-deriver": "glossary.html#gloss-deriver",
"#gloss-nar": "glossary.html#gloss-nar",
"#gloss-output-path": "glossary.html#gloss-output-path",
"#gloss-profile": "glossary.html#gloss-profile",
"#gloss-reachable": "glossary.html#gloss-reachable",
"#gloss-reference": "glossary.html#gloss-reference",
"#gloss-substitute": "glossary.html#gloss-substitute",
"#gloss-user-env": "glossary.html#gloss-user-env",
"#gloss-validity": "glossary.html#gloss-validity",
"#part-glossary": "glossary.html",
"#sec-building-source": "installation/building-source.html",
"#ch-env-variables": "installation/env-variables.html",
"#sec-installer-proxy-settings": "installation/env-variables.html#proxy-environment-variables",
"#sec-nix-ssl-cert-file": "installation/env-variables.html#nix_ssl_cert_file",
"#sec-nix-ssl-cert-file-with-nix-daemon-and-macos": "installation/env-variables.html#nix_ssl_cert_file-with-macos-and-the-nix-daemon",
"#chap-installation": "installation/installation.html",
"#ch-installing-binary": "installation/installing-binary.html",
"#sect-macos-installation": "installation/installing-binary.html#macos-installation",
"#sect-macos-installation-change-store-prefix": "installation/installing-binary.html#macos-installation",
"#sect-macos-installation-encrypted-volume": "installation/installing-binary.html#macos-installation",
"#sect-macos-installation-recommended-notes": "installation/installing-binary.html#macos-installation",
"#sect-macos-installation-symlink": "installation/installing-binary.html#macos-installation",
"#sect-multi-user-installation": "installation/installing-binary.html#multi-user-installation",
"#sect-nix-install-binary-tarball": "installation/installing-binary.html#installing-from-a-binary-tarball",
"#sect-nix-install-pinned-version-url": "installation/installing-binary.html#installing-a-pinned-nix-version-from-a-url",
"#sect-single-user-installation": "installation/installing-binary.html#single-user-installation",
"#ch-installing-source": "installation/installing-source.html",
"#ssec-multi-user": "installation/multi-user.html",
"#ch-nix-security": "installation/nix-security.html",
"#sec-obtaining-source": "installation/obtaining-source.html",
"#sec-prerequisites-source": "installation/prerequisites-source.html",
"#sec-single-user": "installation/single-user.html",
"#ch-supported-platforms": "installation/supported-platforms.html",
"#ch-upgrading-nix": "installation/upgrading.html",
"#ch-about-nix": "introduction.html",
"#chap-introduction": "introduction.html",
"#ch-basic-package-mgmt": "package-management/basic-package-mgmt.html",
"#ssec-binary-cache-substituter": "package-management/binary-cache-substituter.html",
"#sec-channels": "package-management/channels.html",
"#ssec-copy-closure": "package-management/copy-closure.html",
"#sec-garbage-collection": "package-management/garbage-collection.html",
"#ssec-gc-roots": "package-management/garbage-collector-roots.html",
"#chap-package-management": "package-management/package-management.html",
"#sec-profiles": "package-management/profiles.html",
"#ssec-s3-substituter": "package-management/s3-substituter.html",
"#ssec-s3-substituter-anonymous-reads": "package-management/s3-substituter.html#anonymous-reads-to-your-s3-compatible-binary-cache",
"#ssec-s3-substituter-authenticated-reads": "package-management/s3-substituter.html#authenticated-reads-to-your-s3-binary-cache",
"#ssec-s3-substituter-authenticated-writes": "package-management/s3-substituter.html#authenticated-writes-to-your-s3-compatible-binary-cache",
"#sec-sharing-packages": "package-management/sharing-packages.html",
"#ssec-ssh-substituter": "package-management/ssh-substituter.html",
"#chap-quick-start": "quick-start.html",
"#sec-relnotes": "release-notes/release-notes.html",
"#ch-relnotes-0.10.1": "release-notes/rl-0.10.1.html",
"#ch-relnotes-0.10": "release-notes/rl-0.10.html",
"#ssec-relnotes-0.11": "release-notes/rl-0.11.html",
"#ssec-relnotes-0.12": "release-notes/rl-0.12.html",
"#ssec-relnotes-0.13": "release-notes/rl-0.13.html",
"#ssec-relnotes-0.14": "release-notes/rl-0.14.html",
"#ssec-relnotes-0.15": "release-notes/rl-0.15.html",
"#ssec-relnotes-0.16": "release-notes/rl-0.16.html",
"#ch-relnotes-0.5": "release-notes/rl-0.5.html",
"#ch-relnotes-0.6": "release-notes/rl-0.6.html",
"#ch-relnotes-0.7": "release-notes/rl-0.7.html",
"#ch-relnotes-0.8.1": "release-notes/rl-0.8.1.html",
"#ch-relnotes-0.8": "release-notes/rl-0.8.html",
"#ch-relnotes-0.9.1": "release-notes/rl-0.9.1.html",
"#ch-relnotes-0.9.2": "release-notes/rl-0.9.2.html",
"#ch-relnotes-0.9": "release-notes/rl-0.9.html",
"#ssec-relnotes-1.0": "release-notes/rl-1.0.html",
"#ssec-relnotes-1.1": "release-notes/rl-1.1.html",
"#ssec-relnotes-1.10": "release-notes/rl-1.10.html",
"#ssec-relnotes-1.11.10": "release-notes/rl-1.11.10.html",
"#ssec-relnotes-1.11": "release-notes/rl-1.11.html",
"#ssec-relnotes-1.2": "release-notes/rl-1.2.html",
"#ssec-relnotes-1.3": "release-notes/rl-1.3.html",
"#ssec-relnotes-1.4": "release-notes/rl-1.4.html",
"#ssec-relnotes-1.5.1": "release-notes/rl-1.5.1.html",
"#ssec-relnotes-1.5.2": "release-notes/rl-1.5.2.html",
"#ssec-relnotes-1.5": "release-notes/rl-1.5.html",
"#ssec-relnotes-1.6.1": "release-notes/rl-1.6.1.html",
"#ssec-relnotes-1.6.0": "release-notes/rl-1.6.html",
"#ssec-relnotes-1.7": "release-notes/rl-1.7.html",
"#ssec-relnotes-1.8": "release-notes/rl-1.8.html",
"#ssec-relnotes-1.9": "release-notes/rl-1.9.html",
"#ssec-relnotes-2.0": "release-notes/rl-2.0.html",
"#ssec-relnotes-2.1": "release-notes/rl-2.1.html",
"#ssec-relnotes-2.2": "release-notes/rl-2.2.html",
"#ssec-relnotes-2.3": "release-notes/rl-2.3.html"
// redirect rules for anchors ensure backwards compatibility of URLs.
// this must be done on the client side, as web servers do not see the anchor part of the URL.
// redirections are declared as follows:
// each entry has as its key a path matching the requested URL path, relative to the mdBook document root.
//
// IMPORTANT: it must specify the full path with file name and suffix
//
// each entry is itself a set of key-value pairs, where
// - keys are anchors on the matched path.
// - values are redirection targets relative to the current path.
const redirects = {
"index.html": {
"part-advanced-topics": "advanced-topics/advanced-topics.html",
"chap-tuning-cores-and-jobs": "advanced-topics/cores-vs-jobs.html",
"chap-diff-hook": "advanced-topics/diff-hook.html",
"check-dirs-are-unregistered": "advanced-topics/diff-hook.html#check-dirs-are-unregistered",
"chap-distributed-builds": "advanced-topics/distributed-builds.html",
"chap-post-build-hook": "advanced-topics/post-build-hook.html",
"chap-post-build-hook-caveats": "advanced-topics/post-build-hook.html#implementation-caveats",
"part-command-ref": "command-ref/command-ref.html",
"conf-allow-import-from-derivation": "command-ref/conf-file.html#conf-allow-import-from-derivation",
"conf-allow-new-privileges": "command-ref/conf-file.html#conf-allow-new-privileges",
"conf-allowed-uris": "command-ref/conf-file.html#conf-allowed-uris",
"conf-allowed-users": "command-ref/conf-file.html#conf-allowed-users",
"conf-auto-optimise-store": "command-ref/conf-file.html#conf-auto-optimise-store",
"conf-binary-cache-public-keys": "command-ref/conf-file.html#conf-binary-cache-public-keys",
"conf-binary-caches": "command-ref/conf-file.html#conf-binary-caches",
"conf-build-compress-log": "command-ref/conf-file.html#conf-build-compress-log",
"conf-build-cores": "command-ref/conf-file.html#conf-build-cores",
"conf-build-extra-chroot-dirs": "command-ref/conf-file.html#conf-build-extra-chroot-dirs",
"conf-build-extra-sandbox-paths": "command-ref/conf-file.html#conf-build-extra-sandbox-paths",
"conf-build-fallback": "command-ref/conf-file.html#conf-build-fallback",
"conf-build-max-jobs": "command-ref/conf-file.html#conf-build-max-jobs",
"conf-build-max-log-size": "command-ref/conf-file.html#conf-build-max-log-size",
"conf-build-max-silent-time": "command-ref/conf-file.html#conf-build-max-silent-time",
"conf-build-repeat": "command-ref/conf-file.html#conf-build-repeat",
"conf-build-timeout": "command-ref/conf-file.html#conf-build-timeout",
"conf-build-use-chroot": "command-ref/conf-file.html#conf-build-use-chroot",
"conf-build-use-sandbox": "command-ref/conf-file.html#conf-build-use-sandbox",
"conf-build-use-substitutes": "command-ref/conf-file.html#conf-build-use-substitutes",
"conf-build-users-group": "command-ref/conf-file.html#conf-build-users-group",
"conf-builders": "command-ref/conf-file.html#conf-builders",
"conf-builders-use-substitutes": "command-ref/conf-file.html#conf-builders-use-substitutes",
"conf-compress-build-log": "command-ref/conf-file.html#conf-compress-build-log",
"conf-connect-timeout": "command-ref/conf-file.html#conf-connect-timeout",
"conf-cores": "command-ref/conf-file.html#conf-cores",
"conf-diff-hook": "command-ref/conf-file.html#conf-diff-hook",
"conf-enforce-determinism": "command-ref/conf-file.html#conf-enforce-determinism",
"conf-env-keep-derivations": "command-ref/conf-file.html#conf-env-keep-derivations",
"conf-extra-binary-caches": "command-ref/conf-file.html#conf-extra-binary-caches",
"conf-extra-platforms": "command-ref/conf-file.html#conf-extra-platforms",
"conf-extra-sandbox-paths": "command-ref/conf-file.html#conf-extra-sandbox-paths",
"conf-extra-substituters": "command-ref/conf-file.html#conf-extra-substituters",
"conf-fallback": "command-ref/conf-file.html#conf-fallback",
"conf-fsync-metadata": "command-ref/conf-file.html#conf-fsync-metadata",
"conf-gc-keep-derivations": "command-ref/conf-file.html#conf-gc-keep-derivations",
"conf-gc-keep-outputs": "command-ref/conf-file.html#conf-gc-keep-outputs",
"conf-hashed-mirrors": "command-ref/conf-file.html#conf-hashed-mirrors",
"conf-http-connections": "command-ref/conf-file.html#conf-http-connections",
"conf-keep-build-log": "command-ref/conf-file.html#conf-keep-build-log",
"conf-keep-derivations": "command-ref/conf-file.html#conf-keep-derivations",
"conf-keep-env-derivations": "command-ref/conf-file.html#conf-keep-env-derivations",
"conf-keep-outputs": "command-ref/conf-file.html#conf-keep-outputs",
"conf-max-build-log-size": "command-ref/conf-file.html#conf-max-build-log-size",
"conf-max-free": "command-ref/conf-file.html#conf-max-free",
"conf-max-jobs": "command-ref/conf-file.html#conf-max-jobs",
"conf-max-silent-time": "command-ref/conf-file.html#conf-max-silent-time",
"conf-min-free": "command-ref/conf-file.html#conf-min-free",
"conf-narinfo-cache-negative-ttl": "command-ref/conf-file.html#conf-narinfo-cache-negative-ttl",
"conf-narinfo-cache-positive-ttl": "command-ref/conf-file.html#conf-narinfo-cache-positive-ttl",
"conf-netrc-file": "command-ref/conf-file.html#conf-netrc-file",
"conf-plugin-files": "command-ref/conf-file.html#conf-plugin-files",
"conf-post-build-hook": "command-ref/conf-file.html#conf-post-build-hook",
"conf-pre-build-hook": "command-ref/conf-file.html#conf-pre-build-hook",
"conf-repeat": "command-ref/conf-file.html#conf-repeat",
"conf-require-sigs": "command-ref/conf-file.html#conf-require-sigs",
"conf-restrict-eval": "command-ref/conf-file.html#conf-restrict-eval",
"conf-run-diff-hook": "command-ref/conf-file.html#conf-run-diff-hook",
"conf-sandbox": "command-ref/conf-file.html#conf-sandbox",
"conf-sandbox-dev-shm-size": "command-ref/conf-file.html#conf-sandbox-dev-shm-size",
"conf-sandbox-paths": "command-ref/conf-file.html#conf-sandbox-paths",
"conf-secret-key-files": "command-ref/conf-file.html#conf-secret-key-files",
"conf-show-trace": "command-ref/conf-file.html#conf-show-trace",
"conf-stalled-download-timeout": "command-ref/conf-file.html#conf-stalled-download-timeout",
"conf-substitute": "command-ref/conf-file.html#conf-substitute",
"conf-substituters": "command-ref/conf-file.html#conf-substituters",
"conf-system": "command-ref/conf-file.html#conf-system",
"conf-system-features": "command-ref/conf-file.html#conf-system-features",
"conf-tarball-ttl": "command-ref/conf-file.html#conf-tarball-ttl",
"conf-timeout": "command-ref/conf-file.html#conf-timeout",
"conf-trace-function-calls": "command-ref/conf-file.html#conf-trace-function-calls",
"conf-trusted-binary-caches": "command-ref/conf-file.html#conf-trusted-binary-caches",
"conf-trusted-public-keys": "command-ref/conf-file.html#conf-trusted-public-keys",
"conf-trusted-substituters": "command-ref/conf-file.html#conf-trusted-substituters",
"conf-trusted-users": "command-ref/conf-file.html#conf-trusted-users",
"extra-sandbox-paths": "command-ref/conf-file.html#extra-sandbox-paths",
"sec-conf-file": "command-ref/conf-file.html",
"env-NIX_PATH": "command-ref/env-common.html#env-NIX_PATH",
"env-common": "command-ref/env-common.html",
"envar-remote": "command-ref/env-common.html#env-NIX_REMOTE",
"sec-common-env": "command-ref/env-common.html",
"ch-files": "command-ref/files.html",
"ch-main-commands": "command-ref/main-commands.html",
"opt-out-link": "command-ref/nix-build.html#opt-out-link",
"sec-nix-build": "command-ref/nix-build.html",
"sec-nix-channel": "command-ref/nix-channel.html",
"sec-nix-collect-garbage": "command-ref/nix-collect-garbage.html",
"sec-nix-copy-closure": "command-ref/nix-copy-closure.html",
"sec-nix-daemon": "command-ref/nix-daemon.html",
"refsec-nix-env-install-examples": "command-ref/nix-env.html#examples",
"rsec-nix-env-install": "command-ref/nix-env.html#operation---install",
"rsec-nix-env-set": "command-ref/nix-env.html#operation---set",
"rsec-nix-env-set-flag": "command-ref/nix-env.html#operation---set-flag",
"rsec-nix-env-upgrade": "command-ref/nix-env.html#operation---upgrade",
"sec-nix-env": "command-ref/nix-env.html",
"ssec-version-comparisons": "command-ref/nix-env.html#versions",
"sec-nix-hash": "command-ref/nix-hash.html",
"sec-nix-instantiate": "command-ref/nix-instantiate.html",
"sec-nix-prefetch-url": "command-ref/nix-prefetch-url.html",
"sec-nix-shell": "command-ref/nix-shell.html",
"ssec-nix-shell-shebang": "command-ref/nix-shell.html#use-as-a--interpreter",
"nixref-queries": "command-ref/nix-store.html#queries",
"opt-add-root": "command-ref/nix-store.html#opt-add-root",
"refsec-nix-store-dump": "command-ref/nix-store.html#operation---dump",
"refsec-nix-store-export": "command-ref/nix-store.html#operation---export",
"refsec-nix-store-import": "command-ref/nix-store.html#operation---import",
"refsec-nix-store-query": "command-ref/nix-store.html#operation---query",
"refsec-nix-store-verify": "command-ref/nix-store.html#operation---verify",
"rsec-nix-store-gc": "command-ref/nix-store.html#operation---gc",
"rsec-nix-store-generate-binary-cache-key": "command-ref/nix-store.html#operation---generate-binary-cache-key",
"rsec-nix-store-realise": "command-ref/nix-store.html#operation---realise",
"rsec-nix-store-serve": "command-ref/nix-store.html#operation---serve",
"sec-nix-store": "command-ref/nix-store.html",
"opt-I": "command-ref/opt-common.html#opt-I",
"opt-attr": "command-ref/opt-common.html#opt-attr",
"opt-common": "command-ref/opt-common.html",
"opt-cores": "command-ref/opt-common.html#opt-cores",
"opt-log-format": "command-ref/opt-common.html#opt-log-format",
"opt-max-jobs": "command-ref/opt-common.html#opt-max-jobs",
"opt-max-silent-time": "command-ref/opt-common.html#opt-max-silent-time",
"opt-timeout": "command-ref/opt-common.html#opt-timeout",
"sec-common-options": "command-ref/opt-common.html",
"ch-utilities": "command-ref/utilities.html",
"chap-hacking": "contributing/hacking.html",
"adv-attr-allowSubstitutes": "language/advanced-attributes.html#adv-attr-allowSubstitutes",
"adv-attr-allowedReferences": "language/advanced-attributes.html#adv-attr-allowedReferences",
"adv-attr-allowedRequisites": "language/advanced-attributes.html#adv-attr-allowedRequisites",
"adv-attr-disallowedReferences": "language/advanced-attributes.html#adv-attr-disallowedReferences",
"adv-attr-disallowedRequisites": "language/advanced-attributes.html#adv-attr-disallowedRequisites",
"adv-attr-exportReferencesGraph": "language/advanced-attributes.html#adv-attr-exportReferencesGraph",
"adv-attr-impureEnvVars": "language/advanced-attributes.html#adv-attr-impureEnvVars",
"adv-attr-outputHash": "language/advanced-attributes.html#adv-attr-outputHash",
"adv-attr-outputHashAlgo": "language/advanced-attributes.html#adv-attr-outputHashAlgo",
"adv-attr-outputHashMode": "language/advanced-attributes.html#adv-attr-outputHashMode",
"adv-attr-passAsFile": "language/advanced-attributes.html#adv-attr-passAsFile",
"adv-attr-preferLocalBuild": "language/advanced-attributes.html#adv-attr-preferLocalBuild",
"fixed-output-drvs": "language/advanced-attributes.html#adv-attr-outputHash",
"sec-advanced-attributes": "language/advanced-attributes.html",
"builtin-abort": "language/builtins.html#builtins-abort",
"builtin-add": "language/builtins.html#builtins-add",
"builtin-all": "language/builtins.html#builtins-all",
"builtin-any": "language/builtins.html#builtins-any",
"builtin-attrNames": "language/builtins.html#builtins-attrNames",
"builtin-attrValues": "language/builtins.html#builtins-attrValues",
"builtin-baseNameOf": "language/builtins.html#builtins-baseNameOf",
"builtin-bitAnd": "language/builtins.html#builtins-bitAnd",
"builtin-bitOr": "language/builtins.html#builtins-bitOr",
"builtin-bitXor": "language/builtins.html#builtins-bitXor",
"builtin-builtins": "language/builtins.html#builtins-builtins",
"builtin-compareVersions": "language/builtins.html#builtins-compareVersions",
"builtin-concatLists": "language/builtins.html#builtins-concatLists",
"builtin-concatStringsSep": "language/builtins.html#builtins-concatStringsSep",
"builtin-currentSystem": "language/builtins.html#builtins-currentSystem",
"builtin-deepSeq": "language/builtins.html#builtins-deepSeq",
"builtin-derivation": "language/builtins.html#builtins-derivation",
"builtin-dirOf": "language/builtins.html#builtins-dirOf",
"builtin-div": "language/builtins.html#builtins-div",
"builtin-elem": "language/builtins.html#builtins-elem",
"builtin-elemAt": "language/builtins.html#builtins-elemAt",
"builtin-fetchGit": "language/builtins.html#builtins-fetchGit",
"builtin-fetchTarball": "language/builtins.html#builtins-fetchTarball",
"builtin-fetchurl": "language/builtins.html#builtins-fetchurl",
"builtin-filterSource": "language/builtins.html#builtins-filterSource",
"builtin-foldl-prime": "language/builtins.html#builtins-foldl-prime",
"builtin-fromJSON": "language/builtins.html#builtins-fromJSON",
"builtin-functionArgs": "language/builtins.html#builtins-functionArgs",
"builtin-genList": "language/builtins.html#builtins-genList",
"builtin-getAttr": "language/builtins.html#builtins-getAttr",
"builtin-getEnv": "language/builtins.html#builtins-getEnv",
"builtin-hasAttr": "language/builtins.html#builtins-hasAttr",
"builtin-hashFile": "language/builtins.html#builtins-hashFile",
"builtin-hashString": "language/builtins.html#builtins-hashString",
"builtin-head": "language/builtins.html#builtins-head",
"builtin-import": "language/builtins.html#builtins-import",
"builtin-intersectAttrs": "language/builtins.html#builtins-intersectAttrs",
"builtin-isAttrs": "language/builtins.html#builtins-isAttrs",
"builtin-isBool": "language/builtins.html#builtins-isBool",
"builtin-isFloat": "language/builtins.html#builtins-isFloat",
"builtin-isFunction": "language/builtins.html#builtins-isFunction",
"builtin-isInt": "language/builtins.html#builtins-isInt",
"builtin-isList": "language/builtins.html#builtins-isList",
"builtin-isNull": "language/builtins.html#builtins-isNull",
"builtin-isString": "language/builtins.html#builtins-isString",
"builtin-length": "language/builtins.html#builtins-length",
"builtin-lessThan": "language/builtins.html#builtins-lessThan",
"builtin-listToAttrs": "language/builtins.html#builtins-listToAttrs",
"builtin-map": "language/builtins.html#builtins-map",
"builtin-match": "language/builtins.html#builtins-match",
"builtin-mul": "language/builtins.html#builtins-mul",
"builtin-parseDrvName": "language/builtins.html#builtins-parseDrvName",
"builtin-path": "language/builtins.html#builtins-path",
"builtin-pathExists": "language/builtins.html#builtins-pathExists",
"builtin-placeholder": "language/builtins.html#builtins-placeholder",
"builtin-readDir": "language/builtins.html#builtins-readDir",
"builtin-readFile": "language/builtins.html#builtins-readFile",
"builtin-removeAttrs": "language/builtins.html#builtins-removeAttrs",
"builtin-replaceStrings": "language/builtins.html#builtins-replaceStrings",
"builtin-seq": "language/builtins.html#builtins-seq",
"builtin-sort": "language/builtins.html#builtins-sort",
"builtin-split": "language/builtins.html#builtins-split",
"builtin-splitVersion": "language/builtins.html#builtins-splitVersion",
"builtin-stringLength": "language/builtins.html#builtins-stringLength",
"builtin-sub": "language/builtins.html#builtins-sub",
"builtin-substring": "language/builtins.html#builtins-substring",
"builtin-tail": "language/builtins.html#builtins-tail",
"builtin-throw": "language/builtins.html#builtins-throw",
"builtin-toFile": "language/builtins.html#builtins-toFile",
"builtin-toJSON": "language/builtins.html#builtins-toJSON",
"builtin-toPath": "language/builtins.html#builtins-toPath",
"builtin-toString": "language/builtins.html#builtins-toString",
"builtin-toXML": "language/builtins.html#builtins-toXML",
"builtin-trace": "language/builtins.html#builtins-trace",
"builtin-tryEval": "language/builtins.html#builtins-tryEval",
"builtin-typeOf": "language/builtins.html#builtins-typeOf",
"ssec-builtins": "language/builtins.html",
"attr-system": "language/derivations.html#attr-system",
"ssec-derivation": "language/derivations.html",
"ch-expression-language": "language/index.html",
"sec-constructs": "language/constructs.html",
"sect-let-language": "language/constructs.html#let-language",
"ss-functions": "language/constructs.html#functions",
"sec-language-operators": "language/operators.html",
"table-operators": "language/operators.html",
"ssec-values": "language/values.html",
"gloss-closure": "glossary.html#gloss-closure",
"gloss-derivation": "glossary.html#gloss-derivation",
"gloss-deriver": "glossary.html#gloss-deriver",
"gloss-nar": "glossary.html#gloss-nar",
"gloss-output-path": "glossary.html#gloss-output-path",
"gloss-profile": "glossary.html#gloss-profile",
"gloss-reachable": "glossary.html#gloss-reachable",
"gloss-reference": "glossary.html#gloss-reference",
"gloss-substitute": "glossary.html#gloss-substitute",
"gloss-user-env": "glossary.html#gloss-user-env",
"gloss-validity": "glossary.html#gloss-validity",
"part-glossary": "glossary.html",
"sec-building-source": "installation/building-source.html",
"ch-env-variables": "installation/env-variables.html",
"sec-installer-proxy-settings": "installation/env-variables.html#proxy-environment-variables",
"sec-nix-ssl-cert-file": "installation/env-variables.html#nix_ssl_cert_file",
"sec-nix-ssl-cert-file-with-nix-daemon-and-macos": "installation/env-variables.html#nix_ssl_cert_file-with-macos-and-the-nix-daemon",
"chap-installation": "installation/installation.html",
"ch-installing-binary": "installation/installing-binary.html",
"sect-macos-installation": "installation/installing-binary.html#macos-installation",
"sect-macos-installation-change-store-prefix": "installation/installing-binary.html#macos-installation",
"sect-macos-installation-encrypted-volume": "installation/installing-binary.html#macos-installation",
"sect-macos-installation-recommended-notes": "installation/installing-binary.html#macos-installation",
"sect-macos-installation-symlink": "installation/installing-binary.html#macos-installation",
"sect-multi-user-installation": "installation/installing-binary.html#multi-user-installation",
"sect-nix-install-binary-tarball": "installation/installing-binary.html#installing-from-a-binary-tarball",
"sect-nix-install-pinned-version-url": "installation/installing-binary.html#installing-a-pinned-nix-version-from-a-url",
"sect-single-user-installation": "installation/installing-binary.html#single-user-installation",
"ch-installing-source": "installation/installing-source.html",
"ssec-multi-user": "installation/multi-user.html",
"ch-nix-security": "installation/nix-security.html",
"sec-obtaining-source": "installation/obtaining-source.html",
"sec-prerequisites-source": "installation/prerequisites-source.html",
"sec-single-user": "installation/single-user.html",
"ch-supported-platforms": "installation/supported-platforms.html",
"ch-upgrading-nix": "installation/upgrading.html",
"ch-about-nix": "introduction.html",
"chap-introduction": "introduction.html",
"ch-basic-package-mgmt": "package-management/basic-package-mgmt.html",
"ssec-binary-cache-substituter": "package-management/binary-cache-substituter.html",
"sec-channels": "package-management/channels.html",
"ssec-copy-closure": "package-management/copy-closure.html",
"sec-garbage-collection": "package-management/garbage-collection.html",
"ssec-gc-roots": "package-management/garbage-collector-roots.html",
"chap-package-management": "package-management/package-management.html",
"sec-profiles": "package-management/profiles.html",
"ssec-s3-substituter": "package-management/s3-substituter.html",
"ssec-s3-substituter-anonymous-reads": "package-management/s3-substituter.html#anonymous-reads-to-your-s3-compatible-binary-cache",
"ssec-s3-substituter-authenticated-reads": "package-management/s3-substituter.html#authenticated-reads-to-your-s3-binary-cache",
"ssec-s3-substituter-authenticated-writes": "package-management/s3-substituter.html#authenticated-writes-to-your-s3-compatible-binary-cache",
"sec-sharing-packages": "package-management/sharing-packages.html",
"ssec-ssh-substituter": "package-management/ssh-substituter.html",
"chap-quick-start": "quick-start.html",
"sec-relnotes": "release-notes/release-notes.html",
"ch-relnotes-0.10.1": "release-notes/rl-0.10.1.html",
"ch-relnotes-0.10": "release-notes/rl-0.10.html",
"ssec-relnotes-0.11": "release-notes/rl-0.11.html",
"ssec-relnotes-0.12": "release-notes/rl-0.12.html",
"ssec-relnotes-0.13": "release-notes/rl-0.13.html",
"ssec-relnotes-0.14": "release-notes/rl-0.14.html",
"ssec-relnotes-0.15": "release-notes/rl-0.15.html",
"ssec-relnotes-0.16": "release-notes/rl-0.16.html",
"ch-relnotes-0.5": "release-notes/rl-0.5.html",
"ch-relnotes-0.6": "release-notes/rl-0.6.html",
"ch-relnotes-0.7": "release-notes/rl-0.7.html",
"ch-relnotes-0.8.1": "release-notes/rl-0.8.1.html",
"ch-relnotes-0.8": "release-notes/rl-0.8.html",
"ch-relnotes-0.9.1": "release-notes/rl-0.9.1.html",
"ch-relnotes-0.9.2": "release-notes/rl-0.9.2.html",
"ch-relnotes-0.9": "release-notes/rl-0.9.html",
"ssec-relnotes-1.0": "release-notes/rl-1.0.html",
"ssec-relnotes-1.1": "release-notes/rl-1.1.html",
"ssec-relnotes-1.10": "release-notes/rl-1.10.html",
"ssec-relnotes-1.11.10": "release-notes/rl-1.11.10.html",
"ssec-relnotes-1.11": "release-notes/rl-1.11.html",
"ssec-relnotes-1.2": "release-notes/rl-1.2.html",
"ssec-relnotes-1.3": "release-notes/rl-1.3.html",
"ssec-relnotes-1.4": "release-notes/rl-1.4.html",
"ssec-relnotes-1.5.1": "release-notes/rl-1.5.1.html",
"ssec-relnotes-1.5.2": "release-notes/rl-1.5.2.html",
"ssec-relnotes-1.5": "release-notes/rl-1.5.html",
"ssec-relnotes-1.6.1": "release-notes/rl-1.6.1.html",
"ssec-relnotes-1.6.0": "release-notes/rl-1.6.html",
"ssec-relnotes-1.7": "release-notes/rl-1.7.html",
"ssec-relnotes-1.8": "release-notes/rl-1.8.html",
"ssec-relnotes-1.9": "release-notes/rl-1.9.html",
"ssec-relnotes-2.0": "release-notes/rl-2.0.html",
"ssec-relnotes-2.1": "release-notes/rl-2.1.html",
"ssec-relnotes-2.2": "release-notes/rl-2.2.html",
"ssec-relnotes-2.3": "release-notes/rl-2.3.html"
},
"language/values.html": {
"simple-values": "#primitives",
"lists": "#list",
"strings": "#string",
"lists": "#list",
"attribute-sets": "#attribute-set"
}
};
var isRoot = (document.location.pathname.endsWith('/') || document.location.pathname.endsWith('/index.html')) && path_to_root === '';
if (isRoot && redirects[document.location.hash]) {
document.location.href = path_to_root + redirects[document.location.hash];
// the following code matches the current page's URL against the set of redirects.
//
// it is written to minimize the latency between page load and redirect.
// therefore we avoid function calls, copying data, and unnecessary loops.
// IMPORTANT: we use stateful array operations and their order matters!
//
// matching URLs is more involved than it should be:
//
// 1. `document.location.pathname` can have an arbitrary prefix.
//
// 2. `path_to_root` is set by mdBook. it consists only of `../`s and
// determines the depth of `<path>` relative to the prefix:
//
// `document.location.pathname`
// |------------------------------|
// /<prefix>/<path>/[<file>[.html]][#<anchor>]
// |----|
// `path_to_root` has same number of path segments
//
// source: https://phaiax.github.io/mdBook/format/theme/index-hbs.html#data
//
// 3. the following paths are equivalent:
//
// /foo/bar/
// /foo/bar/index.html
// /foo/bar/index
//
// 4. the following paths are also equivalent:
//
// /foo/bar/baz
// /foo/bar/baz.html
//
let segments = document.location.pathname.split('/');
let file = segments.pop();
// normalize file name
if (file === '') { file = "index.html"; }
else if (!file.endsWith('.html')) { file = file + '.html'; }
segments.push(file);
// use `path_to_root` to discern prefix from path.
const depth = path_to_root.split('/').length;
// remove segments containing prefix. the following works because
// 1. the original `document.location.pathname` is absolute,
// hence first element of `segments` is always empty.
// 2. last element of splitting `path_to_root` is also always empty.
// 3. last element of `segments` is the file name.
//
// visual example:
//
// '/foo/bar/baz.html'.split('/') -> [ '', 'foo', 'bar', 'baz.html' ]
// '../'.split('/') -> [ '..', '' ]
//
// the following operations will then result in
//
// path = 'bar/baz.html'
//
segments.splice(0, segments.length - depth);
const path = segments.join('/');
// anchor starts with the hash character (`#`),
// but our redirect declarations don't, so we strip it.
// example:
// document.location.hash -> '#foo'
// document.location.hash.substring(1) -> 'foo'
const anchor = document.location.hash.substring(1);
const redirect = redirects[path];
if (redirect) {
const target = redirect[anchor];
if (target) {
document.location.href = target;
}
}


@@ -8,6 +8,6 @@
# Description
The Nix daemon is necessary in multi-user Nix installations. It performs
build actions and other operations on the Nix store on behalf of
The Nix daemon is necessary in multi-user Nix installations. It runs
build tasks and other operations on the Nix store on behalf of
unprivileged users.


@@ -71,7 +71,7 @@ paths. Realisation is a somewhat overloaded term:
outputs are already valid, in which case we are done
immediately. Otherwise, there may be [substitutes](../glossary.md)
that produce the outputs (e.g., by downloading them). Finally, the
outputs can be produced by performing the build action described
outputs can be produced by running the build task described
by the derivation.
- If the store path is not a derivation, realisation ensures that the


@@ -1,7 +1,7 @@
# Glossary
- [derivation]{#gloss-derivation}\
A description of a build action. The result of a derivation is a
A description of a build task. The result of a derivation is a
store object. Derivations are typically specified in Nix expressions
using the [`derivation` primitive](language/derivations.md). These are
translated into low-level *store derivations* (implicitly by
@@ -53,8 +53,8 @@
A file that is an immediate child of the Nix store directory. These
can be regular files, but also entire directory trees. Store objects
can be sources (objects copied from outside of the store),
derivation outputs (objects produced by running a build action), or
derivations (files describing a build action).
derivation outputs (objects produced by running a build task), or
derivations (files describing a build task).
- [input-addressed store object]{#gloss-input-addressed-store-object}\
A store object produced by building a


@@ -88,10 +88,24 @@ extension. The installer will also create `/etc/profile.d/nix.sh`.
### Linux
```console
sudo rm -rf /etc/profile/nix.sh /etc/nix /nix ~root/.nix-profile ~root/.nix-defexpr ~root/.nix-channels ~/.nix-profile ~/.nix-defexpr ~/.nix-channels
Remove files created by Nix:
# If you are on Linux with systemd, you will need to run:
```console
sudo rm -rf /nix /etc/nix /etc/profile/nix.sh ~root/.nix-profile ~root/.nix-defexpr ~root/.nix-channels ~/.nix-profile ~/.nix-defexpr ~/.nix-channels
```
Remove build users and their group:
```console
for i in $(seq 30001 30032); do
sudo userdel $i
done
sudo groupdel 30000
```
If you are on Linux with systemd, remove the Nix daemon service:
```console
sudo systemctl stop nix-daemon.socket
sudo systemctl stop nix-daemon.service
sudo systemctl disable nix-daemon.socket
@@ -99,8 +113,13 @@ sudo systemctl disable nix-daemon.service
sudo systemctl daemon-reload
```
There may also be references to Nix in `/etc/profile`, `/etc/bashrc`,
and `/etc/zshrc` which you may remove.
There may also be references to Nix in
- `/etc/profile`
- `/etc/bashrc`
- `/etc/zshrc`
which you may remove.
### macOS


@@ -104,7 +104,7 @@ a currently running program.
Packages are built from _Nix expressions_, which is a simple
functional language. A Nix expression describes everything that goes
into a package build action (a “derivation”): other packages, sources,
into a package build task (a “derivation”): other packages, sources,
the build script, environment variables for the build script, etc.
Nix tries very hard to ensure that Nix expressions are
_deterministic_: building a Nix expression twice should yield the same


@@ -1,7 +1,7 @@
# Derivations
The most important built-in function is `derivation`, which is used to
describe a single derivation (a build action). It takes as input a set,
describe a single derivation (a build task). It takes as input a set,
the attributes of which specify the inputs of the build.
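
As a hedged, minimal sketch of such a call (the name, system value, builder path, and arguments below are placeholders chosen for this illustration, not taken from this page):

```nix
# illustrative only: a smallest-possible call to the derivation primitive
derivation {
  name = "example";
  system = "x86_64-linux";            # placeholder; must match the building machine
  builder = "/bin/sh";                 # placeholder builder
  args = [ "-c" "echo hello > $out" ]; # writes the single output path
}
```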
- There must be an attribute named [`system`]{#attr-system} whose value must be a


@@ -31,3 +31,551 @@ The Nix language is
Type errors are only detected when expressions are evaluated.
# Overview
This is an incomplete overview of language features, by example.
<table>
<tr>
<th>
Example
</th>
<th>
Description
</th>
</tr>
<tr>
<td>
*Basic values*
</td>
<td>
</td>
</tr>
<tr>
<td>
`"hello world"`
</td>
<td>
A string
</td>
</tr>
<tr>
<td>
```
''
multi
line
string
''
```
</td>
<td>
A multi-line string. Strips common prefixed whitespace. Evaluates to `"multi\n line\n string"`.
</td>
</tr>
<tr>
<td>
`"hello ${ { a = "world" }.a }"`
`"1 2 ${toString 3}"`
`"${pkgs.bash}/bin/sh"`
</td>
<td>
String interpolation (expands to `"hello world"`, `"1 2 3"`, `"/nix/store/<hash>-bash-<version>/bin/sh"`)
</td>
</tr>
<tr>
<td>
`true`, `false`
</td>
<td>
Booleans
</td>
</tr>
<tr>
<td>
`null`
</td>
<td>
Null value
</td>
</tr>
<tr>
<td>
`123`
</td>
<td>
An integer
</td>
</tr>
<tr>
<td>
`3.141`
</td>
<td>
A floating point number
</td>
</tr>
<tr>
<td>
`/etc`
</td>
<td>
An absolute path
</td>
</tr>
<tr>
<td>
`./foo.png`
</td>
<td>
A path relative to the file containing this Nix expression
</td>
</tr>
<tr>
<td>
`~/.config`
</td>
<td>
A home path. Evaluates to `"<user's home directory>/.config"`.
</td>
</tr>
<tr>
<td>
`<nixpkgs>`
</td>
<td>
Search path. Value determined by [`$NIX_PATH` environment variable](../command-ref/env-common.md#env-NIX_PATH).
</td>
</tr>
<tr>
<td>
*Compound values*
</td>
<td>
</td>
</tr>
<tr>
<td>
`{ x = 1; y = 2; }`
</td>
<td>
A set with attributes named `x` and `y`
</td>
</tr>
<tr>
<td>
`{ foo.bar = 1; }`
</td>
<td>
A nested set, equivalent to `{ foo = { bar = 1; }; }`
</td>
</tr>
<tr>
<td>
`rec { x = "foo"; y = x + "bar"; }`
</td>
<td>
A recursive set, equivalent to `{ x = "foo"; y = "foobar"; }`
</td>
</tr>
<tr>
<td>
`[ "foo" "bar" "baz" ]`
`[ 1 2 3 ]`
`[ (f 1) { a = 1; b = 2; } [ "c" ] ]`
</td>
<td>
Lists with three elements.
</td>
</tr>
<tr>
<td>
*Operators*
</td>
<td>
</td>
</tr>
<tr>
<td>
`"foo" + "bar"`
</td>
<td>
String concatenation
</td>
</tr>
<tr>
<td>
`1 + 2`
</td>
<td>
Integer addition
</td>
</tr>
<tr>
<td>
`"foo" == "f" + "oo"`
</td>
<td>
Equality test (evaluates to `true`)
</td>
</tr>
<tr>
<td>
`"foo" != "bar"`
</td>
<td>
Inequality test (evaluates to `true`)
</td>
</tr>
<tr>
<td>
`!true`
</td>
<td>
Boolean negation
</td>
</tr>
<tr>
<td>
`{ x = 1; y = 2; }.x`
</td>
<td>
Attribute selection (evaluates to `1`)
</td>
</tr>
<tr>
<td>
`{ x = 1; y = 2; }.z or 3`
</td>
<td>
Attribute selection with default (evaluates to `3`)
</td>
</tr>
<tr>
<td>
`{ x = 1; y = 2; } // { z = 3; }`
</td>
<td>
Merge two sets (attributes in the right-hand set taking precedence)
</td>
</tr>
<tr>
<td>
*Control structures*
</td>
<td>
</td>
</tr>
<tr>
<td>
`if 1 + 1 == 2 then "yes!" else "no!"`
</td>
<td>
Conditional expression
</td>
</tr>
<tr>
<td>
`assert 1 + 1 == 2; "yes!"`
</td>
<td>
Assertion check (evaluates to `"yes!"`).
</td>
</tr>
<tr>
<td>
`let x = "foo"; y = "bar"; in x + y`
</td>
<td>
Variable definition
</td>
</tr>
<tr>
<td>
`with builtins; head [ 1 2 3 ]`
</td>
<td>
Add all attributes from the given set to the scope (evaluates to `1`)
</td>
</tr>
<tr>
<td>
*Functions (lambdas)*
</td>
<td>
</td>
</tr>
<tr>
<td>
`x: x + 1`
</td>
<td>
A function that expects an integer and returns it increased by 1
</td>
</tr>
<tr>
<td>
`x: y: x + y`
</td>
<td>
Curried function, equivalent to `x: (y: x + y)`. Can be used like a function that takes two arguments and returns their sum.
</td>
</tr>
<tr>
<td>
`(x: x + 1) 100`
</td>
<td>
A function call (evaluates to 101)
</td>
</tr>
<tr>
<td>
`let inc = x: x + 1; in inc (inc (inc 100))`
</td>
<td>
A function bound to a variable and subsequently called by name (evaluates to 103)
</td>
</tr>
<tr>
<td>
`{ x, y }: x + y`
</td>
<td>
A function that expects a set with required attributes `x` and `y` and concatenates them
</td>
</tr>
<tr>
<td>
`{ x, y ? "bar" }: x + y`
</td>
<td>
A function that expects a set with required attribute `x` and optional `y`, using `"bar"` as default value for `y`
</td>
</tr>
<tr>
<td>
`{ x, y, ... }: x + y`
</td>
<td>
A function that expects a set with required attributes `x` and `y` and ignores any other attributes
</td>
</tr>
<tr>
<td>
`{ x, y } @ args: x + y`
`args @ { x, y }: x + y`
</td>
<td>
A function that expects a set with required attributes `x` and `y`, and binds the whole set to `args`
</td>
</tr>
<tr>
<td>
*Built-in functions*
</td>
<td>
</td>
</tr>
<tr>
<td>
`import ./foo.nix`
</td>
<td>
Load and return the Nix expression in the given file
</td>
</tr>
<tr>
<td>
`map (x: x + x) [ 1 2 3 ]`
</td>
<td>
Apply a function to every element of a list (evaluates to `[ 2 4 6 ]`)
</td>
</tr>
</table>

View file

@ -150,6 +150,20 @@
recognized as a path. `a.${foo}/b.${bar}` is a syntactically valid division
operation. `./a.${foo}/b.${bar}` is a path.
When a path appears in an antiquotation, and is thus coerced into a string,
the path is first copied into the Nix store and the resulting string is
the Nix store path. For instance `"${./foo.txt}"` will cause `foo.txt` in
the current directory to be copied into the Nix store and result in the
string `"/nix/store/<HASH>-foo.txt"`.
Note that the Nix language assumes that all input files will remain
_unchanged_ during the course of the Nix expression evaluation.
For example, if you antiquote a file path during a `nix repl` session, then
change the file's contents and evaluate the antiquotation with that path
again later in the same session, Nix will still return the first store path.
It will _not_ reread the file contents to produce a different Nix store path.
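A minimal, illustrative sketch of this behaviour (the store hash shown is a
placeholder; the actual value depends on the file's contents):
```
let
  # Antiquoting the path copies ./foo.txt into the Nix store.
  fooPath = "${./foo.txt}";
in
  # Evaluates to a string like "/nix/store/<HASH>-foo.txt".
  fooPath
```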
- <a id="type-boolean" href="#type-boolean">Boolean</a>
*Booleans* with values `true` and `false`.

View file

@ -5,3 +5,50 @@
arguments will be ignored and the resulting derivation will have
`__impure` set to `true`, making it an impure derivation.
* If `builtins.readFile` is called on a file with context, then only the parts
of that context that appear in the content of the file are retained.
This avoids a lot of spurious errors where some benign strings end up having
a context just because they are read from a store path
([#7260](https://github.com/NixOS/nix/pull/7260)).
* Nix can now automatically pick UIDs for builds, removing the need to
create `nixbld*` user accounts. These UIDs are allocated starting at
872415232 (0x34000000) on Linux and 56930 on macOS.
This is an experimental feature. To enable it, add the following to
`nix.conf`:
```
extra-experimental-features = auto-allocate-uids
auto-allocate-uids = true
```
* On Linux, Nix can now run builds in a user namespace where the build
runs as root (UID 0) and has 65,536 UIDs available. This is
primarily useful for running containers such as `systemd-nspawn`
inside a Nix build. For an example, see
https://github.com/NixOS/nix/blob/67bcb99700a0da1395fa063d7c6586740b304598/tests/systemd-nspawn.nix.
A build can enable this by requiring the `uid-range` system feature,
i.e. by setting the derivation attribute
```
requiredSystemFeatures = [ "uid-range" ];
```
The `uid-range` system feature requires the `auto-allocate-uids`
setting to be enabled (see above).
* On Linux, Nix has experimental support for running builds inside a
cgroup. It can be enabled by adding
```
extra-experimental-features = cgroups
use-cgroups = true
```
to `nix.conf`. Cgroups are required for derivations that require the
`uid-range` system feature.
* `nix build --json` now prints some statistics about top-level
derivations, such as CPU statistics when cgroups are enabled.
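Tying the `uid-range` and `auto-allocate-uids` notes above together, a
minimal, untested sketch of a derivation that opts into the feature (the
builder script and attribute values are illustrative, and the settings shown
above must be enabled in `nix.conf`):
```
derivation {
  name = "uid-range-demo";
  system = "x86_64-linux";
  builder = "/bin/sh";
  # Inside the sandbox's user namespace the build runs as root (UID 0)
  # and has 65,536 UIDs available, e.g. for systemd-nspawn containers.
  args = [ "-c" "echo ok > $out" ];
  requiredSystemFeatures = [ "uid-range" ];
}
```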

View file

@ -36,6 +36,17 @@ let
shell = "${pkgs.bashInteractive}/bin/bash";
home = "/root";
gid = 0;
groups = [ "root" ];
description = "System administrator";
};
nobody = {
uid = 65534;
shell = "${pkgs.shadow}/bin/nologin";
home = "/var/empty";
gid = 65534;
groups = [ "nobody" ];
description = "Unprivileged account (don't use!)";
};
} // lib.listToAttrs (
@ -57,6 +68,7 @@ let
groups = {
root.gid = 0;
nixbld.gid = 30000;
nobody.gid = 65534;
};
userToPasswd = (

View file

@ -506,6 +506,12 @@
overlay = self.overlays.default;
});
tests.containers = (import ./tests/containers.nix rec {
system = "x86_64-linux";
inherit nixpkgs;
overlay = self.overlays.default;
});
tests.setuid = nixpkgs.lib.genAttrs
["i686-linux" "x86_64-linux"]
(system:

View file

@ -58,7 +58,7 @@ readonly EXTRACTED_NIX_PATH="$(dirname "$0")"
readonly ROOT_HOME=~root
if [ -t 0 ]; then
if [ -t 0 ] && [ -z "${NIX_INSTALLER_YES:-}" ]; then
readonly IS_HEADLESS='no'
else
readonly IS_HEADLESS='yes'

View file

@ -71,6 +71,8 @@ while [ $# -gt 0 ]; do
# # intentional tail space
# ACTIONS="${ACTIONS}uninstall "
# ;;
--yes)
export NIX_INSTALLER_YES=1;;
--no-channel-add)
export NIX_INSTALLER_NO_CHANNEL_ADD=1;;
--daemon-user-count)
@ -90,7 +92,7 @@ while [ $# -gt 0 ]; do
shift;;
*)
{
echo "Nix Installer [--daemon|--no-daemon] [--daemon-user-count INT] [--no-channel-add] [--no-modify-profile] [--nix-extra-conf-file FILE]"
echo "Nix Installer [--daemon|--no-daemon] [--daemon-user-count INT] [--yes] [--no-channel-add] [--no-modify-profile] [--nix-extra-conf-file FILE]"
echo "Choose installation method."
echo ""
@ -104,6 +106,8 @@ while [ $# -gt 0 ]; do
echo " trivial to uninstall."
echo " (default)"
echo ""
echo " --yes: Run the script non-interactively, accepting all prompts."
echo ""
echo " --no-channel-add: Don't add any channels. nixpkgs-unstable is installed by default."
echo ""
echo " --no-modify-profile: Don't modify the user profile to automatically load nix."

View file

@ -186,12 +186,12 @@ static int main_build_remote(int argc, char * * argv)
// build the hint template.
std::string errorText =
"Failed to find a machine for remote build!\n"
"derivation: %s\nrequired (system, features): (%s, %s)";
"derivation: %s\nrequired (system, features): (%s, [%s])";
errorText += "\n%s available machines:";
errorText += "\n(systems, maxjobs, supportedFeatures, mandatoryFeatures)";
for (unsigned int i = 0; i < machines.size(); ++i)
errorText += "\n(%s, %s, %s, %s)";
errorText += "\n([%s], %s, [%s], [%s])";
// add the template values.
std::string drvstr;

View file

@ -226,7 +226,7 @@ MixProfile::MixProfile()
{
addFlag({
.longName = "profile",
.description = "The profile to update.",
.description = "The profile to operate on.",
.labels = {"path"},
.handler = {&profile},
.completer = completePath

View file

@ -270,6 +270,7 @@ Strings SourceExprCommand::getDefaultFlakeAttrPathPrefixes()
void SourceExprCommand::completeInstallable(std::string_view prefix)
{
try {
if (file) {
completionType = ctAttrs;
@ -320,6 +321,9 @@ void SourceExprCommand::completeInstallable(std::string_view prefix)
getDefaultFlakeAttrPaths(),
prefix);
}
} catch (EvalError&) {
// Don't want eval errors to mess-up with the completion engine, so let's just swallow them
}
}
void completeFlakeRefWithFragment(
@ -1040,20 +1044,20 @@ std::shared_ptr<Installable> SourceExprCommand::parseInstallable(
return installables.front();
}
BuiltPaths Installable::build(
std::vector<BuiltPathWithResult> Installable::build(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
const std::vector<std::shared_ptr<Installable>> & installables,
BuildMode bMode)
{
BuiltPaths res;
for (auto & [_, builtPath] : build2(evalStore, store, mode, installables, bMode))
res.push_back(builtPath);
std::vector<BuiltPathWithResult> res;
for (auto & [_, builtPathWithResult] : build2(evalStore, store, mode, installables, bMode))
res.push_back(builtPathWithResult);
return res;
}
std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> Installable::build2(
std::vector<std::pair<std::shared_ptr<Installable>, BuiltPathWithResult>> Installable::build2(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
@ -1073,7 +1077,7 @@ std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> Installable::bui
}
}
std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> res;
std::vector<std::pair<std::shared_ptr<Installable>, BuiltPathWithResult>> res;
switch (mode) {
@ -1114,10 +1118,10 @@ std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> Installable::bui
output, *drvOutput->second);
}
}
res.push_back({installable, BuiltPath::Built { bfd.drvPath, outputs }});
res.push_back({installable, {.path = BuiltPath::Built { bfd.drvPath, outputs }}});
},
[&](const DerivedPath::Opaque & bo) {
res.push_back({installable, BuiltPath::Opaque { bo.path }});
res.push_back({installable, {.path = BuiltPath::Opaque { bo.path }}});
},
}, path.raw());
}
@ -1139,10 +1143,10 @@ std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> Installable::bui
std::map<std::string, StorePath> outputs;
for (auto & path : buildResult.builtOutputs)
outputs.emplace(path.first.outputName, path.second.outPath);
res.push_back({installable, BuiltPath::Built { bfd.drvPath, outputs }});
res.push_back({installable, {.path = BuiltPath::Built { bfd.drvPath, outputs }, .result = buildResult}});
},
[&](const DerivedPath::Opaque & bo) {
res.push_back({installable, BuiltPath::Opaque { bo.path }});
res.push_back({installable, {.path = BuiltPath::Opaque { bo.path }, .result = buildResult}});
},
}, buildResult.path.raw());
}
@ -1165,9 +1169,12 @@ BuiltPaths Installable::toBuiltPaths(
OperateOn operateOn,
const std::vector<std::shared_ptr<Installable>> & installables)
{
if (operateOn == OperateOn::Output)
return Installable::build(evalStore, store, mode, installables);
else {
if (operateOn == OperateOn::Output) {
BuiltPaths res;
for (auto & p : Installable::build(evalStore, store, mode, installables))
res.push_back(p.path);
return res;
} else {
if (mode == Realise::Nothing)
settings.readOnlyMode = true;

View file

@ -7,6 +7,7 @@
#include "eval.hh"
#include "store-api.hh"
#include "flake/flake.hh"
#include "build-result.hh"
#include <optional>
@ -51,6 +52,12 @@ enum class OperateOn {
Derivation
};
struct BuiltPathWithResult
{
BuiltPath path;
std::optional<BuildResult> result;
};
struct Installable
{
virtual ~Installable() { }
@ -91,14 +98,14 @@ struct Installable
return FlakeRef::fromAttrs({{"type","indirect"}, {"id", "nixpkgs"}});
}
static BuiltPaths build(
static std::vector<BuiltPathWithResult> build(
ref<Store> evalStore,
ref<Store> store,
Realise mode,
const std::vector<std::shared_ptr<Installable>> & installables,
BuildMode bMode = bmNormal);
static std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> build2(
static std::vector<std::pair<std::shared_ptr<Installable>, BuiltPathWithResult>> build2(
ref<Store> evalStore,
ref<Store> store,
Realise mode,

View file

@ -8,7 +8,7 @@ libcmd_SOURCES := $(wildcard $(d)/*.cc)
libcmd_CXXFLAGS += -I src/libutil -I src/libstore -I src/libexpr -I src/libmain -I src/libfetchers -I src/nix
libcmd_LDFLAGS = $(EDITLINE_LIBS) -llowdown -pthread
libcmd_LDFLAGS = $(EDITLINE_LIBS) $(LOWDOWN_LIBS) -pthread
libcmd_LIBS = libstore libutil libexpr libmain libfetchers

View file

@ -270,6 +270,7 @@ void NixRepl::mainLoop()
// ctrl-D should exit the debugger.
state->debugStop = false;
state->debugQuit = true;
logger->cout("");
break;
}
try {
@ -384,6 +385,10 @@ StringSet NixRepl::completePrefix(const std::string & prefix)
i++;
}
} else {
/* Temporarily disable the debugger, to avoid re-entering readline. */
auto debug_repl = state->debugRepl;
state->debugRepl = nullptr;
Finally restoreDebug([&]() { state->debugRepl = debug_repl; });
try {
/* This is an expression that should evaluate to an
attribute set. Evaluate it to get the names of the

View file

@ -7,7 +7,6 @@
#include "globals.hh"
#include "eval-inline.hh"
#include "filetransfer.hh"
#include "json.hh"
#include "function-trace.hh"
#include <algorithm>
@ -21,6 +20,7 @@
#include <functional>
#include <sys/resource.h>
#include <nlohmann/json.hpp>
#if HAVE_BOEHMGC
@ -35,6 +35,8 @@
#endif
using json = nlohmann::json;
namespace nix {
static char * allocString(size_t size)
@ -69,15 +71,11 @@ static char * dupString(const char * s)
// empty string.
static const char * makeImmutableStringWithLen(const char * s, size_t size)
{
char * t;
if (size == 0)
return "";
#if HAVE_BOEHMGC
t = GC_STRNDUP(s, size);
#else
t = strndup(s, size);
#endif
if (!t) throw std::bad_alloc();
auto t = allocString(size + 1);
memcpy(t, s, size);
t[size] = 0;
return t;
}
@ -904,7 +902,7 @@ void EvalState::throwEvalError(const char * s, const std::string & s2,
const std::string & s3)
{
debugThrowLastTrace(EvalError({
.msg = hintfmt(s, s2),
.msg = hintfmt(s, s2, s3),
.errPos = positions[noPos]
}));
}
@ -913,7 +911,7 @@ void EvalState::throwEvalError(const PosIdx pos, const char * s, const std::stri
const std::string & s3)
{
debugThrowLastTrace(EvalError({
.msg = hintfmt(s, s2),
.msg = hintfmt(s, s2, s3),
.errPos = positions[pos]
}));
}
@ -922,7 +920,7 @@ void EvalState::throwEvalError(const PosIdx pos, const char * s, const std::stri
const std::string & s3, Env & env, Expr & expr)
{
debugThrow(EvalError({
.msg = hintfmt(s, s2),
.msg = hintfmt(s, s2, s3),
.errPos = positions[pos]
}), env, expr);
}
@ -2441,97 +2439,97 @@ void EvalState::printStats()
std::fstream fs;
if (outPath != "-")
fs.open(outPath, std::fstream::out);
JSONObject topObj(outPath == "-" ? std::cerr : fs, true);
topObj.attr("cpuTime",cpuTime);
{
auto envs = topObj.object("envs");
envs.attr("number", nrEnvs);
envs.attr("elements", nrValuesInEnvs);
envs.attr("bytes", bEnvs);
}
{
auto lists = topObj.object("list");
lists.attr("elements", nrListElems);
lists.attr("bytes", bLists);
lists.attr("concats", nrListConcats);
}
{
auto values = topObj.object("values");
values.attr("number", nrValues);
values.attr("bytes", bValues);
}
{
auto syms = topObj.object("symbols");
syms.attr("number", symbols.size());
syms.attr("bytes", symbols.totalSize());
}
{
auto sets = topObj.object("sets");
sets.attr("number", nrAttrsets);
sets.attr("bytes", bAttrsets);
sets.attr("elements", nrAttrsInAttrsets);
}
{
auto sizes = topObj.object("sizes");
sizes.attr("Env", sizeof(Env));
sizes.attr("Value", sizeof(Value));
sizes.attr("Bindings", sizeof(Bindings));
sizes.attr("Attr", sizeof(Attr));
}
topObj.attr("nrOpUpdates", nrOpUpdates);
topObj.attr("nrOpUpdateValuesCopied", nrOpUpdateValuesCopied);
topObj.attr("nrThunks", nrThunks);
topObj.attr("nrAvoided", nrAvoided);
topObj.attr("nrLookups", nrLookups);
topObj.attr("nrPrimOpCalls", nrPrimOpCalls);
topObj.attr("nrFunctionCalls", nrFunctionCalls);
json topObj = json::object();
topObj["cpuTime"] = cpuTime;
topObj["envs"] = {
{"number", nrEnvs},
{"elements", nrValuesInEnvs},
{"bytes", bEnvs},
};
topObj["list"] = {
{"elements", nrListElems},
{"bytes", bLists},
{"concats", nrListConcats},
};
topObj["values"] = {
{"number", nrValues},
{"bytes", bValues},
};
topObj["symbols"] = {
{"number", symbols.size()},
{"bytes", symbols.totalSize()},
};
topObj["sets"] = {
{"number", nrAttrsets},
{"bytes", bAttrsets},
{"elements", nrAttrsInAttrsets},
};
topObj["sizes"] = {
{"Env", sizeof(Env)},
{"Value", sizeof(Value)},
{"Bindings", sizeof(Bindings)},
{"Attr", sizeof(Attr)},
};
topObj["nrOpUpdates"] = nrOpUpdates;
topObj["nrOpUpdateValuesCopied"] = nrOpUpdateValuesCopied;
topObj["nrThunks"] = nrThunks;
topObj["nrAvoided"] = nrAvoided;
topObj["nrLookups"] = nrLookups;
topObj["nrPrimOpCalls"] = nrPrimOpCalls;
topObj["nrFunctionCalls"] = nrFunctionCalls;
#if HAVE_BOEHMGC
{
auto gc = topObj.object("gc");
gc.attr("heapSize", heapSize);
gc.attr("totalBytes", totalBytes);
}
topObj["gc"] = {
{"heapSize", heapSize},
{"totalBytes", totalBytes},
};
#endif
if (countCalls) {
topObj["primops"] = primOpCalls;
{
auto obj = topObj.object("primops");
for (auto & i : primOpCalls)
obj.attr(i.first, i.second);
}
{
auto list = topObj.list("functions");
auto& list = topObj["functions"];
list = json::array();
for (auto & [fun, count] : functionCalls) {
auto obj = list.object();
json obj = json::object();
if (fun->name)
obj.attr("name", (std::string_view) symbols[fun->name]);
obj["name"] = (std::string_view) symbols[fun->name];
else
obj.attr("name", nullptr);
obj["name"] = nullptr;
if (auto pos = positions[fun->pos]) {
obj.attr("file", (std::string_view) pos.file);
obj.attr("line", pos.line);
obj.attr("column", pos.column);
obj["file"] = (std::string_view) pos.file;
obj["line"] = pos.line;
obj["column"] = pos.column;
}
obj.attr("count", count);
obj["count"] = count;
list.push_back(obj);
}
}
{
auto list = topObj.list("attributes");
auto list = topObj["attributes"];
list = json::array();
for (auto & i : attrSelects) {
auto obj = list.object();
json obj = json::object();
if (auto pos = positions[i.first]) {
obj.attr("file", (const std::string &) pos.file);
obj.attr("line", pos.line);
obj.attr("column", pos.column);
obj["file"] = (const std::string &) pos.file;
obj["line"] = pos.line;
obj["column"] = pos.column;
}
obj.attr("count", i.second);
obj["count"] = i.second;
list.push_back(obj);
}
}
}
if (getEnv("NIX_SHOW_SYMBOLS").value_or("0") != "0") {
auto list = topObj.list("symbols");
symbols.dump([&](const std::string & s) { list.elem(s); });
// XXX: overrides earlier assignment
topObj["symbols"] = json::array();
auto &list = topObj["symbols"];
symbols.dump([&](const std::string & s) { list.emplace_back(s); });
}
if (outPath == "-") {
std::cerr << topObj.dump(2) << std::endl;
} else {
fs << topObj.dump(2) << std::endl;
}
}
}

View file

@ -43,7 +43,7 @@ let
outputs = flake.outputs (inputs // { self = result; });
result = outputs // sourceInfo // { inherit inputs; inherit outputs; inherit sourceInfo; };
result = outputs // sourceInfo // { inherit inputs; inherit outputs; inherit sourceInfo; _type = "flake"; };
in
if node.flake or true then
assert builtins.isFunction flake.outputs;

View file

@ -5,14 +5,15 @@
#include "globals.hh"
#include "json-to-value.hh"
#include "names.hh"
#include "references.hh"
#include "store-api.hh"
#include "util.hh"
#include "json.hh"
#include "value-to-json.hh"
#include "value-to-xml.hh"
#include "primops.hh"
#include <boost/container/small_vector.hpp>
#include <nlohmann/json.hpp>
#include <sys/types.h>
#include <sys/stat.h>
@ -1010,6 +1011,7 @@ static void prim_second(EvalState & state, const PosIdx pos, Value * * args, Val
derivation. */
static void prim_derivationStrict(EvalState & state, const PosIdx pos, Value * * args, Value & v)
{
using nlohmann::json;
state.forceAttrs(*args[0], pos);
/* Figure out the name first (for stack backtraces). */
@ -1031,11 +1033,10 @@ static void prim_derivationStrict(EvalState & state, const PosIdx pos, Value * *
}
/* Check whether attributes should be passed as a JSON file. */
std::ostringstream jsonBuf;
std::unique_ptr<JSONObject> jsonObject;
std::optional<json> jsonObject;
attr = args[0]->attrs->find(state.sStructuredAttrs);
if (attr != args[0]->attrs->end() && state.forceBool(*attr->value, pos))
jsonObject = std::make_unique<JSONObject>(jsonBuf);
jsonObject = json::object();
/* Check whether null attributes should be ignored. */
bool ignoreNulls = false;
@ -1137,8 +1138,7 @@ static void prim_derivationStrict(EvalState & state, const PosIdx pos, Value * *
if (i->name == state.sStructuredAttrs) continue;
auto placeholder(jsonObject->placeholder(key));
printValueAsJSON(state, true, *i->value, pos, placeholder, context);
(*jsonObject)[key] = printValueAsJSON(state, true, *i->value, pos, context);
if (i->name == state.sBuilder)
drv.builder = state.forceString(*i->value, context, posDrvName);
@ -1182,8 +1182,8 @@ static void prim_derivationStrict(EvalState & state, const PosIdx pos, Value * *
}
if (jsonObject) {
drv.env.emplace("__json", jsonObject->dump());
jsonObject.reset();
drv.env.emplace("__json", jsonBuf.str());
}
/* Everything in the context of the strings in the derivation
@ -1542,6 +1542,10 @@ static void prim_readFile(EvalState & state, const PosIdx pos, Value * * args, V
refs = state.store->queryPathInfo(state.store->toStorePath(path).first)->references;
} catch (Error &) { // FIXME: should be InvalidPathError
}
// Re-scan references to filter down to just the ones that actually occur in the file.
auto refsSink = PathRefScanSink::fromPaths(refs);
refsSink << s;
refs = refsSink.getResultPaths();
}
auto context = state.store->printStorePathSet(refs);
v.mkString(s, context);

View file

@ -1,84 +1,82 @@
#include "value-to-json.hh"
#include "json.hh"
#include "eval-inline.hh"
#include "util.hh"
#include <cstdlib>
#include <iomanip>
#include <nlohmann/json.hpp>
namespace nix {
void printValueAsJSON(EvalState & state, bool strict,
Value & v, const PosIdx pos, JSONPlaceholder & out, PathSet & context, bool copyToStore)
using json = nlohmann::json;
json printValueAsJSON(EvalState & state, bool strict,
Value & v, const PosIdx pos, PathSet & context, bool copyToStore)
{
checkInterrupt();
if (strict) state.forceValue(v, pos);
json out;
switch (v.type()) {
case nInt:
out.write(v.integer);
out = v.integer;
break;
case nBool:
out.write(v.boolean);
out = v.boolean;
break;
case nString:
copyContext(v, context);
out.write(v.string.s);
out = v.string.s;
break;
case nPath:
if (copyToStore)
out.write(state.copyPathToStore(context, v.path));
out = state.copyPathToStore(context, v.path);
else
out.write(v.path);
out = v.path;
break;
case nNull:
out.write(nullptr);
break;
case nAttrs: {
auto maybeString = state.tryAttrsToString(pos, v, context, false, false);
if (maybeString) {
out.write(*maybeString);
out = *maybeString;
break;
}
auto i = v.attrs->find(state.sOutPath);
if (i == v.attrs->end()) {
auto obj(out.object());
out = json::object();
StringSet names;
for (auto & j : *v.attrs)
names.emplace(state.symbols[j.name]);
for (auto & j : names) {
Attr & a(*v.attrs->find(state.symbols.create(j)));
auto placeholder(obj.placeholder(j));
printValueAsJSON(state, strict, *a.value, a.pos, placeholder, context, copyToStore);
out[j] = printValueAsJSON(state, strict, *a.value, a.pos, context, copyToStore);
}
} else
printValueAsJSON(state, strict, *i->value, i->pos, out, context, copyToStore);
return printValueAsJSON(state, strict, *i->value, i->pos, context, copyToStore);
break;
}
case nList: {
auto list(out.list());
for (auto elem : v.listItems()) {
auto placeholder(list.placeholder());
printValueAsJSON(state, strict, *elem, pos, placeholder, context, copyToStore);
}
out = json::array();
for (auto elem : v.listItems())
out.push_back(printValueAsJSON(state, strict, *elem, pos, context, copyToStore));
break;
}
case nExternal:
v.external->printValueAsJSON(state, strict, out, context, copyToStore);
return v.external->printValueAsJSON(state, strict, context, copyToStore);
break;
case nFloat:
out.write(v.fpoint);
out = v.fpoint;
break;
case nThunk:
@ -91,17 +89,17 @@ void printValueAsJSON(EvalState & state, bool strict,
state.debugThrowLastTrace(e);
throw e;
}
return out;
}
void printValueAsJSON(EvalState & state, bool strict,
Value & v, const PosIdx pos, std::ostream & str, PathSet & context, bool copyToStore)
{
JSONPlaceholder out(str);
printValueAsJSON(state, strict, v, pos, out, context, copyToStore);
str << printValueAsJSON(state, strict, v, pos, context, copyToStore);
}
void ExternalValueBase::printValueAsJSON(EvalState & state, bool strict,
JSONPlaceholder & out, PathSet & context, bool copyToStore) const
json ExternalValueBase::printValueAsJSON(EvalState & state, bool strict,
PathSet & context, bool copyToStore) const
{
state.debugThrowLastTrace(TypeError("cannot convert %1% to JSON", showType()));
}

View file

@ -5,13 +5,12 @@
#include <string>
#include <map>
#include <nlohmann/json_fwd.hpp>
namespace nix {
class JSONPlaceholder;
void printValueAsJSON(EvalState & state, bool strict,
Value & v, const PosIdx pos, JSONPlaceholder & out, PathSet & context, bool copyToStore = true);
nlohmann::json printValueAsJSON(EvalState & state, bool strict,
Value & v, const PosIdx pos, PathSet & context, bool copyToStore = true);
void printValueAsJSON(EvalState & state, bool strict,
Value & v, const PosIdx pos, std::ostream & str, PathSet & context, bool copyToStore = true);

View file

@ -7,6 +7,7 @@
#if HAVE_BOEHMGC
#include <gc/gc_allocator.h>
#endif
#include <nlohmann/json_fwd.hpp>
namespace nix {
@ -62,7 +63,6 @@ class StorePath;
class Store;
class EvalState;
class XMLWriter;
class JSONPlaceholder;
typedef int64_t NixInt;
@ -98,8 +98,8 @@ class ExternalValueBase
virtual bool operator ==(const ExternalValueBase & b) const;
/* Print the value as JSON. Defaults to unconvertable, i.e. throws an error */
virtual void printValueAsJSON(EvalState & state, bool strict,
JSONPlaceholder & out, PathSet & context, bool copyToStore = true) const;
virtual nlohmann::json printValueAsJSON(EvalState & state, bool strict,
PathSet & context, bool copyToStore = true) const;
/* Print the value as XML. Defaults to unevaluated */
virtual void printValueAsXML(EvalState & state, bool strict, bool location,

View file

@ -486,6 +486,10 @@ struct GitInputScheme : InputScheme
}
input.attrs.insert_or_assign("ref", *head);
unlockedAttrs.insert_or_assign("ref", *head);
} else {
if (!input.getRev()) {
unlockedAttrs.insert_or_assign("ref", input.getRef().value());
}
}
if (auto res = getCache()->lookup(store, unlockedAttrs)) {

View file

@ -262,17 +262,20 @@ struct GitHubInputScheme : GitArchiveInputScheme
DownloadUrl getDownloadUrl(const Input & input) const override
{
// FIXME: use regular /archive URLs instead? api.github.com
// might have stricter rate limits.
auto host = maybeGetStrAttr(input.attrs, "host").value_or("github.com");
auto url = fmt(
host == "github.com"
? "https://api.%s/repos/%s/%s/tarball/%s"
: "https://%s/api/v3/repos/%s/%s/tarball/%s",
host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"),
Headers headers = makeHeadersWithAuthTokens(host);
// If we have no auth headers then we default to the public archive
// urls so we do not run into rate limits.
const auto urlFmt =
host != "github.com"
? "https://%s/api/v3/repos/%s/%s/tarball/%s"
: headers.empty()
? "https://%s/%s/%s/archive/%s.tar.gz"
: "https://api.%s/repos/%s/%s/tarball/%s";
const auto url = fmt(urlFmt, host, getStrAttr(input.attrs, "owner"), getStrAttr(input.attrs, "repo"),
input.getRev()->to_string(Base16, false));
Headers headers = makeHeadersWithAuthTokens(host);
return DownloadUrl { url, headers };
}

View file

@ -33,6 +33,7 @@
namespace nix {
char * * savedArgv;
static bool gcWarning = true;

View file

@ -9,7 +9,6 @@
#include "remote-fs-accessor.hh"
#include "nar-info-disk-cache.hh"
#include "nar-accessor.hh"
#include "json.hh"
#include "thread-pool.hh"
#include "callback.hh"
@ -194,19 +193,12 @@ ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
/* Optionally write a JSON file containing a listing of the
contents of the NAR. */
if (writeNARListing) {
std::ostringstream jsonOut;
nlohmann::json j = {
{"version", 1},
{"root", listNar(ref<FSAccessor>(narAccessor), "", true)},
};
{
JSONObject jsonRoot(jsonOut);
jsonRoot.attr("version", 1);
{
auto res = jsonRoot.placeholder("root");
listNar(res, ref<FSAccessor>(narAccessor), "", true);
}
}
upsertFile(std::string(info.path.hashPart()) + ".ls", jsonOut.str(), "application/json");
upsertFile(std::string(info.path.hashPart()) + ".ls", j.dump(), "application/json");
}
/* Optionally maintain an index of DWARF debug info files

View file

@ -5,7 +5,7 @@
#include <string>
#include <chrono>
#include <optional>
namespace nix {
@ -78,6 +78,9 @@ struct BuildResult
was repeated). */
time_t startTime = 0, stopTime = 0;
/* User and system CPU time the build took. */
std::optional<std::chrono::microseconds> cpuUser, cpuSystem;
bool success()
{
return status == Built || status == Substituted || status == AlreadyValid || status == ResolvesToAlreadyValid;

View file

@ -7,7 +7,6 @@
#include "finally.hh"
#include "util.hh"
#include "archive.hh"
#include "json.hh"
#include "compression.hh"
#include "worker-protocol.hh"
#include "topo-sort.hh"
@ -528,14 +527,32 @@ void DerivationGoal::inputsRealised()
/* Add the relevant output closures of the input derivation
`i' as input paths. Only add the closures of output paths
that are specified as inputs. */
for (auto & j : wantedDepOutputs)
if (auto outPath = get(inputDrvOutputs, { depDrvPath, j }))
for (auto & j : wantedDepOutputs) {
/* TODO (impure derivations-induced tech debt):
Tracking input derivation outputs statefully through the
goals is error prone and has led to bugs.
For a robust nix, we need to move towards the `else` branch,
which does not rely on goal state to match up with the
reality of the store, which is our real source of truth.
However, the impure derivations feature still relies on this
fragile way of doing things, because its builds do not have
a representation in the store, which is a usability problem
in itself */
if (auto outPath = get(inputDrvOutputs, { depDrvPath, j })) {
worker.store.computeFSClosure(*outPath, inputPaths);
else
}
else {
auto outMap = worker.evalStore.queryDerivationOutputMap(depDrvPath);
auto outMapPath = outMap.find(j);
if (outMapPath == outMap.end()) {
throw Error(
"derivation '%s' requires non-existent output '%s' from input derivation '%s'",
worker.store.printStorePath(drvPath), j, worker.store.printStorePath(depDrvPath));
}
worker.store.computeFSClosure(outMapPath->second, inputPaths);
}
}
}
}
/* Second, the input sources. */
@ -869,6 +886,14 @@ void DerivationGoal::buildDone()
cleanupPostChildKill();
if (buildResult.cpuUser && buildResult.cpuSystem) {
debug("builder for '%s' terminated with status %d, user CPU %.3fs, system CPU %.3fs",
worker.store.printStorePath(drvPath),
status,
((double) buildResult.cpuUser->count()) / 1000000,
((double) buildResult.cpuSystem->count()) / 1000000);
}
bool diskFull = false;
try {

View file

@ -16,11 +16,11 @@ HookInstance::HookInstance()
buildHookArgs.pop_front();
Strings args;
args.push_back(std::string(baseNameOf(buildHook)));
for (auto & arg : buildHookArgs)
args.push_back(arg);
args.push_back(std::string(baseNameOf(settings.buildHook.get())));
args.push_back(std::to_string(verbosity));
/* Create a pipe to get the output of the child. */

View file

@ -8,13 +8,13 @@
#include "finally.hh"
#include "util.hh"
#include "archive.hh"
#include "json.hh"
#include "compression.hh"
#include "daemon.hh"
#include "worker-protocol.hh"
#include "topo-sort.hh"
#include "callback.hh"
#include "json-utils.hh"
#include "cgroup.hh"
#include <regex>
#include <queue>
@ -56,6 +56,7 @@
#include <pwd.h>
#include <grp.h>
#include <iostream>
namespace nix {
@ -129,26 +130,44 @@ void LocalDerivationGoal::killChild()
if (pid != -1) {
worker.childTerminated(this);
if (buildUser) {
/* If we're using a build user, then there is a tricky
race condition: if we kill the build user before the
child has done its setuid() to the build user uid, then
it won't be killed, and we'll potentially lock up in
pid.wait(). So also send a conventional kill to the
child. */
/* If we're using a build user, then there is a tricky race
condition: if we kill the build user before the child has
done its setuid() to the build user uid, then it won't be
killed, and we'll potentially lock up in pid.wait(). So
also send a conventional kill to the child. */
::kill(-pid, SIGKILL); /* ignore the result */
buildUser->kill();
pid.wait();
} else
pid.kill();
assert(pid == -1);
killSandbox(true);
pid.wait();
}
DerivationGoal::killChild();
}
void LocalDerivationGoal::killSandbox(bool getStats)
{
if (cgroup) {
#if __linux__
auto stats = destroyCgroup(*cgroup);
if (getStats) {
buildResult.cpuUser = stats.cpuUser;
buildResult.cpuSystem = stats.cpuSystem;
}
#else
abort();
#endif
}
else if (buildUser) {
auto uid = buildUser->getUID();
assert(uid != 0);
killUser(uid);
}
}
void LocalDerivationGoal::tryLocalBuild() {
unsigned int curBuilds = worker.getNrLocalBuilds();
if (curBuilds >= settings.maxBuildJobs) {
@ -158,28 +177,46 @@ void LocalDerivationGoal::tryLocalBuild() {
return;
}
/* If `build-users-group' is not empty, then we have to build as
one of the members of that group. */
if (settings.buildUsersGroup != "" && getuid() == 0) {
#if defined(__linux__) || defined(__APPLE__)
if (!buildUser) buildUser = std::make_unique<UserLock>();
/* Are we doing a chroot build? */
{
auto noChroot = parsedDrv->getBoolAttr("__noChroot");
if (settings.sandboxMode == smEnabled) {
if (noChroot)
throw Error("derivation '%s' has '__noChroot' set, "
"but that's not allowed when 'sandbox' is 'true'", worker.store.printStorePath(drvPath));
#if __APPLE__
if (additionalSandboxProfile != "")
throw Error("derivation '%s' specifies a sandbox profile, "
"but this is only allowed when 'sandbox' is 'relaxed'", worker.store.printStorePath(drvPath));
#endif
useChroot = true;
}
else if (settings.sandboxMode == smDisabled)
useChroot = false;
else if (settings.sandboxMode == smRelaxed)
useChroot = derivationType.isSandboxed() && !noChroot;
}
if (buildUser->findFreeUser()) {
/* Make sure that no other processes are executing under this
uid. */
buildUser->kill();
} else {
auto & localStore = getLocalStore();
if (localStore.storeDir != localStore.realStoreDir.get()) {
#if __linux__
useChroot = true;
#else
throw Error("building using a diverted store is not supported on this platform");
#endif
}
if (useBuildUsers()) {
if (!buildUser)
buildUser = acquireUserLock(parsedDrv->useUidRange() ? 65536 : 1, useChroot);
if (!buildUser) {
if (!actLock)
actLock = std::make_unique<Activity>(*logger, lvlWarn, actBuildWaiting,
fmt("waiting for UID to build '%s'", yellowtxt(worker.store.printStorePath(drvPath))));
worker.waitForAWhile(shared_from_this());
return;
}
#else
/* Don't know how to block the creation of setuid/setgid
binaries on this platform. */
throw Error("build users are not supported on this platform for security reasons");
#endif
}
actLock.reset();
@ -270,7 +307,7 @@ void LocalDerivationGoal::cleanupPostChildKill()
malicious user from leaving behind a process that keeps files
open and modifies them after they have been chown'ed to
root. */
if (buildUser) buildUser->kill();
killSandbox(true);
/* Terminate the recursive Nix daemon. */
stopDaemon();
@ -363,6 +400,60 @@ static void linkOrCopy(const Path & from, const Path & to)
void LocalDerivationGoal::startBuilder()
{
if ((buildUser && buildUser->getUIDCount() != 1)
#if __linux__
|| settings.useCgroups
#endif
)
{
#if __linux__
settings.requireExperimentalFeature(Xp::Cgroups);
auto ourCgroups = getCgroups("/proc/self/cgroup");
auto ourCgroup = ourCgroups[""];
if (ourCgroup == "")
throw Error("cannot determine cgroup name from /proc/self/cgroup");
auto ourCgroupPath = canonPath("/sys/fs/cgroup/" + ourCgroup);
if (!pathExists(ourCgroupPath))
throw Error("expected cgroup directory '%s'", ourCgroupPath);
static std::atomic<unsigned int> counter{0};
cgroup = buildUser
? fmt("%s/nix-build-uid-%d", ourCgroupPath, buildUser->getUID())
: fmt("%s/nix-build-pid-%d-%d", ourCgroupPath, getpid(), counter++);
debug("using cgroup '%s'", *cgroup);
/* When using a build user, record the cgroup we used for that
user so that if we got interrupted previously, we can kill
any left-over cgroup first. */
if (buildUser) {
auto cgroupsDir = settings.nixStateDir + "/cgroups";
createDirs(cgroupsDir);
auto cgroupFile = fmt("%s/%d", cgroupsDir, buildUser->getUID());
if (pathExists(cgroupFile)) {
auto prevCgroup = readFile(cgroupFile);
destroyCgroup(prevCgroup);
}
writeFile(cgroupFile, *cgroup);
}
#else
throw Error("cgroups are not supported on this platform");
#endif
}
/* Make sure that no other processes are executing under the
sandbox uids. This must be done before any chownToBuilder()
calls. */
killSandbox(false);
/* Right platform? */
if (!parsedDrv->canBuildLocally(worker.store))
throw Error("a '%s' with features {%s} is required to build '%s', but I am a '%s' with features {%s}",
@ -376,35 +467,6 @@ void LocalDerivationGoal::startBuilder()
additionalSandboxProfile = parsedDrv->getStringAttr("__sandboxProfile").value_or("");
#endif
/* Are we doing a chroot build? */
{
auto noChroot = parsedDrv->getBoolAttr("__noChroot");
if (settings.sandboxMode == smEnabled) {
if (noChroot)
throw Error("derivation '%s' has '__noChroot' set, "
"but that's not allowed when 'sandbox' is 'true'", worker.store.printStorePath(drvPath));
#if __APPLE__
if (additionalSandboxProfile != "")
throw Error("derivation '%s' specifies a sandbox profile, "
"but this is only allowed when 'sandbox' is 'relaxed'", worker.store.printStorePath(drvPath));
#endif
useChroot = true;
}
else if (settings.sandboxMode == smDisabled)
useChroot = false;
else if (settings.sandboxMode == smRelaxed)
useChroot = derivationType.isSandboxed() && !noChroot;
}
auto & localStore = getLocalStore();
if (localStore.storeDir != localStore.realStoreDir.get()) {
#if __linux__
useChroot = true;
#else
throw Error("building using a diverted store is not supported on this platform");
#endif
}
/* Create a temporary directory where the build will take
place. */
tmpDir = createTempDir("", "nix-build-" + std::string(drvPath.name()), false, false, 0700);
@ -580,10 +642,11 @@ void LocalDerivationGoal::startBuilder()
printMsg(lvlChatty, format("setting up chroot environment in '%1%'") % chrootRootDir);
if (mkdir(chrootRootDir.c_str(), 0750) == -1)
// FIXME: make this 0700
if (mkdir(chrootRootDir.c_str(), buildUser && buildUser->getUIDCount() != 1 ? 0755 : 0750) == -1)
throw SysError("cannot create '%1%'", chrootRootDir);
if (buildUser && chown(chrootRootDir.c_str(), 0, buildUser->getGID()) == -1)
if (buildUser && chown(chrootRootDir.c_str(), buildUser->getUIDCount() != 1 ? buildUser->getUID() : 0, buildUser->getGID()) == -1)
throw SysError("cannot change ownership of '%1%'", chrootRootDir);
/* Create a writable /tmp in the chroot. Many builders need
@ -597,6 +660,10 @@ void LocalDerivationGoal::startBuilder()
nobody account. The latter is kind of a hack to support
Samba-in-QEMU. */
createDirs(chrootRootDir + "/etc");
chownToBuilder(chrootRootDir + "/etc");
if (parsedDrv->useUidRange() && (!buildUser || buildUser->getUIDCount() < 65536))
throw Error("feature 'uid-range' requires the setting '%s' to be enabled", settings.autoAllocateUids.name);
/* Declare the build user's group so that programs get a consistent
view of the system (e.g., "id -gn"). */
@ -647,12 +714,28 @@ void LocalDerivationGoal::startBuilder()
dirsInChroot.erase(worker.store.printStorePath(*i.second.second));
}
#elif __APPLE__
if (cgroup) {
if (mkdir(cgroup->c_str(), 0755) != 0)
throw SysError("creating cgroup '%s'", *cgroup);
chownToBuilder(*cgroup);
chownToBuilder(*cgroup + "/cgroup.procs");
chownToBuilder(*cgroup + "/cgroup.threads");
//chownToBuilder(*cgroup + "/cgroup.subtree_control");
}
#else
if (parsedDrv->useUidRange())
throw Error("feature 'uid-range' is not supported on this platform");
#if __APPLE__
/* We don't really have any parent prep work to do (yet?)
All work happens in the child, instead. */
#else
throw Error("sandboxing builds is not supported on this platform");
#endif
#endif
} else {
if (parsedDrv->useUidRange())
throw Error("feature 'uid-range' is only supported in sandboxed builds");
}
if (needsHashRewrite() && pathExists(homeDir))
@ -913,14 +996,16 @@ void LocalDerivationGoal::startBuilder()
the calling user (if build users are disabled). */
uid_t hostUid = buildUser ? buildUser->getUID() : getuid();
uid_t hostGid = buildUser ? buildUser->getGID() : getgid();
uid_t nrIds = buildUser ? buildUser->getUIDCount() : 1;
writeFile("/proc/" + std::to_string(pid) + "/uid_map",
fmt("%d %d 1", sandboxUid(), hostUid));
fmt("%d %d %d", sandboxUid(), hostUid, nrIds));
if (!buildUser || buildUser->getUIDCount() == 1)
writeFile("/proc/" + std::to_string(pid) + "/setgroups", "deny");
writeFile("/proc/" + std::to_string(pid) + "/gid_map",
fmt("%d %d 1", sandboxGid(), hostGid));
fmt("%d %d %d", sandboxGid(), hostGid, nrIds));
} else {
debug("note: not using a user namespace");
if (!buildUser)
@ -947,6 +1032,10 @@ void LocalDerivationGoal::startBuilder()
throw SysError("getting sandbox user namespace");
}
/* Move the child into its own cgroup. */
if (cgroup)
writeFile(*cgroup + "/cgroup.procs", fmt("%d", (pid_t) pid));
/* Signal the builder that we've updated its user namespace. */
writeFull(userNamespaceSync.writeSide.get(), "1");
@ -1552,6 +1641,22 @@ void setupSeccomp()
seccomp_arch_add(ctx, SCMP_ARCH_ARM) != 0)
printError("unable to add ARM seccomp architecture; this may result in spurious build failures if running 32-bit ARM processes");
if (nativeSystem == "mips64-linux" &&
seccomp_arch_add(ctx, SCMP_ARCH_MIPS) != 0)
printError("unable to add mips seccomp architecture");
if (nativeSystem == "mips64-linux" &&
seccomp_arch_add(ctx, SCMP_ARCH_MIPS64N32) != 0)
printError("unable to add mips64-*abin32 seccomp architecture");
if (nativeSystem == "mips64el-linux" &&
seccomp_arch_add(ctx, SCMP_ARCH_MIPSEL) != 0)
printError("unable to add mipsel seccomp architecture");
if (nativeSystem == "mips64el-linux" &&
seccomp_arch_add(ctx, SCMP_ARCH_MIPSEL64N32) != 0)
printError("unable to add mips64el-*abin32 seccomp architecture");
/* Prevent builders from creating setuid/setgid binaries. */
for (int perm : { S_ISUID, S_ISGID }) {
if (seccomp_rule_add(ctx, SCMP_ACT_ERRNO(EPERM), SCMP_SYS(chmod), 1,
@ -1763,6 +1868,13 @@ void LocalDerivationGoal::runChild()
if (mount("none", (chrootRootDir + "/proc").c_str(), "proc", 0, 0) == -1)
throw SysError("mounting /proc");
/* Mount sysfs on /sys. */
if (buildUser && buildUser->getUIDCount() != 1) {
createDirs(chrootRootDir + "/sys");
if (mount("none", (chrootRootDir + "/sys").c_str(), "sysfs", 0, 0) == -1)
throw SysError("mounting /sys");
}
/* Mount a new tmpfs on /dev/shm to ensure that whatever
the builder puts in /dev/shm is cleaned up automatically. */
if (pathExists("/dev/shm") && mount("none", (chrootRootDir + "/dev/shm").c_str(), "tmpfs", 0,
@ -1805,6 +1917,12 @@ void LocalDerivationGoal::runChild()
if (unshare(CLONE_NEWNS) == -1)
throw SysError("unsharing mount namespace");
/* Unshare the cgroup namespace. This means
/proc/self/cgroup will show the child's cgroup as '/'
rather than whatever it is in the parent. */
if (cgroup && unshare(CLONE_NEWCGROUP) == -1)
throw SysError("unsharing cgroup namespace");
/* Do the chroot(). */
if (chdir(chrootRootDir.c_str()) == -1)
throw SysError("cannot change directory to '%1%'", chrootRootDir);
@ -1890,9 +2008,8 @@ void LocalDerivationGoal::runChild()
if (setUser && buildUser) {
/* Preserve supplementary groups of the build user, to allow
admins to specify groups such as "kvm". */
if (!buildUser->getSupplementaryGIDs().empty() &&
setgroups(buildUser->getSupplementaryGIDs().size(),
buildUser->getSupplementaryGIDs().data()) == -1)
auto gids = buildUser->getSupplementaryGIDs();
if (setgroups(gids.size(), gids.data()) == -1)
throw SysError("cannot set supplementary groups of build user");
if (setgid(buildUser->getGID()) == -1 ||
@ -2221,7 +2338,10 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
/* Canonicalise first. This ensures that the path we're
rewriting doesn't contain a hard link to /etc/shadow or
something like that. */
canonicalisePathMetaData(actualPath, buildUser ? buildUser->getUID() : -1, inodesSeen);
canonicalisePathMetaData(
actualPath,
buildUser ? std::optional(buildUser->getUIDRange()) : std::nullopt,
inodesSeen);
debug("scanning for references for output '%s' in temp location '%s'", outputName, actualPath);
@ -2314,6 +2434,10 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
sink.s = rewriteStrings(sink.s, outputRewrites);
StringSource source(sink.s);
restorePath(actualPath, source);
/* FIXME: set proper permissions in restorePath() so
we don't have to do another traversal. */
canonicalisePathMetaData(actualPath, {}, inodesSeen);
}
};
@ -2476,7 +2600,7 @@ DrvOutputs LocalDerivationGoal::registerOutputs()
/* FIXME: set proper permissions in restorePath() so
we don't have to do another traversal. */
canonicalisePathMetaData(actualPath, -1, inodesSeen);
canonicalisePathMetaData(actualPath, {}, inodesSeen);
/* Calculate where we'll move the output files. In the checking case we
will leave leave them where they are, for now, rather than move to

View file

@ -15,6 +15,9 @@ struct LocalDerivationGoal : public DerivationGoal
/* The process ID of the builder. */
Pid pid;
/* The cgroup of the builder, if any. */
std::optional<Path> cgroup;
/* The temporary directory. */
Path tmpDir;
@ -92,8 +95,8 @@ struct LocalDerivationGoal : public DerivationGoal
result. */
std::map<Path, ValidPathInfo> prevInfos;
uid_t sandboxUid() { return usingUserNamespace ? 1000 : buildUser->getUID(); }
gid_t sandboxGid() { return usingUserNamespace ? 100 : buildUser->getGID(); }
uid_t sandboxUid() { return usingUserNamespace ? (!buildUser || buildUser->getUIDCount() == 1 ? 1000 : 0) : buildUser->getUID(); }
gid_t sandboxGid() { return usingUserNamespace ? (!buildUser || buildUser->getUIDCount() == 1 ? 100 : 0) : buildUser->getGID(); }
const static Path homeDir;
@ -197,6 +200,10 @@ struct LocalDerivationGoal : public DerivationGoal
/* Forcibly kill the child process, if any. */
void killChild() override;
/* Kill any processes running under the build user UID or in the
cgroup of the build. */
void killSandbox(bool getStats);
/* Create alternative path calculated from but distinct from the
input, so we can avoid overwriting outputs (or other store paths)
that already exist. */

131
src/libstore/cgroup.cc Normal file
View file

@ -0,0 +1,131 @@
#if __linux__
#include "cgroup.hh"
#include "util.hh"
#include <chrono>
#include <cmath>
#include <regex>
#include <unordered_set>
#include <thread>
#include <dirent.h>
namespace nix {
// FIXME: obsolete, check for cgroup2
std::map<std::string, std::string> getCgroups(const Path & cgroupFile)
{
std::map<std::string, std::string> cgroups;
for (auto & line : tokenizeString<std::vector<std::string>>(readFile(cgroupFile), "\n")) {
static std::regex regex("([0-9]+):([^:]*):(.*)");
std::smatch match;
if (!std::regex_match(line, match, regex))
throw Error("invalid line '%s' in '%s'", line, cgroupFile);
std::string name = hasPrefix(std::string(match[2]), "name=") ? std::string(match[2], 5) : match[2];
cgroups.insert_or_assign(name, match[3]);
}
return cgroups;
}
static CgroupStats destroyCgroup(const Path & cgroup, bool returnStats)
{
if (!pathExists(cgroup)) return {};
auto procsFile = cgroup + "/cgroup.procs";
if (!pathExists(procsFile))
throw Error("'%s' is not a cgroup", cgroup);
/* Use the fast way to kill every process in a cgroup, if
available. */
auto killFile = cgroup + "/cgroup.kill";
if (pathExists(killFile))
writeFile(killFile, "1");
/* Otherwise, manually kill every process in the subcgroups and
this cgroup. */
for (auto & entry : readDirectory(cgroup)) {
if (entry.type != DT_DIR) continue;
destroyCgroup(cgroup + "/" + entry.name, false);
}
int round = 1;
std::unordered_set<pid_t> pidsShown;
while (true) {
auto pids = tokenizeString<std::vector<std::string>>(readFile(procsFile));
if (pids.empty()) break;
if (round > 20)
throw Error("cannot kill cgroup '%s'", cgroup);
for (auto & pid_s : pids) {
pid_t pid;
if (auto o = string2Int<pid_t>(pid_s))
pid = *o;
else
throw Error("invalid pid '%s'", pid);
if (pidsShown.insert(pid).second) {
try {
auto cmdline = readFile(fmt("/proc/%d/cmdline", pid));
using namespace std::string_literals;
warn("killing stray builder process %d (%s)...",
pid, trim(replaceStrings(cmdline, "\0"s, " ")));
} catch (SysError &) {
}
}
// FIXME: pid wraparound
if (kill(pid, SIGKILL) == -1 && errno != ESRCH)
throw SysError("killing member %d of cgroup '%s'", pid, cgroup);
}
auto sleep = std::chrono::milliseconds((int) std::pow(2.0, std::min(round, 10)));
if (sleep.count() > 100)
printError("waiting for %d ms for cgroup '%s' to become empty", sleep.count(), cgroup);
std::this_thread::sleep_for(sleep);
round++;
}
CgroupStats stats;
if (returnStats) {
auto cpustatPath = cgroup + "/cpu.stat";
if (pathExists(cpustatPath)) {
for (auto & line : tokenizeString<std::vector<std::string>>(readFile(cpustatPath), "\n")) {
std::string_view userPrefix = "user_usec ";
if (hasPrefix(line, userPrefix)) {
auto n = string2Int<uint64_t>(line.substr(userPrefix.size()));
if (n) stats.cpuUser = std::chrono::microseconds(*n);
}
std::string_view systemPrefix = "system_usec ";
if (hasPrefix(line, systemPrefix)) {
auto n = string2Int<uint64_t>(line.substr(systemPrefix.size()));
if (n) stats.cpuSystem = std::chrono::microseconds(*n);
}
}
}
}
if (rmdir(cgroup.c_str()) == -1)
throw SysError("deleting cgroup '%s'", cgroup);
return stats;
}
CgroupStats destroyCgroup(const Path & cgroup)
{
return destroyCgroup(cgroup, true);
}
}
#endif

27
src/libstore/cgroup.hh Normal file
View file

@ -0,0 +1,27 @@
#pragma once
#if __linux__
#include <chrono>
#include <optional>
#include "types.hh"
namespace nix {
std::map<std::string, std::string> getCgroups(const Path & cgroupFile);
struct CgroupStats
{
std::optional<std::chrono::microseconds> cpuUser, cpuSystem;
};
/* Destroy the cgroup denoted by 'path'. The postcondition is that
'path' does not exist, and thus any processes in the cgroup have
been killed. Also return statistics from the cgroup just before
destruction. */
CgroupStats destroyCgroup(const Path & cgroup);
}
#endif

View file

@ -53,28 +53,13 @@ StorePathSet BuiltPath::outPaths() const
);
}
template<typename T>
nlohmann::json stuffToJSON(const std::vector<T> & ts, ref<Store> store) {
auto res = nlohmann::json::array();
for (const T & t : ts) {
std::visit([&res, store](const auto & t) {
res.push_back(t.toJSON(store));
}, t.raw());
}
return res;
}
nlohmann::json derivedPathsWithHintsToJSON(const BuiltPaths & buildables, ref<Store> store)
{ return stuffToJSON<BuiltPath>(buildables, store); }
nlohmann::json derivedPathsToJSON(const DerivedPaths & paths, ref<Store> store)
{ return stuffToJSON<DerivedPath>(paths, store); }
std::string DerivedPath::Opaque::to_string(const Store & store) const {
std::string DerivedPath::Opaque::to_string(const Store & store) const
{
return store.printStorePath(path);
}
std::string DerivedPath::Built::to_string(const Store & store) const {
std::string DerivedPath::Built::to_string(const Store & store) const
{
return store.printStorePath(drvPath)
+ "!"
+ (outputs.empty() ? std::string { "*" } : concatStringsSep(",", outputs));

View file

@ -125,7 +125,4 @@ struct BuiltPath : _BuiltPathRaw {
typedef std::vector<DerivedPath> DerivedPaths;
typedef std::vector<BuiltPath> BuiltPaths;
nlohmann::json derivedPathsWithHintsToJSON(const BuiltPaths & buildables, ref<Store> store);
nlohmann::json derivedPathsToJSON(const DerivedPaths & , ref<Store> store);
}

View file

@ -147,7 +147,7 @@ void LocalStore::addTempRoot(const StorePath & path)
} catch (SysError & e) {
/* The garbage collector may have exited, so we need to
restart. */
if (e.errNo == EPIPE) {
if (e.errNo == EPIPE || e.errNo == ECONNRESET) {
debug("GC socket disconnected");
state->fdRootsSocket.close();
goto restart;
@ -506,6 +506,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
Finally cleanup([&]() {
debug("GC roots server shutting down");
fdServer.close();
while (true) {
auto item = remove_begin(*connections.lock());
if (!item) break;

View file

@ -130,6 +130,10 @@ StringSet Settings::getDefaultSystemFeatures()
actually require anything special on the machines. */
StringSet features{"nixos-test", "benchmark", "big-parallel"};
#if __linux__
features.insert("uid-range");
#endif
#if __linux__
if (access("/dev/kvm", R_OK | W_OK) == 0)
features.insert("kvm");

View file

@ -46,6 +46,14 @@ struct PluginFilesSetting : public BaseSetting<Paths>
void set(const std::string & str, bool append = false) override;
};
const uint32_t maxIdsPerBuild =
#if __linux__
1 << 16
#else
1
#endif
;
class Settings : public Config {
unsigned int getDefaultCores();
@ -275,6 +283,38 @@ public:
multi-user settings with untrusted users.
)"};
Setting<bool> autoAllocateUids{this, false, "auto-allocate-uids",
"Whether to allocate UIDs for builders automatically."};
Setting<uint32_t> startId{this,
#if __linux__
0x34000000,
#else
56930,
#endif
"start-id",
"The first UID and GID to use for dynamic ID allocation."};
Setting<uint32_t> uidCount{this,
#if __linux__
maxIdsPerBuild * 128,
#else
128,
#endif
"id-count",
"The number of UIDs/GIDs to use for dynamic ID allocation."};
#if __linux__
Setting<bool> useCgroups{
this, false, "use-cgroups",
R"(
Whether to execute builds inside cgroups. Cgroups are
enabled automatically for derivations that require the
`uid-range` system feature.
)"
};
#endif
Setting<bool> impersonateLinux26{this, false, "impersonate-linux-26",
"Whether to impersonate a Linux 2.6 machine on newer kernels.",
{"build-impersonate-linux-26"}};

View file

@ -583,7 +583,10 @@ void canonicaliseTimestampAndPermissions(const Path & path)
}
static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSeen & inodesSeen)
static void canonicalisePathMetaData_(
const Path & path,
std::optional<std::pair<uid_t, uid_t>> uidRange,
InodesSeen & inodesSeen)
{
checkInterrupt();
@ -630,7 +633,7 @@ static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSe
However, ignore files that we chown'ed ourselves previously to
ensure that we don't fail on hard links within the same build
(i.e. "touch $out/foo; ln $out/foo $out/bar"). */
if (fromUid != (uid_t) -1 && st.st_uid != fromUid) {
if (uidRange && (st.st_uid < uidRange->first || st.st_uid > uidRange->second)) {
if (S_ISDIR(st.st_mode) || !inodesSeen.count(Inode(st.st_dev, st.st_ino)))
throw BuildError("invalid ownership on file '%1%'", path);
mode_t mode = st.st_mode & ~S_IFMT;
@ -663,14 +666,17 @@ static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSe
if (S_ISDIR(st.st_mode)) {
DirEntries entries = readDirectory(path);
for (auto & i : entries)
canonicalisePathMetaData_(path + "/" + i.name, fromUid, inodesSeen);
canonicalisePathMetaData_(path + "/" + i.name, uidRange, inodesSeen);
}
}
void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & inodesSeen)
void canonicalisePathMetaData(
const Path & path,
std::optional<std::pair<uid_t, uid_t>> uidRange,
InodesSeen & inodesSeen)
{
canonicalisePathMetaData_(path, fromUid, inodesSeen);
canonicalisePathMetaData_(path, uidRange, inodesSeen);
/* On platforms that don't have lchown(), the top-level path can't
be a symlink, since we can't change its ownership. */
@ -683,10 +689,11 @@ void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & ino
}
void canonicalisePathMetaData(const Path & path, uid_t fromUid)
void canonicalisePathMetaData(const Path & path,
std::optional<std::pair<uid_t, uid_t>> uidRange)
{
InodesSeen inodesSeen;
canonicalisePathMetaData(path, fromUid, inodesSeen);
canonicalisePathMetaData(path, uidRange, inodesSeen);
}
@ -1331,7 +1338,7 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
autoGC();
canonicalisePathMetaData(realPath, -1);
canonicalisePathMetaData(realPath, {});
optimisePath(realPath, repair); // FIXME: combine with hashPath()
@ -1444,7 +1451,7 @@ StorePath LocalStore::addToStoreFromDump(Source & source0, std::string_view name
narHash = narSink.finish();
}
canonicalisePathMetaData(realPath, -1); // FIXME: merge into restorePath
canonicalisePathMetaData(realPath, {}); // FIXME: merge into restorePath
optimisePath(realPath, repair);
@ -1486,7 +1493,7 @@ StorePath LocalStore::addTextToStore(
writeFile(realPath, s);
canonicalisePathMetaData(realPath, -1);
canonicalisePathMetaData(realPath, {});
StringSink sink;
dumpString(s, sink);

View file

@ -310,9 +310,18 @@ typedef std::set<Inode> InodesSeen;
- the permissions are set of 444 or 555 (i.e., read-only with or
without execute permission; setuid bits etc. are cleared)
- the owner and group are set to the Nix user and group, if we're
running as root. */
void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & inodesSeen);
void canonicalisePathMetaData(const Path & path, uid_t fromUid);
running as root.
If uidRange is not empty, this function will throw an error if it
encounters files owned by a user outside of the closed interval
[uidRange->first, uidRange->second].
*/
void canonicalisePathMetaData(
const Path & path,
std::optional<std::pair<uid_t, uid_t>> uidRange,
InodesSeen & inodesSeen);
void canonicalisePathMetaData(
const Path & path,
std::optional<std::pair<uid_t, uid_t>> uidRange);
void canonicaliseTimestampAndPermissions(const Path & path);
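For illustration, a caller can now either skip the ownership check entirely (as the `addToStore` call sites below do by passing `{}`) or supply the closed UID interval of a build. A minimal sketch, assuming these declarations are in scope; the path and bounds are hypothetical:

void exampleCanonicalise(const Path & realPath)
{
    // No ownership check, e.g. for content that was just unpacked from a NAR:
    canonicalisePathMetaData(realPath, std::nullopt);

    // Reject anything owned outside the per-build UID range [30001, 30128]:
    canonicalisePathMetaData(realPath, std::pair<uid_t, uid_t>{30001, 30128});
}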

View file

@ -20,7 +20,7 @@ endif
$(foreach file,$(libstore_FILES),$(eval $(call install-data-in,$(d)/$(file),$(datadir)/nix/sandbox)))
ifeq ($(ENABLE_S3), 1)
libstore_LDFLAGS += -laws-cpp-sdk-transfer -laws-cpp-sdk-s3 -laws-cpp-sdk-core
libstore_LDFLAGS += -laws-cpp-sdk-transfer -laws-cpp-sdk-s3 -laws-cpp-sdk-core -laws-crt-cpp
endif
ifdef HOST_SOLARIS

View file

@ -2,105 +2,197 @@
#include "globals.hh"
#include "pathlocks.hh"
#include <grp.h>
#include <pwd.h>
#include <fcntl.h>
#include <unistd.h>
#include <grp.h>
namespace nix {
UserLock::UserLock()
struct SimpleUserLock : UserLock
{
AutoCloseFD fdUserLock;
uid_t uid;
gid_t gid;
std::vector<gid_t> supplementaryGIDs;
uid_t getUID() override { assert(uid); return uid; }
uid_t getUIDCount() override { return 1; }
gid_t getGID() override { assert(gid); return gid; }
std::vector<gid_t> getSupplementaryGIDs() override { return supplementaryGIDs; }
static std::unique_ptr<UserLock> acquire()
{
assert(settings.buildUsersGroup != "");
createDirs(settings.nixStateDir + "/userpool");
}
bool UserLock::findFreeUser() {
if (enabled()) return true;
/* Get the members of the build-users-group. */
struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str());
if (!gr)
throw Error("the group '%1%' specified in 'build-users-group' does not exist",
settings.buildUsersGroup);
gid = gr->gr_gid;
throw Error("the group '%s' specified in 'build-users-group' does not exist", settings.buildUsersGroup);
/* Copy the result of getgrnam. */
Strings users;
for (char * * p = gr->gr_mem; *p; ++p) {
debug("found build user '%1%'", *p);
debug("found build user '%s'", *p);
users.push_back(*p);
}
if (users.empty())
throw Error("the build users group '%1%' has no members",
settings.buildUsersGroup);
throw Error("the build users group '%s' has no members", settings.buildUsersGroup);
/* Find a user account that isn't currently in use for another
build. */
for (auto & i : users) {
debug("trying user '%1%'", i);
debug("trying user '%s'", i);
struct passwd * pw = getpwnam(i.c_str());
if (!pw)
throw Error("the user '%1%' in the group '%2%' does not exist",
i, settings.buildUsersGroup);
throw Error("the user '%s' in the group '%s' does not exist", i, settings.buildUsersGroup);
fnUserLock = (format("%1%/userpool/%2%") % settings.nixStateDir % pw->pw_uid).str();
auto fnUserLock = fmt("%s/userpool/%s", settings.nixStateDir,pw->pw_uid);
AutoCloseFD fd = open(fnUserLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
if (!fd)
throw SysError("opening user lock '%1%'", fnUserLock);
throw SysError("opening user lock '%s'", fnUserLock);
if (lockFile(fd.get(), ltWrite, false)) {
fdUserLock = std::move(fd);
user = i;
uid = pw->pw_uid;
auto lock = std::make_unique<SimpleUserLock>();
lock->fdUserLock = std::move(fd);
lock->uid = pw->pw_uid;
lock->gid = gr->gr_gid;
/* Sanity check... */
if (uid == getuid() || uid == geteuid())
throw Error("the Nix user should not be a member of '%1%'",
settings.buildUsersGroup);
if (lock->uid == getuid() || lock->uid == geteuid())
throw Error("the Nix user should not be a member of '%s'", settings.buildUsersGroup);
#if __linux__
/* Get the list of supplementary groups of this build user. This
is usually either empty or contains a group such as "kvm". */
/* Get the list of supplementary groups of this build
user. This is usually either empty or contains a
group such as "kvm". */
int ngroups = 32; // arbitrary initial guess
supplementaryGIDs.resize(ngroups);
std::vector<gid_t> gids;
gids.resize(ngroups);
int err = getgrouplist(pw->pw_name, pw->pw_gid, supplementaryGIDs.data(),
int err = getgrouplist(
pw->pw_name, pw->pw_gid,
gids.data(),
&ngroups);
// Our initial size of 32 wasn't sufficient, the correct size has
// been stored in ngroups, so we try again.
/* Our initial size of 32 wasn't sufficient, the
correct size has been stored in ngroups, so we try
again. */
if (err == -1) {
supplementaryGIDs.resize(ngroups);
err = getgrouplist(pw->pw_name, pw->pw_gid, supplementaryGIDs.data(),
gids.resize(ngroups);
err = getgrouplist(
pw->pw_name, pw->pw_gid,
gids.data(),
&ngroups);
}
// If it failed once more, then something must be broken.
if (err == -1)
throw Error("failed to get list of supplementary groups for '%1%'",
pw->pw_name);
throw Error("failed to get list of supplementary groups for '%s'", pw->pw_name);
// Finally, trim back the GID list to its real size
supplementaryGIDs.resize(ngroups);
// Finally, trim back the GID list to its real size.
for (auto i = 0; i < ngroups; i++)
if (gids[i] != lock->gid)
lock->supplementaryGIDs.push_back(gids[i]);
#endif
isEnabled = true;
return true;
return lock;
}
}
return false;
return nullptr;
}
};
void UserLock::kill()
struct AutoUserLock : UserLock
{
killUser(uid);
AutoCloseFD fdUserLock;
uid_t firstUid = 0;
gid_t firstGid = 0;
uid_t nrIds = 1;
uid_t getUID() override { assert(firstUid); return firstUid; }
gid_t getUIDCount() override { return nrIds; }
gid_t getGID() override { assert(firstGid); return firstGid; }
std::vector<gid_t> getSupplementaryGIDs() override { return {}; }
static std::unique_ptr<UserLock> acquire(uid_t nrIds, bool useChroot)
{
settings.requireExperimentalFeature(Xp::AutoAllocateUids);
assert(settings.startId > 0);
assert(settings.uidCount % maxIdsPerBuild == 0);
assert((uint64_t) settings.startId + (uint64_t) settings.uidCount <= std::numeric_limits<uid_t>::max());
assert(nrIds <= maxIdsPerBuild);
createDirs(settings.nixStateDir + "/userpool2");
size_t nrSlots = settings.uidCount / maxIdsPerBuild;
for (size_t i = 0; i < nrSlots; i++) {
debug("trying user slot '%d'", i);
createDirs(settings.nixStateDir + "/userpool2");
auto fnUserLock = fmt("%s/userpool2/slot-%d", settings.nixStateDir, i);
AutoCloseFD fd = open(fnUserLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
if (!fd)
throw SysError("opening user lock '%s'", fnUserLock);
if (lockFile(fd.get(), ltWrite, false)) {
auto firstUid = settings.startId + i * maxIdsPerBuild;
auto pw = getpwuid(firstUid);
if (pw)
throw Error("auto-allocated UID %d clashes with existing user account '%s'", firstUid, pw->pw_name);
auto lock = std::make_unique<AutoUserLock>();
lock->fdUserLock = std::move(fd);
lock->firstUid = firstUid;
if (useChroot)
lock->firstGid = firstUid;
else {
struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str());
if (!gr)
throw Error("the group '%s' specified in 'build-users-group' does not exist", settings.buildUsersGroup);
lock->firstGid = gr->gr_gid;
}
lock->nrIds = nrIds;
return lock;
}
}
return nullptr;
}
};
std::unique_ptr<UserLock> acquireUserLock(uid_t nrIds, bool useChroot)
{
if (settings.autoAllocateUids)
return AutoUserLock::acquire(nrIds, useChroot);
else
return SimpleUserLock::acquire();
}
bool useBuildUsers()
{
#if __linux__
static bool b = (settings.buildUsersGroup != "" || settings.startId.get() != 0) && getuid() == 0;
return b;
#elif __APPLE__
static bool b = settings.buildUsersGroup != "" && getuid() == 0;
return b;
#else
return false;
#endif
}
}

View file

@ -1,37 +1,38 @@
#pragma once
#include "sync.hh"
#include "types.hh"
#include "util.hh"
#include <optional>
#include <sys/types.h>
namespace nix {
class UserLock
struct UserLock
{
private:
Path fnUserLock;
AutoCloseFD fdUserLock;
virtual ~UserLock() { }
bool isEnabled = false;
std::string user;
uid_t uid = 0;
gid_t gid = 0;
std::vector<gid_t> supplementaryGIDs;
/* Get the first and last UID. */
std::pair<uid_t, uid_t> getUIDRange()
{
auto first = getUID();
return {first, first + getUIDCount() - 1};
}
public:
UserLock();
/* Get the first UID. */
virtual uid_t getUID() = 0;
void kill();
virtual uid_t getUIDCount() = 0;
std::string getUser() { return user; }
uid_t getUID() { assert(uid); return uid; }
uid_t getGID() { assert(gid); return gid; }
std::vector<gid_t> getSupplementaryGIDs() { return supplementaryGIDs; }
bool findFreeUser();
bool enabled() { return isEnabled; }
virtual gid_t getGID() = 0;
virtual std::vector<gid_t> getSupplementaryGIDs() = 0;
};
/* Acquire a user lock for a UID range of size `nrIds`. Note that this
may return nullptr if no user is available. */
std::unique_ptr<UserLock> acquireUserLock(uid_t nrIds, bool useChroot);
bool useBuildUsers();
}
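A rough usage sketch (hypothetical caller; the real scheduling logic lives in the build goal code, which simply retries later when `nullptr` is returned):

std::unique_ptr<UserLock> lockForBuild(uid_t nrIds, bool useChroot)
{
    if (!useBuildUsers())
        return nullptr; // builds run as the invoking user, no lock needed

    auto lock = acquireUserLock(nrIds, useChroot);
    if (!lock)
        throw Error("all build users / UID slots are currently in use");

    debug("building with UID %d (%d IDs), GID %d",
        lock->getUID(), lock->getUIDCount(), lock->getGID());
    return lock;
}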

View file

@ -1,6 +1,5 @@
#include "nar-accessor.hh"
#include "archive.hh"
#include "json.hh"
#include <map>
#include <stack>
@ -243,42 +242,43 @@ ref<FSAccessor> makeLazyNarAccessor(const std::string & listing,
return make_ref<NarAccessor>(listing, getNarBytes);
}
void listNar(JSONPlaceholder & res, ref<FSAccessor> accessor,
const Path & path, bool recurse)
using nlohmann::json;
json listNar(ref<FSAccessor> accessor, const Path & path, bool recurse)
{
auto st = accessor->stat(path);
auto obj = res.object();
json obj = json::object();
switch (st.type) {
case FSAccessor::Type::tRegular:
obj.attr("type", "regular");
obj.attr("size", st.fileSize);
obj["type"] = "regular";
obj["size"] = st.fileSize;
if (st.isExecutable)
obj.attr("executable", true);
obj["executable"] = true;
if (st.narOffset)
obj.attr("narOffset", st.narOffset);
obj["narOffset"] = st.narOffset;
break;
case FSAccessor::Type::tDirectory:
obj.attr("type", "directory");
obj["type"] = "directory";
{
auto res2 = obj.object("entries");
obj["entries"] = json::object();
json &res2 = obj["entries"];
for (auto & name : accessor->readDirectory(path)) {
if (recurse) {
auto res3 = res2.placeholder(name);
listNar(res3, accessor, path + "/" + name, true);
res2[name] = listNar(accessor, path + "/" + name, true);
} else
res2.object(name);
res2[name] = json::object();
}
}
break;
case FSAccessor::Type::tSymlink:
obj.attr("type", "symlink");
obj.attr("target", accessor->readLink(path));
obj["type"] = "symlink";
obj["target"] = accessor->readLink(path);
break;
default:
throw Error("path '%s' does not exist in NAR", path);
}
return obj;
}
}

View file

@ -2,6 +2,7 @@
#include <functional>
#include <nlohmann/json_fwd.hpp>
#include "fs-accessor.hh"
namespace nix {
@ -24,11 +25,8 @@ ref<FSAccessor> makeLazyNarAccessor(
const std::string & listing,
GetNarBytes getNarBytes);
class JSONPlaceholder;
/* Write a JSON representation of the contents of a NAR (except file
contents). */
void listNar(JSONPlaceholder & res, ref<FSAccessor> accessor,
const Path & path, bool recurse);
nlohmann::json listNar(ref<FSAccessor> accessor, const Path & path, bool recurse);
}
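Callers that previously streamed the listing through a `JSONPlaceholder` now serialize the returned value themselves, as the `MixLs` change further down shows. A minimal sketch, assuming this header and an accessor from `makeNarAccessor` are available:

#include <iostream>
#include <nlohmann/json.hpp>

void printListing(ref<FSAccessor> accessor)
{
    // Build the recursive listing in memory, then dump it with 2-space indentation.
    nlohmann::json listing = listNar(accessor, "", /* recurse */ true);
    std::cout << listing.dump(2) << std::endl;
}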

View file

@ -2,7 +2,6 @@
#include <nlohmann/json.hpp>
#include <regex>
#include "json.hh"
namespace nix {
@ -90,6 +89,7 @@ std::optional<Strings> ParsedDerivation::getStringsAttr(const std::string & name
StringSet ParsedDerivation::getRequiredSystemFeatures() const
{
// FIXME: cache this?
StringSet res;
for (auto & i : getStringsAttr("requiredSystemFeatures").value_or(Strings()))
res.insert(i);
@ -125,6 +125,11 @@ bool ParsedDerivation::substitutesAllowed() const
return getBoolAttr("allowSubstitutes", true);
}
bool ParsedDerivation::useUidRange() const
{
return getRequiredSystemFeatures().count("uid-range");
}
static std::regex shVarName("[A-Za-z_][A-Za-z0-9_]*");
std::optional<nlohmann::json> ParsedDerivation::prepareStructuredAttrs(Store & store, const StorePathSet & inputPaths)
@ -144,17 +149,12 @@ std::optional<nlohmann::json> ParsedDerivation::prepareStructuredAttrs(Store & s
auto e = json.find("exportReferencesGraph");
if (e != json.end() && e->is_object()) {
for (auto i = e->begin(); i != e->end(); ++i) {
std::ostringstream str;
{
JSONPlaceholder jsonRoot(str, true);
StorePathSet storePaths;
for (auto & p : *i)
storePaths.insert(store.parseStorePath(p.get<std::string>()));
store.pathInfoToJSON(jsonRoot,
json[i.key()] = store.pathInfoToJSON(
store.exportReferences(storePaths, inputPaths), false, true);
}
json[i.key()] = nlohmann::json::parse(str.str()); // urgh
}
}
return json;

View file

@ -38,6 +38,8 @@ public:
bool substitutesAllowed() const;
bool useUidRange() const;
std::optional<nlohmann::json> prepareStructuredAttrs(Store & store, const StorePathSet & inputPaths);
};

View file

@ -67,6 +67,40 @@ void RefScanSink::operator () (std::string_view data)
}
PathRefScanSink::PathRefScanSink(StringSet && hashes, std::map<std::string, StorePath> && backMap)
: RefScanSink(std::move(hashes))
, backMap(std::move(backMap))
{ }
PathRefScanSink PathRefScanSink::fromPaths(const StorePathSet & refs)
{
StringSet hashes;
std::map<std::string, StorePath> backMap;
for (auto & i : refs) {
std::string hashPart(i.hashPart());
auto inserted = backMap.emplace(hashPart, i).second;
assert(inserted);
hashes.insert(hashPart);
}
return PathRefScanSink(std::move(hashes), std::move(backMap));
}
StorePathSet PathRefScanSink::getResultPaths()
{
/* Map the hashes found back to their store paths. */
StorePathSet found;
for (auto & i : getResult()) {
auto j = backMap.find(i);
assert(j != backMap.end());
found.insert(j->second);
}
return found;
}
std::pair<StorePathSet, HashResult> scanForReferences(
const std::string & path,
const StorePathSet & refs)
@ -82,30 +116,13 @@ StorePathSet scanForReferences(
const Path & path,
const StorePathSet & refs)
{
StringSet hashes;
std::map<std::string, StorePath> backMap;
for (auto & i : refs) {
std::string hashPart(i.hashPart());
auto inserted = backMap.emplace(hashPart, i).second;
assert(inserted);
hashes.insert(hashPart);
}
PathRefScanSink refsSink = PathRefScanSink::fromPaths(refs);
TeeSink sink { refsSink, toTee };
/* Look for the hashes in the NAR dump of the path. */
RefScanSink refsSink(std::move(hashes));
TeeSink sink { refsSink, toTee };
dumpPath(path, sink);
/* Map the hashes found back to their store paths. */
StorePathSet found;
for (auto & i : refsSink.getResult()) {
auto j = backMap.find(i);
assert(j != backMap.end());
found.insert(j->second);
}
return found;
return refsSink.getResultPaths();
}

View file

@ -27,6 +27,19 @@ public:
void operator () (std::string_view data) override;
};
class PathRefScanSink : public RefScanSink
{
std::map<std::string, StorePath> backMap;
PathRefScanSink(StringSet && hashes, std::map<std::string, StorePath> && backMap);
public:
static PathRefScanSink fromPaths(const StorePathSet & refs);
StorePathSet getResultPaths();
};
struct RewritingSink : Sink
{
std::string from, to, prev;

View file

@ -1,6 +1,6 @@
#include <nlohmann/json.hpp>
#include "remote-fs-accessor.hh"
#include "nar-accessor.hh"
#include "json.hh"
#include <sys/types.h>
#include <sys/stat.h>
@ -38,10 +38,8 @@ ref<FSAccessor> RemoteFSAccessor::addToCache(std::string_view hashPart, std::str
if (cacheDir != "") {
try {
std::ostringstream str;
JSONPlaceholder jsonRoot(str);
listNar(jsonRoot, narAccessor, "", true);
writeFile(makeCacheFile(hashPart, "ls"), str.str());
nlohmann::json j = listNar(narAccessor, "", true);
writeFile(makeCacheFile(hashPart, "ls"), j.dump());
} catch (...) {
ignoreException();
}

View file

@ -67,7 +67,7 @@ std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(const std::string
if (fakeSSH) {
args = { "bash", "-c" };
} else {
args = { "ssh", host.c_str(), "-x", "-a" };
args = { "ssh", host.c_str(), "-x" };
addCommonSSHOpts(args);
if (socketPath != "")
args.insert(args.end(), {"-S", socketPath});

View file

@ -6,14 +6,16 @@
#include "util.hh"
#include "nar-info-disk-cache.hh"
#include "thread-pool.hh"
#include "json.hh"
#include "url.hh"
#include "archive.hh"
#include "callback.hh"
#include "remote-store.hh"
#include <nlohmann/json.hpp>
#include <regex>
using json = nlohmann::json;
namespace nix {
@ -838,56 +840,53 @@ StorePathSet Store::exportReferences(const StorePathSet & storePaths, const Stor
return paths;
}
void Store::pathInfoToJSON(JSONPlaceholder & jsonOut, const StorePathSet & storePaths,
json Store::pathInfoToJSON(const StorePathSet & storePaths,
bool includeImpureInfo, bool showClosureSize,
Base hashBase,
AllowInvalidFlag allowInvalid)
{
auto jsonList = jsonOut.list();
json::array_t jsonList = json::array();
for (auto & storePath : storePaths) {
auto jsonPath = jsonList.object();
auto& jsonPath = jsonList.emplace_back(json::object());
try {
auto info = queryPathInfo(storePath);
jsonPath.attr("path", printStorePath(info->path));
jsonPath
.attr("narHash", info->narHash.to_string(hashBase, true))
.attr("narSize", info->narSize);
jsonPath["path"] = printStorePath(info->path);
jsonPath["narHash"] = info->narHash.to_string(hashBase, true);
jsonPath["narSize"] = info->narSize;
{
auto jsonRefs = jsonPath.list("references");
auto& jsonRefs = (jsonPath["references"] = json::array());
for (auto & ref : info->references)
jsonRefs.elem(printStorePath(ref));
jsonRefs.emplace_back(printStorePath(ref));
}
if (info->ca)
jsonPath.attr("ca", renderContentAddress(info->ca));
jsonPath["ca"] = renderContentAddress(info->ca);
std::pair<uint64_t, uint64_t> closureSizes;
if (showClosureSize) {
closureSizes = getClosureSize(info->path);
jsonPath.attr("closureSize", closureSizes.first);
jsonPath["closureSize"] = closureSizes.first;
}
if (includeImpureInfo) {
if (info->deriver)
jsonPath.attr("deriver", printStorePath(*info->deriver));
jsonPath["deriver"] = printStorePath(*info->deriver);
if (info->registrationTime)
jsonPath.attr("registrationTime", info->registrationTime);
jsonPath["registrationTime"] = info->registrationTime;
if (info->ultimate)
jsonPath.attr("ultimate", info->ultimate);
jsonPath["ultimate"] = info->ultimate;
if (!info->sigs.empty()) {
auto jsonSigs = jsonPath.list("signatures");
for (auto & sig : info->sigs)
jsonSigs.elem(sig);
jsonPath["signatures"].push_back(sig);
}
auto narInfo = std::dynamic_pointer_cast<const NarInfo>(
@ -895,21 +894,22 @@ void Store::pathInfoToJSON(JSONPlaceholder & jsonOut, const StorePathSet & store
if (narInfo) {
if (!narInfo->url.empty())
jsonPath.attr("url", narInfo->url);
jsonPath["url"] = narInfo->url;
if (narInfo->fileHash)
jsonPath.attr("downloadHash", narInfo->fileHash->to_string(hashBase, true));
jsonPath["downloadHash"] = narInfo->fileHash->to_string(hashBase, true);
if (narInfo->fileSize)
jsonPath.attr("downloadSize", narInfo->fileSize);
jsonPath["downloadSize"] = narInfo->fileSize;
if (showClosureSize)
jsonPath.attr("closureDownloadSize", closureSizes.second);
jsonPath["closureDownloadSize"] = closureSizes.second;
}
}
} catch (InvalidPath &) {
jsonPath.attr("path", printStorePath(storePath));
jsonPath.attr("valid", false);
jsonPath["path"] = printStorePath(storePath);
jsonPath["valid"] = false;
}
}
return jsonList;
}

View file

@ -14,6 +14,7 @@
#include "path-info.hh"
#include "repair-flag.hh"
#include <nlohmann/json_fwd.hpp>
#include <atomic>
#include <limits>
#include <map>
@ -68,7 +69,6 @@ struct Derivation;
class FSAccessor;
class NarInfoDiskCache;
class Store;
class JSONPlaceholder;
enum CheckSigsFlag : bool { NoCheckSigs = false, CheckSigs = true };
@ -512,7 +512,7 @@ public:
variable elements such as the registration time are
included. If showClosureSize is true, the closure size of
each path is included. */
void pathInfoToJSON(JSONPlaceholder & jsonOut, const StorePathSet & storePaths,
nlohmann::json pathInfoToJSON(const StorePathSet & storePaths,
bool includeImpureInfo, bool showClosureSize,
Base hashBase = Base32,
AllowInvalidFlag allowInvalid = DisallowInvalid);
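Call sites now dump the returned value directly; see the `CmdPathInfo` hunk below. A minimal hypothetical sketch, assuming this header and `<iostream>` are included:

void printPathInfo(Store & store, const StorePathSet & paths)
{
    // Default hash base and invalid-path handling are taken from the declaration above.
    auto j = store.pathInfoToJSON(paths,
        /* includeImpureInfo */ true,
        /* showClosureSize */ false);
    std::cout << j.dump() << std::endl;
}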

View file

@ -14,6 +14,8 @@ std::map<ExperimentalFeature, std::string> stringifiedXpFeatures = {
{ Xp::NoUrlLiterals, "no-url-literals" },
{ Xp::FetchClosure, "fetch-closure" },
{ Xp::ReplFlake, "repl-flake" },
{ Xp::AutoAllocateUids, "auto-allocate-uids" },
{ Xp::Cgroups, "cgroups" },
};
const std::optional<ExperimentalFeature> parseExperimentalFeature(const std::string_view & name)

View file

@ -23,6 +23,8 @@ enum struct ExperimentalFeature
NoUrlLiterals,
FetchClosure,
ReplFlake,
AutoAllocateUids,
Cgroups,
};
/**

View file

@ -1,5 +1,6 @@
#include <sys/time.h>
#include <filesystem>
#include <atomic>
#include "finally.hh"
#include "util.hh"
@ -10,7 +11,7 @@ namespace fs = std::filesystem;
namespace nix {
static Path tempName(Path tmpRoot, const Path & prefix, bool includePid,
int & counter)
std::atomic<unsigned int> & counter)
{
tmpRoot = canonPath(tmpRoot.empty() ? getEnv("TMPDIR").value_or("/tmp") : tmpRoot, true);
if (includePid)
@ -22,9 +23,9 @@ static Path tempName(Path tmpRoot, const Path & prefix, bool includePid,
Path createTempDir(const Path & tmpRoot, const Path & prefix,
bool includePid, bool useGlobalCounter, mode_t mode)
{
static int globalCounter = 0;
int localCounter = 0;
int & counter(useGlobalCounter ? globalCounter : localCounter);
static std::atomic<unsigned int> globalCounter = 0;
std::atomic<unsigned int> localCounter = 0;
auto & counter(useGlobalCounter ? globalCounter : localCounter);
while (1) {
checkInterrupt();

View file

@ -1,203 +0,0 @@
#include "json.hh"
#include <iomanip>
#include <cstdint>
#include <cstring>
namespace nix {
template<>
void toJSON<std::string_view>(std::ostream & str, const std::string_view & s)
{
constexpr size_t BUF_SIZE = 4096;
char buf[BUF_SIZE + 7]; // BUF_SIZE + largest single sequence of puts
size_t bufPos = 0;
const auto flush = [&] {
str.write(buf, bufPos);
bufPos = 0;
};
const auto put = [&] (char c) {
buf[bufPos++] = c;
};
put('"');
for (auto i = s.begin(); i != s.end(); i++) {
if (bufPos >= BUF_SIZE) flush();
if (*i == '\"' || *i == '\\') { put('\\'); put(*i); }
else if (*i == '\n') { put('\\'); put('n'); }
else if (*i == '\r') { put('\\'); put('r'); }
else if (*i == '\t') { put('\\'); put('t'); }
else if (*i >= 0 && *i < 32) {
const char hex[17] = "0123456789abcdef";
put('\\');
put('u');
put(hex[(uint16_t(*i) >> 12) & 0xf]);
put(hex[(uint16_t(*i) >> 8) & 0xf]);
put(hex[(uint16_t(*i) >> 4) & 0xf]);
put(hex[(uint16_t(*i) >> 0) & 0xf]);
}
else put(*i);
}
put('"');
flush();
}
void toJSON(std::ostream & str, const char * s)
{
if (!s) str << "null"; else toJSON(str, std::string_view(s));
}
template<> void toJSON<int>(std::ostream & str, const int & n) { str << n; }
template<> void toJSON<unsigned int>(std::ostream & str, const unsigned int & n) { str << n; }
template<> void toJSON<long>(std::ostream & str, const long & n) { str << n; }
template<> void toJSON<unsigned long>(std::ostream & str, const unsigned long & n) { str << n; }
template<> void toJSON<long long>(std::ostream & str, const long long & n) { str << n; }
template<> void toJSON<unsigned long long>(std::ostream & str, const unsigned long long & n) { str << n; }
template<> void toJSON<float>(std::ostream & str, const float & n) { str << n; }
template<> void toJSON<double>(std::ostream & str, const double & n) { str << n; }
template<> void toJSON<std::string>(std::ostream & str, const std::string & s) { toJSON(str, (std::string_view) s); }
template<> void toJSON<bool>(std::ostream & str, const bool & b)
{
str << (b ? "true" : "false");
}
template<> void toJSON<std::nullptr_t>(std::ostream & str, const std::nullptr_t & b)
{
str << "null";
}
JSONWriter::JSONWriter(std::ostream & str, bool indent)
: state(new JSONState(str, indent))
{
state->stack++;
}
JSONWriter::JSONWriter(JSONState * state)
: state(state)
{
state->stack++;
}
JSONWriter::~JSONWriter()
{
if (state) {
assertActive();
state->stack--;
if (state->stack == 0) delete state;
}
}
void JSONWriter::comma()
{
assertActive();
if (first) {
first = false;
} else {
state->str << ',';
}
if (state->indent) indent();
}
void JSONWriter::indent()
{
state->str << '\n' << std::string(state->depth * 2, ' ');
}
void JSONList::open()
{
state->depth++;
state->str << '[';
}
JSONList::~JSONList()
{
state->depth--;
if (state->indent && !first) indent();
state->str << "]";
}
JSONList JSONList::list()
{
comma();
return JSONList(state);
}
JSONObject JSONList::object()
{
comma();
return JSONObject(state);
}
JSONPlaceholder JSONList::placeholder()
{
comma();
return JSONPlaceholder(state);
}
void JSONObject::open()
{
state->depth++;
state->str << '{';
}
JSONObject::~JSONObject()
{
if (state) {
state->depth--;
if (state->indent && !first) indent();
state->str << "}";
}
}
void JSONObject::attr(std::string_view s)
{
comma();
toJSON(state->str, s);
state->str << ':';
if (state->indent) state->str << ' ';
}
JSONList JSONObject::list(std::string_view name)
{
attr(name);
return JSONList(state);
}
JSONObject JSONObject::object(std::string_view name)
{
attr(name);
return JSONObject(state);
}
JSONPlaceholder JSONObject::placeholder(std::string_view name)
{
attr(name);
return JSONPlaceholder(state);
}
JSONList JSONPlaceholder::list()
{
assertValid();
first = false;
return JSONList(state);
}
JSONObject JSONPlaceholder::object()
{
assertValid();
first = false;
return JSONObject(state);
}
JSONPlaceholder::~JSONPlaceholder()
{
if (first) {
assert(std::uncaught_exceptions());
if (state->stack != 0)
write(nullptr);
}
}
}

View file

@ -1,185 +0,0 @@
#pragma once
#include <iostream>
#include <vector>
#include <cassert>
namespace nix {
void toJSON(std::ostream & str, const char * s);
template<typename T>
void toJSON(std::ostream & str, const T & n);
class JSONWriter
{
protected:
struct JSONState
{
std::ostream & str;
bool indent;
size_t depth = 0;
size_t stack = 0;
JSONState(std::ostream & str, bool indent) : str(str), indent(indent) { }
~JSONState()
{
assert(stack == 0);
}
};
JSONState * state;
bool first = true;
JSONWriter(std::ostream & str, bool indent);
JSONWriter(JSONState * state);
~JSONWriter();
void assertActive()
{
assert(state->stack != 0);
}
void comma();
void indent();
};
class JSONObject;
class JSONPlaceholder;
class JSONList : JSONWriter
{
private:
friend class JSONObject;
friend class JSONPlaceholder;
void open();
JSONList(JSONState * state)
: JSONWriter(state)
{
open();
}
public:
JSONList(std::ostream & str, bool indent = false)
: JSONWriter(str, indent)
{
open();
}
~JSONList();
template<typename T>
JSONList & elem(const T & v)
{
comma();
toJSON(state->str, v);
return *this;
}
JSONList list();
JSONObject object();
JSONPlaceholder placeholder();
};
class JSONObject : JSONWriter
{
private:
friend class JSONList;
friend class JSONPlaceholder;
void open();
JSONObject(JSONState * state)
: JSONWriter(state)
{
open();
}
void attr(std::string_view s);
public:
JSONObject(std::ostream & str, bool indent = false)
: JSONWriter(str, indent)
{
open();
}
JSONObject(const JSONObject & obj) = delete;
JSONObject(JSONObject && obj)
: JSONWriter(obj.state)
{
obj.state = 0;
}
~JSONObject();
template<typename T>
JSONObject & attr(std::string_view name, const T & v)
{
attr(name);
toJSON(state->str, v);
return *this;
}
JSONList list(std::string_view name);
JSONObject object(std::string_view name);
JSONPlaceholder placeholder(std::string_view name);
};
class JSONPlaceholder : JSONWriter
{
private:
friend class JSONList;
friend class JSONObject;
JSONPlaceholder(JSONState * state)
: JSONWriter(state)
{
}
void assertValid()
{
assertActive();
assert(first);
}
public:
JSONPlaceholder(std::ostream & str, bool indent = false)
: JSONWriter(str, indent)
{
}
~JSONPlaceholder();
template<typename T>
void write(const T & v)
{
assertValid();
first = false;
toJSON(state->str, v);
}
JSONList list();
JSONObject object();
};
}
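The removed writer was stream-based: objects and lists were written out as soon as they were opened. Every call site in this change follows the same replacement pattern: build an `nlohmann::json` value and serialize it once at the end. A self-contained before/after sketch (the attribute names are made up):

// Old style (removed):
//   JSONObject obj(std::cout);
//   obj.attr("name", "hello");
//   auto list = obj.list("outputs");
//   list.elem("out");
//
// New style:
#include <iostream>
#include <nlohmann/json.hpp>

int main()
{
    nlohmann::json obj = nlohmann::json::object();
    obj["name"] = "hello";
    obj["outputs"] = nlohmann::json::array({"out"});
    std::cout << obj.dump() << std::endl; // prints {"name":"hello","outputs":["out"]}
}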

View file

@ -77,9 +77,7 @@ TarArchive::~TarArchive()
static void extract_archive(TarArchive & archive, const Path & destDir)
{
int flags = ARCHIVE_EXTRACT_FFLAGS
| ARCHIVE_EXTRACT_PERM
| ARCHIVE_EXTRACT_TIME
int flags = ARCHIVE_EXTRACT_TIME
| ARCHIVE_EXTRACT_SECURE_SYMLINKS
| ARCHIVE_EXTRACT_SECURE_NODOTDOT;
@ -98,6 +96,10 @@ static void extract_archive(TarArchive & archive, const Path & destDir)
archive_entry_copy_pathname(entry,
(destDir + "/" + name).c_str());
// sources can and do contain dirs with no rx bits
if (archive_entry_filetype(entry) == AE_IFDIR && (archive_entry_mode(entry) & 0500) != 0500)
archive_entry_set_mode(entry, archive_entry_mode(entry) | 0500);
// Patch hardlink path
const char *original_hardlink = archive_entry_hardlink(entry);
if (original_hardlink) {

View file

@ -1,193 +0,0 @@
#include "json.hh"
#include <gtest/gtest.h>
#include <sstream>
namespace nix {
/* ----------------------------------------------------------------------------
* toJSON
* --------------------------------------------------------------------------*/
TEST(toJSON, quotesCharPtr) {
const char* input = "test";
std::stringstream out;
toJSON(out, input);
ASSERT_EQ(out.str(), "\"test\"");
}
TEST(toJSON, quotesStdString) {
std::string input = "test";
std::stringstream out;
toJSON(out, input);
ASSERT_EQ(out.str(), "\"test\"");
}
TEST(toJSON, convertsNullptrtoNull) {
auto input = nullptr;
std::stringstream out;
toJSON(out, input);
ASSERT_EQ(out.str(), "null");
}
TEST(toJSON, convertsNullToNull) {
const char* input = 0;
std::stringstream out;
toJSON(out, input);
ASSERT_EQ(out.str(), "null");
}
TEST(toJSON, convertsFloat) {
auto input = 1.024f;
std::stringstream out;
toJSON(out, input);
ASSERT_EQ(out.str(), "1.024");
}
TEST(toJSON, convertsDouble) {
const double input = 1.024;
std::stringstream out;
toJSON(out, input);
ASSERT_EQ(out.str(), "1.024");
}
TEST(toJSON, convertsBool) {
auto input = false;
std::stringstream out;
toJSON(out, input);
ASSERT_EQ(out.str(), "false");
}
TEST(toJSON, quotesTab) {
std::stringstream out;
toJSON(out, "\t");
ASSERT_EQ(out.str(), "\"\\t\"");
}
TEST(toJSON, quotesNewline) {
std::stringstream out;
toJSON(out, "\n");
ASSERT_EQ(out.str(), "\"\\n\"");
}
TEST(toJSON, quotesCreturn) {
std::stringstream out;
toJSON(out, "\r");
ASSERT_EQ(out.str(), "\"\\r\"");
}
TEST(toJSON, quotesCreturnNewLine) {
std::stringstream out;
toJSON(out, "\r\n");
ASSERT_EQ(out.str(), "\"\\r\\n\"");
}
TEST(toJSON, quotesDoublequotes) {
std::stringstream out;
toJSON(out, "\"");
ASSERT_EQ(out.str(), "\"\\\"\"");
}
TEST(toJSON, substringEscape) {
std::stringstream out;
std::string_view s = "foo\t";
toJSON(out, s.substr(3));
ASSERT_EQ(out.str(), "\"\\t\"");
}
/* ----------------------------------------------------------------------------
* JSONObject
* --------------------------------------------------------------------------*/
TEST(JSONObject, emptyObject) {
std::stringstream out;
{
JSONObject t(out);
}
ASSERT_EQ(out.str(), "{}");
}
TEST(JSONObject, objectWithList) {
std::stringstream out;
{
JSONObject t(out);
auto l = t.list("list");
l.elem("element");
}
ASSERT_EQ(out.str(), R"#({"list":["element"]})#");
}
TEST(JSONObject, objectWithListIndent) {
std::stringstream out;
{
JSONObject t(out, true);
auto l = t.list("list");
l.elem("element");
}
ASSERT_EQ(out.str(),
R"#({
"list": [
"element"
]
})#");
}
TEST(JSONObject, objectWithPlaceholderAndList) {
std::stringstream out;
{
JSONObject t(out);
auto l = t.placeholder("list");
l.list().elem("element");
}
ASSERT_EQ(out.str(), R"#({"list":["element"]})#");
}
TEST(JSONObject, objectWithPlaceholderAndObject) {
std::stringstream out;
{
JSONObject t(out);
auto l = t.placeholder("object");
l.object().attr("key", "value");
}
ASSERT_EQ(out.str(), R"#({"object":{"key":"value"}})#");
}
/* ----------------------------------------------------------------------------
* JSONList
* --------------------------------------------------------------------------*/
TEST(JSONList, empty) {
std::stringstream out;
{
JSONList l(out);
}
ASSERT_EQ(out.str(), R"#([])#");
}
TEST(JSONList, withElements) {
std::stringstream out;
{
JSONList l(out);
l.elem("one");
l.object();
l.placeholder().write("three");
}
ASSERT_EQ(out.str(), R"#(["one",{},"three"])#");
}
}

View file

@ -12,7 +12,6 @@
#include "local-fs-store.hh"
#include "user-env.hh"
#include "util.hh"
#include "json.hh"
#include "value-to-json.hh"
#include "xml-writer.hh"
#include "legacy.hh"
@ -26,6 +25,7 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <nlohmann/json.hpp>
using namespace nix;
using std::cout;
@ -911,43 +911,47 @@ static VersionDiff compareVersionAgainstSet(
static void queryJSON(Globals & globals, std::vector<DrvInfo> & elems, bool printOutPath, bool printMeta)
{
JSONObject topObj(cout, true);
using nlohmann::json;
json topObj = json::object();
for (auto & i : elems) {
try {
if (i.hasFailed()) continue;
JSONObject pkgObj = topObj.object(i.attrPath);
auto drvName = DrvName(i.queryName());
pkgObj.attr("name", drvName.fullName);
pkgObj.attr("pname", drvName.name);
pkgObj.attr("version", drvName.version);
pkgObj.attr("system", i.querySystem());
pkgObj.attr("outputName", i.queryOutputName());
json &pkgObj = topObj[i.attrPath];
pkgObj = {
{"name", drvName.fullName},
{"pname", drvName.name},
{"version", drvName.version},
{"system", i.querySystem()},
{"outputName", i.queryOutputName()},
};
{
DrvInfo::Outputs outputs = i.queryOutputs(printOutPath);
JSONObject outputObj = pkgObj.object("outputs");
json &outputObj = pkgObj["outputs"];
outputObj = json::object();
for (auto & j : outputs) {
if (j.second)
outputObj.attr(j.first, globals.state->store->printStorePath(*j.second));
outputObj[j.first] = globals.state->store->printStorePath(*j.second);
else
outputObj.attr(j.first, nullptr);
outputObj[j.first] = nullptr;
}
}
if (printMeta) {
JSONObject metaObj = pkgObj.object("meta");
json &metaObj = pkgObj["meta"];
metaObj = json::object();
StringSet metaNames = i.queryMetaNames();
for (auto & j : metaNames) {
Value * v = i.queryMeta(j);
if (!v) {
printError("derivation '%s' has invalid meta attribute '%s'", i.queryName(), j);
metaObj.attr(j, nullptr);
metaObj[j] = nullptr;
} else {
auto placeholder = metaObj.placeholder(j);
PathSet context;
printValueAsJSON(*globals.state, true, *v, noPos, placeholder, context);
metaObj[j] = printValueAsJSON(*globals.state, true, *v, noPos, context);
}
}
}
@ -958,6 +962,7 @@ static void queryJSON(Globals & globals, std::vector<DrvInfo> & elems, bool prin
throw;
}
}
std::cout << topObj.dump(2);
}

View file

@ -516,7 +516,7 @@ static void registerValidity(bool reregister, bool hashGiven, bool canonicalise)
if (!store->isValidPath(info->path) || reregister) {
/* !!! races */
if (canonicalise)
canonicalisePathMetaData(store->printStorePath(info->path), -1);
canonicalisePathMetaData(store->printStorePath(info->path), {});
if (!hashGiven) {
HashResult hash = hashPath(htSHA256, store->printStorePath(info->path));
info->narHash = hash.first;

View file

@ -37,11 +37,13 @@ struct InstallableDerivedPath : Installable
* Return the rewrites that are needed to resolve a string whose context is
* included in `dependencies`.
*/
StringPairs resolveRewrites(Store & store, const BuiltPaths dependencies)
StringPairs resolveRewrites(
Store & store,
const std::vector<BuiltPathWithResult> & dependencies)
{
StringPairs res;
for (auto & dep : dependencies)
if (auto drvDep = std::get_if<BuiltPathBuilt>(&dep))
if (auto drvDep = std::get_if<BuiltPathBuilt>(&dep.path))
for (auto & [ outputName, outputPath ] : drvDep->outputs)
res.emplace(
downstreamPlaceholder(store, drvDep->drvPath, outputName),
@ -53,7 +55,10 @@ StringPairs resolveRewrites(Store & store, const BuiltPaths dependencies)
/**
* Resolve the given string assuming the given context.
*/
std::string resolveString(Store & store, const std::string & toResolve, const BuiltPaths dependencies)
std::string resolveString(
Store & store,
const std::string & toResolve,
const std::vector<BuiltPathWithResult> & dependencies)
{
auto rewrites = resolveRewrites(store, dependencies);
return rewriteStrings(toResolve, rewrites);
@ -66,7 +71,9 @@ UnresolvedApp Installable::toApp(EvalState & state)
auto type = cursor->getAttr("type")->getString();
std::string expected = !attrPath.empty() && state.symbols[attrPath[0]] == "apps" ? "app" : "derivation";
std::string expected = !attrPath.empty() &&
(state.symbols[attrPath[0]] == "apps" || state.symbols[attrPath[0]] == "defaultApp")
? "app" : "derivation";
if (type != expected)
throw Error("attribute '%s' should have type '%s'", cursor->getAttrPathStr(), expected);

View file

@ -10,6 +10,37 @@
using namespace nix;
nlohmann::json derivedPathsToJSON(const DerivedPaths & paths, ref<Store> store)
{
auto res = nlohmann::json::array();
for (auto & t : paths) {
std::visit([&res, store](const auto & t) {
res.push_back(t.toJSON(store));
}, t.raw());
}
return res;
}
nlohmann::json builtPathsWithResultToJSON(const std::vector<BuiltPathWithResult> & buildables, ref<Store> store)
{
auto res = nlohmann::json::array();
for (auto & b : buildables) {
std::visit([&](const auto & t) {
auto j = t.toJSON(store);
if (b.result) {
j["startTime"] = b.result->startTime;
j["stopTime"] = b.result->stopTime;
if (b.result->cpuUser)
j["cpuUser"] = ((double) b.result->cpuUser->count()) / 1000000;
if (b.result->cpuSystem)
j["cpuSystem"] = ((double) b.result->cpuSystem->count()) / 1000000;
}
res.push_back(j);
}, b.path.raw());
}
return res;
}
struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile
{
Path outLink = "result";
@ -78,7 +109,7 @@ struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile
Realise::Outputs,
installables, buildMode);
if (json) logger->cout("%s", derivedPathsWithHintsToJSON(buildables, store).dump());
if (json) logger->cout("%s", builtPathsWithResultToJSON(buildables, store).dump());
if (outLink != "")
if (auto store2 = store.dynamic_pointer_cast<LocalFSStore>())
@ -98,7 +129,7 @@ struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile
store2->addPermRoot(output.second, absPath(symlink));
}
},
}, buildable.raw());
}, buildable.path.raw());
}
if (printOutputPaths) {
@ -113,11 +144,14 @@ struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile
std::cout << store->printStorePath(output.second) << std::endl;
}
},
}, buildable.raw());
}, buildable.path.raw());
}
}
updateProfile(buildables);
BuiltPaths buildables2;
for (auto & b : buildables)
buildables2.push_back(b.path);
updateProfile(buildables2);
}
};

View file

@ -11,7 +11,7 @@ R""(
# Description
This command runs the Nix daemon, which is a required component in
multi-user Nix installations. It performs build actions and other
multi-user Nix installations. It runs build tasks and other
operations on the Nix store on behalf of non-root users. Usually you
don't run the daemon directly; instead it's managed by a service
management framework such as `systemd`.

View file

@ -4,10 +4,11 @@
#include "store-api.hh"
#include "eval.hh"
#include "eval-inline.hh"
#include "json.hh"
#include "value-to-json.hh"
#include "progress-bar.hh"
#include <nlohmann/json.hpp>
using namespace nix;
struct CmdEval : MixJSON, InstallableCommand
@ -115,9 +116,7 @@ struct CmdEval : MixJSON, InstallableCommand
}
else if (json) {
JSONPlaceholder jsonOut(std::cout);
printValueAsJSON(*state, true, *v, pos, jsonOut, context, false);
std::cout << std::endl;
std::cout << printValueAsJSON(*state, true, *v, pos, context, false).dump() << std::endl;
}
else {

View file

@ -11,7 +11,6 @@
#include "attr-path.hh"
#include "fetchers.hh"
#include "registry.hh"
#include "json.hh"
#include "eval-cache.hh"
#include "markdown.hh"
@ -21,6 +20,7 @@
using namespace nix;
using namespace nix::flake;
using json = nlohmann::json;
class FlakeCommand : virtual Args, public MixFlakeOptions
{
@ -915,35 +915,44 @@ struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun
{
auto flake = lockFlake();
auto jsonRoot = json ? std::optional<JSONObject>(std::cout) : std::nullopt;
StorePathSet sources;
sources.insert(flake.flake.sourceInfo->storePath);
if (jsonRoot)
jsonRoot->attr("path", store->printStorePath(flake.flake.sourceInfo->storePath));
// FIXME: use graph output, handle cycles.
std::function<void(const Node & node, std::optional<JSONObject> & jsonObj)> traverse;
traverse = [&](const Node & node, std::optional<JSONObject> & jsonObj)
std::function<nlohmann::json(const Node & node)> traverse;
traverse = [&](const Node & node)
{
auto jsonObj2 = jsonObj ? jsonObj->object("inputs") : std::optional<JSONObject>();
nlohmann::json jsonObj2 = json ? json::object() : nlohmann::json(nullptr);
for (auto & [inputName, input] : node.inputs) {
if (auto inputNode = std::get_if<0>(&input)) {
auto jsonObj3 = jsonObj2 ? jsonObj2->object(inputName) : std::optional<JSONObject>();
auto storePath =
dryRun
? (*inputNode)->lockedRef.input.computeStorePath(*store)
: (*inputNode)->lockedRef.input.fetch(store).first.storePath;
if (jsonObj3)
jsonObj3->attr("path", store->printStorePath(storePath));
if (json) {
auto& jsonObj3 = jsonObj2[inputName];
jsonObj3["path"] = store->printStorePath(storePath);
sources.insert(std::move(storePath));
traverse(**inputNode, jsonObj3);
jsonObj3["inputs"] = traverse(**inputNode);
} else {
sources.insert(std::move(storePath));
traverse(**inputNode);
}
}
}
return jsonObj2;
};
traverse(*flake.lockFile.root, jsonRoot);
if (json) {
nlohmann::json jsonRoot = {
{"path", store->printStorePath(flake.flake.sourceInfo->storePath)},
{"inputs", traverse(*flake.lockFile.root)},
};
std::cout << jsonRoot.dump() << std::endl;
} else {
traverse(*flake.lockFile.root);
}
if (!dryRun && !dstUri.empty()) {
ref<Store> dstStore = dstUri.empty() ? openStore() : openStore(dstUri);

View file

@ -18,7 +18,7 @@ nix_CXXFLAGS += -I src/libutil -I src/libstore -I src/libfetchers -I src/libexpr
nix_LIBS = libexpr libmain libfetchers libstore libutil libcmd
nix_LDFLAGS = -pthread $(SODIUM_LIBS) $(EDITLINE_LIBS) $(BOOST_LDFLAGS) -llowdown
nix_LDFLAGS = -pthread $(SODIUM_LIBS) $(EDITLINE_LIBS) $(BOOST_LDFLAGS) $(LOWDOWN_LIBS)
$(foreach name, \
nix-build nix-channel nix-collect-garbage nix-copy-closure nix-daemon nix-env nix-hash nix-instantiate nix-prefetch-url nix-shell nix-store, \

View file

@ -3,7 +3,7 @@
#include "fs-accessor.hh"
#include "nar-accessor.hh"
#include "common-args.hh"
#include "json.hh"
#include <nlohmann/json.hpp>
using namespace nix;
@ -91,10 +91,9 @@ struct MixLs : virtual Args, MixJSON
if (path == "/") path = "";
if (json) {
JSONPlaceholder jsonRoot(std::cout);
if (showDirectory)
throw UsageError("'--directory' is useless with '--json'");
listNar(jsonRoot, accessor, path, recursive);
std::cout << listNar(accessor, path, recursive);
} else
listText(accessor);
}

View file

@ -53,7 +53,6 @@ static bool haveInternet()
}
std::string programPath;
char * * savedArgv;
struct HelpRequested { };
@ -270,7 +269,7 @@ void mainWrapped(int argc, char * * argv)
programPath = argv[0];
auto programName = std::string(baseNameOf(programPath));
if (argc > 0 && std::string_view(argv[0]) == "__build-remote") {
if (argc > 1 && std::string_view(argv[1]) == "__build-remote") {
programName = "build-remote";
argv++; argc--;
}

View file

@ -2,10 +2,13 @@
#include "store-api.hh"
#include "make-content-addressed.hh"
#include "common-args.hh"
#include "json.hh"
#include <nlohmann/json.hpp>
using namespace nix;
using nlohmann::json;
struct CmdMakeContentAddressed : virtual CopyCommand, virtual StorePathsCommand, MixJSON
{
CmdMakeContentAddressed()
@ -25,6 +28,7 @@ struct CmdMakeContentAddressed : virtual CopyCommand, virtual StorePathsCommand,
;
}
using StorePathsCommand::run;
void run(ref<Store> srcStore, StorePaths && storePaths) override
{
auto dstStore = dstUri.empty() ? openStore() : openStore(dstUri);
@ -33,13 +37,15 @@ struct CmdMakeContentAddressed : virtual CopyCommand, virtual StorePathsCommand,
StorePathSet(storePaths.begin(), storePaths.end()));
if (json) {
JSONObject jsonRoot(std::cout);
JSONObject jsonRewrites(jsonRoot.object("rewrites"));
auto jsonRewrites = json::object();
for (auto & path : storePaths) {
auto i = remappings.find(path);
assert(i != remappings.end());
jsonRewrites.attr(srcStore->printStorePath(path), srcStore->printStorePath(i->second));
jsonRewrites[srcStore->printStorePath(path)] = srcStore->printStorePath(i->second);
}
auto json = json::object();
json["rewrites"] = jsonRewrites;
std::cout << json.dump();
} else {
for (auto & path : storePaths) {
auto i = remappings.find(path);

View file

@ -1,12 +1,13 @@
#include "command.hh"
#include "shared.hh"
#include "store-api.hh"
#include "json.hh"
#include "common-args.hh"
#include <algorithm>
#include <array>
#include <nlohmann/json.hpp>
using namespace nix;
struct CmdPathInfo : StorePathsCommand, MixJSON
@ -86,11 +87,10 @@ struct CmdPathInfo : StorePathsCommand, MixJSON
pathLen = std::max(pathLen, store->printStorePath(storePath).size());
if (json) {
JSONPlaceholder jsonRoot(std::cout);
store->pathInfoToJSON(jsonRoot,
std::cout << store->pathInfoToJSON(
// FIXME: preserve order?
StorePathSet(storePaths.begin(), storePaths.end()),
true, showClosureSize, SRI, AllowInvalid);
true, showClosureSize, SRI, AllowInvalid).dump();
}
else {

View file

@ -253,11 +253,11 @@ struct ProfileManifest
static std::map<Installable *, BuiltPaths>
builtPathsPerInstallable(
const std::vector<std::pair<std::shared_ptr<Installable>, BuiltPath>> & builtPaths)
const std::vector<std::pair<std::shared_ptr<Installable>, BuiltPathWithResult>> & builtPaths)
{
std::map<Installable *, BuiltPaths> res;
for (auto & [installable, builtPath] : builtPaths)
res[installable.get()].push_back(builtPath);
res[installable.get()].push_back(builtPath.path);
return res;
}

View file

@ -5,7 +5,6 @@
#include "names.hh"
#include "get-drvs.hh"
#include "common-args.hh"
#include "json.hh"
#include "shared.hh"
#include "eval-cache.hh"
#include "attr-path.hh"
@ -13,8 +12,10 @@
#include <regex>
#include <fstream>
#include <nlohmann/json.hpp>
using namespace nix;
using json = nlohmann::json;
std::string wrap(std::string prefix, std::string s)
{
@ -106,7 +107,8 @@ struct CmdSearch : SourceExprCommand, MixJSON
auto state = getEvalState();
auto jsonOut = json ? std::make_unique<JSONObject>(std::cout) : nullptr;
std::optional<nlohmann::json> jsonOut;
if (json) jsonOut = json::object();
uint64_t results = 0;
@ -173,10 +175,11 @@ struct CmdSearch : SourceExprCommand, MixJSON
{
results++;
if (json) {
auto jsonElem = jsonOut->object(attrPath2);
jsonElem.attr("pname", name.name);
jsonElem.attr("version", name.version);
jsonElem.attr("description", description);
(*jsonOut)[attrPath2] = {
{"pname", name.name},
{"version", name.version},
{"description", description},
};
} else {
auto name2 = hiliteMatches(name.name, nameMatches, ANSI_GREEN, "\e[0;2m");
if (results > 1) logger->cout("");
@ -215,6 +218,10 @@ struct CmdSearch : SourceExprCommand, MixJSON
for (auto & cursor : installable->getCursors(*state))
visit(*cursor, cursor->getAttrPath(), true);
if (json) {
std::cout << jsonOut->dump() << std::endl;
}
if (!json && !results)
throw Error("no results for the given search term(s)!");
}

View file

@ -5,10 +5,11 @@
#include "common-args.hh"
#include "store-api.hh"
#include "archive.hh"
#include "json.hh"
#include "derivations.hh"
#include <nlohmann/json.hpp>
using namespace nix;
using json = nlohmann::json;
struct CmdShowDerivation : InstallablesCommand
{
@ -48,77 +49,63 @@ struct CmdShowDerivation : InstallablesCommand
drvPaths = std::move(closure);
}
{
JSONObject jsonRoot(std::cout, true);
json jsonRoot = json::object();
for (auto & drvPath : drvPaths) {
if (!drvPath.isDerivation()) continue;
auto drvObj(jsonRoot.object(store->printStorePath(drvPath)));
json& drvObj = jsonRoot[store->printStorePath(drvPath)];
auto drv = store->readDerivation(drvPath);
{
auto outputsObj(drvObj.object("outputs"));
json& outputsObj = drvObj["outputs"];
outputsObj = json::object();
for (auto & [_outputName, output] : drv.outputs) {
auto & outputName = _outputName; // work around clang bug
auto outputObj { outputsObj.object(outputName) };
auto& outputObj = outputsObj[outputName];
outputObj = json::object();
std::visit(overloaded {
[&](const DerivationOutput::InputAddressed & doi) {
outputObj.attr("path", store->printStorePath(doi.path));
outputObj["path"] = store->printStorePath(doi.path);
},
[&](const DerivationOutput::CAFixed & dof) {
outputObj.attr("path", store->printStorePath(dof.path(*store, drv.name, outputName)));
outputObj.attr("hashAlgo", dof.hash.printMethodAlgo());
outputObj.attr("hash", dof.hash.hash.to_string(Base16, false));
outputObj["path"] = store->printStorePath(dof.path(*store, drv.name, outputName));
outputObj["hashAlgo"] = dof.hash.printMethodAlgo();
outputObj["hash"] = dof.hash.hash.to_string(Base16, false);
},
[&](const DerivationOutput::CAFloating & dof) {
outputObj.attr("hashAlgo", makeFileIngestionPrefix(dof.method) + printHashType(dof.hashType));
outputObj["hashAlgo"] = makeFileIngestionPrefix(dof.method) + printHashType(dof.hashType);
},
[&](const DerivationOutput::Deferred &) {},
[&](const DerivationOutput::Impure & doi) {
outputObj.attr("hashAlgo", makeFileIngestionPrefix(doi.method) + printHashType(doi.hashType));
outputObj.attr("impure", true);
outputObj["hashAlgo"] = makeFileIngestionPrefix(doi.method) + printHashType(doi.hashType);
outputObj["impure"] = true;
},
}, output.raw());
}
}
{
auto inputsList(drvObj.list("inputSrcs"));
auto& inputsList = drvObj["inputSrcs"];
inputsList = json::array();
for (auto & input : drv.inputSrcs)
inputsList.elem(store->printStorePath(input));
inputsList.emplace_back(store->printStorePath(input));
}
{
auto inputDrvsObj(drvObj.object("inputDrvs"));
for (auto & input : drv.inputDrvs) {
auto inputList(inputDrvsObj.list(store->printStorePath(input.first)));
for (auto & outputId : input.second)
inputList.elem(outputId);
}
auto& inputDrvsObj = drvObj["inputDrvs"];
inputDrvsObj = json::object();
for (auto & input : drv.inputDrvs)
inputDrvsObj[store->printStorePath(input.first)] = input.second;
}
drvObj.attr("system", drv.platform);
drvObj.attr("builder", drv.builder);
{
auto argsList(drvObj.list("args"));
for (auto & arg : drv.args)
argsList.elem(arg);
drvObj["system"] = drv.platform;
drvObj["builder"] = drv.builder;
drvObj["args"] = drv.args;
drvObj["env"] = drv.env;
}
{
auto envObj(drvObj.object("env"));
for (auto & var : drv.env)
envObj.attr(var.first, var.second);
}
}
}
std::cout << "\n";
std::cout << jsonRoot.dump(2) << std::endl;
}
};

View file

@ -83,20 +83,47 @@ struct CmdWhyDepends : SourceExprCommand
{
auto package = parseInstallable(store, _package);
auto packagePath = Installable::toStorePath(getEvalStore(), store, Realise::Outputs, operateOn, package);
/* We don't need to build `dependency`. We try to get the store
* path if it's already known, and if not, then it's not a dependency.
*
* Why? If `package` does depend on `dependency`, then getting the
* store path of `package` above necessitated having the store path
* of `dependency`. The contrapositive is, if the store path of
* `dependency` is not already known at this point (i.e. it's a CA
* derivation which hasn't been built), then `package` did not need it
* to build.
*/
auto dependency = parseInstallable(store, _dependency);
auto dependencyPath = Installable::toStorePath(getEvalStore(), store, Realise::Derivation, operateOn, dependency);
auto dependencyPathHash = dependencyPath.hashPart();
auto derivedDependency = dependency->toDerivedPath();
auto optDependencyPath = std::visit(overloaded {
[](const DerivedPath::Opaque & nodrv) -> std::optional<StorePath> {
return { nodrv.path };
},
[&](const DerivedPath::Built & hasdrv) -> std::optional<StorePath> {
if (hasdrv.outputs.size() != 1) {
throw Error("argument '%s' should evaluate to one store path", dependency->what());
}
auto outputMap = store->queryPartialDerivationOutputMap(hasdrv.drvPath);
auto maybePath = outputMap.find(*hasdrv.outputs.begin());
if (maybePath == outputMap.end()) {
throw Error("unexpected end of iterator");
}
return maybePath->second;
},
}, derivedDependency.raw());
StorePathSet closure;
store->computeFSClosure({packagePath}, closure, false, false);
if (!closure.count(dependencyPath)) {
printError("'%s' does not depend on '%s'",
store->printStorePath(packagePath),
store->printStorePath(dependencyPath));
if (!optDependencyPath.has_value() || !closure.count(*optDependencyPath)) {
printError("'%s' does not depend on '%s'", package->what(), dependency->what());
return;
}
auto dependencyPath = *optDependencyPath;
auto dependencyPathHash = dependencyPath.hashPart();
stopProgressBar(); // FIXME
auto accessor = store->getFSAccessor();

View file

@ -70,3 +70,54 @@ testNormalization () {
}
testNormalization
# https://github.com/NixOS/nix/issues/6572
issue_6572_independent_outputs() {
nix build -f multiple-outputs.nix --json independent --no-link > $TEST_ROOT/independent.json
# Make sure that 'nix build' can build a derivation that depends on both outputs of another derivation.
p=$(nix build -f multiple-outputs.nix use-independent --no-link --print-out-paths)
nix-store --delete "$p" # Clean up for next test
# Make sure that 'nix build' tracks input-outputs correctly when a single output is already present.
nix-store --delete "$(jq -r <$TEST_ROOT/independent.json .[0].outputs.first)"
p=$(nix build -f multiple-outputs.nix use-independent --no-link --print-out-paths)
cmp $p <<EOF
first
second
EOF
nix-store --delete "$p" # Clean up for next test
# Make sure that 'nix build' tracks input-outputs correctly when a single output is already present.
nix-store --delete "$(jq -r <$TEST_ROOT/independent.json .[0].outputs.second)"
p=$(nix build -f multiple-outputs.nix use-independent --no-link --print-out-paths)
cmp $p <<EOF
first
second
EOF
nix-store --delete "$p" # Clean up for next test
}
issue_6572_independent_outputs
# https://github.com/NixOS/nix/issues/6572
issue_6572_dependent_outputs() {
nix build -f multiple-outputs.nix --json a --no-link > $TEST_ROOT/a.json
# Make sure that 'nix build' can build a derivation that depends on both outputs of another derivation.
p=$(nix build -f multiple-outputs.nix use-a --no-link --print-out-paths)
nix-store --delete "$p" # Clean up for next test
# Make sure that 'nix build' tracks input-outputs correctly when a single output is already present.
nix-store --delete "$(jq -r <$TEST_ROOT/a.json .[0].outputs.second)"
p=$(nix build -f multiple-outputs.nix use-a --no-link --print-out-paths)
cmp $p <<EOF
first
second
EOF
nix-store --delete "$p" # Clean up for next test
}
if isDaemonNewer "2.12pre0"; then
issue_6572_dependent_outputs
fi

5
tests/ca/why-depends.sh Normal file
View file

@ -0,0 +1,5 @@
source common.sh
export NIX_TESTS_CA_BY_DEFAULT=1
cd .. && source why-depends.sh

View file

@ -28,6 +28,10 @@ cat <<EOF > bar/flake.nix
};
}
EOF
mkdir -p err
cat <<EOF > err/flake.nix
throw "error"
EOF
# Test the completion of a subcommand
[[ "$(NIX_GET_COMPLETIONS=1 nix buil)" == $'normal\nbuild\t' ]]
@ -60,3 +64,5 @@ NIX_GET_COMPLETIONS=3 nix build --option allow-import-from | grep -- "allow-impo
# Attr path completions
[[ "$(NIX_GET_COMPLETIONS=2 nix eval ./foo\#sam)" == $'attrs\n./foo#sampleOutput\t' ]]
[[ "$(NIX_GET_COMPLETIONS=4 nix eval --file ./foo/flake.nix outp)" == $'attrs\noutputs\t' ]]
[[ "$(NIX_GET_COMPLETIONS=4 nix eval --file ./err/flake.nix outp 2>&1)" == $'attrs' ]]
[[ "$(NIX_GET_COMPLETIONS=2 nix eval ./err\# 2>&1)" == $'attrs' ]]

68
tests/containers.nix Normal file
View file

@ -0,0 +1,68 @@
# Test whether we can run a NixOS container inside a Nix build using systemd-nspawn.
{ nixpkgs, system, overlay }:
with import (nixpkgs + "/nixos/lib/testing-python.nix") {
inherit system;
extraConfigurations = [ { nixpkgs.overlays = [ overlay ]; } ];
};
makeTest ({
name = "containers";
nodes =
{
host =
{ config, lib, pkgs, nodes, ... }:
{ virtualisation.writableStore = true;
virtualisation.diskSize = 2048;
virtualisation.additionalPaths =
[ pkgs.stdenv
(import ./systemd-nspawn.nix { inherit nixpkgs; }).toplevel
];
virtualisation.memorySize = 4096;
nix.binaryCaches = lib.mkForce [ ];
nix.extraOptions =
''
extra-experimental-features = nix-command auto-allocate-uids cgroups
extra-system-features = uid-range
'';
nix.nixPath = [ "nixpkgs=${nixpkgs}" ];
};
};
testScript = { nodes }: ''
start_all()
host.succeed("nix --version >&2")
# Test that 'id' gives the expected result in various configurations.
# Existing UIDs, sandbox.
host.succeed("nix build --no-auto-allocate-uids --sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-1")
host.succeed("[[ $(cat ./result) = 'uid=1000(nixbld) gid=100(nixbld) groups=100(nixbld)' ]]")
# Existing UIDs, no sandbox.
host.succeed("nix build --no-auto-allocate-uids --no-sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-2")
host.succeed("[[ $(cat ./result) = 'uid=30001(nixbld1) gid=30000(nixbld) groups=30000(nixbld)' ]]")
# Auto-allocated UIDs, sandbox.
host.succeed("nix build --auto-allocate-uids --sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-3")
host.succeed("[[ $(cat ./result) = 'uid=1000(nixbld) gid=100(nixbld) groups=100(nixbld)' ]]")
# Auto-allocated UIDs, no sandbox.
host.succeed("nix build --auto-allocate-uids --no-sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-4")
host.succeed("[[ $(cat ./result) = 'uid=872415232 gid=30000(nixbld) groups=30000(nixbld)' ]]")
# Auto-allocated UIDs, UID range, sandbox.
host.succeed("nix build --auto-allocate-uids --sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-5 --arg uidRange true")
host.succeed("[[ $(cat ./result) = 'uid=0(root) gid=0(root) groups=0(root)' ]]")
# Auto-allocated UIDs, UID range, no sandbox.
host.fail("nix build --auto-allocate-uids --no-sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-6 --arg uidRange true")
# Run systemd-nspawn in a Nix build.
host.succeed("nix build --auto-allocate-uids --sandbox -L --offline --impure --file ${./systemd-nspawn.nix} --argstr nixpkgs ${nixpkgs}")
host.succeed("[[ $(cat ./result/msg) = 'Hello World' ]]")
'';
})

View file

@ -1,7 +1,6 @@
source common.sh
enableFeatures "fetch-closure"
needLocalStore "'--no-require-sigs' can't be used with the daemon"
clearStore
clearCacheCache
@ -28,6 +27,8 @@ clearStore
[ ! -e $nonCaPath ]
[ -e $caPath ]
if [[ "$NIX_REMOTE" != "daemon" ]]; then
# In impure mode, we can use non-CA paths.
[[ $(nix eval --raw --no-require-sigs --impure --expr "
builtins.fetchClosure {
@ -38,6 +39,8 @@ clearStore
[ -e $nonCaPath ]
fi
# 'toPath' set to empty string should fail but print the expected path.
nix eval -v --json --expr "
builtins.fetchClosure {
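
For context, the builtin being exercised takes a substituter URL and the path to fetch; 'toPath' is only needed when rewriting an input-addressed path into a content-addressed one. A standalone sketch of the impure variant tested above, with a placeholder cache URL and a placeholder store path:

    nix eval --extra-experimental-features fetch-closure \
      --impure --raw --no-require-sigs --expr '
        builtins.fetchClosure {
          fromStore = "file:///tmp/binary-cache";                       # placeholder substituter
          fromPath = /nix/store/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-foo;   # placeholder path
        }
      '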

View file

@ -24,12 +24,14 @@ touch $repo/.gitignore
git -C $repo add hello .gitignore
git -C $repo commit -m 'Bla1'
rev1=$(git -C $repo rev-parse HEAD)
git -C $repo tag -a tag1 -m tag1
echo world > $repo/hello
git -C $repo commit -m 'Bla2' -a
git -C $repo worktree add $TEST_ROOT/worktree
echo hello >> $TEST_ROOT/worktree/hello
rev2=$(git -C $repo rev-parse HEAD)
git -C $repo tag -a tag2 -m tag2
# Fetch a worktree
unset _NIX_FORCE_HTTP
@ -217,6 +219,16 @@ rev4_nix=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = \"file://$
path9=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = \"file://$repo\"; ref = \"HEAD\"; name = \"foo\"; }).outPath")
[[ $path9 =~ -foo$ ]]
# Specifying a ref without a rev shouldn't pick a cached rev for a different ref
export _NIX_FORCE_HTTP=1
rev_tag1_nix=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = \"file://$repo\"; ref = \"refs/tags/tag1\"; }).rev")
rev_tag1=$(git -C $repo rev-parse refs/tags/tag1)
[[ $rev_tag1_nix = $rev_tag1 ]]
rev_tag2_nix=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = \"file://$repo\"; ref = \"refs/tags/tag2\"; }).rev")
rev_tag2=$(git -C $repo rev-parse refs/tags/tag2)
[[ $rev_tag2_nix = $rev_tag2 ]]
unset _NIX_FORCE_HTTP
# should fail if there is no repo
rm -rf $repo/.git
(! nix eval --impure --raw --expr "(builtins.fetchGit \"file://$repo\").outPath")

View file

@ -7,7 +7,7 @@ with import (nixpkgs + "/nixos/lib/testing-python.nix") {
let
# Generate a fake root CA and a fake api.github.com / channels.nixos.org certificate.
# Generate a fake root CA and a fake api.github.com / github.com / channels.nixos.org certificate.
cert = pkgs.runCommand "cert" { buildInputs = [ pkgs.openssl ]; }
''
mkdir -p $out
@ -18,7 +18,7 @@ let
openssl req -newkey rsa:2048 -nodes -keyout $out/server.key \
-subj "/C=CN/ST=Denial/L=Springfield/O=Dis/CN=github.com" -out server.csr
openssl x509 -req -extfile <(printf "subjectAltName=DNS:api.github.com,DNS:channels.nixos.org") \
openssl x509 -req -extfile <(printf "subjectAltName=DNS:api.github.com,DNS:github.com,DNS:channels.nixos.org") \
-days 36500 -in server.csr -CA $out/ca.crt -CAkey ca.key -CAcreateserial -out $out/server.crt
'';
@ -37,6 +37,17 @@ let
"owner": "NixOS",
"repo": "nixpkgs"
}
},
{
"from": {
"type": "indirect",
"id": "private-flake"
},
"to": {
"type": "github",
"owner": "fancy-enterprise",
"repo": "private-flake"
}
}
],
"version": 2
@ -45,20 +56,40 @@ let
destination = "/flake-registry.json";
};
api = pkgs.runCommand "nixpkgs-flake" {}
private-flake-rev = "9f1dd0df5b54a7dc75b618034482ed42ce34383d";
private-flake-api = pkgs.runCommand "private-flake" {}
''
mkdir -p $out/tarball
mkdir -p $out/{commits,tarball}
# Setup https://docs.github.com/en/rest/commits/commits#get-a-commit
echo '{"sha": "${private-flake-rev}"}' > $out/commits/HEAD
# Setup tarball download via API
dir=private-flake
mkdir $dir
echo '{ outputs = {...}: {}; }' > $dir/flake.nix
tar cfz $out/tarball/${private-flake-rev} $dir --hard-dereference
'';
nixpkgs-api = pkgs.runCommand "nixpkgs-flake" {}
''
mkdir -p $out/commits
# Setup https://docs.github.com/en/rest/commits/commits#get-a-commit
echo '{"sha": "${nixpkgs.rev}"}' > $out/commits/HEAD
'';
archive = pkgs.runCommand "nixpkgs-flake" {}
''
mkdir -p $out/archive
dir=NixOS-nixpkgs-${nixpkgs.shortRev}
cp -prd ${nixpkgs} $dir
# Set the correct timestamp in the tarball.
find $dir -print0 | xargs -0 touch -t ${builtins.substring 0 12 nixpkgs.lastModifiedDate}.${builtins.substring 12 2 nixpkgs.lastModifiedDate} --
tar cfz $out/tarball/${nixpkgs.rev} $dir --hard-dereference
mkdir -p $out/commits
echo '{"sha": "${nixpkgs.rev}"}' > $out/commits/HEAD
tar cfz $out/archive/${nixpkgs.rev}.tar.gz $dir --hard-dereference
'';
in
makeTest (
@ -93,7 +124,20 @@ makeTest (
sslServerCert = "${cert}/server.crt";
servedDirs =
[ { urlPath = "/repos/NixOS/nixpkgs";
dir = api;
dir = nixpkgs-api;
}
{ urlPath = "/repos/fancy-enterprise/private-flake";
dir = private-flake-api;
}
];
};
services.httpd.virtualHosts."github.com" =
{ forceSSL = true;
sslServerKey = "${cert}/server.key";
sslServerCert = "${cert}/server.crt";
servedDirs =
[ { urlPath = "/NixOS/nixpkgs";
dir = archive;
}
];
};
@ -107,9 +151,8 @@ makeTest (
virtualisation.memorySize = 4096;
nix.binaryCaches = lib.mkForce [ ];
nix.extraOptions = "experimental-features = nix-command flakes";
environment.systemPackages = [ pkgs.jq ];
networking.hosts.${(builtins.head nodes.github.config.networking.interfaces.eth1.ipv4.addresses).address} =
[ "channels.nixos.org" "api.github.com" ];
[ "channels.nixos.org" "api.github.com" "github.com" ];
security.pki.certificateFiles = [ "${cert}/ca.crt" ];
};
};
@ -121,22 +164,39 @@ makeTest (
start_all()
def cat_log():
github.succeed("cat /var/log/httpd/*.log >&2")
github.wait_for_unit("httpd.service")
client.succeed("curl -v https://api.github.com/ >&2")
client.succeed("nix registry list | grep nixpkgs")
client.succeed("curl -v https://github.com/ >&2")
out = client.succeed("nix registry list")
print(out)
assert "github:NixOS/nixpkgs" in out, "nixpkgs flake not found"
assert "github:fancy-enterprise/private-flake" in out, "private flake not found"
cat_log()
rev = client.succeed("nix flake info nixpkgs --json | jq -r .revision")
assert rev.strip() == "${nixpkgs.rev}", "revision mismatch"
# If no github access token is provided, nix should use the public archive url...
out = client.succeed("nix flake metadata nixpkgs --json")
print(out)
info = json.loads(out)
assert info["revision"] == "${nixpkgs.rev}", f"revision mismatch: {info['revision']} != ${nixpkgs.rev}"
cat_log()
# ... otherwise it should use the API
out = client.succeed("nix flake metadata private-flake --json --access-tokens github.com=ghp_000000000000000000000000000000000000 --tarball-ttl 0")
print(out)
info = json.loads(out)
assert info["revision"] == "${private-flake-rev}", f"revision mismatch: {info['revision']} != ${private-flake-rev}"
cat_log()
client.succeed("nix registry pin nixpkgs")
client.succeed("nix flake info nixpkgs --tarball-ttl 0 >&2")
client.succeed("nix flake metadata nixpkgs --tarball-ttl 0 >&2")
# Shut down the web server. The flake should be cached on the client.
github.succeed("systemctl stop httpd.service")
info = json.loads(client.succeed("nix flake info nixpkgs --json"))
info = json.loads(client.succeed("nix flake metadata nixpkgs --json"))
date = time.strftime("%Y%m%d%H%M%S", time.gmtime(info['lastModified']))
assert date == "${nixpkgs.lastModifiedDate}", "time mismatch"
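
The two code paths asserted above differ only in which URL the github fetcher requests, which is what the virtual hosts earlier in this file stand in for. Roughly, with $rev and $GITHUB_TOKEN as placeholders:

    # Without an access token: the public archive endpoint on github.com.
    curl -L -o nixpkgs.tar.gz "https://github.com/NixOS/nixpkgs/archive/$rev.tar.gz"
    # With an access token: the commits/tarball endpoints on api.github.com.
    curl -L -H "Authorization: token $GITHUB_TOKEN" -o private-flake.tar.gz \
      "https://api.github.com/repos/fancy-enterprise/private-flake/tarball/$rev"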

8
tests/id-test.nix Normal file
View file

@ -0,0 +1,8 @@
{ name, uidRange ? false }:
with import <nixpkgs> {};
runCommand name
{ requiredSystemFeatures = if uidRange then ["uid-range"] else [];
}
"id; id > $out"

View file

@ -2,7 +2,7 @@ source common.sh
requireDaemonNewerThan "2.8pre20220311"
enableFeatures "ca-derivations ca-references impure-derivations"
enableFeatures "ca-derivations impure-derivations"
restartDaemon
set -o pipefail

View file

@ -0,0 +1 @@
true

View file

@ -1 +0,0 @@
Bool(True)

View file

@ -31,6 +31,15 @@ rec {
helloString = "Hello, world!";
};
use-a = mkDerivation {
name = "use-a";
inherit (a) first second;
builder = builtins.toFile "builder.sh"
''
cat $first/file $second/file >$out
'';
};
b = mkDerivation {
defaultOutput = assert a.second.helloString == "Hello, world!"; a;
firstOutput = assert a.outputName == "first"; a.first.first;
@ -87,4 +96,25 @@ rec {
buildCommand = "mkdir $a $b $c";
};
independent = mkDerivation {
name = "multiple-outputs-independent";
outputs = [ "first" "second" ];
builder = builtins.toFile "builder.sh"
''
mkdir $first $second
test -z $all
echo "first" > $first/file
echo "second" > $second/file
'';
};
use-independent = mkDerivation {
name = "use-independent";
inherit (a) first second;
builder = builtins.toFile "builder.sh"
''
cat $first/file $second/file >$out
'';
};
}
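
The new use-a and use-independent attributes are what the multiple-output build tests earlier in this diff drive; a representative invocation:

    nix build -f multiple-outputs.nix use-a --no-link --print-out-paths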

View file

@ -1 +0,0 @@
echo "$input" > $out

Some files were not shown because too many files have changed in this diff