diff --git a/.clang-format b/.clang-format index 73eac7ef6..f5d7fb711 100644 --- a/.clang-format +++ b/.clang-format @@ -15,7 +15,7 @@ SpaceAfterCStyleCast: true SpaceAfterTemplateKeyword: false AccessModifierOffset: -4 AlignAfterOpenBracket: AlwaysBreak -AlignEscapedNewlines: DontAlign +AlignEscapedNewlines: Left ColumnLimit: 120 BreakStringLiterals: false BitFieldColonSpacing: None @@ -30,3 +30,5 @@ BreakBeforeBinaryOperators: NonAssignment AlwaysBreakBeforeMultilineStrings: true IndentPPDirectives: AfterHash PPIndentWidth: 2 +BinPackArguments: false +BinPackParameters: false diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 59db217d9..99ca670e0 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -11,7 +11,16 @@ .github/CODEOWNERS @edolstra # Documentation of built-in functions -src/libexpr/primops.cc @roberth +src/libexpr/primops.cc @roberth @fricklerhandwerk + +# Documentation of settings +src/libexpr/eval-settings.hh @fricklerhandwerk +src/libstore/globals.hh @fricklerhandwerk + +# Documentation +doc/manual @fricklerhandwerk +maintainers/*.md @fricklerhandwerk +src/**/*.md @fricklerhandwerk # Libstore layer /src/libstore @thufschmitt @ericson2314 diff --git a/.github/labeler.yml b/.github/labeler.yml index e036eb3c8..0e6fd3e26 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -13,7 +13,7 @@ "documentation": - changed-files: - - any-glob-to-any-file: "doc/manual/*" + - any-glob-to-any-file: "doc/manual/**/*" - any-glob-to-any-file: "src/nix/**/*.md" "store": @@ -40,4 +40,4 @@ - any-glob-to-any-file: "src/*/tests/**/*" # Functional and integration tests - any-glob-to-any-file: "tests/functional/**/*" - + diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 8f83b913c..dd110de6c 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -21,7 +21,7 @@ jobs: fetch-depth: 0 - name: Create backport PRs # should be kept in sync with `version` - uses: zeebe-io/backport-action@v2.5.0 + uses: zeebe-io/backport-action@v3.0.2 with: # Config README: https://github.com/zeebe-io/backport-action#backport-action github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2b8eac49d..4612b4ef5 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -20,12 +20,12 @@ jobs: - uses: actions/checkout@v4 with: fetch-depth: 0 - - uses: cachix/install-nix-action@v26 + - uses: cachix/install-nix-action@V27 with: # The sandbox would otherwise be disabled by default on Darwin extra_nix_config: "sandbox = true" - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV - - uses: cachix/cachix-action@v14 + - uses: cachix/cachix-action@v15 if: needs.check_secrets.outputs.cachix == 'true' with: name: '${{ env.CACHIX_NAME }}' @@ -62,10 +62,10 @@ jobs: with: fetch-depth: 0 - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV - - uses: cachix/install-nix-action@v26 + - uses: cachix/install-nix-action@V27 with: install_url: https://releases.nixos.org/nix/nix-2.20.3/install - - uses: cachix/cachix-action@v14 + - uses: cachix/cachix-action@v15 with: name: '${{ env.CACHIX_NAME }}' signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}' @@ -84,7 +84,7 @@ jobs: steps: - uses: actions/checkout@v4 - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV - - uses: cachix/install-nix-action@v26 + - uses: cachix/install-nix-action@V27 with: 
install_url: '${{needs.installer.outputs.installerURL}}' install_options: "--tarball-url-prefix https://${{ env.CACHIX_NAME }}.cachix.org/serve" @@ -114,12 +114,12 @@ jobs: - uses: actions/checkout@v4 with: fetch-depth: 0 - - uses: cachix/install-nix-action@v26 + - uses: cachix/install-nix-action@V27 with: install_url: https://releases.nixos.org/nix/nix-2.20.3/install - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV - run: echo NIX_VERSION="$(nix --experimental-features 'nix-command flakes' eval .\#default.version | tr -d \")" >> $GITHUB_ENV - - uses: cachix/cachix-action@v14 + - uses: cachix/cachix-action@v15 if: needs.check_secrets.outputs.cachix == 'true' with: name: '${{ env.CACHIX_NAME }}' diff --git a/.gitignore b/.gitignore index 52aaec23f..28f853715 100644 --- a/.gitignore +++ b/.gitignore @@ -92,7 +92,7 @@ perl/Makefile.config # /tests/functional/ /tests/functional/test-tmp -/tests/functional/common/vars-and-functions.sh +/tests/functional/common/subst-vars.sh /tests/functional/result* /tests/functional/restricted-innocent /tests/functional/shell diff --git a/.shellcheckrc b/.shellcheckrc new file mode 100644 index 000000000..662e2045c --- /dev/null +++ b/.shellcheckrc @@ -0,0 +1,2 @@ +external-sources=true +source-path=SCRIPTDIR diff --git a/CITATION.cff b/CITATION.cff new file mode 100644 index 000000000..0105fb823 --- /dev/null +++ b/CITATION.cff @@ -0,0 +1,42 @@ +cff-version: 1.2.0 +title: Nix +message: >- + If you use this software, please cite it using the + metadata from this file. +type: software +authors: + - given-names: Eelco + family-names: Dolstra + email: edolstra@gmail.com + - name: The Nix contributors + website: 'https://github.com/NixOS/nix' +references: + - title: The Purely Functional Software Deployment Model + authors: + - family-names: Dolstra + given-names: Eelco + year: 2006 + type: thesis + thesis-type: PhD thesis + isbn: 90-393-4130-3 + url: https://dspace.library.uu.nl/handle/1874/7540 + database-provider: Utrecht University Repository + institution: + name: Utrecht University + keywords: + - configuration management + - software deployment + - purely functional + - component-based software engineering +repository-code: 'https://github.com/NixOS/nix' +url: 'https://nixos.org/' +abstract: >- + Nix, a purely functional package manager, is a powerful + package manager for Linux and other Unix systems that + makes package management reliable and reproducible. 
+keywords: + - reproducibility + - open-source + - c++ + - functional +license: LGPL-2.1 diff --git a/Makefile b/Makefile index ea0754fa5..6c96ef5db 100644 --- a/Makefile +++ b/Makefile @@ -98,7 +98,7 @@ ifdef HOST_WINDOWS GLOBAL_LDFLAGS += -Wl,--export-all-symbols endif -GLOBAL_CXXFLAGS += -g -Wall -Wimplicit-fallthrough -include $(buildprefix)config.h -std=c++2a -I src +GLOBAL_CXXFLAGS += -g -Wall -Wdeprecated-copy -Wignored-qualifiers -Wimplicit-fallthrough -include $(buildprefix)config.h -std=c++2a -I src # Include the main lib, causing rules to be defined diff --git a/build/hydra.nix b/build/hydra.nix new file mode 100644 index 000000000..595aad324 --- /dev/null +++ b/build/hydra.nix @@ -0,0 +1,186 @@ +{ inputs +, binaryTarball +, forAllCrossSystems +, forAllSystems +, lib +, linux64BitSystems +, nixpkgsFor +, self +}: +let + inherit (inputs) nixpkgs nixpkgs-regression; + inherit (lib) fileset; + + installScriptFor = tarballs: + nixpkgsFor.x86_64-linux.native.callPackage ../scripts/installer.nix { + inherit tarballs; + }; + + testNixVersions = pkgs: client: daemon: + pkgs.callPackage ../package.nix { + pname = + "nix-tests" + + lib.optionalString + (lib.versionAtLeast daemon.version "2.4pre20211005" && + lib.versionAtLeast client.version "2.4pre20211005") + "-${client.version}-against-${daemon.version}"; + + inherit fileset; + + test-client = client; + test-daemon = daemon; + + doBuild = false; + }; +in +{ + # Binary package for various platforms. + build = forAllSystems (system: self.packages.${system}.nix); + + shellInputs = forAllSystems (system: self.devShells.${system}.default.inputDerivation); + + buildStatic = lib.genAttrs linux64BitSystems (system: self.packages.${system}.nix-static); + + buildCross = forAllCrossSystems (crossSystem: + lib.genAttrs [ "x86_64-linux" ] (system: self.packages.${system}."nix-${crossSystem}")); + + buildNoGc = forAllSystems (system: + self.packages.${system}.nix.override { enableGC = false; } + ); + + buildNoTests = forAllSystems (system: + self.packages.${system}.nix.override { + doCheck = false; + doInstallCheck = false; + installUnitTests = false; + } + ); + + # Toggles some settings for better coverage. Windows needs these + # library combinations, and Debian build Nix with GNU readline too. + buildReadlineNoMarkdown = forAllSystems (system: + self.packages.${system}.nix.override { + enableMarkdown = false; + readlineFlavor = "readline"; + } + ); + + # Perl bindings for various platforms. + perlBindings = forAllSystems (system: nixpkgsFor.${system}.native.nix.perl-bindings); + + # Binary tarball for various platforms, containing a Nix store + # with the closure of 'nix' package, and the second half of + # the installation script. + binaryTarball = forAllSystems (system: binaryTarball nixpkgsFor.${system}.native.nix nixpkgsFor.${system}.native); + + binaryTarballCross = lib.genAttrs [ "x86_64-linux" ] (system: + forAllCrossSystems (crossSystem: + binaryTarball + self.packages.${system}."nix-${crossSystem}" + nixpkgsFor.${system}.cross.${crossSystem})); + + # The first half of the installation script. This is uploaded + # to https://nixos.org/nix/install. It downloads the binary + # tarball for the user's system and calls the second half of the + # installation script. 
+ installerScript = installScriptFor [ + # Native + self.hydraJobs.binaryTarball."x86_64-linux" + self.hydraJobs.binaryTarball."i686-linux" + self.hydraJobs.binaryTarball."aarch64-linux" + self.hydraJobs.binaryTarball."x86_64-darwin" + self.hydraJobs.binaryTarball."aarch64-darwin" + # Cross + self.hydraJobs.binaryTarballCross."x86_64-linux"."armv6l-unknown-linux-gnueabihf" + self.hydraJobs.binaryTarballCross."x86_64-linux"."armv7l-unknown-linux-gnueabihf" + self.hydraJobs.binaryTarballCross."x86_64-linux"."riscv64-unknown-linux-gnu" + ]; + installerScriptForGHA = installScriptFor [ + # Native + self.hydraJobs.binaryTarball."x86_64-linux" + self.hydraJobs.binaryTarball."x86_64-darwin" + # Cross + self.hydraJobs.binaryTarballCross."x86_64-linux"."armv6l-unknown-linux-gnueabihf" + self.hydraJobs.binaryTarballCross."x86_64-linux"."armv7l-unknown-linux-gnueabihf" + self.hydraJobs.binaryTarballCross."x86_64-linux"."riscv64-unknown-linux-gnu" + ]; + + # docker image with Nix inside + dockerImage = lib.genAttrs linux64BitSystems (system: self.packages.${system}.dockerImage); + + # Line coverage analysis. + coverage = nixpkgsFor.x86_64-linux.native.nix.override { + pname = "nix-coverage"; + withCoverageChecks = true; + }; + + # API docs for Nix's unstable internal C++ interfaces. + internal-api-docs = nixpkgsFor.x86_64-linux.native.callPackage ../package.nix { + inherit fileset; + doBuild = false; + enableInternalAPIDocs = true; + }; + + # API docs for Nix's C bindings. + external-api-docs = nixpkgsFor.x86_64-linux.native.callPackage ../package.nix { + inherit fileset; + doBuild = false; + enableExternalAPIDocs = true; + }; + + # System tests. + tests = import ../tests/nixos { inherit lib nixpkgs nixpkgsFor; } // { + + # Make sure that nix-env still produces the exact same result + # on a particular version of Nixpkgs. + evalNixpkgs = + let + inherit (nixpkgsFor.x86_64-linux.native) runCommand nix; + in + runCommand "eval-nixos" { buildInputs = [ nix ]; } + '' + type -p nix-env + # Note: we're filtering out nixos-install-tools because https://github.com/NixOS/nixpkgs/pull/153594#issuecomment-1020530593. + ( + set -x + time nix-env --store dummy:// -f ${nixpkgs-regression} -qaP --drv-path | sort | grep -v nixos-install-tools > packages + [[ $(sha1sum < packages | cut -c1-40) = e01b031fc9785a572a38be6bc473957e3b6faad7 ]] + ) + mkdir $out + ''; + + nixpkgsLibTests = + forAllSystems (system: + import (nixpkgs + "/lib/tests/release.nix") + { + pkgs = nixpkgsFor.${system}.native; + nixVersions = [ self.packages.${system}.nix ]; + } + ); + }; + + metrics.nixpkgs = import "${nixpkgs-regression}/pkgs/top-level/metrics.nix" { + pkgs = nixpkgsFor.x86_64-linux.native; + nixpkgs = nixpkgs-regression; + }; + + installTests = forAllSystems (system: + let pkgs = nixpkgsFor.${system}.native; in + pkgs.runCommand "install-tests" + { + againstSelf = testNixVersions pkgs pkgs.nix pkgs.pkgs.nix; + againstCurrentUnstable = + # FIXME: temporarily disable this on macOS because of #3605. 
+ if system == "x86_64-linux" + then testNixVersions pkgs pkgs.nix pkgs.nixUnstable + else null; + # Disabled because the latest stable version doesn't handle + # `NIX_DAEMON_SOCKET_PATH` which is required for the tests to work + # againstLatestStable = testNixVersions pkgs pkgs.nix pkgs.nixStable; + } "touch $out"); + + installerTests = import ../tests/installer { + binaryTarballs = self.hydraJobs.binaryTarball; + inherit nixpkgsFor; + }; +} diff --git a/configure.ac b/configure.ac index b2a5794b5..90a6d45d5 100644 --- a/configure.ac +++ b/configure.ac @@ -313,7 +313,7 @@ case "$host_os" in ])) if test "x$enable_seccomp_sandboxing" != "xno"; then PKG_CHECK_MODULES([LIBSECCOMP], [libseccomp], - [CXXFLAGS="$LIBSECCOMP_CFLAGS $CXXFLAGS"]) + [CXXFLAGS="$LIBSECCOMP_CFLAGS $CXXFLAGS" CFLAGS="$LIBSECCOMP_CFLAGS $CFLAGS"]) have_seccomp=1 AC_DEFINE([HAVE_SECCOMP], [1], [Whether seccomp is available and should be used for sandboxing.]) AC_COMPILE_IFELSE([ diff --git a/doc/external-api/README.md b/doc/external-api/README.md index 167c02199..8760ac88b 100644 --- a/doc/external-api/README.md +++ b/doc/external-api/README.md @@ -27,7 +27,7 @@ appreciated. The following examples, for simplicity, don't include error handling. See the [Handling errors](@ref errors) section for more information. -# Embedding the Nix Evaluator +# Embedding the Nix Evaluator{#nix_evaluator_example} In this example we programmatically start the Nix language evaluator with a dummy store (that has no store paths and cannot be written to), and evaluate the @@ -46,9 +46,9 @@ Nix expression `builtins.nixVersion`. // NOTE: This example lacks all error handling. Production code must check for // errors, as some return values will be undefined. -void my_get_string_cb(const char * start, unsigned int n, char ** user_data) +void my_get_string_cb(const char * start, unsigned int n, void * user_data) { - *user_data = strdup(start); + *((char **) user_data) = strdup(start); } int main() @@ -63,7 +63,7 @@ int main() nix_value_force(NULL, state, value); char * version; - nix_get_string(NULL, value, my_get_string_cb, version); + nix_get_string(NULL, value, my_get_string_cb, &version); printf("Nix version: %s\n", version); free(version); diff --git a/doc/manual/book.toml b/doc/manual/book.toml index d524dbb13..73fb7e75e 100644 --- a/doc/manual/book.toml +++ b/doc/manual/book.toml @@ -6,8 +6,6 @@ additional-css = ["custom.css"] additional-js = ["redirects.js"] edit-url-template = "https://github.com/NixOS/nix/tree/master/doc/manual/{path}" git-repository-url = "https://github.com/NixOS/nix" -fold.enable = true -fold.level = 1 [preprocessor.anchors] renderers = ["html"] diff --git a/doc/manual/redirects.js b/doc/manual/redirects.js index 25648969d..633932e55 100644 --- a/doc/manual/redirects.js +++ b/doc/manual/redirects.js @@ -285,15 +285,15 @@ const redirects = { "ch-basic-package-mgmt": "package-management/basic-package-mgmt.html", "ssec-binary-cache-substituter": "package-management/binary-cache-substituter.html", "sec-channels": "command-ref/nix-channel.html", - "ssec-copy-closure": "package-management/copy-closure.html", + "ssec-copy-closure": "command-ref/nix-copy-closure.html", "sec-garbage-collection": "package-management/garbage-collection.html", "ssec-gc-roots": "package-management/garbage-collector-roots.html", "chap-package-management": "package-management/index.html", "sec-profiles": "package-management/profiles.html", - "ssec-s3-substituter": "package-management/s3-substituter.html", - "ssec-s3-substituter-anonymous-reads": 
"package-management/s3-substituter.html#anonymous-reads-to-your-s3-compatible-binary-cache", - "ssec-s3-substituter-authenticated-reads": "package-management/s3-substituter.html#authenticated-reads-to-your-s3-binary-cache", - "ssec-s3-substituter-authenticated-writes": "package-management/s3-substituter.html#authenticated-writes-to-your-s3-compatible-binary-cache", + "ssec-s3-substituter": "store/types/s3-substituter.html", + "ssec-s3-substituter-anonymous-reads": "store/types/s3-substituter.html#anonymous-reads-to-your-s3-compatible-binary-cache", + "ssec-s3-substituter-authenticated-reads": "store/types/s3-substituter.html#authenticated-reads-to-your-s3-binary-cache", + "ssec-s3-substituter-authenticated-writes": "store/types/s3-substituter.html#authenticated-writes-to-your-s3-compatible-binary-cache", "sec-sharing-packages": "package-management/sharing-packages.html", "ssec-ssh-substituter": "package-management/ssh-substituter.html", "chap-quick-start": "quick-start.html", diff --git a/doc/manual/rl-next/consistent-nix-build.md b/doc/manual/rl-next/consistent-nix-build.md new file mode 100644 index 000000000..d5929dc8e --- /dev/null +++ b/doc/manual/rl-next/consistent-nix-build.md @@ -0,0 +1,6 @@ +--- +synopsis: Show all FOD errors with `nix build --keep-going` +--- + +`nix build --keep-going` now behaves consistently with `nix-build --keep-going`. This means +that if e.g. multiple FODs fail to build, all hash mismatches are displayed. diff --git a/doc/manual/rl-next/derivation-json-change.md b/doc/manual/rl-next/derivation-json-change.md new file mode 100644 index 000000000..2a1d40e83 --- /dev/null +++ b/doc/manual/rl-next/derivation-json-change.md @@ -0,0 +1,12 @@ +--- +synopsis: Modify `nix derivation {add,show}` JSON format +issues: 9866 +prs: 10722 +--- + +The JSON format for derivations has been slightly revised to better conform to our [JSON guidelines](@docroot@contributing/cli-guideline#returning-future-proof-json). +In particular, the hash algorithm and content addressing method of content-addresed derivation outputs is now separated into two fields `hashAlgo` and `method`, +rather than one field with an arcane `:`-separated format. + +This JSON format is only used by the experimental `nix derivation` family of commands, at this time. +Future revisions are expected as the JSON format is still not entirely in compliance even after these changes. diff --git a/doc/manual/rl-next/fix-silent-unknown-options.md b/doc/manual/rl-next/fix-silent-unknown-options.md new file mode 100644 index 000000000..0977260ac --- /dev/null +++ b/doc/manual/rl-next/fix-silent-unknown-options.md @@ -0,0 +1,28 @@ +--- +synopsis: Warn on unknown settings anywhere in the command line +prs: 10701 +--- + +All `nix` commands will now properly warn when an unknown option is specified anywhere in the command line. 
+ +Before: + +```console +$ nix-instantiate --option foobar baz --expr '{}' +warning: unknown setting 'foobar' +$ nix-instantiate '{}' --option foobar baz --expr +$ nix eval --expr '{}' --option foobar baz +{ } +``` + +After: + +```console +$ nix-instantiate --option foobar baz --expr '{}' +warning: unknown setting 'foobar' +$ nix-instantiate '{}' --option foobar baz --expr +warning: unknown setting 'foobar' +$ nix eval --expr '{}' --option foobar baz +warning: unknown setting 'foobar' +{ } +``` diff --git a/doc/manual/rl-next/nix-env-shell.md b/doc/manual/rl-next/nix-env-shell.md new file mode 100644 index 000000000..b2344417a --- /dev/null +++ b/doc/manual/rl-next/nix-env-shell.md @@ -0,0 +1,12 @@ +--- +synopsis: "`nix env shell` is the new `nix shell`, and `nix shell` remains an accepted alias" +issues: 10504 +prs: 10807 +--- + +This is part of an effort to bring more structure to the CLI subcommands. + +`nix env` will be about the process environment. +Future commands may include `nix env run` and `nix env print-env`. + +It is also somewhat analogous to the [planned](https://github.com/NixOS/nix/issues/10504) `nix dev shell` (currently `nix develop`), which is less about environment variables, and more about running a development shell, which is a more powerful command, but also requires more setup. diff --git a/doc/manual/rl-next/print-value-in-installable-flake-error.md b/doc/manual/rl-next/print-value-in-installable-flake-error.md new file mode 100644 index 000000000..bb35e252e --- /dev/null +++ b/doc/manual/rl-next/print-value-in-installable-flake-error.md @@ -0,0 +1,18 @@ +--- +synopsis: New-cli flake commands that expect derivations now print the failing value and its type +prs: 10778 +--- + +In errors like `flake output attribute 'nixosConfigurations.yuki.config' is not a derivation or path`, the message now includes the failing value and type. + +Before: + +``` + error: flake output attribute 'nixosConfigurations.yuki.config' is not a derivation or path +``` + +After: + +``` + error: expected flake output attribute 'nixosConfigurations.yuki.config' to be a derivation or path but found a set: { appstream = «thunk»; assertions = «thunk»; boot = { bcache = «thunk»; binfmt = «thunk»; binfmtMiscRegistrations = «thunk»; blacklistedKernelModules = «thunk»; bootMount = «thunk»; bootspec = «thunk»; cleanTmpDir = «thunk»; consoleLogLevel = «thunk»; «43 attributes elided» }; «48 attributes elided» } +``` diff --git a/doc/manual/rl-next/store-object-info.md b/doc/manual/rl-next/store-object-info.md new file mode 100644 index 000000000..ab8f5fec0 --- /dev/null +++ b/doc/manual/rl-next/store-object-info.md @@ -0,0 +1,11 @@ +--- +synopsis: Store object info JSON format now uses `null` rather than omitting fields. +prs: 9995 +--- + +The [store object info JSON format](@docroot@/protocols/json/store-object-info.md), used for e.g. `nix path-info`, no longer omits fields to indicate absent information, but instead includes the fields with a `null` value. +For example, `"ca": null` is used to indicate a store object that isn't content-addressed rather than omitting the `ca` field entirely. +This makes records of this sort more self-describing, and easier to consume programmatically. + +We will follow this design principle going forward; +the [JSON guidelines](@docroot@/contributing/json-guideline.md) in the contributing section have been updated accordingly.
diff --git a/doc/manual/src/SUMMARY.md.in b/doc/manual/src/SUMMARY.md.in index fdfd0a927..18e7e8380 100644 --- a/doc/manual/src/SUMMARY.md.in +++ b/doc/manual/src/SUMMARY.md.in @@ -18,7 +18,9 @@ - [Uninstalling Nix](installation/uninstall.md) - [Nix Store](store/index.md) - [File System Object](store/file-system-object.md) + - [Content-Addressing File System Objects](store/file-system-object/content-address.md) - [Store Object](store/store-object.md) + - [Content-Addressing Store Objects](store/store-object/content-address.md) - [Store Path](store/store-path.md) - [Store Types](store/types/index.md) {{#include ./store/types/SUMMARY.md}} @@ -41,9 +43,7 @@ - [Advanced Topics](advanced-topics/index.md) - [Sharing Packages Between Machines](package-management/sharing-packages.md) - [Serving a Nix store via HTTP](package-management/binary-cache-substituter.md) - - [Copying Closures via SSH](package-management/copy-closure.md) - [Serving a Nix store via SSH](package-management/ssh-substituter.md) - - [Serving a Nix store via S3](package-management/s3-substituter.md) - [Remote Builds](advanced-topics/distributed-builds.md) - [Tuning Cores and Jobs](advanced-topics/cores-vs-jobs.md) - [Verifying Build Reproducibility](advanced-topics/diff-hook.md) @@ -121,6 +121,7 @@ - [Documentation](contributing/documentation.md) - [Experimental Features](contributing/experimental-features.md) - [CLI guideline](contributing/cli-guideline.md) + - [JSON guideline](contributing/json-guideline.md) - [C++ style guide](contributing/cxx.md) - [Releases](release-notes/index.md) {{#include ./SUMMARY-rl-next.md}} diff --git a/doc/manual/src/_redirects b/doc/manual/src/_redirects index 8bf0e854b..a04a36f1e 100644 --- a/doc/manual/src/_redirects +++ b/doc/manual/src/_redirects @@ -39,3 +39,5 @@ /json/* /protocols/json/:splat 301! /release-notes/release-notes /release-notes 301! + +/package-management/copy-closure /command-ref/nix-copy-closure 301! diff --git a/doc/manual/src/command-ref/nix-collect-garbage.md b/doc/manual/src/command-ref/nix-collect-garbage.md index 1bc88d858..8e1307c48 100644 --- a/doc/manual/src/command-ref/nix-collect-garbage.md +++ b/doc/manual/src/command-ref/nix-collect-garbage.md @@ -74,4 +74,4 @@ $ nix-collect-garbage -d ``` [profiles]: @docroot@/command-ref/files/profiles.md -[store objects]: @docroot@/glossary.md#gloss-store-object +[store objects]: @docroot@/store/store-object.md diff --git a/doc/manual/src/command-ref/nix-copy-closure.md b/doc/manual/src/command-ref/nix-copy-closure.md index eb1693e1e..d94bde3a3 100644 --- a/doc/manual/src/command-ref/nix-copy-closure.md +++ b/doc/manual/src/command-ref/nix-copy-closure.md @@ -1,91 +1,91 @@ # Name -`nix-copy-closure` - copy a closure to or from a remote machine via SSH +`nix-copy-closure` - copy store objects to or from a remote machine via SSH # Synopsis `nix-copy-closure` - [`--to` | `--from`] + [`--to` | `--from` ] [`--gzip`] [`--include-outputs`] [`--use-substitutes` | `-s`] [`-v`] - _user@machine_ _paths_ + [_user_@]_machine_[:_port_] _paths_ # Description -`nix-copy-closure` gives you an easy and efficient way to exchange -software between machines. Given one or more Nix store _paths_ on the -local machine, `nix-copy-closure` computes the closure of those paths -(i.e. all their dependencies in the Nix store), and copies all paths -in the closure to the remote machine via the `ssh` (Secure Shell) -command. 
With the `--from` option, the direction is reversed: the -closure of _paths_ on a remote machine is copied to the Nix store on -the local machine. +Given _paths_ from one machine, `nix-copy-closure` computes the [closure](@docroot@/glossary.md#gloss-closure) of those paths (i.e. all their dependencies in the Nix store), and copies [store objects](@docroot@/glossary.md#gloss-store-object) in that closure to another machine via SSH. +It doesn’t copy store objects that are already present on the other machine. -This command is efficient because it only sends the store paths -that are missing on the target machine. +> **Note** +> +> While the Nix store to use on the local machine can be specified on the command line with the [`--store`](@docroot@/command-ref/conf-file.md#conf-store) option, the Nix store to be accessed on the remote machine can only be [configured statically](@docroot@/command-ref/conf-file.md#configuration-file) on that remote machine. -Since `nix-copy-closure` calls `ssh`, you may be asked to type in the -appropriate password or passphrase. In fact, you may be asked _twice_ -because `nix-copy-closure` currently connects twice to the remote -machine, first to get the set of paths missing on the target machine, -and second to send the dump of those paths. When using public key -authentication, you can avoid typing the passphrase with `ssh-agent`. +Since `nix-copy-closure` calls `ssh`, you may need to authenticate with the remote machine. +In fact, you may be asked for authentication _twice_ because `nix-copy-closure` currently connects twice to the remote machine: first to get the set of paths missing on the target machine, and second to send the dump of those paths. +When using public key authentication, you can avoid typing the passphrase with `ssh-agent`. # Options - - `--to`\ - Copy the closure of _paths_ from the local Nix store to the Nix - store on _machine_. This is the default. + - `--to` - - `--from`\ - Copy the closure of _paths_ from the Nix store on _machine_ to the - local Nix store. + Copy the closure of _paths_ from a Nix store accessible from the local machine to the Nix store on the remote _machine_. + This is the default behavior. + + - `--from` + + Copy the closure of _paths_ from the Nix store on the remote _machine_ to the local machine's specified Nix store. + + - `--gzip` - - `--gzip`\ Enable compression of the SSH connection. - - `--include-outputs`\ + - `--include-outputs` + Also copy the outputs of [store derivation]s included in the closure. [store derivation]: @docroot@/glossary.md#gloss-store-derivation - - `--use-substitutes` / `-s`\ - Attempt to download missing paths on the target machine using Nix’s - substitute mechanism. Any paths that cannot be substituted on the - target are still copied normally from the source. This is useful, - for instance, if the connection between the source and target - machine is slow, but the connection between the target machine and - `nixos.org` (the default binary cache server) is - fast. + - `--use-substitutes` / `-s` - - `-v`\ - Show verbose output. + Attempt to download missing store objects on the target from [substituters](@docroot@/command-ref/conf-file.md#conf-substituters). + Any store objects that cannot be substituted on the target are still copied normally from the source. + This is useful, for instance, if the connection between the source and target machine is slow, but the connection between the target machine and `cache.nixos.org` (the default binary cache server) is fast. 
{{#include ./opt-common.md}} # Environment variables - - `NIX_SSHOPTS`\ - Additional options to be passed to `ssh` on the command - line. + - `NIX_SSHOPTS` + + Additional options to be passed to `ssh` on the command line. {{#include ./env-common.md}} # Examples -Copy Firefox with all its dependencies to a remote machine: +> **Example** +> +> Copy GNU Hello with all its dependencies to a remote machine: +> +> ```shell-session +> $ storePath="$(nix-build '' -I nixpkgs=channel:nixpkgs-unstable -A hello --no-out-link)" +> $ nix-copy-closure --to alice@itchy.example.org "$storePath" +> copying 5 paths... +> copying path '/nix/store/nrwkk6ak3rgkrxbqhsscb01jpzmslf2r-xgcc-13.2.0-libgcc' to 'ssh://alice@itchy.example.org'... +> copying path '/nix/store/gm61h1y42pqyl6178g90x8zm22n6pyy5-libunistring-1.1' to 'ssh://alice@itchy.example.org'... +> copying path '/nix/store/ddfzjdykw67s20c35i7a6624by3iz5jv-libidn2-2.3.7' to 'ssh://alice@itchy.example.org'... +> copying path '/nix/store/apab5i73dqa09wx0q27b6fbhd1r18ihl-glibc-2.39-31' to 'ssh://alice@itchy.example.org'... +> copying path '/nix/store/g1n2vryg06amvcc1avb2mcq36faly0mh-hello-2.12.1' to 'ssh://alice@itchy.example.org'... +> ``` -```console -$ nix-copy-closure --to alice@itchy.labs $(type -tP firefox) -``` - -Copy Subversion from a remote machine and then install it into a user -environment: - -```console -$ nix-copy-closure --from alice@itchy.labs \ - /nix/store/0dj0503hjxy5mbwlafv1rsbdiyx1gkdy-subversion-1.4.4 -$ nix-env --install /nix/store/0dj0503hjxy5mbwlafv1rsbdiyx1gkdy-subversion-1.4.4 -``` +> **Example** +> +> Copy GNU Hello from a remote machine using a known store path, and run it: +> +> ```shell-session +> $ storePath="$(nix-instantiate --eval '' -I nixpkgs=channel:nixpkgs-unstable -A hello.outPath | tr -d '"')" +> $ nix-copy-closure --from alice@itchy.example.org "$storePath" +> $ "$storePath"/bin/hello +> Hello, world! +> ``` diff --git a/doc/manual/src/command-ref/nix-env.md b/doc/manual/src/command-ref/nix-env.md index 941723216..c6f627365 100644 --- a/doc/manual/src/command-ref/nix-env.md +++ b/doc/manual/src/command-ref/nix-env.md @@ -47,39 +47,83 @@ These pages can be viewed offline: Example: `nix-env --help --install` +# Package sources + +`nix-env` can obtain packages from multiple sources: + +- An attribute set of derivations from: + - The [default Nix expression](@docroot@/command-ref/files/default-nix-expression.md) (by default) + - A Nix file, specified via `--file` + - A [profile](@docroot@/command-ref/files/profiles.md), specified via `--from-profile` + - A Nix expression that is a function which takes default expression as argument, specified via `--from-expression` +- A [store path](@docroot@/store/store-path.md) + # Selectors -Several commands, such as `nix-env --query ` and `nix-env --install `, take a list of -arguments that specify the packages on which to operate. These are -extended regular expressions that must match the entire name of the -package. (For details on regular expressions, see **regex**(7).) The match is -case-sensitive. The regular expression can optionally be followed by a -dash and a version number; if omitted, any version of the package will -match. Here are some examples: +Several operations, such as [`nix-env --query`](./nix-env/query.md) and [`nix-env --install`](./nix-env/install.md), take a list of *arguments* that specify the packages on which to operate. - - `firefox`\ - Matches the package name `firefox` and any version. 
+Packages are identified based on a `name` part and a `version` part of a [symbolic derivation name](@docroot@/language/derivations.md#attr-names): - - `firefox-32.0`\ - Matches the package name `firefox` and version `32.0`. +- `name`: Everything up to but not including the first dash (`-`) that is *not* followed by a letter. +- `version`: The rest, excluding the separating dash. - - `gtk\\+`\ - Matches the package name `gtk+`. The `+` character must be escaped - using a backslash to prevent it from being interpreted as a - quantifier, and the backslash must be escaped in turn with another - backslash to ensure that the shell passes it on. +> **Example** +> +> `nix-env` parses the symbolic derivation name `apache-httpd-2.0.48` as: +> +> ```json +> { +> "name": "apache-httpd", +> "version": "2.0.48" +> } +> ``` - - `.\*`\ - Matches any package name. This is the default for most commands. +> **Example** +> +> `nix-env` parses the symbolic derivation name `firefox.*` as: +> +> ```json +> { +> "name": "firefox.*", +> "version": "" +> } +> ``` - - `'.*zip.*'`\ - Matches any package name containing the string `zip`. Note the dots: - `'*zip*'` does not work, because in a regular expression, the - character `*` is interpreted as a quantifier. +The `name` parts of the *arguments* to `nix-env` are treated as extended regular expressions and matched against the `name` parts of derivation names in the package source. +The match is case-sensitive. +The regular expression can optionally be followed by a dash (`-`) and a version number; if omitted, any version of the package will match. +For details on regular expressions, see [**regex**(7)](https://linux.die.net/man/7/regex). - - `'.*(firefox|chromium).*'`\ - Matches any package name containing the strings `firefox` or - `chromium`. +> **Example** +> +> Common patterns for finding package names with `nix-env`: +> +> - `firefox` +> +> Matches the package name `firefox` and any version. +> +> - `firefox-32.0` +> +> Matches the package name `firefox` and version `32.0`. +> +> - `gtk\\+` +> +> Matches the package name `gtk+`. +> The `+` character must be escaped using a backslash (`\`) to prevent it from being interpreted as a quantifier, and the backslash must be escaped in turn with another backslash to ensure that the shell passes it on. +> +> - `.\*` +> +> Matches any package name. +> This is the default for most commands. +> +> - `'.*zip.*'` +> +> Matches any package name containing the string `zip`. +> Note the dots: `'*zip*'` does not work, because in a regular expression, the character `*` is interpreted as a quantifier. +> +> - `'.*(firefox|chromium).*'` +> +> Matches any package name containing the strings `firefox` or `chromium`. # Files diff --git a/doc/manual/src/command-ref/nix-env/delete-generations.md b/doc/manual/src/command-ref/nix-env/delete-generations.md index 6b6ea798e..ae618b2c6 100644 --- a/doc/manual/src/command-ref/nix-env/delete-generations.md +++ b/doc/manual/src/command-ref/nix-env/delete-generations.md @@ -49,7 +49,7 @@ Periodically deleting old generations is important to make garbage collection effective. The is because profiles are also garbage collection roots — any [store object] reachable from a profile is "alive" and ineligible for deletion. 
-[store object]: @docroot@/glossary.md#gloss-store-object +[store object]: @docroot@/store/store-object.md {{#include ./opt-common.md}} diff --git a/doc/manual/src/command-ref/nix-env/install.md b/doc/manual/src/command-ref/nix-env/install.md index d80bcb668..85f37904f 100644 --- a/doc/manual/src/command-ref/nix-env/install.md +++ b/doc/manual/src/command-ref/nix-env/install.md @@ -14,14 +14,13 @@ # Description -The install operation creates a new user environment. +The `--install` operation creates a new user environment. It is based on the current generation of the active [profile](@docroot@/command-ref/files/profiles.md), to which a set of [store paths] described by *args* is added. -[store paths]: @docroot@/glossary.md#gloss-store-path +[store paths]: @docroot@/store/store-path.md The arguments *args* map to store paths in a number of possible ways: - - By default, *args* is a set of [derivation] names denoting derivations in the [default Nix expression]. These are [realised], and the resulting output paths are installed. Currently installed derivations with a name equal to the name of a derivation being added are removed unless the option `--preserve-installed` is specified. diff --git a/doc/manual/src/command-ref/nix-hash.md b/doc/manual/src/command-ref/nix-hash.md index 37c8facec..24e91df12 100644 --- a/doc/manual/src/command-ref/nix-hash.md +++ b/doc/manual/src/command-ref/nix-hash.md @@ -20,16 +20,21 @@ an example. The hash is computed over a *serialisation* of each path: a dump of the file system tree rooted at the path. This allows directories and symlinks to be hashed as well as regular files. The dump is in the -*NAR format* produced by [`nix-store +*[Nix Archive (NAR)][Nix Archive] format* produced by [`nix-store --dump`](@docroot@/command-ref/nix-store/dump.md). Thus, `nix-hash path` yields the same cryptographic hash as `nix-store --dump path | md5sum`. +[Nix Archive]: @docroot@/store/file-system-object/content-address.md#serial-nix-archive + # Options - `--flat`\ - Print the cryptographic hash of the contents of each regular file - *path*. That is, do not compute the hash over the dump of *path*. + Print the cryptographic hash of the contents of each regular file *path*. + That is, instead of computing + the hash of the [Nix Archive (NAR)](@docroot@/store/file-system-object/content-address.md#serial-nix-archive) of *path*, + just [directly hash](@docroot@/store/file-system-object/content-address.md#serial-flat) *path* as is. + This requires *path* to resolve to a regular file rather than a directory. The result is identical to that produced by the GNU commands `md5sum` and `sha1sum`. diff --git a/doc/manual/src/command-ref/nix-store/dump.md b/doc/manual/src/command-ref/nix-store/dump.md index c2f3c42ef..3de0e27b0 100644 --- a/doc/manual/src/command-ref/nix-store/dump.md +++ b/doc/manual/src/command-ref/nix-store/dump.md @@ -1,6 +1,6 @@ # Name -`nix-store --dump` - write a single path to a Nix Archive +`nix-store --dump` - write a single path to a [Nix Archive] ## Synopsis @@ -8,7 +8,7 @@ ## Description -The operation `--dump` produces a NAR (Nix ARchive) file containing the +The operation `--dump` produces a [Nix archive](@docroot@/glossary.md#gloss-nar) (NAR) file containing the contents of the file system tree rooted at *path*. The archive is written to standard output. @@ -30,8 +30,9 @@ NAR archives support filenames of unlimited length and 64-bit file sizes.
They can contain regular files, directories, and symbolic links, but not other types of files (such as device nodes). -A Nix archive can be unpacked using `nix-store ---restore`. +A Nix archive can be unpacked using [`nix-store --restore`](@docroot@/command-ref/nix-store/restore.md). + +[Nix Archive]: @docroot@/store/file-system-object/content-address.md#serial-nix-archive {{#include ./opt-common.md}} diff --git a/doc/manual/src/command-ref/nix-store/export.md b/doc/manual/src/command-ref/nix-store/export.md index 1bc46f53b..ba772eb43 100644 --- a/doc/manual/src/command-ref/nix-store/export.md +++ b/doc/manual/src/command-ref/nix-store/export.md @@ -1,6 +1,6 @@ # Name -`nix-store --export` - export store paths to a Nix Archive +`nix-store --export` - export store paths to a [Nix Archive] ## Synopsis @@ -8,16 +8,22 @@ ## Description -The operation `--export` writes a serialisation of the specified store -paths to standard output in a format that can be imported into another -Nix store with `nix-store --import`. This is like `nix-store ---dump`, except that the NAR archive produced by that command doesn’t -contain the necessary meta-information to allow it to be imported into -another Nix store (namely, the set of references of the path). +The operation `--export` writes a serialisation of the given [store objects](@docroot@/glossary.md#gloss-store-object) to standard output in a format that can be imported into another [Nix store](@docroot@/store/index.md) with [`nix-store --import`](./import.md). -This command does not produce a *closure* of the specified paths, so if -a store path references other store paths that are missing in the target -Nix store, the import will fail. +> **Warning** +> +> This command *does not* produce a [closure](@docroot@/glossary.md#gloss-closure) of the specified store paths. +> Trying to import a store object that refers to store paths not available in the target Nix store will fail. +> +> Use [`nix-store --query`](@docroot@/command-ref/nix-store/query.md) to obtain the closure of a store path. + +This command is different from [`nix-store --dump`](./dump.md), which produces a [Nix archive](@docroot@/glossary.md#gloss-nar) that *does not* contain the set of [references](@docroot@/glossary.md#gloss-reference) of a given store path. + +> **Note** +> +> For efficient transfer of closures to remote machines over SSH, use [`nix-copy-closure`](@docroot@/command-ref/nix-copy-closure.md). + +[Nix Archive]: @docroot@/store/file-system-object/content-address.md#serial-nix-archive {{#include ./opt-common.md}} @@ -27,15 +33,21 @@ Nix store, the import will fail. # Examples -To copy a whole closure, do something -like: - -```console -$ nix-store --export $(nix-store --query --requisites paths) > out -``` - -To import the whole closure again, run: - -```console -$ nix-store --import < out -``` +> **Example** +> +> Deploy GNU Hello to an airgapped machine via USB stick. +> +> Write the closure to the block device on a machine with internet connection: +> +> ```shell-session +> [alice@itchy]$ storePath=$(nix-build '' -I nixpkgs=channel:nixpkgs-unstable -A hello --no-out-link) +> [alice@itchy]$ nix-store --export $(nix-store --query --requisites $storePath) | sudo dd of=/dev/usb +> ``` +> +> Read the closure from the block device on the machine without internet connection: +> +> ```shell-session +> [bob@scratchy]$ hello=$(sudo dd if=/dev/usb | nix-store --import | tail -1) +> [bob@scratchy]$ $hello/bin/hello +> Hello, world! 
+> ``` diff --git a/doc/manual/src/command-ref/nix-store/import.md b/doc/manual/src/command-ref/nix-store/import.md index 2711316a7..3f6b3d076 100644 --- a/doc/manual/src/command-ref/nix-store/import.md +++ b/doc/manual/src/command-ref/nix-store/import.md @@ -1,6 +1,8 @@ # Name -`nix-store --import` - import Nix Archive into the store +`nix-store --import` - import [Nix Archive] into the store + +[Nix Archive]: @docroot@/store/file-system-object/content-address.md#serial-nix-archive # Synopsis @@ -8,14 +10,34 @@ # Description -The operation `--import` reads a serialisation of a set of store paths -produced by `nix-store --export` from standard input and adds those -store paths to the Nix store. Paths that already exist in the Nix store -are ignored. If a path refers to another path that doesn’t exist in the -Nix store, the import fails. +The operation `--import` reads a serialisation of a set of [store objects](@docroot@/glossary.md#gloss-store-object) produced by [`nix-store --export`](./export.md) from standard input, and adds those store objects to the specified [Nix store](@docroot@/store/index.md). +Paths that already exist in the target Nix store are ignored. +If a path [refers](@docroot@/glossary.md#gloss-reference) to another path that doesn’t exist in the target Nix store, the import fails. + +> **Note** +> +> For efficient transfer of closures to remote machines over SSH, use [`nix-copy-closure`](@docroot@/command-ref/nix-copy-closure.md). {{#include ./opt-common.md}} {{#include ../opt-common.md}} {{#include ../env-common.md}} + +# Examples + +> **Example** +> +> Given a closure of GNU Hello as a file: +> +> ```shell-session +> $ storePath="$(nix-build '' -I nixpkgs=channel:nixpkgs-unstable -A hello --no-out-link)" +> $ nix-store --export $(nix-store --query --requisites $storePath) > hello.closure +> ``` +> +> Import the closure into a [remote SSH store](@docroot@/store/types/ssh-store.md) using the [`--store`](@docroot@/command-ref/conf-file.md#conf-store) option: +> +> ```console +> $ nix-store --import --store ssh://alice@itchy.example.org < hello.closure +> ``` + diff --git a/doc/manual/src/command-ref/nix-store/optimise.md b/doc/manual/src/command-ref/nix-store/optimise.md index dc392aeb8..b257466b2 100644 --- a/doc/manual/src/command-ref/nix-store/optimise.md +++ b/doc/manual/src/command-ref/nix-store/optimise.md @@ -12,7 +12,7 @@ The operation `--optimise` reduces Nix store disk space usage by finding identical files in the store and hard-linking them to each other. It typically reduces the size of the store by something like 25-35%. Only regular files and symlinks are hard-linked in this manner. Files are -considered identical when they have the same NAR archive serialisation: +considered identical when they have the same [Nix Archive (NAR)][Nix Archive] serialisation: that is, regular files must have the same contents and permission (executable or non-executable), and symlinks must have the same contents. 
@@ -38,3 +38,4 @@ hashing files in `/nix/store/qhqx7l2f1kmwihc9bnxs7rc159hsxnf3-gcc-4.1.1' there are 114486 files with equal contents out of 215894 files in total ``` +[Nix Archive]: @docroot@/store/file-system-object/content-address.md#serial-nix-archive diff --git a/doc/manual/src/command-ref/nix-store/realise.md b/doc/manual/src/command-ref/nix-store/realise.md index 5428d57fa..6e56387eb 100644 --- a/doc/manual/src/command-ref/nix-store/realise.md +++ b/doc/manual/src/command-ref/nix-store/realise.md @@ -25,11 +25,11 @@ Each of *paths* is processed as follows: If no substitutes are available and no store derivation is given, realisation fails. -[store paths]: @docroot@/glossary.md#gloss-store-path +[store paths]: @docroot@/store/store-path.md [valid]: @docroot@/glossary.md#gloss-validity [store derivation]: @docroot@/glossary.md#gloss-store-derivation [output paths]: @docroot@/glossary.md#gloss-output-path -[store objects]: @docroot@/glossary.md#gloss-store-object +[store objects]: @docroot@/store/store-object.md [closure]: @docroot@/glossary.md#gloss-closure [substituters]: @docroot@/command-ref/conf-file.md#conf-substituters [content-addressed derivations]: @docroot@/contributing/experimental-features.md#xp-feature-ca-derivations diff --git a/doc/manual/src/command-ref/nix-store/restore.md b/doc/manual/src/command-ref/nix-store/restore.md index fcba43df4..2d0aa3127 100644 --- a/doc/manual/src/command-ref/nix-store/restore.md +++ b/doc/manual/src/command-ref/nix-store/restore.md @@ -8,9 +8,11 @@ ## Description -The operation `--restore` unpacks a NAR archive to *path*, which must +The operation `--restore` unpacks a [Nix Archive (NAR)][Nix Archive] to *path*, which must not already exist. The archive is read from standard input. +[Nix Archive]: @docroot@/store/file-system-object/content-address.md#serial-nix-archive + {{#include ./opt-common.md}} {{#include ../opt-common.md}} diff --git a/doc/manual/src/contributing/cli-guideline.md b/doc/manual/src/contributing/cli-guideline.md index e90d6de8d..23df844ec 100644 --- a/doc/manual/src/contributing/cli-guideline.md +++ b/doc/manual/src/contributing/cli-guideline.md @@ -389,88 +389,6 @@ colors, no emojis and using ASCII instead of Unicode symbols). The same should happen when TTY is not detected on STDERR. We should not display progress / status section, but only print warnings and errors. -## Returning future proof JSON - -The schema of JSON output should allow for backwards compatible extension. This section explains how to achieve this. - -Two definitions are helpful here, because while JSON only defines one "key-value" -object type, we use it to cover two use cases: - - - **dictionary**: a map from names to value that all have the same type. In - C++ this would be a `std::map` with string keys. - - **record**: a fixed set of attributes each with their own type. In C++, this - would be represented by a `struct`. - -It is best not to mix these use cases, as that may lead to incompatibilities when the schema changes. For example, adding a record field to a dictionary breaks consumers that assume all JSON object fields to have the same meaning and type. - -This leads to the following guidelines: - - - The top-level (root) value must be a record. - - Otherwise, one can not change the structure of a command's output. - - - The value of a dictionary item must be a record. - - Otherwise, the item type can not be extended. - - - List items should be records. - - Otherwise, one can not change the structure of the list items. 
- - If the order of the items does not matter, and each item has a unique key that is a string, consider representing the list as a dictionary instead. If the order of the items needs to be preserved, return a list of records. - - - Streaming JSON should return records. - - An example of a streaming JSON format is [JSON lines](https://jsonlines.org/), where each line represents a JSON value. These JSON values can be considered top-level values or list items, and they must be records. - -### Examples - - -This is bad, because all keys must be assumed to be store types: - -```json -{ - "local": { ... }, - "remote": { ... }, - "http": { ... } -} -``` - -This is good, because the it is extensible at the root, and is somewhat self-documenting: - -```json -{ - "storeTypes": { "local": { ... }, ... }, - "pluginSupport": true -} -``` - -While the dictionary of store types seems like a very complete response at first, a use case may arise that warrants returning additional information. -For example, the presence of plugin support may be crucial information for a client to proceed when their desired store type is missing. - - - -The following representation is bad because it is not extensible: - -```json -{ "outputs": [ "out" "bin" ] } -``` - -However, simply converting everything to records is not enough, because the order of outputs must be preserved: - -```json -{ "outputs": { "bin": {}, "out": {} } } -``` - -The first item is the default output. Deriving this information from the outputs ordering is not great, but this is how Nix currently happens to work. -While it is possible for a JSON parser to preserve the order of fields, we can not rely on this capability to be present in all JSON libraries. - -This representation is extensible and preserves the ordering: - -```json -{ "outputs": [ { "outputName": "out" }, { "outputName": "bin" } ] } -``` - ## Dialog with the user CLIs don't always make it clear when an action has taken place. For every diff --git a/doc/manual/src/contributing/documentation.md b/doc/manual/src/contributing/documentation.md index 359fdb556..6e7c0a967 100644 --- a/doc/manual/src/contributing/documentation.md +++ b/doc/manual/src/contributing/documentation.md @@ -147,7 +147,7 @@ Please observe these guidelines to ease reviews: ``` A [store object] contains a [file system object] and [references] to other store objects. - [store object]: @docroot@/glossary.md#gloss-store-object + [store object]: @docroot@/store/store-object.md [file system object]: @docroot@/architecture/file-system-object.md [references]: @docroot@/glossary.md#gloss-reference ``` diff --git a/doc/manual/src/contributing/json-guideline.md b/doc/manual/src/contributing/json-guideline.md new file mode 100644 index 000000000..b4bc92af9 --- /dev/null +++ b/doc/manual/src/contributing/json-guideline.md @@ -0,0 +1,128 @@ +# JSON guideline + +Nix consumes and produces JSON in a variety of contexts. +These guidelines ensure consistent practices for all our JSON interfaces, for ease of use, and so that experience in one part carries over to another. + +## Extensibility + +The schema of JSON input and output should allow for backwards compatible extension. +This section explains how to achieve this. + +Two definitions are helpful here, because while JSON only defines one "key-value" object type, we use it to cover two use cases: + + - **dictionary**: a map from names to value that all have the same type. + In C++ this would be a `std::map` with string keys. 
+ + - **record**: a fixed set of attributes each with their own type. + In C++, this would be represented by a `struct`. + +It is best not to mix these use cases, as that may lead to incompatibilities when the schema changes. +For example, adding a record field to a dictionary breaks consumers that assume all JSON object fields to have the same meaning and type, and dictionary items with a colliding name can not be represented anymore. + +This leads to the following guidelines: + + - The top-level (root) value must be a record. + + Otherwise, one can not change the structure of a command's output. + + - The value of a dictionary item must be a record. + + Otherwise, the item type can not be extended. + + - List items should be records. + + Otherwise, one can not change the structure of the list items. + + If the order of the items does not matter, and each item has a unique key that is a string, consider representing the list as a dictionary instead. + If the order of the items needs to be preserved, return a list of records. + + - Streaming JSON should return records. + + An example of a streaming JSON format is [JSON lines](https://jsonlines.org/), where each line represents a JSON value. + These JSON values can be considered top-level values or list items, and they must be records. + +### Examples + +This is bad, because all keys must be assumed to be store types: + +```json +{ + "local": { ... }, + "remote": { ... }, + "http": { ... } +} +``` + +This is good, because it is extensible at the root, and is somewhat self-documenting: + +```json +{ + "storeTypes": { "local": { ... }, ... }, + "pluginSupport": true +} +``` + +While the dictionary of store types seems like a very complete response at first, a use case may arise that warrants returning additional information. +For example, the presence of plugin support may be crucial information for a client to proceed when their desired store type is missing. + + + +The following representation is bad because it is not extensible: + +```json +{ "outputs": [ "out" "bin" ] } +``` + +However, simply converting everything to records is not enough, because the order of outputs must be preserved: + +```json +{ "outputs": { "bin": {}, "out": {} } } +``` + +The first item is the default output. Deriving this information from the outputs ordering is not great, but this is how Nix currently happens to work. +While it is possible for a JSON parser to preserve the order of fields, we can not rely on this capability to be present in all JSON libraries. + +This representation is extensible and preserves the ordering: + +```json +{ "outputs": [ { "outputName": "out" }, { "outputName": "bin" } ] } +``` + +## Self-describing values + +As described in the previous section, it's crucial that schemas can be extended with new fields without breaking compatibility. +However, that should *not* mean we use the presence/absence of fields to indicate optional information *within* a version of the schema. +Instead, always include the field, and use `null` to indicate the "nothing" case. + +### Examples + +Here are two JSON objects: + +```json +{ + "foo": {} +} +``` +```json +{ + "foo": {}, + "bar": {} +} +``` + +Since they differ in which fields they contain, they should *not* both be valid values of the same schema. +At most, they can match two different schemas where the second (with `foo` and `bar`) is considered a newer version of the first (with just `foo`). +Within each version, all fields are mandatory (always `foo`, and always `foo` and `bar`).
+Only *between* each version, `bar` gets added as a new mandatory field. + +Here are another two JSON objects: + +```json +{ "foo": null } +``` +```json +{ "foo": { "bar": 1 } } +``` + +Since they both contain a `foo` field, they could be valid values of the same schema. +The schema would have `foo` as an optional field, which is either `null` or an object where `bar` is an integer. diff --git a/doc/manual/src/contributing/testing.md b/doc/manual/src/contributing/testing.md index 31c39c16c..88b3c5cd9 100644 --- a/doc/manual/src/contributing/testing.md +++ b/doc/manual/src/contributing/testing.md @@ -60,7 +60,7 @@ The unit tests are defined using the [googletest] and [rapidcheck] frameworks. > ``` The tests for each Nix library (`libnixexpr`, `libnixstore`, etc..) live inside a directory `tests/unit/${library_name_without-nix}`. -Given a interface (header) and implementation pair in the original library, say, `src/libexpr/value/context.{hh,cc}`, we write tests for it in `tests/unit/libexpr/tests/value/context.cc`, and (possibly) declare/define additional interfaces for testing purposes in `tests/unit/libexpr-support/tests/value/context.{hh,cc}`. +Given an interface (header) and implementation pair in the original library, say, `src/libexpr/value/context.{hh,cc}`, we write tests for it in `tests/unit/libexpr/tests/value/context.cc`, and (possibly) declare/define additional interfaces for testing purposes in `tests/unit/libexpr-support/tests/value/context.{hh,cc}`. Data for unit tests is stored in a `data` subdir of the directory for each unit test executable. For example, `libnixstore` code is in `src/libstore`, and its test data is in `tests/unit/libstore/data`. @@ -162,14 +162,14 @@ ran test tests/functional/${testName}.sh... [PASS] or without `make`: ```shell-session -$ ./mk/run-test.sh tests/functional/${testName}.sh tests/functional/init.sh +$ ./mk/run-test.sh tests/functional/${testName}.sh ran test tests/functional/${testName}.sh... [PASS] ``` To see the complete output, one can also run: ```shell-session -$ ./mk/debug-test.sh tests/functional/${testName}.sh tests/functional/init.sh +$ ./mk/debug-test.sh tests/functional/${testName}.sh +(${testName}.sh:1) foo output from foo +(${testName}.sh:2) bar @@ -204,7 +204,7 @@ edit it like so: Then, running the test with `./mk/debug-test.sh` will drop you into GDB once the script reaches that point: ```shell-session -$ ./mk/debug-test.sh tests/functional/${testName}.sh tests/functional/init.sh +$ ./mk/debug-test.sh tests/functional/${testName}.sh ... + gdb blash blub GNU gdb (GDB) 12.1 diff --git a/doc/manual/src/glossary.md b/doc/manual/src/glossary.md index cbffda187..55ad9e1c2 100644 --- a/doc/manual/src/glossary.md +++ b/doc/manual/src/glossary.md @@ -1,5 +1,24 @@ # Glossary +- [content address]{#gloss-content-address} + + A + [*content address*](https://en.wikipedia.org/wiki/Content-addressable_storage) + is a secure way to reference immutable data. + The reference is calculated directly from the content of the data being referenced, which means the reference is + [*tamper proof*](https://en.wikipedia.org/wiki/Tamperproofing) + --- variations of the data should always calculate to distinct content addresses.
+ + For how Nix uses content addresses, see: + + - [Content-Addressing File System Objects](@docroot@/store/file-system-object/content-address.md) + - [Content-Addressing Store Objects](@docroot@/store/store-object/content-address.md) + - [content-addressed derivation](#gloss-content-addressed-derivation) + + Software Heritage's writing on [*Intrinsic and Extrinsic identifiers*](https://www.softwareheritage.org/2020/07/09/intrinsic-vs-extrinsic-identifiers) is also a good introduction to the value of content-addressing over other referencing schemes. + + Besides content addressing, the Nix store also uses [input addressing](#gloss-input-addressed-store-object). + - [derivation]{#gloss-derivation} A description of a build task. The result of a derivation is a @@ -118,9 +137,12 @@ - [content-addressed store object]{#gloss-content-addressed-store-object} - A [store object] whose [store path] is determined by its contents. + A [store object] which is [content-addressed](#gloss-content-address), + i.e. whose [store path] is determined by its contents. This includes derivations, the outputs of [content-addressed derivations](#gloss-content-addressed-derivation), and the outputs of [fixed-output derivations](#gloss-fixed-output-derivation). + See [Content-Addressing Store Objects](@docroot@/store/store-object/content-address.md) for details. + - [substitute]{#gloss-substitute} A substitute is a command invocation stored in the [Nix database] that @@ -266,13 +288,15 @@ See [installables](./command-ref/new-cli/nix.md#installables) for [`nix` commands](./command-ref/new-cli/nix.md) (experimental) for details. -- [NAR]{#gloss-nar} +- [Nix Archive (NAR)]{#gloss-nar} A *N*ix *AR*chive. This is a serialisation of a path in the Nix store. It can contain regular files, directories and symbolic links. NARs are generated and unpacked using `nix-store --dump` and `nix-store --restore`. + See [Nix Archive](store/file-system-object/content-address.html#serial-nix-archive) for details. + - [`∅`]{#gloss-emtpy-set} The empty set symbol. In the context of profile history, this denotes a package is not present in a particular version of the profile. diff --git a/doc/manual/src/language/advanced-attributes.md b/doc/manual/src/language/advanced-attributes.md index 1fcc5a95b..113062db1 100644 --- a/doc/manual/src/language/advanced-attributes.md +++ b/doc/manual/src/language/advanced-attributes.md @@ -197,33 +197,40 @@ Derivations can declare some infrequently used optional attributes. `outputHashAlgo` can only be `null` when `outputHash` follows the SRI format. The `outputHashMode` attribute determines how the hash is computed. - It must be one of the following two values: + It must be one of the following values: - - `"flat"`\ - The output must be a non-executable regular file. If it isn’t, - the build fails. The hash is simply computed over the contents - of that file (so it’s equal to what Unix commands like - `sha256sum` or `sha1sum` produce). + - [`"flat"`](@docroot@/store/store-object/content-address.md#method-flat) This is the default. - - `"recursive"` or `"nar"`\ - The hash is computed over the [NAR archive](@docroot@/glossary.md#gloss-nar) dump of the output - (i.e., the result of [`nix-store --dump`](@docroot@/command-ref/nix-store/dump.md)). In - this case, the output can be anything, including a directory - tree. 
+  - [`"recursive"` or `"nar"`](@docroot@/store/store-object/content-address.md#method-nix-archive)

-    `"recursive"` is the traditional way of indicating this,
-    and is supported since 2005 (virtually the entire history of Nix).
-    `"nar"` is more clear, and consistent with other parts of Nix (such as the CLI),
-    however support for it is only added in Nix version 2.21.
+    > **Compatibility**
+    >
+    > `"recursive"` is the traditional way of indicating this,
+    > and has been supported since 2005 (virtually the entire history of Nix).
+    > `"nar"` is clearer, and consistent with other parts of Nix (such as the CLI);
+    > however, support for it was only added in Nix version 2.21.
+
+  - [`"text"`](@docroot@/store/store-object/content-address.md#method-text)
+
+    > **Warning**
+    >
+    > The use of this method for derivation outputs is part of the [`dynamic-derivations`][xp-feature-dynamic-derivations] experimental feature.
+
+  - [`"git"`](@docroot@/store/store-object/content-address.md#method-git)
+
+    > **Warning**
+    >
+    > This method is part of the [`git-hashing`][xp-feature-git-hashing] experimental feature.

- [`__contentAddressed`]{#adv-attr-__contentAddressed}
+
+    > **Warning**
    > This attribute is part of an [experimental feature](@docroot@/contributing/experimental-features.md).
    >
    > To use this attribute, you must enable the
-    > [`ca-derivations`](@docroot@/contributing/experimental-features.md#xp-feature-ca-derivations) experimental feature.
+    > [`ca-derivations`][xp-feature-ca-derivations] experimental feature.
    > For example, in [nix.conf](../command-ref/conf-file.md) you could add:
    >
    > ```
@@ -303,7 +310,7 @@ Derivations can declare some infrequently used optional attributes.
  [`disallowedReferences`](#adv-attr-disallowedReferences) and [`disallowedRequisites`](#adv-attr-disallowedRequisites), the following attributes are available:

-  - `maxSize` defines the maximum size of the resulting [store object](@docroot@/glossary.md#gloss-store-object).
+  - `maxSize` defines the maximum size of the resulting [store object](@docroot@/store/store-object.md).
  - `maxClosureSize` defines the maximum size of the output's closure.
  - `ignoreSelfRefs` controls whether self-references should be considered when checking for allowed references/requisites.
@@ -355,3 +362,7 @@ Derivations can declare some infrequently used optional attributes.
  ```
  ensures that the derivation can only be built on a machine with the `kvm` feature.
+
+[xp-feature-ca-derivations]: @docroot@/contributing/experimental-features.md#xp-feature-ca-derivations
+[xp-feature-dynamic-derivations]: @docroot@/contributing/experimental-features.md#xp-feature-dynamic-derivations
+[xp-feature-git-hashing]: @docroot@/contributing/experimental-features.md#xp-feature-git-hashing
diff --git a/doc/manual/src/language/constructs.md b/doc/manual/src/language/constructs.md
index 4d75ea82c..491d221b3 100644
--- a/doc/manual/src/language/constructs.md
+++ b/doc/manual/src/language/constructs.md
@@ -414,12 +414,62 @@ Does evaluate to `"inner"`.

## Comments

-Comments can be single-line, started with a `#` character, or
-inline/multi-line, enclosed within `/* ... */`.
+- Inline comments start with `#` and run until the end of the line.

-`#` comments last until the end of the line.
+  > **Example**
+  >
+  > ```nix
+  > # A number
+  > 2 # Equals 1 + 1
+  > ```
+  >
+  > ```console
+  > 2
+  > ```

-`/*` comments run until the next occurrence of `*/`; this cannot be escaped.
+- Block comments start with `/*` and run until the next occurrence of `*/`.
+ + > **Example** + > + > ```nix + > /* + > Block comments + > can span multiple lines. + > */ "hello" + > ``` + > + > ```console + > "hello" + > ``` + + This means that block comments cannot be nested. + + > **Example** + > + > ```nix + > /* /* nope */ */ 1 + > ``` + > + > ```console + > error: syntax error, unexpected '*' + > + > at «string»:1:15: + > + > 1| /* /* nope */ * + > | ^ + > ``` + + Consider escaping nested comments and unescaping them in post-processing. + + > **Example** + > + > ```nix + > /* /* nested *\/ */ 1 + > ``` + > + > ```console + > 1 + > ``` ## Scoping rules @@ -432,6 +482,5 @@ Nix is [statically scoped](https://en.wikipedia.org/wiki/Scope_(computer_science * secondary scope --- implicitly-bound variables * [`with`](#with-expressions) - Primary scope takes precedence over secondary scope. See [`with`](#with-expressions) for a detailed example. diff --git a/doc/manual/src/language/derivations.md b/doc/manual/src/language/derivations.md index 75f824a34..b95900cdd 100644 --- a/doc/manual/src/language/derivations.md +++ b/doc/manual/src/language/derivations.md @@ -17,7 +17,7 @@ It outputs an attribute set, and produces a [store derivation] as a side effect A symbolic name for the derivation. It is added to the [store path] of the corresponding [store derivation] as well as to its [output paths](@docroot@/glossary.md#gloss-output-path). - [store path]: @docroot@/glossary.md#gloss-store-path + [store path]: @docroot@/store/store-path.md > **Example** > @@ -141,7 +141,7 @@ It outputs an attribute set, and produces a [store derivation] as a side effect By default, a derivation produces a single output called `out`. However, derivations can produce multiple outputs. - This allows the associated [store objects](@docroot@/glossary.md#gloss-store-object) and their [closures](@docroot@/glossary.md#gloss-closure) to be copied or garbage-collected separately. + This allows the associated [store objects](@docroot@/store/store-object.md) and their [closures](@docroot@/glossary.md#gloss-closure) to be copied or garbage-collected separately. > **Example** > diff --git a/doc/manual/src/language/import-from-derivation.md b/doc/manual/src/language/import-from-derivation.md index fb12ba51a..e901f5bcf 100644 --- a/doc/manual/src/language/import-from-derivation.md +++ b/doc/manual/src/language/import-from-derivation.md @@ -2,9 +2,9 @@ The value of a Nix expression can depend on the contents of a [store object]. -[store object]: @docroot@/glossary.md#gloss-store-object +[store object]: @docroot@/store/store-object.md -Passing an expression `expr` that evaluates to a [store path](@docroot@/glossary.md#gloss-store-path) to any built-in function which reads from the filesystem constitutes Import From Derivation (IFD): +Passing an expression `expr` that evaluates to a [store path](@docroot@/store/store-path.md) to any built-in function which reads from the filesystem constitutes Import From Derivation (IFD): - [`import`](./builtins.md#builtins-import)` expr` - [`builtins.readFile`](./builtins.md#builtins-readFile)` expr` diff --git a/doc/manual/src/language/operators.md b/doc/manual/src/language/operators.md index 698fed47e..9e5ab52a2 100644 --- a/doc/manual/src/language/operators.md +++ b/doc/manual/src/language/operators.md @@ -128,7 +128,7 @@ The result is a string. > The file or directory at *path* must exist and is copied to the [store]. > The path appears in the result as the corresponding [store path]. 
-[store path]: @docroot@/glossary.md#gloss-store-path +[store path]: @docroot@/store/store-path.md [store]: @docroot@/glossary.md#gloss-store [String and path concatenation]: #string-and-path-concatenation @@ -141,7 +141,7 @@ The result is a string. Update [attribute set] *attrset1* with names and values from *attrset2*. -The returned attribute set will have of all the attributes in *attrset1* and *attrset2*. +The returned attribute set will have all of the attributes in *attrset1* and *attrset2*. If an attribute name is present in both, the attribute value from the latter is taken. [Update]: #update diff --git a/doc/manual/src/language/string-interpolation.md b/doc/manual/src/language/string-interpolation.md index 1f8fecca8..1e2c4ad95 100644 --- a/doc/manual/src/language/string-interpolation.md +++ b/doc/manual/src/language/string-interpolation.md @@ -107,9 +107,9 @@ An expression that is interpolated must evaluate to one of the following: A string interpolates to itself. -A path in an interpolated expression is first copied into the Nix store, and the resulting string is the [store path] of the newly created [store object](@docroot@/glossary.md#gloss-store-object). +A path in an interpolated expression is first copied into the Nix store, and the resulting string is the [store path] of the newly created [store object](@docroot@/store/store-object.md). -[store path]: @docroot@/glossary.md#gloss-store-path +[store path]: @docroot@/store/store-path.md > **Example** > diff --git a/doc/manual/src/language/values.md b/doc/manual/src/language/values.md index 2dd52b379..4eb1887fa 100644 --- a/doc/manual/src/language/values.md +++ b/doc/manual/src/language/values.md @@ -124,7 +124,7 @@ For example, assume you used a file path in an interpolated string during a `nix repl` session. Later in the same session, after having changed the file contents, evaluating the interpolated string with the file path again might not return a new [store path], since Nix might not re-read the file contents. Use `:r` to reset the repl as needed. - [store path]: @docroot@/glossary.md#gloss-store-path + [store path]: @docroot@/store/store-path.md Path literals can also include [string interpolation], besides being [interpolated into other expressions]. diff --git a/doc/manual/src/package-management/copy-closure.md b/doc/manual/src/package-management/copy-closure.md deleted file mode 100644 index 14326298b..000000000 --- a/doc/manual/src/package-management/copy-closure.md +++ /dev/null @@ -1,34 +0,0 @@ -# Copying Closures via SSH - -The command `nix-copy-closure` copies a Nix store path along with all -its dependencies to or from another machine via the SSH protocol. It -doesn’t copy store paths that are already present on the target machine. -For example, the following command copies Firefox with all its -dependencies: - - $ nix-copy-closure --to alice@itchy.example.org $(type -p firefox) - -See the [manpage for `nix-copy-closure`](../command-ref/nix-copy-closure.md) for details. - -With `nix-store ---export` and `nix-store --import` you can write the closure of a store -path (that is, the path and all its dependencies) to a file, and then -unpack that file into another Nix store. For example, - - $ nix-store --export $(nix-store --query --requisites $(type -p firefox)) > firefox.closure - -writes the closure of Firefox to a file. 
You can then copy this file to
-another machine and install the closure:
-
-    $ nix-store --import < firefox.closure
-
-Any store paths in the closure that are already present in the target
-store are ignored. It is also possible to pipe the export into another
-command, e.g. to copy and install a closure directly to/on another
-machine:
-
-    $ nix-store --export $(nix-store --query --requisites $(type -p firefox)) | bzip2 | \
-      ssh alice@itchy.example.org "bunzip2 | nix-store --import"
-
-However, `nix-copy-closure` is generally more efficient because it only
-copies paths that are not already present in the target Nix store.
diff --git a/doc/manual/src/protocols/json/derivation.md b/doc/manual/src/protocols/json/derivation.md
index 649d543cc..f881dd703 100644
--- a/doc/manual/src/protocols/json/derivation.md
+++ b/doc/manual/src/protocols/json/derivation.md
@@ -18,10 +18,30 @@ is a JSON object with the following fields:
  Information about the output paths of the derivation.
  This is a JSON object with one member per output, where the key is the output name and the value is a JSON object with these fields:

-  * `path`: The output path.
+  * `path`:
+    The output path, if it is known in advance.
+    Otherwise, `null`.
+
+
+  * `method`:
+    For an output which will be [content addressed], a string representing the [method](@docroot@/store/store-object/content-address.md) of content addressing that is chosen.
+    Valid method strings are:
+
+    - [`flat`](@docroot@/store/store-object/content-address.md#method-flat)
+    - [`nar`](@docroot@/store/store-object/content-address.md#method-nix-archive)
+    - [`text`](@docroot@/store/store-object/content-address.md#method-text)
+    - [`git`](@docroot@/store/store-object/content-address.md#method-git)
+
+    Otherwise, `null`.

  * `hashAlgo`:
-    For fixed-output derivations, the hashing algorithm (e.g. `sha256`), optionally prefixed by `r:` if `hash` denotes a NAR hash rather than a flat file hash.
+    For an output which will be [content addressed], the name of the hash algorithm used.
+    Valid algorithm strings are:
+
+    - `md5`
+    - `sha1`
+    - `sha256`
+    - `sha512`

  * `hash`:
    For fixed-output derivations, the expected content hash in base-16.
@@ -32,7 +52,8 @@ is a JSON object with the following fields:
> "outputs": {
>   "out": {
>     "path": "/nix/store/2543j7c6jn75blc3drf4g5vhb1rhdq29-source",
->     "hashAlgo": "r:sha256",
+>     "method": "nar",
+>     "hashAlgo": "sha256",
>     "hash": "6fc80dcc62179dbc12fc0b5881275898f93444833d21b89dfe5f7fbcbb1d0d62"
>   }
> }
diff --git a/doc/manual/src/protocols/json/store-object-info.md b/doc/manual/src/protocols/json/store-object-info.md
index 179cafbb4..9f647a96c 100644
--- a/doc/manual/src/protocols/json/store-object-info.md
+++ b/doc/manual/src/protocols/json/store-object-info.md
@@ -24,41 +24,45 @@ Info about a [store object].

  An array of [store paths][store path], possibly including this one.

-* `ca` (optional):
+* `ca`:

-  Content address of this store object's file system object, used to compute its store path.
+  If the store object is [content-addressed],
+  this is the content address of this store object's file system object, used to compute its store path.
+  Otherwise (i.e. if it is [input-addressed]), this is `null`.
-[store path]: @docroot@/glossary.md#gloss-store-path +[store path]: @docroot@/store/store-path.md [file system object]: @docroot@/store/file-system-object.md -[Nix Archive]: @docroot@/glossary.md#gloss-nar +[Nix Archive]: @docroot@/store/file-system-object/content-address.md#serial-nix-archive ## Impure fields These are not intrinsic properties of the store object. In other words, the same store object residing in different store could have different values for these properties. -* `deriver` (optional): +* `deriver`: - The path to the [derivation] from which this store object is produced. + If known, the path to the [derivation] from which this store object was produced. + Otherwise `null`. [derivation]: @docroot@/glossary.md#gloss-store-derivation * `registrationTime` (optional): - When this derivation was added to the store. + If known, when this derivation was added to the store. + Otherwise `null`. -* `ultimate` (optional): +* `ultimate`: Whether this store object is trusted because we built it ourselves, rather than substituted a build product from elsewhere. -* `signatures` (optional): +* `signatures`: Signatures claiming that this store object is what it claims to be. Not relevant for [content-addressed] store objects, but useful for [input-addressed] store objects. - [content-addressed]: @docroot@/glossary.md#gloss-content-addressed-store-object - [input-addressed]: @docroot@/glossary.md#gloss-input-addressed-store-object +[content-addressed]: @docroot@/store/store-object/content-address.md +[input-addressed]: @docroot@/glossary.md#gloss-input-addressed-store-object ### `.narinfo` extra fields diff --git a/doc/manual/src/protocols/nix-archive.md b/doc/manual/src/protocols/nix-archive.md index 4fb6282ee..bfc523b3d 100644 --- a/doc/manual/src/protocols/nix-archive.md +++ b/doc/manual/src/protocols/nix-archive.md @@ -1,9 +1,10 @@ # Nix Archive (NAR) format -This is the complete specification of the Nix Archive format. +This is the complete specification of the [Nix Archive] format. The Nix Archive format closely follows the abstract specification of a [file system object] tree, because it is designed to serialize exactly that data structure. +[Nix Archive]: @docroot@/store/file-system-object/content-address.md#nix-archive [file system object]: @docroot@/store/file-system-object.md The format of this specification is close to [Extended Backus–Naur form](https://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_form), with the exception of the `str(..)` function / parameterized rule, which length-prefixes and pads strings. diff --git a/doc/manual/src/protocols/store-path.md b/doc/manual/src/protocols/store-path.md index 565c4fa75..52352d358 100644 --- a/doc/manual/src/protocols/store-path.md +++ b/doc/manual/src/protocols/store-path.md @@ -1,12 +1,14 @@ # Complete Store Path Calculation -This is the complete specification for how store paths are calculated. +This is the complete specification for how [store path]s are calculated. The format of this specification is close to [Extended Backus–Naur form](https://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_form), but must deviate for a few things such as hash functions which we treat as bidirectional for specification purposes. Regular users do *not* need to know this information --- store paths can be treated as black boxes computed from the properties of the store objects they refer to. But for those interested in exactly how Nix works, e.g. if they are reimplementing it, this information can be useful. 
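+
+To give a rough idea of the shape of the computation before diving into the grammar, here is a non-normative sketch in C++ (using OpenSSL) that assembles a content-addressed `"source"` path with no references.
+The inputs `narHashHex`, `storeDir` and `name` are made-up placeholders, and the hash-folding and base-32 rendering encode one reading of the rules below; wherever the two disagree, the grammar below is authoritative.
+
+```cpp
+// Non-normative sketch: assemble a "source" store path with no references.
+// Build with: c++ sketch.cc -lcrypto
+#include <openssl/sha.h>
+#include <cstdio>
+#include <string>
+#include <vector>
+
+// Nix's base-32 alphabet (no 'e', 'o', 'u', 't'); characters are emitted from the
+// highest group of 5 bits down to the lowest.
+static const std::string nixBase32Chars = "0123456789abcdfghijklmnpqrsvwxyz";
+
+static std::string toNixBase32(const std::vector<unsigned char> & b)
+{
+    size_t len = (b.size() * 8 - 1) / 5 + 1;
+    std::string s;
+    for (size_t n = len; n-- > 0; ) {
+        unsigned int bit = n * 5, i = bit / 8, j = bit % 8;
+        unsigned int c = (b[i] >> j) | (i + 1 < b.size() ? b[i + 1] << (8 - j) : 0);
+        s += nixBase32Chars[c & 0x1f];
+    }
+    return s;
+}
+
+// Shorten a hash to `size` bytes by XOR-ing the trailing bytes over the leading ones
+// (an assumption modelled on Nix's `compressHash`; the specification below is normative).
+static std::vector<unsigned char> compressHash(const unsigned char * h, size_t len, size_t size)
+{
+    std::vector<unsigned char> out(size, 0);
+    for (size_t i = 0; i < len; ++i)
+        out[i % size] ^= h[i];
+    return out;
+}
+
+int main()
+{
+    std::string narHashHex(64, '0');   // placeholder: base-16 SHA-256 of the NAR serialisation
+    std::string storeDir = "/nix/store";
+    std::string name = "example";
+
+    // "source" fingerprint with no references and no self-reference (see the grammar below).
+    std::string fingerprint = "source:sha256:" + narHashHex + ":" + storeDir + ":" + name;
+
+    unsigned char h[SHA256_DIGEST_LENGTH];
+    SHA256(reinterpret_cast<const unsigned char *>(fingerprint.data()), fingerprint.size(), h);
+
+    auto digest = toNixBase32(compressHash(h, sizeof h, 20));
+    std::printf("%s/%s-%s\n", storeDir.c_str(), digest.c_str(), name.c_str());
+    return 0;
+}
+```
+
+With a real NAR hash substituted for the placeholder, the result is intended to line up with the store path Nix itself computes for such an object, but treat the sketch as illustration only.
+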
+[store path](@docroot@/store/store-path.md) + ## Store path proper ```ebnf @@ -34,18 +36,23 @@ where - `type` = one of: - ```ebnf - | "text" ( ":" store-path )* + | "text" { ":" store-path } ``` - for encoded derivations written to the store. + This is for the + ["Text"](@docroot@/store/store-object/content-address.md#method-text) + method of content addressing store objects. The optional trailing store paths are the references of the store object. - ```ebnf - | "source" ( ":" store-path )* + | "source" { ":" store-path } [ ":self" ] ``` - For paths copied to the store and hashed via a [Nix Archive (NAR)] and [SHA-256][sha-256]. - Just like in the text case, we can have the store objects referenced by their paths. + This is for the + ["Nix Archive"](@docroot@/store/store-object/content-address.md#method-nix-archive) + method of content addressing store objects, + if the hash algorithm is [SHA-256]. + Just like in the "Text" case, we can have the store objects referenced by their paths. Additionally, we can have an optional `:self` label to denote self reference. - ```ebnf @@ -53,8 +60,12 @@ where ``` For either the outputs built from derivations, - paths copied to the store hashed that area single file hashed directly, or the via a hash algorithm other than [SHA-256][sha-256]. - (in that case "source" is used; this is only necessary for compatibility). + or content-addressed store objects that are not using one of the two above cases. + To be explicit about the latter, that is currently these methods: + + - ["Flat"](@docroot@/store/store-object/content-address.md#method-flat) + - ["Git"](@docroot@/store/store-object/content-address.md#method-git) + - ["Nix Archive"](@docroot@/store/store-object/content-address.md#method-nix-archive) if the hash algorithm is not [SHA-256]. `id` is the name of the output (usually, "out"). For content-addressed store objects, `id`, is always "out". @@ -113,8 +124,8 @@ where Note that `id` = `"out"`, regardless of the name part of the store path. Also note that NAR + SHA-256 must not use this case, and instead must use the `type` = `"source:" ...` case. -[Nix Archive (NAR)]: @docroot@/glossary.md#gloss-NAR -[sha-256]: https://en.m.wikipedia.org/wiki/SHA-256 +[Nix Archive (NAR)]: @docroot@/store/file-system-object/content-address.md#serial-nix-archive +[SHA-256]: https://en.m.wikipedia.org/wiki/SHA-256 ### Historical Note diff --git a/doc/manual/src/protocols/tarball-fetcher.md b/doc/manual/src/protocols/tarball-fetcher.md index 274fa6d63..24ec7ae14 100644 --- a/doc/manual/src/protocols/tarball-fetcher.md +++ b/doc/manual/src/protocols/tarball-fetcher.md @@ -22,7 +22,7 @@ Link: ; rel="immutable" *flakeref* must be a tarball flakeref. It can contain the tarball flake attributes `narHash`, `rev`, `revCount` and `lastModified`. If `narHash` is included, its -value must be the NAR hash of the unpacked tarball (as computed via +value must be the [NAR hash][Nix Archive] of the unpacked tarball (as computed via `nix hash path`). Nix checks the contents of the returned tarball against the `narHash` attribute. The `rev` and `revCount` attributes are useful when the tarball flake is a mirror of a fetcher type that @@ -40,3 +40,5 @@ Link: **Warning** +> +> This method is part of the [`git-hashing`][xp-feature-git-hashing] experimental feature. + +Git's file system model is very close to Nix's, and so Git's content addressing method is a pretty good fit. +Just as with regular Git, files and symlinks are hashed as git "blobs", and directories are hashed as git "trees". 
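+
+As a concrete, non-normative illustration of the blob case (a sketch, not code taken from Nix or Git; the helper name `gitBlobId` is invented here), a blob identifier is a SHA-1 over a `blob <size>` header, a NUL byte, and the raw file contents:
+
+```cpp
+// Illustrative sketch: compute a git blob ID as SHA-1 over "blob <size>\0<contents>".
+// Build with: c++ blob.cc -lcrypto
+#include <openssl/sha.h>
+#include <cstdio>
+#include <string>
+
+static std::string gitBlobId(const std::string & contents)
+{
+    std::string preimage = "blob " + std::to_string(contents.size());
+    preimage += '\0';      // header and contents are separated by a NUL byte
+    preimage += contents;
+
+    unsigned char hash[SHA_DIGEST_LENGTH];
+    SHA1(reinterpret_cast<const unsigned char *>(preimage.data()), preimage.size(), hash);
+
+    std::string hex;
+    char buf[3];
+    for (unsigned char byte : hash) {
+        std::snprintf(buf, sizeof buf, "%02x", byte);
+        hex += buf;
+    }
+    return hex;
+}
+
+int main()
+{
+    // The output should agree with: printf 'hello world\n' | git hash-object --stdin
+    std::printf("%s\n", gitBlobId("hello world\n").c_str());
+    return 0;
+}
+```
+
+Tree objects follow the same pattern, with a `tree` header over the serialised directory entries; symlinks are stored as blobs whose contents are the link target.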
+
+However, one difference between Nix's and Git's file system model needs special treatment.
+Plain files, executable files, and symlinks are not differentiated as distinctly addressable objects, but by their context: by the directory entry that refers to them.
+That means so long as the root object is a directory, there is no problem:
+every non-directory object is owned by a parent directory, and the entry that refers to it provides the missing information.
+However, if the root object is not a directory, then we have no way of knowing whether it is supposed to be an executable file, a non-executable file, or a symlink.
+
+In response to this, we have decided to treat a bare file as a non-executable file.
+This is similar to what we do with [flat serialisation](#serial-flat), which also lacks this information.
+To avoid an address collision, attempts to hash a bare executable file or symlink will result in an error (just as they would for flat serialisation).
+Thus, Git can encode some, but not all, of Nix's "File System Objects", and this sort of content-addressing is likewise partial.
+
+In the future, we may support a Git-like hash for such file system objects, or we may adopt another Merkle DAG format which is capable of representing all Nix file system objects.
+
+[file system object]: ../file-system-object.md
+[store object]: ../store-object.md
+[xp-feature-git-hashing]: @docroot@/contributing/experimental-features.md#xp-feature-git-hashing
diff --git a/doc/manual/src/store/store-object/content-address.md b/doc/manual/src/store/store-object/content-address.md
new file mode 100644
index 000000000..f6f982035
--- /dev/null
+++ b/doc/manual/src/store/store-object/content-address.md
@@ -0,0 +1,95 @@
+# Content-Addressing Store Objects
+
+Just [like][fso-ca] [File System Objects][File System Object],
+[Store Objects][Store Object] can also be [content-addressed](@docroot@/glossary.md#gloss-content-addressed),
+unless they are [input-addressed](@docroot@/glossary.md#gloss-input-addressed-store-object).
+
+For store objects, the content address we produce will take the form of a [Store Path] rather than a regular hash.
+In particular, the content-addressing scheme will ensure that the digest of the store path is solely computed from the
+
+- file system object graph (the root one and its children, if it has any)
+- references
+- [store directory](../store-path.md#store-directory)
+- name
+
+of the store object, and not from any other information, which would not be an intrinsic property of that store object.
+
+For the full specification of the algorithms involved, see the [specification of store path digests][sp-spec].
+
+[File System Object]: ../file-system-object.md
+[Store Object]: ../store-object.md
+[Store Path]: ../store-path.md
+
+## Content addressing each part of a store object
+
+### File System Objects
+
+With all currently supported store object content addressing methods, the file system object is always [content-addressed][fso-ca] first, and then that hash is incorporated into the content address computation for the store object.
+
+### References
+
+With all currently supported store object content addressing methods,
+other objects are referred to by their regular (string-encoded) [store paths][Store Path].
+
+Self-references, however, cannot be referred to by their path, because we are in the midst of describing how to compute that path!
+
+> The alternative would require finding a hash function fixed point, i.e. the solution to an equation of the form
+> ```
+> digest = hash(..... || digest || ....)
+> ```
+> which is computationally infeasible.
+> As far as we know, this is equivalent to finding a hash collision.
+
+Instead, we just have a "has self reference" boolean, which will end up affecting the digest.
+
+### Name and Store Directory
+
+These two items affect the digest in a way that is standard for store path digest computations and not specific to content-addressing.
+Consult the [specification of store path digests][sp-spec] for further details.
+
+## Content addressing methods
+
+For historical reasons, we don't support all features in all combinations.
+Each currently supported method of content addressing chooses a single method of file system object hashing, and may impose some restrictions on references.
+The names and store directories are unrestricted, however.
+
+### Flat { #method-flat }
+
+This uses the corresponding [Flat](../file-system-object/content-address.md#serial-flat) method of file system object content addressing.
+
+References are not supported: store objects with flat hashing *and* references can not be created.
+
+### Text { #method-text }
+
+This also uses the corresponding [Flat](../file-system-object/content-address.md#serial-flat) method of file system object content addressing.
+
+References to other store objects are supported, but self-references are not.
+
+This is the only store-object content-addressing method that is not named identically to a corresponding file system object method.
+It is somewhat obscure, mainly used for "drv files"
+(derivations serialized as store objects in their ["ATerm" file format](@docroot@/protocols/derivation-aterm.md)).
+Prefer another method if possible.
+
+### Nix Archive { #method-nix-archive }
+
+This uses the corresponding [Nix Archive](../file-system-object/content-address.md#serial-nix-archive) method of file system object content addressing.
+
+References (to other store objects and self-references alike) are supported so long as the hash algorithm is SHA-256; with any other hash algorithm, neither kind is supported.
+
+### Git { #method-git }
+
+> **Warning**
+>
+> This method is part of the [`git-hashing`][xp-feature-git-hashing] experimental feature.
+
+This uses the corresponding [Git](../file-system-object/content-address.md#serial-git) method of file system object content addressing.
+
+References are not supported.
+
+Only SHA-1 is supported at this time.
+If [SHA-256-based Git](https://git-scm.com/docs/hash-function-transition)
+becomes more widespread, this restriction will be revisited.
+
+[fso-ca]: ../file-system-object/content-address.md
+[sp-spec]: @docroot@/protocols/store-path.md
+[xp-feature-git-hashing]: @docroot@/contributing/experimental-features.md#xp-feature-git-hashing
diff --git a/doc/manual/src/store/store-path.md b/doc/manual/src/store/store-path.md
index 085aead51..beec2389b 100644
--- a/doc/manual/src/store/store-path.md
+++ b/doc/manual/src/store/store-path.md
@@ -1,5 +1,11 @@
# Store Path

+> **Example**
+>
+> `/nix/store/a040m110amc4h71lds2jmr8qrkj2jhxd-git-2.38.1`
+>
+> A rendered store path
+
Nix implements references to [store objects](./index.md#store-object) as *store paths*.

Think of a store path as an [opaque], [unique identifier]:
@@ -37,6 +43,10 @@ A store path is rendered to a file system path as the concatenation of
> store directory digest name
> ```

+Exactly how the digest is calculated depends on the type of store path.
+Store path digests are *supposed* to be opaque, and so for most operations, it is not necessary to know the details. +That said, the manual has a full [specification of store path digests](@docroot@/protocols/store-path.md). + ## Store Directory Every [Nix store](./index.md) has a store directory. diff --git a/flake.nix b/flake.nix index 987f25305..6fb159d6e 100644 --- a/flake.nix +++ b/flake.nix @@ -104,28 +104,6 @@ cross = forAllCrossSystems (crossSystem: make-pkgs crossSystem "stdenv"); }); - installScriptFor = tarballs: - nixpkgsFor.x86_64-linux.native.callPackage ./scripts/installer.nix { - inherit tarballs; - }; - - testNixVersions = pkgs: client: daemon: - pkgs.callPackage ./package.nix { - pname = - "nix-tests" - + lib.optionalString - (lib.versionAtLeast daemon.version "2.4pre20211005" && - lib.versionAtLeast client.version "2.4pre20211005") - "-${client.version}-against-${daemon.version}"; - - inherit fileset; - - test-client = client; - test-daemon = daemon; - - doBuild = false; - }; - binaryTarball = nix: pkgs: pkgs.callPackage ./scripts/binary-tarball.nix { inherit nix; }; @@ -232,156 +210,17 @@ # 'nix.perl-bindings' packages. overlays.default = overlayFor (p: p.stdenv); - hydraJobs = { - - # Binary package for various platforms. - build = forAllSystems (system: self.packages.${system}.nix); - - shellInputs = forAllSystems (system: self.devShells.${system}.default.inputDerivation); - - buildStatic = lib.genAttrs linux64BitSystems (system: self.packages.${system}.nix-static); - - buildCross = forAllCrossSystems (crossSystem: - lib.genAttrs ["x86_64-linux"] (system: self.packages.${system}."nix-${crossSystem}")); - - buildNoGc = forAllSystems (system: - self.packages.${system}.nix.override { enableGC = false; } - ); - - buildNoTests = forAllSystems (system: - self.packages.${system}.nix.override { - doCheck = false; - doInstallCheck = false; - installUnitTests = false; - } - ); - - # Toggles some settings for better coverage. Windows needs these - # library combinations, and Debian build Nix with GNU readline too. - buildReadlineNoMarkdown = forAllSystems (system: - self.packages.${system}.nix.override { - enableMarkdown = false; - readlineFlavor = "readline"; - } - ); - - # Perl bindings for various platforms. - perlBindings = forAllSystems (system: nixpkgsFor.${system}.native.nix.perl-bindings); - - # Binary tarball for various platforms, containing a Nix store - # with the closure of 'nix' package, and the second half of - # the installation script. - binaryTarball = forAllSystems (system: binaryTarball nixpkgsFor.${system}.native.nix nixpkgsFor.${system}.native); - - binaryTarballCross = lib.genAttrs ["x86_64-linux"] (system: - forAllCrossSystems (crossSystem: - binaryTarball - self.packages.${system}."nix-${crossSystem}" - nixpkgsFor.${system}.cross.${crossSystem})); - - # The first half of the installation script. This is uploaded - # to https://nixos.org/nix/install. It downloads the binary - # tarball for the user's system and calls the second half of the - # installation script. 
- installerScript = installScriptFor [ - # Native - self.hydraJobs.binaryTarball."x86_64-linux" - self.hydraJobs.binaryTarball."i686-linux" - self.hydraJobs.binaryTarball."aarch64-linux" - self.hydraJobs.binaryTarball."x86_64-darwin" - self.hydraJobs.binaryTarball."aarch64-darwin" - # Cross - self.hydraJobs.binaryTarballCross."x86_64-linux"."armv6l-unknown-linux-gnueabihf" - self.hydraJobs.binaryTarballCross."x86_64-linux"."armv7l-unknown-linux-gnueabihf" - self.hydraJobs.binaryTarballCross."x86_64-linux"."riscv64-unknown-linux-gnu" - ]; - installerScriptForGHA = installScriptFor [ - # Native - self.hydraJobs.binaryTarball."x86_64-linux" - self.hydraJobs.binaryTarball."x86_64-darwin" - # Cross - self.hydraJobs.binaryTarballCross."x86_64-linux"."armv6l-unknown-linux-gnueabihf" - self.hydraJobs.binaryTarballCross."x86_64-linux"."armv7l-unknown-linux-gnueabihf" - self.hydraJobs.binaryTarballCross."x86_64-linux"."riscv64-unknown-linux-gnu" - ]; - - # docker image with Nix inside - dockerImage = lib.genAttrs linux64BitSystems (system: self.packages.${system}.dockerImage); - - # Line coverage analysis. - coverage = nixpkgsFor.x86_64-linux.native.nix.override { - pname = "nix-coverage"; - withCoverageChecks = true; - }; - - # API docs for Nix's unstable internal C++ interfaces. - internal-api-docs = nixpkgsFor.x86_64-linux.native.callPackage ./package.nix { - inherit fileset; - doBuild = false; - enableInternalAPIDocs = true; - }; - - # API docs for Nix's C bindings. - external-api-docs = nixpkgsFor.x86_64-linux.native.callPackage ./package.nix { - inherit fileset; - doBuild = false; - enableExternalAPIDocs = true; - }; - - # System tests. - tests = import ./tests/nixos { inherit lib nixpkgs nixpkgsFor; } // { - - # Make sure that nix-env still produces the exact same result - # on a particular version of Nixpkgs. - evalNixpkgs = - let - inherit (nixpkgsFor.x86_64-linux.native) runCommand nix; - in - runCommand "eval-nixos" { buildInputs = [ nix ]; } - '' - type -p nix-env - # Note: we're filtering out nixos-install-tools because https://github.com/NixOS/nixpkgs/pull/153594#issuecomment-1020530593. - ( - set -x - time nix-env --store dummy:// -f ${nixpkgs-regression} -qaP --drv-path | sort | grep -v nixos-install-tools > packages - [[ $(sha1sum < packages | cut -c1-40) = e01b031fc9785a572a38be6bc473957e3b6faad7 ]] - ) - mkdir $out - ''; - - nixpkgsLibTests = - forAllSystems (system: - import (nixpkgs + "/lib/tests/release.nix") - { pkgs = nixpkgsFor.${system}.native; - nixVersions = [ self.packages.${system}.nix ]; - } - ); - }; - - metrics.nixpkgs = import "${nixpkgs-regression}/pkgs/top-level/metrics.nix" { - pkgs = nixpkgsFor.x86_64-linux.native; - nixpkgs = nixpkgs-regression; - }; - - installTests = forAllSystems (system: - let pkgs = nixpkgsFor.${system}.native; in - pkgs.runCommand "install-tests" { - againstSelf = testNixVersions pkgs pkgs.nix pkgs.pkgs.nix; - againstCurrentUnstable = - # FIXME: temporarily disable this on macOS because of #3605. 
- if system == "x86_64-linux" - then testNixVersions pkgs pkgs.nix pkgs.nixUnstable - else null; - # Disabled because the latest stable version doesn't handle - # `NIX_DAEMON_SOCKET_PATH` which is required for the tests to work - # againstLatestStable = testNixVersions pkgs pkgs.nix pkgs.nixStable; - } "touch $out"); - - installerTests = import ./tests/installer { - binaryTarballs = self.hydraJobs.binaryTarball; - inherit nixpkgsFor; - }; - + hydraJobs = import ./build/hydra.nix { + inherit + inputs + binaryTarball + forAllCrossSystems + forAllSystems + lib + linux64BitSystems + nixpkgsFor + self + ; }; checks = forAllSystems (system: { @@ -393,6 +232,7 @@ in pkgs.buildPackages.runCommand "test-rl-next-release-notes" { } '' LANG=C.UTF-8 ${pkgs.changelog-d-nix}/bin/changelog-d ${./doc/manual/rl-next} >$out ''; + repl-completion = nixpkgsFor.${system}.native.callPackage ./tests/repl-completion.nix { }; } // (lib.optionalAttrs (builtins.elem system linux64BitSystems)) { dockerImage = self.hydraJobs.dockerImage.${system}; } // (lib.optionalAttrs (!(builtins.elem system linux32BitSystems))) { diff --git a/maintainers/flake-module.nix b/maintainers/flake-module.nix index 351a01fcb..3006d5e30 100644 --- a/maintainers/flake-module.nix +++ b/maintainers/flake-module.nix @@ -10,425 +10,709 @@ # https://flake.parts/options/pre-commit-hooks-nix.html#options pre-commit.settings = { hooks = { - clang-format.enable = true; + clang-format = { + enable = true; + excludes = [ + # We don't want to format test data + # ''tests/(?!nixos/).*\.nix'' + ''^tests/unit/[^/]*/data/.*$'' + + # Don't format vendored code + ''^src/toml11/.*'' + ''^doc/manual/redirects\.js$'' + ''^doc/manual/theme/highlight\.js$'' + + # We haven't applied formatting to these files yet + ''^doc/manual/redirects\.js$'' + ''^doc/manual/theme/highlight\.js$'' + ''^precompiled-headers\.h$'' + ''^src/build-remote/build-remote\.cc$'' + ''^src/libcmd/built-path\.cc$'' + ''^src/libcmd/built-path\.hh$'' + ''^src/libcmd/command\.cc$'' + ''^src/libcmd/command\.hh$'' + ''^src/libcmd/common-eval-args\.cc$'' + ''^src/libcmd/common-eval-args\.hh$'' + ''^src/libcmd/editor-for\.cc$'' + ''^src/libcmd/installable-attr-path\.cc$'' + ''^src/libcmd/installable-attr-path\.hh$'' + ''^src/libcmd/installable-derived-path\.cc$'' + ''^src/libcmd/installable-derived-path\.hh$'' + ''^src/libcmd/installable-flake\.cc$'' + ''^src/libcmd/installable-flake\.hh$'' + ''^src/libcmd/installable-value\.cc$'' + ''^src/libcmd/installable-value\.hh$'' + ''^src/libcmd/installables\.cc$'' + ''^src/libcmd/installables\.hh$'' + ''^src/libcmd/legacy\.hh$'' + ''^src/libcmd/markdown\.cc$'' + ''^src/libcmd/misc-store-flags\.cc$'' + ''^src/libcmd/repl-interacter\.cc$'' + ''^src/libcmd/repl-interacter\.hh$'' + ''^src/libcmd/repl\.cc$'' + ''^src/libcmd/repl\.hh$'' + ''^src/libexpr-c/nix_api_expr\.cc$'' + ''^src/libexpr-c/nix_api_external\.cc$'' + ''^src/libexpr/attr-path\.cc$'' + ''^src/libexpr/attr-path\.hh$'' + ''^src/libexpr/attr-set\.cc$'' + ''^src/libexpr/attr-set\.hh$'' + ''^src/libexpr/eval-cache\.cc$'' + ''^src/libexpr/eval-cache\.hh$'' + ''^src/libexpr/eval-error\.cc$'' + ''^src/libexpr/eval-inline\.hh$'' + ''^src/libexpr/eval-settings\.cc$'' + ''^src/libexpr/eval-settings\.hh$'' + ''^src/libexpr/eval\.cc$'' + ''^src/libexpr/eval\.hh$'' + ''^src/libexpr/flake/config\.cc$'' + ''^src/libexpr/flake/flake\.cc$'' + ''^src/libexpr/flake/flake\.hh$'' + ''^src/libexpr/flake/flakeref\.cc$'' + ''^src/libexpr/flake/flakeref\.hh$'' + ''^src/libexpr/flake/lockfile\.cc$'' + 
''^src/libexpr/flake/lockfile\.hh$'' + ''^src/libexpr/flake/url-name\.cc$'' + ''^src/libexpr/function-trace\.cc$'' + ''^src/libexpr/gc-small-vector\.hh$'' + ''^src/libexpr/get-drvs\.cc$'' + ''^src/libexpr/get-drvs\.hh$'' + ''^src/libexpr/json-to-value\.cc$'' + ''^src/libexpr/nixexpr\.cc$'' + ''^src/libexpr/nixexpr\.hh$'' + ''^src/libexpr/parser-state\.hh$'' + ''^src/libexpr/pos-table\.hh$'' + ''^src/libexpr/primops\.cc$'' + ''^src/libexpr/primops\.hh$'' + ''^src/libexpr/primops/context\.cc$'' + ''^src/libexpr/primops/fetchClosure\.cc$'' + ''^src/libexpr/primops/fetchMercurial\.cc$'' + ''^src/libexpr/primops/fetchTree\.cc$'' + ''^src/libexpr/primops/fromTOML\.cc$'' + ''^src/libexpr/print-ambiguous\.cc$'' + ''^src/libexpr/print-ambiguous\.hh$'' + ''^src/libexpr/print-options\.hh$'' + ''^src/libexpr/print\.cc$'' + ''^src/libexpr/print\.hh$'' + ''^src/libexpr/search-path\.cc$'' + ''^src/libexpr/symbol-table\.hh$'' + ''^src/libexpr/value-to-json\.cc$'' + ''^src/libexpr/value-to-json\.hh$'' + ''^src/libexpr/value-to-xml\.cc$'' + ''^src/libexpr/value-to-xml\.hh$'' + ''^src/libexpr/value\.hh$'' + ''^src/libexpr/value/context\.cc$'' + ''^src/libexpr/value/context\.hh$'' + ''^src/libfetchers/attrs\.cc$'' + ''^src/libfetchers/cache\.cc$'' + ''^src/libfetchers/cache\.hh$'' + ''^src/libfetchers/fetch-settings\.cc$'' + ''^src/libfetchers/fetch-settings\.hh$'' + ''^src/libfetchers/fetch-to-store\.cc$'' + ''^src/libfetchers/fetchers\.cc$'' + ''^src/libfetchers/fetchers\.hh$'' + ''^src/libfetchers/filtering-source-accessor\.cc$'' + ''^src/libfetchers/filtering-source-accessor\.hh$'' + ''^src/libfetchers/fs-source-accessor\.cc$'' + ''^src/libfetchers/fs-source-accessor\.hh$'' + ''^src/libfetchers/git-utils\.cc$'' + ''^src/libfetchers/git-utils\.hh$'' + ''^src/libfetchers/github\.cc$'' + ''^src/libfetchers/indirect\.cc$'' + ''^src/libfetchers/memory-source-accessor\.cc$'' + ''^src/libfetchers/path\.cc$'' + ''^src/libfetchers/registry\.cc$'' + ''^src/libfetchers/registry\.hh$'' + ''^src/libfetchers/tarball\.cc$'' + ''^src/libfetchers/tarball\.hh$'' + ''^src/libfetchers/git\.cc$'' + ''^src/libfetchers/mercurial\.cc$'' + ''^src/libmain/common-args\.cc$'' + ''^src/libmain/common-args\.hh$'' + ''^src/libmain/loggers\.cc$'' + ''^src/libmain/loggers\.hh$'' + ''^src/libmain/progress-bar\.cc$'' + ''^src/libmain/shared\.cc$'' + ''^src/libmain/shared\.hh$'' + ''^src/libmain/unix/stack\.cc$'' + ''^src/libstore/binary-cache-store\.cc$'' + ''^src/libstore/binary-cache-store\.hh$'' + ''^src/libstore/build-result\.hh$'' + ''^src/libstore/builtins\.hh$'' + ''^src/libstore/builtins/buildenv\.cc$'' + ''^src/libstore/builtins/buildenv\.hh$'' + ''^src/libstore/common-protocol-impl\.hh$'' + ''^src/libstore/common-protocol\.cc$'' + ''^src/libstore/common-protocol\.hh$'' + ''^src/libstore/content-address\.cc$'' + ''^src/libstore/content-address\.hh$'' + ''^src/libstore/daemon\.cc$'' + ''^src/libstore/daemon\.hh$'' + ''^src/libstore/derivations\.cc$'' + ''^src/libstore/derivations\.hh$'' + ''^src/libstore/derived-path-map\.cc$'' + ''^src/libstore/derived-path-map\.hh$'' + ''^src/libstore/derived-path\.cc$'' + ''^src/libstore/derived-path\.hh$'' + ''^src/libstore/downstream-placeholder\.cc$'' + ''^src/libstore/downstream-placeholder\.hh$'' + ''^src/libstore/dummy-store\.cc$'' + ''^src/libstore/export-import\.cc$'' + ''^src/libstore/filetransfer\.cc$'' + ''^src/libstore/filetransfer\.hh$'' + ''^src/libstore/gc-store\.hh$'' + ''^src/libstore/globals\.cc$'' + ''^src/libstore/globals\.hh$'' + 
''^src/libstore/http-binary-cache-store\.cc$'' + ''^src/libstore/legacy-ssh-store\.cc$'' + ''^src/libstore/legacy-ssh-store\.hh$'' + ''^src/libstore/length-prefixed-protocol-helper\.hh$'' + ''^src/libstore/linux/personality\.cc$'' + ''^src/libstore/linux/personality\.hh$'' + ''^src/libstore/local-binary-cache-store\.cc$'' + ''^src/libstore/local-fs-store\.cc$'' + ''^src/libstore/local-fs-store\.hh$'' + ''^src/libstore/log-store\.cc$'' + ''^src/libstore/log-store\.hh$'' + ''^src/libstore/machines\.cc$'' + ''^src/libstore/machines\.hh$'' + ''^src/libstore/make-content-addressed\.cc$'' + ''^src/libstore/make-content-addressed\.hh$'' + ''^src/libstore/misc\.cc$'' + ''^src/libstore/names\.cc$'' + ''^src/libstore/names\.hh$'' + ''^src/libstore/nar-accessor\.cc$'' + ''^src/libstore/nar-accessor\.hh$'' + ''^src/libstore/nar-info-disk-cache\.cc$'' + ''^src/libstore/nar-info-disk-cache\.hh$'' + ''^src/libstore/nar-info\.cc$'' + ''^src/libstore/nar-info\.hh$'' + ''^src/libstore/outputs-spec\.cc$'' + ''^src/libstore/outputs-spec\.hh$'' + ''^src/libstore/parsed-derivations\.cc$'' + ''^src/libstore/path-info\.cc$'' + ''^src/libstore/path-info\.hh$'' + ''^src/libstore/path-references\.cc$'' + ''^src/libstore/path-regex\.hh$'' + ''^src/libstore/path-with-outputs\.cc$'' + ''^src/libstore/path\.cc$'' + ''^src/libstore/path\.hh$'' + ''^src/libstore/pathlocks\.cc$'' + ''^src/libstore/pathlocks\.hh$'' + ''^src/libstore/profiles\.cc$'' + ''^src/libstore/profiles\.hh$'' + ''^src/libstore/realisation\.cc$'' + ''^src/libstore/realisation\.hh$'' + ''^src/libstore/remote-fs-accessor\.cc$'' + ''^src/libstore/remote-fs-accessor\.hh$'' + ''^src/libstore/remote-store-connection\.hh$'' + ''^src/libstore/remote-store\.cc$'' + ''^src/libstore/remote-store\.hh$'' + ''^src/libstore/s3-binary-cache-store\.cc$'' + ''^src/libstore/s3\.hh$'' + ''^src/libstore/serve-protocol-impl\.cc$'' + ''^src/libstore/serve-protocol-impl\.hh$'' + ''^src/libstore/serve-protocol\.cc$'' + ''^src/libstore/serve-protocol\.hh$'' + ''^src/libstore/sqlite\.cc$'' + ''^src/libstore/sqlite\.hh$'' + ''^src/libstore/ssh-store-config\.hh$'' + ''^src/libstore/ssh-store\.cc$'' + ''^src/libstore/ssh\.cc$'' + ''^src/libstore/ssh\.hh$'' + ''^src/libstore/store-api\.cc$'' + ''^src/libstore/store-api\.hh$'' + ''^src/libstore/store-dir-config\.hh$'' + ''^src/libstore/build/derivation-goal\.cc$'' + ''^src/libstore/build/derivation-goal\.hh$'' + ''^src/libstore/build/drv-output-substitution-goal\.cc$'' + ''^src/libstore/build/drv-output-substitution-goal\.hh$'' + ''^src/libstore/build/entry-points\.cc$'' + ''^src/libstore/build/goal\.cc$'' + ''^src/libstore/build/goal\.hh$'' + ''^src/libstore/unix/build/hook-instance\.cc$'' + ''^src/libstore/unix/build/local-derivation-goal\.cc$'' + ''^src/libstore/unix/build/local-derivation-goal\.hh$'' + ''^src/libstore/build/substitution-goal\.cc$'' + ''^src/libstore/build/substitution-goal\.hh$'' + ''^src/libstore/build/worker\.cc$'' + ''^src/libstore/build/worker\.hh$'' + ''^src/libstore/builtins/fetchurl\.cc$'' + ''^src/libstore/builtins/unpack-channel\.cc$'' + ''^src/libstore/gc\.cc$'' + ''^src/libstore/local-overlay-store\.cc$'' + ''^src/libstore/local-overlay-store\.hh$'' + ''^src/libstore/local-store\.cc$'' + ''^src/libstore/local-store\.hh$'' + ''^src/libstore/unix/user-lock\.cc$'' + ''^src/libstore/unix/user-lock\.hh$'' + ''^src/libstore/optimise-store\.cc$'' + ''^src/libstore/unix/pathlocks\.cc$'' + ''^src/libstore/posix-fs-canonicalise\.cc$'' + ''^src/libstore/posix-fs-canonicalise\.hh$'' + 
''^src/libstore/uds-remote-store\.cc$'' + ''^src/libstore/uds-remote-store\.hh$'' + ''^src/libstore/windows/build\.cc$'' + ''^src/libstore/worker-protocol-impl\.hh$'' + ''^src/libstore/worker-protocol\.cc$'' + ''^src/libstore/worker-protocol\.hh$'' + ''^src/libutil-c/nix_api_util_internal\.h$'' + ''^src/libutil/archive\.cc$'' + ''^src/libutil/archive\.hh$'' + ''^src/libutil/args\.cc$'' + ''^src/libutil/args\.hh$'' + ''^src/libutil/args/root\.hh$'' + ''^src/libutil/callback\.hh$'' + ''^src/libutil/canon-path\.cc$'' + ''^src/libutil/canon-path\.hh$'' + ''^src/libutil/chunked-vector\.hh$'' + ''^src/libutil/closure\.hh$'' + ''^src/libutil/comparator\.hh$'' + ''^src/libutil/compute-levels\.cc$'' + ''^src/libutil/config-impl\.hh$'' + ''^src/libutil/config\.cc$'' + ''^src/libutil/config\.hh$'' + ''^src/libutil/current-process\.cc$'' + ''^src/libutil/current-process\.hh$'' + ''^src/libutil/english\.cc$'' + ''^src/libutil/english\.hh$'' + ''^src/libutil/environment-variables\.cc$'' + ''^src/libutil/error\.cc$'' + ''^src/libutil/error\.hh$'' + ''^src/libutil/exit\.hh$'' + ''^src/libutil/experimental-features\.cc$'' + ''^src/libutil/experimental-features\.hh$'' + ''^src/libutil/file-content-address\.cc$'' + ''^src/libutil/file-content-address\.hh$'' + ''^src/libutil/file-descriptor\.cc$'' + ''^src/libutil/file-descriptor\.hh$'' + ''^src/libutil/file-path-impl\.hh$'' + ''^src/libutil/file-path\.hh$'' + ''^src/libutil/file-system\.cc$'' + ''^src/libutil/file-system\.hh$'' + ''^src/libutil/finally\.hh$'' + ''^src/libutil/fmt\.hh$'' + ''^src/libutil/fs-sink\.cc$'' + ''^src/libutil/fs-sink\.hh$'' + ''^src/libutil/git\.cc$'' + ''^src/libutil/git\.hh$'' + ''^src/libutil/hash\.cc$'' + ''^src/libutil/hash\.hh$'' + ''^src/libutil/hilite\.cc$'' + ''^src/libutil/hilite\.hh$'' + ''^src/libutil/source-accessor\.hh$'' + ''^src/libutil/json-impls\.hh$'' + ''^src/libutil/json-utils\.cc$'' + ''^src/libutil/json-utils\.hh$'' + ''^src/libutil/linux/cgroup\.cc$'' + ''^src/libutil/linux/namespaces\.cc$'' + ''^src/libutil/logging\.cc$'' + ''^src/libutil/logging\.hh$'' + ''^src/libutil/lru-cache\.hh$'' + ''^src/libutil/memory-source-accessor\.cc$'' + ''^src/libutil/memory-source-accessor\.hh$'' + ''^src/libutil/pool\.hh$'' + ''^src/libutil/position\.cc$'' + ''^src/libutil/position\.hh$'' + ''^src/libutil/posix-source-accessor\.cc$'' + ''^src/libutil/posix-source-accessor\.hh$'' + ''^src/libutil/processes\.hh$'' + ''^src/libutil/ref\.hh$'' + ''^src/libutil/references\.cc$'' + ''^src/libutil/references\.hh$'' + ''^src/libutil/regex-combinators\.hh$'' + ''^src/libutil/serialise\.cc$'' + ''^src/libutil/serialise\.hh$'' + ''^src/libutil/signals\.hh$'' + ''^src/libutil/signature/local-keys\.cc$'' + ''^src/libutil/signature/local-keys\.hh$'' + ''^src/libutil/signature/signer\.cc$'' + ''^src/libutil/signature/signer\.hh$'' + ''^src/libutil/source-accessor\.cc$'' + ''^src/libutil/source-accessor\.hh$'' + ''^src/libutil/source-path\.cc$'' + ''^src/libutil/source-path\.hh$'' + ''^src/libutil/split\.hh$'' + ''^src/libutil/suggestions\.cc$'' + ''^src/libutil/suggestions\.hh$'' + ''^src/libutil/sync\.hh$'' + ''^src/libutil/terminal\.cc$'' + ''^src/libutil/terminal\.hh$'' + ''^src/libutil/thread-pool\.cc$'' + ''^src/libutil/thread-pool\.hh$'' + ''^src/libutil/topo-sort\.hh$'' + ''^src/libutil/types\.hh$'' + ''^src/libutil/unix/file-descriptor\.cc$'' + ''^src/libutil/unix/file-path\.cc$'' + ''^src/libutil/unix/monitor-fd\.hh$'' + ''^src/libutil/unix/processes\.cc$'' + ''^src/libutil/unix/signals-impl\.hh$'' + 
''^src/libutil/unix/signals\.cc$'' + ''^src/libutil/unix-domain-socket\.cc$'' + ''^src/libutil/unix/users\.cc$'' + ''^src/libutil/url-parts\.hh$'' + ''^src/libutil/url\.cc$'' + ''^src/libutil/url\.hh$'' + ''^src/libutil/users\.cc$'' + ''^src/libutil/users\.hh$'' + ''^src/libutil/util\.cc$'' + ''^src/libutil/util\.hh$'' + ''^src/libutil/variant-wrapper\.hh$'' + ''^src/libutil/windows/environment-variables\.cc$'' + ''^src/libutil/windows/file-descriptor\.cc$'' + ''^src/libutil/windows/file-path\.cc$'' + ''^src/libutil/windows/processes\.cc$'' + ''^src/libutil/windows/users\.cc$'' + ''^src/libutil/windows/windows-error\.cc$'' + ''^src/libutil/windows/windows-error\.hh$'' + ''^src/libutil/xml-writer\.cc$'' + ''^src/libutil/xml-writer\.hh$'' + ''^src/nix-build/nix-build\.cc$'' + ''^src/nix-channel/nix-channel\.cc$'' + ''^src/nix-collect-garbage/nix-collect-garbage\.cc$'' + ''^src/nix-env/buildenv.nix$'' + ''^src/nix-env/nix-env\.cc$'' + ''^src/nix-env/user-env\.cc$'' + ''^src/nix-env/user-env\.hh$'' + ''^src/nix-instantiate/nix-instantiate\.cc$'' + ''^src/nix-store/dotgraph\.cc$'' + ''^src/nix-store/graphml\.cc$'' + ''^src/nix-store/nix-store\.cc$'' + ''^src/nix/add-to-store\.cc$'' + ''^src/nix/app\.cc$'' + ''^src/nix/build\.cc$'' + ''^src/nix/bundle\.cc$'' + ''^src/nix/cat\.cc$'' + ''^src/nix/config-check\.cc$'' + ''^src/nix/config\.cc$'' + ''^src/nix/copy\.cc$'' + ''^src/nix/derivation-add\.cc$'' + ''^src/nix/derivation-show\.cc$'' + ''^src/nix/derivation\.cc$'' + ''^src/nix/develop\.cc$'' + ''^src/nix/diff-closures\.cc$'' + ''^src/nix/dump-path\.cc$'' + ''^src/nix/edit\.cc$'' + ''^src/nix/eval\.cc$'' + ''^src/nix/flake\.cc$'' + ''^src/nix/fmt\.cc$'' + ''^src/nix/hash\.cc$'' + ''^src/nix/log\.cc$'' + ''^src/nix/ls\.cc$'' + ''^src/nix/main\.cc$'' + ''^src/nix/make-content-addressed\.cc$'' + ''^src/nix/nar\.cc$'' + ''^src/nix/optimise-store\.cc$'' + ''^src/nix/path-from-hash-part\.cc$'' + ''^src/nix/path-info\.cc$'' + ''^src/nix/prefetch\.cc$'' + ''^src/nix/profile\.cc$'' + ''^src/nix/realisation\.cc$'' + ''^src/nix/registry\.cc$'' + ''^src/nix/repl\.cc$'' + ''^src/nix/run\.cc$'' + ''^src/nix/run\.hh$'' + ''^src/nix/search\.cc$'' + ''^src/nix/sigs\.cc$'' + ''^src/nix/store-copy-log\.cc$'' + ''^src/nix/store-delete\.cc$'' + ''^src/nix/store-gc\.cc$'' + ''^src/nix/store-info\.cc$'' + ''^src/nix/store-repair\.cc$'' + ''^src/nix/store\.cc$'' + ''^src/nix/unix/daemon\.cc$'' + ''^src/nix/upgrade-nix\.cc$'' + ''^src/nix/verify\.cc$'' + ''^src/nix/why-depends\.cc$'' + + ''^tests/functional/plugins/plugintest\.cc'' + ''^tests/functional/test-libstoreconsumer/main\.cc'' + ''^tests/nixos/ca-fd-leak/sender\.c'' + ''^tests/nixos/ca-fd-leak/smuggler\.c'' + ''^tests/unit/libexpr-support/tests/libexpr\.hh'' + ''^tests/unit/libexpr-support/tests/value/context\.cc'' + ''^tests/unit/libexpr-support/tests/value/context\.hh'' + ''^tests/unit/libexpr/derived-path\.cc'' + ''^tests/unit/libexpr/error_traces\.cc'' + ''^tests/unit/libexpr/eval\.cc'' + ''^tests/unit/libexpr/flake/flakeref\.cc'' + ''^tests/unit/libexpr/flake/url-name\.cc'' + ''^tests/unit/libexpr/json\.cc'' + ''^tests/unit/libexpr/main\.cc'' + ''^tests/unit/libexpr/primops\.cc'' + ''^tests/unit/libexpr/search-path\.cc'' + ''^tests/unit/libexpr/trivial\.cc'' + ''^tests/unit/libexpr/value/context\.cc'' + ''^tests/unit/libexpr/value/print\.cc'' + ''^tests/unit/libfetchers/public-key\.cc'' + ''^tests/unit/libstore-support/tests/derived-path\.cc'' + ''^tests/unit/libstore-support/tests/derived-path\.hh'' + ''^tests/unit/libstore-support/tests/libstore\.hh'' + 
''^tests/unit/libstore-support/tests/nix_api_store\.hh'' + ''^tests/unit/libstore-support/tests/outputs-spec\.cc'' + ''^tests/unit/libstore-support/tests/outputs-spec\.hh'' + ''^tests/unit/libstore-support/tests/path\.cc'' + ''^tests/unit/libstore-support/tests/path\.hh'' + ''^tests/unit/libstore-support/tests/protocol\.hh'' + ''^tests/unit/libstore/common-protocol\.cc'' + ''^tests/unit/libstore/content-address\.cc'' + ''^tests/unit/libstore/derivation\.cc'' + ''^tests/unit/libstore/derived-path\.cc'' + ''^tests/unit/libstore/downstream-placeholder\.cc'' + ''^tests/unit/libstore/machines\.cc'' + ''^tests/unit/libstore/nar-info-disk-cache\.cc'' + ''^tests/unit/libstore/nar-info\.cc'' + ''^tests/unit/libstore/outputs-spec\.cc'' + ''^tests/unit/libstore/path-info\.cc'' + ''^tests/unit/libstore/path\.cc'' + ''^tests/unit/libstore/serve-protocol\.cc'' + ''^tests/unit/libstore/worker-protocol\.cc'' + ''^tests/unit/libutil-support/tests/characterization\.hh'' + ''^tests/unit/libutil-support/tests/hash\.cc'' + ''^tests/unit/libutil-support/tests/hash\.hh'' + ''^tests/unit/libutil/args\.cc'' + ''^tests/unit/libutil/canon-path\.cc'' + ''^tests/unit/libutil/chunked-vector\.cc'' + ''^tests/unit/libutil/closure\.cc'' + ''^tests/unit/libutil/compression\.cc'' + ''^tests/unit/libutil/config\.cc'' + ''^tests/unit/libutil/file-content-address\.cc'' + ''^tests/unit/libutil/git\.cc'' + ''^tests/unit/libutil/hash\.cc'' + ''^tests/unit/libutil/hilite\.cc'' + ''^tests/unit/libutil/json-utils\.cc'' + ''^tests/unit/libutil/logging\.cc'' + ''^tests/unit/libutil/lru-cache\.cc'' + ''^tests/unit/libutil/pool\.cc'' + ''^tests/unit/libutil/references\.cc'' + ''^tests/unit/libutil/suggestions\.cc'' + ''^tests/unit/libutil/tests\.cc'' + ''^tests/unit/libutil/url\.cc'' + ''^tests/unit/libutil/xml-writer\.cc'' + ]; + }; + shellcheck = { + enable = true; + excludes = [ + # We haven't linted these files yet + ''^config/install-sh$'' + ''^misc/systemv/nix-daemon$'' + ''^misc/bash/completion\.sh$'' + ''^misc/fish/completion\.fish$'' + ''^misc/zsh/completion\.zsh$'' + ''^scripts/check-hydra-status\.sh$'' + ''^scripts/create-darwin-volume\.sh$'' + ''^scripts/install-darwin-multi-user\.sh$'' + ''^scripts/install-multi-user\.sh$'' + ''^scripts/install-nix-from-closure\.sh$'' + ''^scripts/install-systemd-multi-user\.sh$'' + ''^src/nix/get-env\.sh$'' + ''^tests/functional/bash-profile\.sh$'' + ''^tests/functional/binary-cache-build-remote\.sh$'' + ''^tests/functional/binary-cache\.sh$'' + ''^tests/functional/brotli\.sh$'' + ''^tests/functional/build-delete\.sh$'' + ''^tests/functional/build-dry\.sh$'' + ''^tests/functional/build\.sh$'' + ''^tests/functional/ca/build-cache\.sh$'' + ''^tests/functional/ca/build-dry\.sh$'' + ''^tests/functional/ca/build-with-garbage-path\.sh$'' + ''^tests/functional/ca/build\.sh$'' + ''^tests/functional/ca/common\.sh$'' + ''^tests/functional/ca/concurrent-builds\.sh$'' + ''^tests/functional/ca/derivation-json\.sh$'' + ''^tests/functional/ca/duplicate-realisation-in-closure\.sh$'' + ''^tests/functional/ca/eval-store\.sh$'' + ''^tests/functional/ca/gc\.sh$'' + ''^tests/functional/ca/import-derivation\.sh$'' + ''^tests/functional/ca/new-build-cmd\.sh$'' + ''^tests/functional/ca/nix-copy\.sh$'' + ''^tests/functional/ca/nix-run\.sh$'' + ''^tests/functional/ca/nix-shell\.sh$'' + ''^tests/functional/ca/post-hook\.sh$'' + ''^tests/functional/ca/recursive\.sh$'' + ''^tests/functional/ca/repl\.sh$'' + ''^tests/functional/ca/selfref-gc\.sh$'' + ''^tests/functional/ca/signatures\.sh$'' + 
''^tests/functional/ca/substitute\.sh$'' + ''^tests/functional/ca/why-depends\.sh$'' + ''^tests/functional/case-hack\.sh$'' + ''^tests/functional/check-refs\.sh$'' + ''^tests/functional/check-reqs\.sh$'' + ''^tests/functional/check\.sh$'' + ''^tests/functional/chroot-store\.sh$'' + ''^tests/functional/common/vars-and-functions\.sh$'' + ''^tests/functional/completions\.sh$'' + ''^tests/functional/compression-levels\.sh$'' + ''^tests/functional/compute-levels\.sh$'' + ''^tests/functional/config\.sh$'' + ''^tests/functional/db-migration\.sh$'' + ''^tests/functional/debugger\.sh$'' + ''^tests/functional/dependencies\.builder0\.sh$'' + ''^tests/functional/dependencies\.sh$'' + ''^tests/functional/derivation-json\.sh$'' + ''^tests/functional/dump-db\.sh$'' + ''^tests/functional/dyn-drv/build-built-drv\.sh$'' + ''^tests/functional/dyn-drv/common\.sh$'' + ''^tests/functional/dyn-drv/dep-built-drv\.sh$'' + ''^tests/functional/dyn-drv/eval-outputOf\.sh$'' + ''^tests/functional/dyn-drv/old-daemon-error-hack\.sh$'' + ''^tests/functional/dyn-drv/recursive-mod-json\.sh$'' + ''^tests/functional/dyn-drv/text-hashed-output\.sh$'' + ''^tests/functional/eval-store\.sh$'' + ''^tests/functional/eval\.sh$'' + ''^tests/functional/experimental-features\.sh$'' + ''^tests/functional/export-graph\.sh$'' + ''^tests/functional/export\.sh$'' + ''^tests/functional/extra-sandbox-profile\.sh$'' + ''^tests/functional/fetchClosure\.sh$'' + ''^tests/functional/fetchGit\.sh$'' + ''^tests/functional/fetchGitRefs\.sh$'' + ''^tests/functional/fetchGitSubmodules\.sh$'' + ''^tests/functional/fetchGitVerification\.sh$'' + ''^tests/functional/fetchMercurial\.sh$'' + ''^tests/functional/fetchPath\.sh$'' + ''^tests/functional/fetchTree-file\.sh$'' + ''^tests/functional/fetchurl\.sh$'' + ''^tests/functional/filter-source\.sh$'' + ''^tests/functional/fixed\.builder1\.sh$'' + ''^tests/functional/fixed\.builder2\.sh$'' + ''^tests/functional/fixed\.sh$'' + ''^tests/functional/flakes/absolute-attr-paths\.sh$'' + ''^tests/functional/flakes/absolute-paths\.sh$'' + ''^tests/functional/flakes/build-paths\.sh$'' + ''^tests/functional/flakes/bundle\.sh$'' + ''^tests/functional/flakes/check\.sh$'' + ''^tests/functional/flakes/circular\.sh$'' + ''^tests/functional/flakes/common\.sh$'' + ''^tests/functional/flakes/config\.sh$'' + ''^tests/functional/flakes/develop\.sh$'' + ''^tests/functional/flakes/flake-in-submodule\.sh$'' + ''^tests/functional/flakes/flakes\.sh$'' + ''^tests/functional/flakes/follow-paths\.sh$'' + ''^tests/functional/flakes/init\.sh$'' + ''^tests/functional/flakes/inputs\.sh$'' + ''^tests/functional/flakes/mercurial\.sh$'' + ''^tests/functional/flakes/prefetch\.sh$'' + ''^tests/functional/flakes/run\.sh$'' + ''^tests/functional/flakes/search-root\.sh$'' + ''^tests/functional/flakes/show\.sh$'' + ''^tests/functional/flakes/unlocked-override\.sh$'' + ''^tests/functional/fmt\.sh$'' + ''^tests/functional/fmt\.simple\.sh$'' + ''^tests/functional/function-trace\.sh$'' + ''^tests/functional/gc-auto\.sh$'' + ''^tests/functional/gc-concurrent\.builder\.sh$'' + ''^tests/functional/gc-concurrent\.sh$'' + ''^tests/functional/gc-concurrent2\.builder\.sh$'' + ''^tests/functional/gc-non-blocking\.sh$'' + ''^tests/functional/gc-runtime\.sh$'' + ''^tests/functional/gc\.sh$'' + ''^tests/functional/git-hashing/common\.sh$'' + ''^tests/functional/git-hashing/simple\.sh$'' + ''^tests/functional/hash-convert\.sh$'' + ''^tests/functional/hash-path\.sh$'' + ''^tests/functional/help\.sh$'' + ''^tests/functional/import-derivation\.sh$'' + 
''^tests/functional/impure-derivations\.sh$'' + ''^tests/functional/impure-env\.sh$'' + ''^tests/functional/impure-eval\.sh$'' + ''^tests/functional/install-darwin\.sh$'' + ''^tests/functional/lang-test-infra\.sh$'' + ''^tests/functional/lang\.sh$'' + ''^tests/functional/lang/framework\.sh$'' + ''^tests/functional/legacy-ssh-store\.sh$'' + ''^tests/functional/linux-sandbox\.sh$'' + ''^tests/functional/local-overlay-store/add-lower-inner\.sh$'' + ''^tests/functional/local-overlay-store/add-lower\.sh$'' + ''^tests/functional/local-overlay-store/bad-uris\.sh$'' + ''^tests/functional/local-overlay-store/build-inner\.sh$'' + ''^tests/functional/local-overlay-store/build\.sh$'' + ''^tests/functional/local-overlay-store/check-post-init-inner\.sh$'' + ''^tests/functional/local-overlay-store/check-post-init\.sh$'' + ''^tests/functional/local-overlay-store/common\.sh$'' + ''^tests/functional/local-overlay-store/delete-duplicate-inner\.sh$'' + ''^tests/functional/local-overlay-store/delete-duplicate\.sh$'' + ''^tests/functional/local-overlay-store/delete-refs-inner\.sh$'' + ''^tests/functional/local-overlay-store/delete-refs\.sh$'' + ''^tests/functional/local-overlay-store/gc-inner\.sh$'' + ''^tests/functional/local-overlay-store/gc\.sh$'' + ''^tests/functional/local-overlay-store/optimise-inner\.sh$'' + ''^tests/functional/local-overlay-store/optimise\.sh$'' + ''^tests/functional/local-overlay-store/redundant-add-inner\.sh$'' + ''^tests/functional/local-overlay-store/redundant-add\.sh$'' + ''^tests/functional/local-overlay-store/remount\.sh$'' + ''^tests/functional/local-overlay-store/stale-file-handle-inner\.sh$'' + ''^tests/functional/local-overlay-store/stale-file-handle\.sh$'' + ''^tests/functional/local-overlay-store/verify-inner\.sh$'' + ''^tests/functional/local-overlay-store/verify\.sh$'' + ''^tests/functional/logging\.sh$'' + ''^tests/functional/misc\.sh$'' + ''^tests/functional/multiple-outputs\.sh$'' + ''^tests/functional/nar-access\.sh$'' + ''^tests/functional/nested-sandboxing\.sh$'' + ''^tests/functional/nested-sandboxing/command\.sh$'' + ''^tests/functional/nix-build\.sh$'' + ''^tests/functional/nix-channel\.sh$'' + ''^tests/functional/nix-collect-garbage-d\.sh$'' + ''^tests/functional/nix-copy-ssh-common\.sh$'' + ''^tests/functional/nix-copy-ssh-ng\.sh$'' + ''^tests/functional/nix-copy-ssh\.sh$'' + ''^tests/functional/nix-daemon-untrusting\.sh$'' + ''^tests/functional/nix-profile\.sh$'' + ''^tests/functional/nix-shell\.sh$'' + ''^tests/functional/nix_path\.sh$'' + ''^tests/functional/optimise-store\.sh$'' + ''^tests/functional/output-normalization\.sh$'' + ''^tests/functional/parallel\.builder\.sh$'' + ''^tests/functional/parallel\.sh$'' + ''^tests/functional/pass-as-file\.sh$'' + ''^tests/functional/path-from-hash-part\.sh$'' + ''^tests/functional/path-info\.sh$'' + ''^tests/functional/placeholders\.sh$'' + ''^tests/functional/plugins\.sh$'' + ''^tests/functional/post-hook\.sh$'' + ''^tests/functional/pure-eval\.sh$'' + ''^tests/functional/push-to-store-old\.sh$'' + ''^tests/functional/push-to-store\.sh$'' + ''^tests/functional/read-only-store\.sh$'' + ''^tests/functional/readfile-context\.sh$'' + ''^tests/functional/recursive\.sh$'' + ''^tests/functional/referrers\.sh$'' + ''^tests/functional/remote-store\.sh$'' + ''^tests/functional/repair\.sh$'' + ''^tests/functional/restricted\.sh$'' + ''^tests/functional/search\.sh$'' + ''^tests/functional/secure-drv-outputs\.sh$'' + ''^tests/functional/selfref-gc\.sh$'' + ''^tests/functional/shell\.sh$'' + 
''^tests/functional/shell\.shebang\.sh$'' + ''^tests/functional/signing\.sh$'' + ''^tests/functional/simple\.builder\.sh$'' + ''^tests/functional/simple\.sh$'' + ''^tests/functional/ssh-relay\.sh$'' + ''^tests/functional/store-info\.sh$'' + ''^tests/functional/structured-attrs\.sh$'' + ''^tests/functional/substitute-with-invalid-ca\.sh$'' + ''^tests/functional/suggestions\.sh$'' + ''^tests/functional/supplementary-groups\.sh$'' + ''^tests/functional/tarball\.sh$'' + ''^tests/functional/test-infra\.sh$'' + ''^tests/functional/test-libstoreconsumer\.sh$'' + ''^tests/functional/timeout\.sh$'' + ''^tests/functional/toString-path\.sh$'' + ''^tests/functional/user-envs-migration\.sh$'' + ''^tests/functional/user-envs-test-case\.sh$'' + ''^tests/functional/user-envs\.builder\.sh$'' + ''^tests/functional/user-envs\.sh$'' + ''^tests/functional/why-depends\.sh$'' + ''^tests/functional/zstd\.sh$'' + ''^tests/unit/libutil/data/git/check-data\.sh$'' + ]; + }; # TODO: nixfmt, https://github.com/NixOS/nixfmt/issues/153 }; - - excludes = [ - # We don't want to format test data - # ''tests/(?!nixos/).*\.nix'' - ''^tests/.*'' - - # Don't format vendored code - ''^src/toml11/.*'' - ''^doc/manual/redirects\.js$'' - ''^doc/manual/theme/highlight\.js$'' - - # We haven't applied formatting to these files yet - ''^doc/manual/redirects\.js$'' - ''^doc/manual/theme/highlight\.js$'' - ''^precompiled-headers\.h$'' - ''^src/build-remote/build-remote\.cc$'' - ''^src/libcmd/built-path\.cc$'' - ''^src/libcmd/built-path\.hh$'' - ''^src/libcmd/command\.cc$'' - ''^src/libcmd/command\.hh$'' - ''^src/libcmd/common-eval-args\.cc$'' - ''^src/libcmd/common-eval-args\.hh$'' - ''^src/libcmd/editor-for\.cc$'' - ''^src/libcmd/installable-attr-path\.cc$'' - ''^src/libcmd/installable-attr-path\.hh$'' - ''^src/libcmd/installable-derived-path\.cc$'' - ''^src/libcmd/installable-derived-path\.hh$'' - ''^src/libcmd/installable-flake\.cc$'' - ''^src/libcmd/installable-flake\.hh$'' - ''^src/libcmd/installable-value\.cc$'' - ''^src/libcmd/installable-value\.hh$'' - ''^src/libcmd/installables\.cc$'' - ''^src/libcmd/installables\.hh$'' - ''^src/libcmd/legacy\.hh$'' - ''^src/libcmd/markdown\.cc$'' - ''^src/libcmd/misc-store-flags\.cc$'' - ''^src/libcmd/repl-interacter\.cc$'' - ''^src/libcmd/repl-interacter\.hh$'' - ''^src/libcmd/repl\.cc$'' - ''^src/libcmd/repl\.hh$'' - ''^src/libexpr-c/nix_api_expr\.cc$'' - ''^src/libexpr-c/nix_api_external\.cc$'' - ''^src/libexpr/attr-path\.cc$'' - ''^src/libexpr/attr-path\.hh$'' - ''^src/libexpr/attr-set\.cc$'' - ''^src/libexpr/attr-set\.hh$'' - ''^src/libexpr/eval-cache\.cc$'' - ''^src/libexpr/eval-cache\.hh$'' - ''^src/libexpr/eval-error\.cc$'' - ''^src/libexpr/eval-inline\.hh$'' - ''^src/libexpr/eval-settings\.cc$'' - ''^src/libexpr/eval-settings\.hh$'' - ''^src/libexpr/eval\.cc$'' - ''^src/libexpr/eval\.hh$'' - ''^src/libexpr/flake/config\.cc$'' - ''^src/libexpr/flake/flake\.cc$'' - ''^src/libexpr/flake/flake\.hh$'' - ''^src/libexpr/flake/flakeref\.cc$'' - ''^src/libexpr/flake/flakeref\.hh$'' - ''^src/libexpr/flake/lockfile\.cc$'' - ''^src/libexpr/flake/lockfile\.hh$'' - ''^src/libexpr/flake/url-name\.cc$'' - ''^src/libexpr/function-trace\.cc$'' - ''^src/libexpr/gc-small-vector\.hh$'' - ''^src/libexpr/get-drvs\.cc$'' - ''^src/libexpr/get-drvs\.hh$'' - ''^src/libexpr/json-to-value\.cc$'' - ''^src/libexpr/nixexpr\.cc$'' - ''^src/libexpr/nixexpr\.hh$'' - ''^src/libexpr/parser-state\.hh$'' - ''^src/libexpr/pos-table\.hh$'' - ''^src/libexpr/primops\.cc$'' - ''^src/libexpr/primops\.hh$'' - 
''^src/libexpr/primops/context\.cc$'' - ''^src/libexpr/primops/fetchClosure\.cc$'' - ''^src/libexpr/primops/fetchMercurial\.cc$'' - ''^src/libexpr/primops/fetchTree\.cc$'' - ''^src/libexpr/primops/fromTOML\.cc$'' - ''^src/libexpr/print-ambiguous\.cc$'' - ''^src/libexpr/print-ambiguous\.hh$'' - ''^src/libexpr/print-options\.hh$'' - ''^src/libexpr/print\.cc$'' - ''^src/libexpr/print\.hh$'' - ''^src/libexpr/search-path\.cc$'' - ''^src/libexpr/symbol-table\.hh$'' - ''^src/libexpr/value-to-json\.cc$'' - ''^src/libexpr/value-to-json\.hh$'' - ''^src/libexpr/value-to-xml\.cc$'' - ''^src/libexpr/value-to-xml\.hh$'' - ''^src/libexpr/value\.hh$'' - ''^src/libexpr/value/context\.cc$'' - ''^src/libexpr/value/context\.hh$'' - ''^src/libfetchers/attrs\.cc$'' - ''^src/libfetchers/cache\.cc$'' - ''^src/libfetchers/cache\.hh$'' - ''^src/libfetchers/fetch-settings\.cc$'' - ''^src/libfetchers/fetch-settings\.hh$'' - ''^src/libfetchers/fetch-to-store\.cc$'' - ''^src/libfetchers/fetchers\.cc$'' - ''^src/libfetchers/fetchers\.hh$'' - ''^src/libfetchers/filtering-source-accessor\.cc$'' - ''^src/libfetchers/filtering-source-accessor\.hh$'' - ''^src/libfetchers/fs-source-accessor\.cc$'' - ''^src/libfetchers/fs-source-accessor\.hh$'' - ''^src/libfetchers/git-utils\.cc$'' - ''^src/libfetchers/git-utils\.hh$'' - ''^src/libfetchers/github\.cc$'' - ''^src/libfetchers/indirect\.cc$'' - ''^src/libfetchers/memory-source-accessor\.cc$'' - ''^src/libfetchers/path\.cc$'' - ''^src/libfetchers/registry\.cc$'' - ''^src/libfetchers/registry\.hh$'' - ''^src/libfetchers/tarball\.cc$'' - ''^src/libfetchers/tarball\.hh$'' - ''^src/libfetchers/unix/git\.cc$'' - ''^src/libfetchers/unix/mercurial\.cc$'' - ''^src/libmain/common-args\.cc$'' - ''^src/libmain/common-args\.hh$'' - ''^src/libmain/loggers\.cc$'' - ''^src/libmain/loggers\.hh$'' - ''^src/libmain/progress-bar\.cc$'' - ''^src/libmain/shared\.cc$'' - ''^src/libmain/shared\.hh$'' - ''^src/libmain/unix/stack\.cc$'' - ''^src/libstore/binary-cache-store\.cc$'' - ''^src/libstore/binary-cache-store\.hh$'' - ''^src/libstore/build-result\.hh$'' - ''^src/libstore/builtins\.hh$'' - ''^src/libstore/builtins/buildenv\.cc$'' - ''^src/libstore/builtins/buildenv\.hh$'' - ''^src/libstore/common-protocol-impl\.hh$'' - ''^src/libstore/common-protocol\.cc$'' - ''^src/libstore/common-protocol\.hh$'' - ''^src/libstore/content-address\.cc$'' - ''^src/libstore/content-address\.hh$'' - ''^src/libstore/daemon\.cc$'' - ''^src/libstore/daemon\.hh$'' - ''^src/libstore/derivations\.cc$'' - ''^src/libstore/derivations\.hh$'' - ''^src/libstore/derived-path-map\.cc$'' - ''^src/libstore/derived-path-map\.hh$'' - ''^src/libstore/derived-path\.cc$'' - ''^src/libstore/derived-path\.hh$'' - ''^src/libstore/downstream-placeholder\.cc$'' - ''^src/libstore/downstream-placeholder\.hh$'' - ''^src/libstore/dummy-store\.cc$'' - ''^src/libstore/export-import\.cc$'' - ''^src/libstore/filetransfer\.cc$'' - ''^src/libstore/filetransfer\.hh$'' - ''^src/libstore/gc-store\.hh$'' - ''^src/libstore/globals\.cc$'' - ''^src/libstore/globals\.hh$'' - ''^src/libstore/http-binary-cache-store\.cc$'' - ''^src/libstore/legacy-ssh-store\.cc$'' - ''^src/libstore/legacy-ssh-store\.hh$'' - ''^src/libstore/length-prefixed-protocol-helper\.hh$'' - ''^src/libstore/linux/personality\.cc$'' - ''^src/libstore/linux/personality\.hh$'' - ''^src/libstore/local-binary-cache-store\.cc$'' - ''^src/libstore/local-fs-store\.cc$'' - ''^src/libstore/local-fs-store\.hh$'' - ''^src/libstore/log-store\.cc$'' - ''^src/libstore/log-store\.hh$'' - 
''^src/libstore/machines\.cc$'' - ''^src/libstore/machines\.hh$'' - ''^src/libstore/make-content-addressed\.cc$'' - ''^src/libstore/make-content-addressed\.hh$'' - ''^src/libstore/misc\.cc$'' - ''^src/libstore/names\.cc$'' - ''^src/libstore/names\.hh$'' - ''^src/libstore/nar-accessor\.cc$'' - ''^src/libstore/nar-accessor\.hh$'' - ''^src/libstore/nar-info-disk-cache\.cc$'' - ''^src/libstore/nar-info-disk-cache\.hh$'' - ''^src/libstore/nar-info\.cc$'' - ''^src/libstore/nar-info\.hh$'' - ''^src/libstore/outputs-spec\.cc$'' - ''^src/libstore/outputs-spec\.hh$'' - ''^src/libstore/parsed-derivations\.cc$'' - ''^src/libstore/path-info\.cc$'' - ''^src/libstore/path-info\.hh$'' - ''^src/libstore/path-references\.cc$'' - ''^src/libstore/path-regex\.hh$'' - ''^src/libstore/path-with-outputs\.cc$'' - ''^src/libstore/path\.cc$'' - ''^src/libstore/path\.hh$'' - ''^src/libstore/pathlocks\.cc$'' - ''^src/libstore/pathlocks\.hh$'' - ''^src/libstore/profiles\.cc$'' - ''^src/libstore/profiles\.hh$'' - ''^src/libstore/realisation\.cc$'' - ''^src/libstore/realisation\.hh$'' - ''^src/libstore/remote-fs-accessor\.cc$'' - ''^src/libstore/remote-fs-accessor\.hh$'' - ''^src/libstore/remote-store-connection\.hh$'' - ''^src/libstore/remote-store\.cc$'' - ''^src/libstore/remote-store\.hh$'' - ''^src/libstore/s3-binary-cache-store\.cc$'' - ''^src/libstore/s3\.hh$'' - ''^src/libstore/serve-protocol-impl\.cc$'' - ''^src/libstore/serve-protocol-impl\.hh$'' - ''^src/libstore/serve-protocol\.cc$'' - ''^src/libstore/serve-protocol\.hh$'' - ''^src/libstore/sqlite\.cc$'' - ''^src/libstore/sqlite\.hh$'' - ''^src/libstore/ssh-store-config\.hh$'' - ''^src/libstore/ssh-store\.cc$'' - ''^src/libstore/ssh\.cc$'' - ''^src/libstore/ssh\.hh$'' - ''^src/libstore/store-api\.cc$'' - ''^src/libstore/store-api\.hh$'' - ''^src/libstore/store-dir-config\.hh$'' - ''^src/libstore/unix/build/derivation-goal\.cc$'' - ''^src/libstore/unix/build/derivation-goal\.hh$'' - ''^src/libstore/unix/build/drv-output-substitution-goal\.cc$'' - ''^src/libstore/unix/build/drv-output-substitution-goal\.hh$'' - ''^src/libstore/unix/build/entry-points\.cc$'' - ''^src/libstore/unix/build/goal\.cc$'' - ''^src/libstore/unix/build/goal\.hh$'' - ''^src/libstore/unix/build/hook-instance\.cc$'' - ''^src/libstore/unix/build/local-derivation-goal\.cc$'' - ''^src/libstore/unix/build/local-derivation-goal\.hh$'' - ''^src/libstore/unix/build/substitution-goal\.cc$'' - ''^src/libstore/unix/build/substitution-goal\.hh$'' - ''^src/libstore/unix/build/worker\.cc$'' - ''^src/libstore/unix/build/worker\.hh$'' - ''^src/libstore/unix/builtins/fetchurl\.cc$'' - ''^src/libstore/unix/builtins/unpack-channel\.cc$'' - ''^src/libstore/gc\.cc$'' - ''^src/libstore/unix/local-overlay-store\.cc$'' - ''^src/libstore/unix/local-overlay-store\.hh$'' - ''^src/libstore/local-store\.cc$'' - ''^src/libstore/local-store\.hh$'' - ''^src/libstore/unix/lock\.cc$'' - ''^src/libstore/unix/lock\.hh$'' - ''^src/libstore/optimise-store\.cc$'' - ''^src/libstore/unix/pathlocks\.cc$'' - ''^src/libstore/posix-fs-canonicalise\.cc$'' - ''^src/libstore/posix-fs-canonicalise\.hh$'' - ''^src/libstore/uds-remote-store\.cc$'' - ''^src/libstore/uds-remote-store\.hh$'' - ''^src/libstore/windows/build\.cc$'' - ''^src/libstore/worker-protocol-impl\.hh$'' - ''^src/libstore/worker-protocol\.cc$'' - ''^src/libstore/worker-protocol\.hh$'' - ''^src/libutil-c/nix_api_util_internal\.h$'' - ''^src/libutil/archive\.cc$'' - ''^src/libutil/archive\.hh$'' - ''^src/libutil/args\.cc$'' - ''^src/libutil/args\.hh$'' - 
''^src/libutil/args/root\.hh$'' - ''^src/libutil/callback\.hh$'' - ''^src/libutil/canon-path\.cc$'' - ''^src/libutil/canon-path\.hh$'' - ''^src/libutil/chunked-vector\.hh$'' - ''^src/libutil/closure\.hh$'' - ''^src/libutil/comparator\.hh$'' - ''^src/libutil/compute-levels\.cc$'' - ''^src/libutil/config-impl\.hh$'' - ''^src/libutil/config\.cc$'' - ''^src/libutil/config\.hh$'' - ''^src/libutil/current-process\.cc$'' - ''^src/libutil/current-process\.hh$'' - ''^src/libutil/english\.cc$'' - ''^src/libutil/english\.hh$'' - ''^src/libutil/environment-variables\.cc$'' - ''^src/libutil/error\.cc$'' - ''^src/libutil/error\.hh$'' - ''^src/libutil/exit\.hh$'' - ''^src/libutil/experimental-features\.cc$'' - ''^src/libutil/experimental-features\.hh$'' - ''^src/libutil/file-content-address\.cc$'' - ''^src/libutil/file-content-address\.hh$'' - ''^src/libutil/file-descriptor\.cc$'' - ''^src/libutil/file-descriptor\.hh$'' - ''^src/libutil/file-path-impl\.hh$'' - ''^src/libutil/file-path\.hh$'' - ''^src/libutil/file-system\.cc$'' - ''^src/libutil/file-system\.hh$'' - ''^src/libutil/finally\.hh$'' - ''^src/libutil/fmt\.hh$'' - ''^src/libutil/fs-sink\.cc$'' - ''^src/libutil/fs-sink\.hh$'' - ''^src/libutil/git\.cc$'' - ''^src/libutil/git\.hh$'' - ''^src/libutil/hash\.cc$'' - ''^src/libutil/hash\.hh$'' - ''^src/libutil/hilite\.cc$'' - ''^src/libutil/hilite\.hh$'' - ''^src/libutil/source-accessor\.hh$'' - ''^src/libutil/json-impls\.hh$'' - ''^src/libutil/json-utils\.cc$'' - ''^src/libutil/json-utils\.hh$'' - ''^src/libutil/linux/cgroup\.cc$'' - ''^src/libutil/linux/namespaces\.cc$'' - ''^src/libutil/logging\.cc$'' - ''^src/libutil/logging\.hh$'' - ''^src/libutil/lru-cache\.hh$'' - ''^src/libutil/memory-source-accessor\.cc$'' - ''^src/libutil/memory-source-accessor\.hh$'' - ''^src/libutil/pool\.hh$'' - ''^src/libutil/position\.cc$'' - ''^src/libutil/position\.hh$'' - ''^src/libutil/posix-source-accessor\.cc$'' - ''^src/libutil/posix-source-accessor\.hh$'' - ''^src/libutil/processes\.hh$'' - ''^src/libutil/ref\.hh$'' - ''^src/libutil/references\.cc$'' - ''^src/libutil/references\.hh$'' - ''^src/libutil/regex-combinators\.hh$'' - ''^src/libutil/serialise\.cc$'' - ''^src/libutil/serialise\.hh$'' - ''^src/libutil/signals\.hh$'' - ''^src/libutil/signature/local-keys\.cc$'' - ''^src/libutil/signature/local-keys\.hh$'' - ''^src/libutil/signature/signer\.cc$'' - ''^src/libutil/signature/signer\.hh$'' - ''^src/libutil/source-accessor\.cc$'' - ''^src/libutil/source-accessor\.hh$'' - ''^src/libutil/source-path\.cc$'' - ''^src/libutil/source-path\.hh$'' - ''^src/libutil/split\.hh$'' - ''^src/libutil/suggestions\.cc$'' - ''^src/libutil/suggestions\.hh$'' - ''^src/libutil/sync\.hh$'' - ''^src/libutil/terminal\.cc$'' - ''^src/libutil/terminal\.hh$'' - ''^src/libutil/thread-pool\.cc$'' - ''^src/libutil/thread-pool\.hh$'' - ''^src/libutil/topo-sort\.hh$'' - ''^src/libutil/types\.hh$'' - ''^src/libutil/unix/file-descriptor\.cc$'' - ''^src/libutil/unix/file-path\.cc$'' - ''^src/libutil/unix/monitor-fd\.hh$'' - ''^src/libutil/unix/processes\.cc$'' - ''^src/libutil/unix/signals-impl\.hh$'' - ''^src/libutil/unix/signals\.cc$'' - ''^src/libutil/unix-domain-socket\.cc$'' - ''^src/libutil/unix/users\.cc$'' - ''^src/libutil/url-parts\.hh$'' - ''^src/libutil/url\.cc$'' - ''^src/libutil/url\.hh$'' - ''^src/libutil/users\.cc$'' - ''^src/libutil/users\.hh$'' - ''^src/libutil/util\.cc$'' - ''^src/libutil/util\.hh$'' - ''^src/libutil/variant-wrapper\.hh$'' - ''^src/libutil/windows/environment-variables\.cc$'' - 
''^src/libutil/windows/file-descriptor\.cc$'' - ''^src/libutil/windows/file-path\.cc$'' - ''^src/libutil/windows/processes\.cc$'' - ''^src/libutil/windows/users\.cc$'' - ''^src/libutil/windows/windows-error\.cc$'' - ''^src/libutil/windows/windows-error\.hh$'' - ''^src/libutil/xml-writer\.cc$'' - ''^src/libutil/xml-writer\.hh$'' - ''^src/nix-build/nix-build\.cc$'' - ''^src/nix-channel/nix-channel\.cc$'' - ''^src/nix-collect-garbage/nix-collect-garbage\.cc$'' - ''^src/nix-env/buildenv.nix$'' - ''^src/nix-env/nix-env\.cc$'' - ''^src/nix-env/user-env\.cc$'' - ''^src/nix-env/user-env\.hh$'' - ''^src/nix-instantiate/nix-instantiate\.cc$'' - ''^src/nix-store/dotgraph\.cc$'' - ''^src/nix-store/graphml\.cc$'' - ''^src/nix-store/nix-store\.cc$'' - ''^src/nix/add-to-store\.cc$'' - ''^src/nix/app\.cc$'' - ''^src/nix/build\.cc$'' - ''^src/nix/bundle\.cc$'' - ''^src/nix/cat\.cc$'' - ''^src/nix/config-check\.cc$'' - ''^src/nix/config\.cc$'' - ''^src/nix/copy\.cc$'' - ''^src/nix/derivation-add\.cc$'' - ''^src/nix/derivation-show\.cc$'' - ''^src/nix/derivation\.cc$'' - ''^src/nix/develop\.cc$'' - ''^src/nix/diff-closures\.cc$'' - ''^src/nix/dump-path\.cc$'' - ''^src/nix/edit\.cc$'' - ''^src/nix/eval\.cc$'' - ''^src/nix/flake\.cc$'' - ''^src/nix/fmt\.cc$'' - ''^src/nix/hash\.cc$'' - ''^src/nix/log\.cc$'' - ''^src/nix/ls\.cc$'' - ''^src/nix/main\.cc$'' - ''^src/nix/make-content-addressed\.cc$'' - ''^src/nix/nar\.cc$'' - ''^src/nix/optimise-store\.cc$'' - ''^src/nix/path-from-hash-part\.cc$'' - ''^src/nix/path-info\.cc$'' - ''^src/nix/prefetch\.cc$'' - ''^src/nix/profile\.cc$'' - ''^src/nix/realisation\.cc$'' - ''^src/nix/registry\.cc$'' - ''^src/nix/repl\.cc$'' - ''^src/nix/run\.cc$'' - ''^src/nix/run\.hh$'' - ''^src/nix/search\.cc$'' - ''^src/nix/sigs\.cc$'' - ''^src/nix/store-copy-log\.cc$'' - ''^src/nix/store-delete\.cc$'' - ''^src/nix/store-gc\.cc$'' - ''^src/nix/store-info\.cc$'' - ''^src/nix/store-repair\.cc$'' - ''^src/nix/store\.cc$'' - ''^src/nix/unix/daemon\.cc$'' - ''^src/nix/upgrade-nix\.cc$'' - ''^src/nix/verify\.cc$'' - ''^src/nix/why-depends\.cc$'' - ]; }; - }; # We'll be pulling from this in the main flake diff --git a/mk/common-test.sh b/mk/common-test.sh index 2abea7887..c80abd381 100644 --- a/mk/common-test.sh +++ b/mk/common-test.sh @@ -1,19 +1,23 @@ +# shellcheck shell=bash + # Remove overall test dir (at most one of the two should match) and # remove file extension. 
-test_name=$(echo -n "$test" | sed \ + +test_name=$(echo -n "${test?must be defined by caller (test runner)}" | sed \ -e "s|^tests/unit/[^/]*/data/||" \ -e "s|^tests/functional/||" \ -e "s|\.sh$||" \ ) +# shellcheck disable=SC2016 TESTS_ENVIRONMENT=( "TEST_NAME=$test_name" 'NIX_REMOTE=' 'PS4=+(${BASH_SOURCE[0]-$0}:$LINENO) ' ) -: ${BASH:=/usr/bin/env bash} +read -r -a bash <<< "${BASH:-/usr/bin/env bash}" run () { - cd "$(dirname $1)" && env "${TESTS_ENVIRONMENT[@]}" $BASH -x -e -u -o pipefail $(basename $1) + cd "$(dirname "$1")" && env "${TESTS_ENVIRONMENT[@]}" "${bash[@]}" -x -e -u -o pipefail "$(basename "$1")" } diff --git a/mk/debug-test.sh b/mk/debug-test.sh index 1cd6f9dce..0dd4406c3 100755 --- a/mk/debug-test.sh +++ b/mk/debug-test.sh @@ -3,12 +3,8 @@ set -eu -o pipefail test=$1 -init=${2-} dir="$(dirname "${BASH_SOURCE[0]}")" source "$dir/common-test.sh" -if [ -n "$init" ]; then - (run "$init" 2>/dev/null > /dev/null) -fi run "$test" diff --git a/mk/lib.mk b/mk/lib.mk index a002d823f..1e7af6ad5 100644 --- a/mk/lib.mk +++ b/mk/lib.mk @@ -87,15 +87,14 @@ $(foreach script, $(bin-scripts), $(eval $(call install-program-in,$(script),$(b $(foreach script, $(bin-scripts), $(eval programs-list += $(script))) $(foreach script, $(noinst-scripts), $(eval programs-list += $(script))) $(foreach template, $(template-files), $(eval $(call instantiate-template,$(template)))) -install_test_init=tests/functional/init.sh $(foreach test, $(install-tests), \ - $(eval $(call run-test,$(test),$(install_test_init))) \ + $(eval $(call run-test,$(test))) \ $(eval installcheck: $(test).test)) $(foreach test-group, $(install-tests-groups), \ - $(eval $(call run-test-group,$(test-group),$(install_test_init))) \ + $(eval $(call run-test-group,$(test-group))) \ $(eval installcheck: $(test-group).test-group) \ $(foreach test, $($(test-group)-tests), \ - $(eval $(call run-test,$(test),$(install_test_init))) \ + $(eval $(call run-test,$(test))) \ $(eval $(test-group).test-group: $(test).test))) # Compilation database. diff --git a/mk/run-test.sh b/mk/run-test.sh index 177a452e8..543c845e1 100755 --- a/mk/run-test.sh +++ b/mk/run-test.sh @@ -8,7 +8,6 @@ yellow="" normal="" test=$1 -init=${2-} dir="$(dirname "${BASH_SOURCE[0]}")" source "$dir/common-test.sh" @@ -22,20 +21,18 @@ if [ -t 1 ]; then fi run_test () { - if [ -n "$init" ]; then - (run "$init" 2>/dev/null > /dev/null) - fi log="$(run "$test" 2>&1)" && status=0 || status=$? } run_test -if [ $status -eq 0 ]; then +if [[ "$status" = 0 ]]; then echo "$post_run_msg [${green}PASS$normal]" -elif [ $status -eq 99 ]; then +elif [[ "$status" = 99 ]]; then echo "$post_run_msg [${yellow}SKIP$normal]" else echo "$post_run_msg [${red}FAIL$normal]" + # shellcheck disable=SC2001 echo "$log" | sed 's/^/ /' exit "$status" fi diff --git a/mk/tests.mk b/mk/tests.mk index bac9b704a..0a10f6d3b 100644 --- a/mk/tests.mk +++ b/mk/tests.mk @@ -12,8 +12,8 @@ endef define run-test - $(eval $(call run-bash,$1.test,$1 $(test-deps),mk/run-test.sh $1 $2)) - $(eval $(call run-bash,$1.test-debug,$1 $(test-deps),mk/debug-test.sh $1 $2)) + $(eval $(call run-bash,$1.test,$1 $(test-deps),mk/run-test.sh $1)) + $(eval $(call run-bash,$1.test-debug,$1 $(test-deps),mk/debug-test.sh $1)) endef diff --git a/scripts/bigsur-nixbld-user-migration.sh b/scripts/bigsur-nixbld-user-migration.sh index f1619fd56..0eb312e07 100755 --- a/scripts/bigsur-nixbld-user-migration.sh +++ b/scripts/bigsur-nixbld-user-migration.sh @@ -3,7 +3,7 @@ ((NEW_NIX_FIRST_BUILD_UID=301)) id_available(){ - dscl . 
list /Users UniqueID | grep -E '\b'$1'\b' >/dev/null + dscl . list /Users UniqueID | grep -E '\b'"$1"'\b' >/dev/null } change_nixbld_names_and_ids(){ @@ -26,18 +26,18 @@ change_nixbld_names_and_ids(){ fi done - if [[ $name == _* ]]; then + if [[ "$name" == _* ]]; then echo " It looks like $name has already been renamed--skipping." else # first 3 are cleanup, it's OK if they aren't here - sudo dscl . delete /Users/$name dsAttrTypeNative:_writers_passwd &>/dev/null || true - sudo dscl . change /Users/$name NFSHomeDirectory "/private/var/empty 1" "/var/empty" &>/dev/null || true + sudo dscl . delete "/Users/$name" dsAttrTypeNative:_writers_passwd &>/dev/null || true + sudo dscl . change "/Users/$name" NFSHomeDirectory "/private/var/empty 1" "/var/empty" &>/dev/null || true # remove existing user from group - sudo dseditgroup -o edit -t user -d $name nixbld || true - sudo dscl . change /Users/$name UniqueID $uid $next_id - sudo dscl . change /Users/$name RecordName $name _$name + sudo dseditgroup -o edit -t user -d "$name" nixbld || true + sudo dscl . change "/Users/$name" UniqueID "$uid" "$next_id" + sudo dscl . change "/Users/$name" RecordName "$name" "_$name" # add renamed user to group - sudo dseditgroup -o edit -t user -a _$name nixbld + sudo dseditgroup -o edit -t user -a "_$name" nixbld echo " $name migrated to _$name (uid: $next_id)" fi done < <(dscl . list /Users UniqueID | grep nixbld | sort -n -k2) diff --git a/scripts/nix-profile-daemon.sh.in b/scripts/nix-profile-daemon.sh.in index 0ec72e797..eb124c0b5 100644 --- a/scripts/nix-profile-daemon.sh.in +++ b/scripts/nix-profile-daemon.sh.in @@ -1,4 +1,5 @@ # Only execute this file once per shell. +# This file is tested by tests/installer/default.nix. if [ -n "${__ETC_PROFILE_NIX_SOURCED:-}" ]; then return; fi __ETC_PROFILE_NIX_SOURCED=1 @@ -9,11 +10,9 @@ else NIX_LINK_NEW=$HOME/.local/state/nix/profile fi if [ -e "$NIX_LINK_NEW" ]; then - NIX_LINK="$NIX_LINK_NEW" -else - if [ -t 2 ] && [ -e "$NIX_LINK_NEW" ]; then + if [ -t 2 ] && [ -e "$NIX_LINK" ]; then warning="\033[1;35mwarning:\033[0m" - printf "$warning Both %s and legacy %s exist; using the latter.\n" "$NIX_LINK_NEW" "$NIX_LINK" 1>&2 + printf "$warning Both %s and legacy %s exist; using the former.\n" "$NIX_LINK_NEW" "$NIX_LINK" 1>&2 if [ "$(realpath "$NIX_LINK")" = "$(realpath "$NIX_LINK_NEW")" ]; then printf " Since the profiles match, you can safely delete either of them.\n" 1>&2 else @@ -26,6 +25,7 @@ else printf "$warning Profiles do not match. You should manually migrate from %s to %s.\n" "$NIX_LINK" "$NIX_LINK_NEW" 1>&2 fi fi + NIX_LINK="$NIX_LINK_NEW" fi export NIX_PROFILES="@localstatedir@/nix/profiles/default $NIX_LINK" diff --git a/scripts/nix-profile.sh.in b/scripts/nix-profile.sh.in index 44bc96e89..e868399b1 100644 --- a/scripts/nix-profile.sh.in +++ b/scripts/nix-profile.sh.in @@ -1,3 +1,4 @@ +# This file is tested by tests/installer/default.nix. if [ -n "$HOME" ] && [ -n "$USER" ]; then # Set up the per-user profile. 
@@ -9,11 +10,9 @@ if [ -n "$HOME" ] && [ -n "$USER" ]; then NIX_LINK_NEW="$HOME/.local/state/nix/profile" fi if [ -e "$NIX_LINK_NEW" ]; then - NIX_LINK="$NIX_LINK_NEW" - else - if [ -t 2 ] && [ -e "$NIX_LINK_NEW" ]; then + if [ -t 2 ] && [ -e "$NIX_LINK" ]; then warning="\033[1;35mwarning:\033[0m" - printf "$warning Both %s and legacy %s exist; using the latter.\n" "$NIX_LINK_NEW" "$NIX_LINK" 1>&2 + printf "$warning Both %s and legacy %s exist; using the former.\n" "$NIX_LINK_NEW" "$NIX_LINK" 1>&2 if [ "$(realpath "$NIX_LINK")" = "$(realpath "$NIX_LINK_NEW")" ]; then printf " Since the profiles match, you can safely delete either of them.\n" 1>&2 else @@ -26,6 +25,7 @@ if [ -n "$HOME" ] && [ -n "$USER" ]; then printf "$warning Profiles do not match. You should manually migrate from %s to %s.\n" "$NIX_LINK" "$NIX_LINK_NEW" 1>&2 fi fi + NIX_LINK="$NIX_LINK_NEW" fi # Set up environment. diff --git a/src/build-remote/build-remote.cc b/src/build-remote/build-remote.cc index 18eee830b..582e6d623 100644 --- a/src/build-remote/build-remote.cc +++ b/src/build-remote/build-remote.cc @@ -37,7 +37,7 @@ static std::string currentLoad; static AutoCloseFD openSlotLock(const Machine & m, uint64_t slot) { - return openLockFile(fmt("%s/%s-%d", currentLoad, escapeUri(m.storeUri), slot), true); + return openLockFile(fmt("%s/%s-%d", currentLoad, escapeUri(m.storeUri.render()), slot), true); } static bool allSupportedLocally(Store & store, const std::set& requiredFeatures) { @@ -99,7 +99,7 @@ static int main_build_remote(int argc, char * * argv) } std::optional drvPath; - std::string storeUri; + StoreReference storeUri; while (true) { @@ -135,7 +135,7 @@ static int main_build_remote(int argc, char * * argv) Machine * bestMachine = nullptr; uint64_t bestLoad = 0; for (auto & m : machines) { - debug("considering building on remote machine '%s'", m.storeUri); + debug("considering building on remote machine '%s'", m.storeUri.render()); if (m.enabled && m.systemSupported(neededSystem) && @@ -233,7 +233,7 @@ static int main_build_remote(int argc, char * * argv) try { - Activity act(*logger, lvlTalkative, actUnknown, fmt("connecting to '%s'", bestMachine->storeUri)); + Activity act(*logger, lvlTalkative, actUnknown, fmt("connecting to '%s'", bestMachine->storeUri.render())); sshStore = bestMachine->openStore(); sshStore->connect(); @@ -242,7 +242,7 @@ static int main_build_remote(int argc, char * * argv) } catch (std::exception & e) { auto msg = chomp(drainFD(5, false)); printError("cannot build on '%s': %s%s", - bestMachine->storeUri, e.what(), + bestMachine->storeUri.render(), e.what(), msg.empty() ? "" : ": " + msg); bestMachine->enabled = false; continue; @@ -257,15 +257,15 @@ connected: assert(sshStore); - std::cerr << "# accept\n" << storeUri << "\n"; + std::cerr << "# accept\n" << storeUri.render() << "\n"; auto inputs = readStrings(source); auto wantedOutputs = readStrings(source); - AutoCloseFD uploadLock = openLockFile(currentLoad + "/" + escapeUri(storeUri) + ".upload-lock", true); + AutoCloseFD uploadLock = openLockFile(currentLoad + "/" + escapeUri(storeUri.render()) + ".upload-lock", true); { - Activity act(*logger, lvlTalkative, actUnknown, fmt("waiting for the upload lock to '%s'", storeUri)); + Activity act(*logger, lvlTalkative, actUnknown, fmt("waiting for the upload lock to '%s'", storeUri.render())); auto old = signal(SIGALRM, handleAlarm); alarm(15 * 60); @@ -278,7 +278,7 @@ connected: auto substitute = settings.buildersUseSubstitutes ? 
Substitute : NoSubstitute; { - Activity act(*logger, lvlTalkative, actUnknown, fmt("copying dependencies to '%s'", storeUri)); + Activity act(*logger, lvlTalkative, actUnknown, fmt("copying dependencies to '%s'", storeUri.render())); copyPaths(*store, *sshStore, store->parseStorePathSet(inputs), NoRepair, NoCheckSigs, substitute); } @@ -316,7 +316,7 @@ connected: optResult = sshStore->buildDerivation(*drvPath, (const BasicDerivation &) drv); auto & result = *optResult; if (!result.success()) - throw Error("build of '%s' on '%s' failed: %s", store->printStorePath(*drvPath), storeUri, result.errorMsg); + throw Error("build of '%s' on '%s' failed: %s", store->printStorePath(*drvPath), storeUri.render(), result.errorMsg); } else { copyClosure(*store, *sshStore, StorePathSet {*drvPath}, NoRepair, NoCheckSigs, substitute); auto res = sshStore->buildPathsWithResults({ @@ -359,7 +359,7 @@ connected: } if (!missingPaths.empty()) { - Activity act(*logger, lvlTalkative, actUnknown, fmt("copying outputs from '%s'", storeUri)); + Activity act(*logger, lvlTalkative, actUnknown, fmt("copying outputs from '%s'", storeUri.render())); if (auto localStore = store.dynamic_pointer_cast()) for (auto & path : missingPaths) localStore->locksHeld.insert(store->printStorePath(path)); /* FIXME: ugly */ diff --git a/src/libcmd/common-eval-args.cc b/src/libcmd/common-eval-args.cc index 155b43b70..cd0f19257 100644 --- a/src/libcmd/common-eval-args.cc +++ b/src/libcmd/common-eval-args.cc @@ -20,7 +20,7 @@ MixEvalArgs::MixEvalArgs() .description = "Pass the value *expr* as the argument *name* to Nix functions.", .category = category, .labels = {"name", "expr"}, - .handler = {[&](std::string name, std::string expr) { autoArgs.insert_or_assign(name, AutoArg{AutoArgExpr(expr)}); }} + .handler = {[&](std::string name, std::string expr) { autoArgs.insert_or_assign(name, AutoArg{AutoArgExpr{expr}}); }} }); addFlag({ @@ -28,7 +28,7 @@ MixEvalArgs::MixEvalArgs() .description = "Pass the string *string* as the argument *name* to Nix functions.", .category = category, .labels = {"name", "string"}, - .handler = {[&](std::string name, std::string s) { autoArgs.insert_or_assign(name, AutoArg{AutoArgString(s)}); }}, + .handler = {[&](std::string name, std::string s) { autoArgs.insert_or_assign(name, AutoArg{AutoArgString{s}}); }}, }); addFlag({ @@ -36,7 +36,7 @@ MixEvalArgs::MixEvalArgs() .description = "Pass the contents of file *path* as the argument *name* to Nix functions.", .category = category, .labels = {"name", "path"}, - .handler = {[&](std::string name, std::string path) { autoArgs.insert_or_assign(name, AutoArg{AutoArgFile(path)}); }}, + .handler = {[&](std::string name, std::string path) { autoArgs.insert_or_assign(name, AutoArg{AutoArgFile{path}}); }}, .completer = completePath }); diff --git a/src/libcmd/installable-attr-path.cc b/src/libcmd/installable-attr-path.cc index 3ec1c1614..8917e7a01 100644 --- a/src/libcmd/installable-attr-path.cc +++ b/src/libcmd/installable-attr-path.cc @@ -75,6 +75,8 @@ DerivedPathsWithInfo InstallableAttrPath::toDerivedPaths() std::set outputsToInstall; for (auto & output : packageInfo.queryOutputs(false, true)) outputsToInstall.insert(output.first); + if (outputsToInstall.empty()) + outputsToInstall.insert("out"); return OutputsSpec::Names { std::move(outputsToInstall) }; }, [&](const ExtendedOutputsSpec::Explicit & e) -> OutputsSpec { diff --git a/src/libcmd/installable-flake.cc b/src/libcmd/installable-flake.cc index 6ff837ddc..d42fa7aac 100644 --- a/src/libcmd/installable-flake.cc +++ 
b/src/libcmd/installable-flake.cc @@ -106,9 +106,14 @@ DerivedPathsWithInfo InstallableFlake::toDerivedPaths() fmt("while evaluating the flake output attribute '%s'", attrPath))) { return { *derivedPathWithInfo }; + } else { + throw Error( + "expected flake output attribute '%s' to be a derivation or path but found %s: %s", + attrPath, + showType(v), + ValuePrinter(*this->state, v, errorPrintOptions) + ); } - else - throw Error("flake output attribute '%s' is not a derivation or path", attrPath); } auto drvPath = attr->forceDerivation(); diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc index 43e312540..6835c512c 100644 --- a/src/libcmd/installables.cc +++ b/src/libcmd/installables.cc @@ -601,6 +601,37 @@ std::vector Installable::build( return res; } +static void throwBuildErrors( + std::vector & buildResults, + const Store & store) +{ + std::vector failed; + for (auto & buildResult : buildResults) { + if (!buildResult.success()) { + failed.push_back(buildResult); + } + } + + auto failedResult = failed.begin(); + if (failedResult != failed.end()) { + if (failed.size() == 1) { + failedResult->rethrow(); + } else { + StringSet failedPaths; + for (; failedResult != failed.end(); failedResult++) { + if (!failedResult->errorMsg.empty()) { + logError(ErrorInfo{ + .level = lvlError, + .msg = failedResult->errorMsg, + }); + } + failedPaths.insert(failedResult->path.to_string(store)); + } + throw Error("build of %s failed", concatStringsSep(", ", quoteStrings(failedPaths))); + } + } +} + std::vector, BuiltPathWithResult>> Installable::build2( ref evalStore, ref store, @@ -662,10 +693,9 @@ std::vector, BuiltPathWithResult>> Installable::build if (settings.printMissing) printMissing(store, pathsToBuild, lvlInfo); - for (auto & buildResult : store->buildPathsWithResults(pathsToBuild, bMode, evalStore)) { - if (!buildResult.success()) - buildResult.rethrow(); - + auto buildResults = store->buildPathsWithResults(pathsToBuild, bMode, evalStore); + throwBuildErrors(buildResults, *store); + for (auto & buildResult : buildResults) { for (auto & aux : backmap[buildResult.path]) { std::visit(overloaded { [&](const DerivedPath::Built & bfd) { diff --git a/src/libcmd/misc-store-flags.cc b/src/libcmd/misc-store-flags.cc index e66d3f63b..06552c032 100644 --- a/src/libcmd/misc-store-flags.cc +++ b/src/libcmd/misc-store-flags.cc @@ -81,9 +81,15 @@ Args::Flag fileIngestionMethod(FileIngestionMethod * method) How to compute the hash of the input. One of: - - `nar` (the default): Serialises the input as an archive (following the [_Nix Archive Format_](https://edolstra.github.io/pubs/phd-thesis.pdf#page=101)) and passes that to the hash function. + - `nar` (the default): + Serialises the input as a + [Nix Archive](@docroot@/store/file-system-object/content-address.md#serial-nix-archive) + and passes that to the hash function. - - `flat`: Assumes that the input is a single file and directly passes it to the hash function; + - `flat`: + Assumes that the input is a single file and + [directly passes](@docroot@/store/file-system-object/content-address.md#serial-flat) + it to the hash function. )", .labels = {"file-ingestion-method"}, .handler = {[method](std::string s) { @@ -101,15 +107,23 @@ Args::Flag contentAddressMethod(ContentAddressMethod * method) How to compute the content-address of the store object. 
One of: - - `nar` (the default): Serialises the input as an archive (following the [_Nix Archive Format_](https://edolstra.github.io/pubs/phd-thesis.pdf#page=101)) and passes that to the hash function. + - [`nar`](@docroot@/store/store-object/content-address.md#method-nix-archive) + (the default): + Serialises the input as a + [Nix Archive](@docroot@/store/file-system-object/content-address.md#serial-nix-archive) + and passes that to the hash function. - - `flat`: Assumes that the input is a single file and directly passes it to the hash function; + - [`flat`](@docroot@/store/store-object/content-address.md#method-flat): + Assumes that the input is a single file and + [directly passes](@docroot@/store/file-system-object/content-address.md#serial-flat) + it to the hash function. - - `text`: Like `flat`, but used for - [derivations](@docroot@/glossary.md#store-derivation) serialized in store object and + - [`text`](@docroot@/store/store-object/content-address.md#method-text): + Like `flat`, but used for + [derivations](@docroot@/glossary.md#store-derivation) serialized in store object and [`builtins.toFile`](@docroot@/language/builtins.html#builtins-toFile). For advanced use-cases only; - for regular usage prefer `nar` and `flat. + for regular usage prefer `nar` and `flat`. )", .labels = {"content-address-method"}, .handler = {[method](std::string s) { diff --git a/src/libcmd/network-proxy.cc b/src/libcmd/network-proxy.cc index 633b2c005..4b7d2441f 100644 --- a/src/libcmd/network-proxy.cc +++ b/src/libcmd/network-proxy.cc @@ -25,7 +25,10 @@ static StringSet getExcludingNoProxyVariables() static const StringSet excludeVariables{"no_proxy", "NO_PROXY"}; StringSet variables; std::set_difference( - networkProxyVariables.begin(), networkProxyVariables.end(), excludeVariables.begin(), excludeVariables.end(), + networkProxyVariables.begin(), + networkProxyVariables.end(), + excludeVariables.begin(), + excludeVariables.end(), std::inserter(variables, variables.begin())); return variables; } diff --git a/src/libcmd/repl.cc b/src/libcmd/repl.cc index 8a9155ab6..c47665fe0 100644 --- a/src/libcmd/repl.cc +++ b/src/libcmd/repl.cc @@ -137,12 +137,13 @@ void runNix(Path program, const Strings & args, { auto subprocessEnv = getEnv(); subprocessEnv["NIX_CONFIG"] = globalConfig.toKeyValue(); - + //isInteractive avoid grabling interactive commands runProgram2(RunOptions { .program = settings.nixBinDir+ "/" + program, .args = args, .environment = subprocessEnv, .input = input, + .isInteractive = true, }); return; @@ -259,7 +260,7 @@ StringSet NixRepl::completePrefix(const std::string & prefix) try { auto dir = std::string(cur, 0, slash); auto prefix2 = std::string(cur, slash + 1); - for (auto & entry : readDirectory(dir == "" ? "/" : dir)) { + for (auto & entry : std::filesystem::directory_iterator{dir == "" ? "/" : dir}) { auto name = entry.path().filename().string(); if (name[0] != '.' && hasPrefix(name, prefix2)) completions.insert(prev + entry.path().string()); @@ -304,6 +305,8 @@ StringSet NixRepl::completePrefix(const std::string & prefix) // Quietly ignore evaluation errors. } catch (BadURL & e) { // Quietly ignore BadURL flake-related errors. + } catch (FileNotFound & e) { + // Quietly ignore non-existent file beeing `import`-ed. 
} } @@ -508,13 +511,9 @@ ProcessLineResult NixRepl::processLine(std::string line) auto editor = args.front(); args.pop_front(); - // avoid garbling the editor with the progress bar - logger->pause(); - Finally resume([&]() { logger->resume(); }); - // runProgram redirects stdout to a StringSink, // using runProgram2 to allow editors to display their UI - runProgram2(RunOptions { .program = editor, .lookupPath = true, .args = args }); + runProgram2(RunOptions { .program = editor, .lookupPath = true, .args = args , .isInteractive = true }); // Reload right after exiting the editor state->resetFileCache(); diff --git a/src/libexpr-c/nix_api_expr.cc b/src/libexpr-c/nix_api_expr.cc index a29c3425e..b86d745db 100644 --- a/src/libexpr-c/nix_api_expr.cc +++ b/src/libexpr-c/nix_api_expr.cc @@ -65,6 +65,17 @@ nix_err nix_value_call(nix_c_context * context, EvalState * state, Value * fn, V NIXC_CATCH_ERRS } +nix_err nix_value_call_multi(nix_c_context * context, EvalState * state, Value * fn, size_t nargs, Value ** args, Value * value) +{ + if (context) + context->last_err_code = NIX_OK; + try { + state->state.callFunction(*(nix::Value *) fn, nargs, (nix::Value * *)args, *(nix::Value *) value, nix::noPos); + state->state.forceValue(*(nix::Value *) value, nix::noPos); + } + NIXC_CATCH_ERRS +} + nix_err nix_value_force(nix_c_context * context, EvalState * state, Value * value) { if (context) diff --git a/src/libexpr-c/nix_api_expr.h b/src/libexpr-c/nix_api_expr.h index 04fc92f0f..0d324b148 100644 --- a/src/libexpr-c/nix_api_expr.h +++ b/src/libexpr-c/nix_api_expr.h @@ -3,25 +3,7 @@ /** @defgroup libexpr libexpr * @brief Bindings to the Nix language evaluator * - * Example (without error handling): - * @code{.c} - * int main() { - * nix_libexpr_init(NULL); - * - * Store* store = nix_store_open(NULL, "dummy", NULL); - * EvalState* state = nix_state_create(NULL, NULL, store); // empty nix path - * Value *value = nix_alloc_value(NULL, state); - * - * nix_expr_eval_from_string(NULL, state, "builtins.nixVersion", ".", value); - * nix_value_force(NULL, state, value); - * printf("nix version: %s\n", nix_get_string(NULL, value)); - * - * nix_gc_decref(NULL, value); - * nix_state_free(state); - * nix_store_free(store); - * return 0; - * } - * @endcode + * See *[Embedding the Nix Evaluator](@ref nix_evaluator_example)* for an example. * @{ */ /** @file @@ -30,6 +12,7 @@ #include "nix_api_store.h" #include "nix_api_util.h" +#include #ifdef __cplusplus extern "C" { @@ -98,6 +81,46 @@ nix_err nix_expr_eval_from_string( */ nix_err nix_value_call(nix_c_context * context, EvalState * state, Value * fn, Value * arg, Value * value); +/** + * @brief Calls a Nix function with multiple arguments. + * + * Technically these are functions that return functions. It is common for Nix + * functions to be curried, so this function is useful for calling them. + * + * @param[out] context Optional, stores error information + * @param[in] state The state of the evaluation. + * @param[in] fn The Nix function to call. + * @param[in] nargs The number of arguments. + * @param[in] args The arguments to pass to the function. + * @param[out] value The result of the function call. + * + * @see nix_value_call For the single argument primitive. + * @see NIX_VALUE_CALL For a macro that wraps this function for convenience. + */ +nix_err nix_value_call_multi( + nix_c_context * context, EvalState * state, Value * fn, size_t nargs, Value ** args, Value * value); + +/** + * @brief Calls a Nix function with multiple arguments. 
+ * + * Technically these are functions that return functions. It is common for Nix + * functions to be curried, so this function is useful for calling them. + * + * @param[out] context Optional, stores error information + * @param[in] state The state of the evaluation. + * @param[out] value The result of the function call. + * @param[in] fn The Nix function to call. + * @param[in] args The arguments to pass to the function. + * + * @see nix_value_call_multi + */ +#define NIX_VALUE_CALL(context, state, value, fn, ...) \ + do { \ + Value * args_array[] = {__VA_ARGS__}; \ + size_t nargs = sizeof(args_array) / sizeof(args_array[0]); \ + nix_value_call_multi(context, state, fn, nargs, args_array, value); \ + } while (0) + /** * @brief Forces the evaluation of a Nix value. * @@ -106,10 +129,8 @@ nix_err nix_value_call(nix_c_context * context, EvalState * state, Value * fn, V * * This function converts these Values into their final type. * - * @note You don't need this function for basic API usage, since all functions - * that return a value call it for you. The only place you will see a - * NIX_TYPE_THUNK is in the arguments that are passed to a PrimOp function - * you supplied to nix_alloc_primop. + * @note You don't need this function for basic API usage very often, since all functions that return a `Value` call it + * for you. This function is mainly needed before calling @ref getters. * * @param[out] context Optional, stores error information * @param[in] state The state of the evaluation. @@ -140,7 +161,7 @@ nix_err nix_value_force_deep(nix_c_context * context, EvalState * state, Value * * @brief Create a new Nix language evaluator state. * * @param[out] context Optional, stores error information - * @param[in] lookupPath Array of strings corresponding to entries in NIX_PATH. + * @param[in] lookupPath Null-terminated array of strings corresponding to entries in NIX_PATH. * @param[in] store The Nix store to use. * @return A new Nix state or NULL on failure. */ diff --git a/src/libexpr-c/nix_api_external.h b/src/libexpr-c/nix_api_external.h index 12ea00407..6c524b975 100644 --- a/src/libexpr-c/nix_api_external.h +++ b/src/libexpr-c/nix_api_external.h @@ -48,7 +48,7 @@ void nix_set_string_return(nix_string_return * str, const char * c); * Print to the nix_printer * * @param[out] context Optional, stores error information - * @param printer The nix_printer to print to + * @param[out] printer The nix_printer to print to * @param[in] str The string to print * @returns NIX_OK if everything worked */ @@ -136,7 +136,7 @@ typedef struct NixCExternalValueDesc * or setting it to the empty string, will make the conversion throw an error. 
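Editor's note on the `nix_value_call_multi` / `NIX_VALUE_CALL` additions above: they apply a curried Nix function to several arguments in one step. Below is a hedged usage sketch in the style of the example this header previously carried (error handling omitted by passing `NULL` contexts); the final inspection of the result is only indicated in a comment, since it would go through the value getters rather than anything shown in this hunk.

```cpp
// Sketch: call a curried two-argument Nix function via NIX_VALUE_CALL.
#include "nix_api_util.h"
#include "nix_api_store.h"
#include "nix_api_expr.h"
#include "nix_api_value.h"

int main()
{
    nix_libexpr_init(NULL);

    Store * store = nix_store_open(NULL, "dummy", NULL);
    EvalState * state = nix_state_create(NULL, NULL, store); // empty lookup path

    Value * fn = nix_alloc_value(NULL, state);
    Value * a = nix_alloc_value(NULL, state);
    Value * b = nix_alloc_value(NULL, state);
    Value * result = nix_alloc_value(NULL, state);

    // A curried function and its two arguments, all produced by evaluation.
    nix_expr_eval_from_string(NULL, state, "x: y: x + y", ".", fn);
    nix_expr_eval_from_string(NULL, state, "1", ".", a);
    nix_expr_eval_from_string(NULL, state, "2", ".", b);

    // Expands to nix_value_call_multi(NULL, state, fn, 2, args_array, result).
    NIX_VALUE_CALL(NULL, state, result, fn, a, b);

    // `result` now holds the forced integer 3; inspect it with the value getters.

    nix_gc_decref(NULL, result);
    nix_gc_decref(NULL, b);
    nix_gc_decref(NULL, a);
    nix_gc_decref(NULL, fn);
    nix_state_free(state);
    nix_store_free(store);
    return 0;
}
```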
*/ void (*printValueAsJSON)( - void * self, EvalState *, bool strict, nix_string_context * c, bool copyToStore, nix_string_return * res); + void * self, EvalState * state, bool strict, nix_string_context * c, bool copyToStore, nix_string_return * res); /** * @brief Convert the external value to XML * @@ -155,7 +155,7 @@ typedef struct NixCExternalValueDesc */ void (*printValueAsXML)( void * self, - EvalState *, + EvalState * state, int strict, int location, void * doc, diff --git a/src/libexpr-c/nix_api_value.cc b/src/libexpr-c/nix_api_value.cc index 0366e5020..978cf7f43 100644 --- a/src/libexpr-c/nix_api_value.cc +++ b/src/libexpr-c/nix_api_value.cc @@ -73,10 +73,43 @@ static void nix_c_primop_wrapper( PrimOpFun f, void * userdata, nix::EvalState & state, const nix::PosIdx pos, nix::Value ** args, nix::Value & v) { nix_c_context ctx; - f(userdata, &ctx, (EvalState *) &state, (Value **) args, (Value *) &v); - /* TODO: In the future, this should throw different errors depending on the error code */ - if (ctx.last_err_code != NIX_OK) - state.error("Error from builtin function: %s", *ctx.last_err).atPos(pos).debugThrow(); + + // v currently has a thunk, but the C API initializers require an uninitialized value. + // + // We can't destroy the thunk, because that makes it impossible to retry, + // which is needed for tryEval and for evaluation drivers that evaluate more + // than one value (e.g. an attrset with two derivations, both of which + // reference v). + // + // Instead we create a temporary value, and then assign the result to v. + // This does not give the primop definition access to the thunk, but that's + // ok because we don't see a need for this yet (e.g. inspecting thunks, + // or maybe something to make blackholes work better; we don't know). + nix::Value vTmp; + + f(userdata, &ctx, (EvalState *) &state, (Value **) args, (Value *) &vTmp); + + if (ctx.last_err_code != NIX_OK) { + /* TODO: Throw different errors depending on the error code */ + state.error("Error from custom function: %s", *ctx.last_err).atPos(pos).debugThrow(); + } + + if (!vTmp.isValid()) { + state.error("Implementation error in custom function: return value was not initialized") + .atPos(pos) + .debugThrow(); + } + + if (vTmp.type() == nix::nThunk) { + // We might allow this in the future if it makes sense for the evaluator + // e.g. implementing tail recursion by returning a thunk to the next + // "iteration". Until then, this is most likely a mistake or misunderstanding. + state.error("Implementation error in custom function: return value must not be a thunk") + .atPos(pos) + .debugThrow(); + } + + v = vTmp; } PrimOp * nix_alloc_primop( diff --git a/src/libexpr-c/nix_api_value.h b/src/libexpr-c/nix_api_value.h index b2b3439ef..244860707 100644 --- a/src/libexpr-c/nix_api_value.h +++ b/src/libexpr-c/nix_api_value.h @@ -79,6 +79,7 @@ typedef struct nix_realised_string nix_realised_string; * @{ */ /** @brief Function pointer for primops + * * When you want to return an error, call nix_set_err_msg(context, NIX_ERR_UNKNOWN, "your error message here"). * * @param[in] user_data Arbitrary data that was initially supplied to nix_alloc_primop @@ -147,7 +148,8 @@ Value * nix_alloc_value(nix_c_context * context, EvalState * state); * @brief Functions to inspect and change Nix language values, represented by Value. 
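Editor's note on the reworked `nix_c_primop_wrapper` above: the callback now evaluates into a temporary value, and the wrapper rejects results that are left uninitialized or that are still thunks. The sketch below shows a callback compatible with the invocation `f(userdata, &ctx, state, args, ret)`; only `nix_set_err_msg` and `nix_alloc_primop` are referenced by the surrounding header, so the value initializer used here (`nix_init_int`) is an assumption for illustration.

```cpp
// Sketch of a primop callback for nix_alloc_primop; not code from this patch.
#include "nix_api_util.h"
#include "nix_api_expr.h"
#include "nix_api_value.h"

static void my_primop(void * user_data, nix_c_context * context,
                      EvalState * state, Value ** args, Value * ret)
{
    (void) user_data;
    (void) state;

    if (!args || !args[0]) {
        // Error path documented above: report failure through the context.
        nix_set_err_msg(context, NIX_ERR_UNKNOWN, "my_primop: missing argument");
        return;
    }

    // Success path: `ret` must be left initialized and not a thunk, otherwise
    // the wrapper raises "return value was not initialized" / "must not be a thunk".
    nix_init_int(context, ret, 42); // assumed initializer name
}
```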
* @{ */ -/** @name Getters +/** @anchor getters + * @name Getters */ /**@{*/ /** @brief Get value type diff --git a/src/libexpr/eval-cache.cc b/src/libexpr/eval-cache.cc index d60967a14..2630c34d5 100644 --- a/src/libexpr/eval-cache.cc +++ b/src/libexpr/eval-cache.cc @@ -7,6 +7,25 @@ namespace nix::eval_cache { +CachedEvalError::CachedEvalError(ref cursor, Symbol attr) + : EvalError(cursor->root->state, "cached failure of attribute '%s'", cursor->getAttrPathStr(attr)) + , cursor(cursor), attr(attr) +{ } + +void CachedEvalError::force() +{ + auto & v = cursor->forceValue(); + + if (v.type() == nAttrs) { + auto a = v.attrs()->get(this->attr); + + state.forceValue(*a->value, a->pos); + } + + // Shouldn't happen. + throw EvalError(state, "evaluation of cached failed attribute '%s' unexpectedly succeeded", cursor->getAttrPathStr(attr)); +} + static const char * schema = R"sql( create table if not exists Attributes ( parent integer not null, @@ -470,7 +489,7 @@ Suggestions AttrCursor::getSuggestionsForAttr(Symbol name) return Suggestions::bestMatches(strAttrNames, root->state.symbols[name]); } -std::shared_ptr AttrCursor::maybeGetAttr(Symbol name, bool forceErrors) +std::shared_ptr AttrCursor::maybeGetAttr(Symbol name) { if (root->db) { if (!cachedValue) @@ -487,12 +506,9 @@ std::shared_ptr AttrCursor::maybeGetAttr(Symbol name, bool forceErro if (attr) { if (std::get_if(&attr->second)) return nullptr; - else if (std::get_if(&attr->second)) { - if (forceErrors) - debug("reevaluating failed cached attribute '%s'", getAttrPathStr(name)); - else - throw CachedEvalError(root->state, "cached failure of attribute '%s'", getAttrPathStr(name)); - } else + else if (std::get_if(&attr->second)) + throw CachedEvalError(ref(shared_from_this()), name); + else return std::make_shared(root, std::make_pair(shared_from_this(), name), nullptr, std::move(attr)); } @@ -537,9 +553,9 @@ std::shared_ptr AttrCursor::maybeGetAttr(std::string_view name) return maybeGetAttr(root->state.symbols.create(name)); } -ref AttrCursor::getAttr(Symbol name, bool forceErrors) +ref AttrCursor::getAttr(Symbol name) { - auto p = maybeGetAttr(name, forceErrors); + auto p = maybeGetAttr(name); if (!p) throw Error("attribute '%s' does not exist", getAttrPathStr(name)); return ref(p); @@ -550,11 +566,11 @@ ref AttrCursor::getAttr(std::string_view name) return getAttr(root->state.symbols.create(name)); } -OrSuggestions> AttrCursor::findAlongAttrPath(const std::vector & attrPath, bool force) +OrSuggestions> AttrCursor::findAlongAttrPath(const std::vector & attrPath) { auto res = shared_from_this(); for (auto & attr : attrPath) { - auto child = res->maybeGetAttr(attr, force); + auto child = res->maybeGetAttr(attr); if (!child) { auto suggestions = res->getSuggestionsForAttr(attr); return OrSuggestions>::failed(suggestions); @@ -751,8 +767,9 @@ bool AttrCursor::isDerivation() StorePath AttrCursor::forceDerivation() { - auto aDrvPath = getAttr(root->state.sDrvPath, true); + auto aDrvPath = getAttr(root->state.sDrvPath); auto drvPath = root->state.store->parseStorePath(aDrvPath->getString()); + drvPath.requireDerivation(); if (!root->state.store->isValidPath(drvPath) && !settings.readOnlyMode) { /* The eval cache contains 'drvPath', but the actual path has been garbage-collected. So force it to be regenerated. 
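Editor's note on the eval-cache changes above: the old `forceErrors` flag is gone, and a cached failure now surfaces as a `CachedEvalError` that carries the cursor and attribute; a caller that wants the real error catches it and calls `force()`, which re-evaluates the attribute and throws the underlying `EvalError`. A hedged caller-side sketch (the function and its arguments are illustrative, not code from the Nix tree):

```cpp
// Sketch of the new CachedEvalError protocol from the caller's side.
#include "eval-cache.hh"

#include <memory>

using namespace nix;
using namespace nix::eval_cache;

std::shared_ptr<AttrCursor> getAttrOrExplain(ref<AttrCursor> cursor, Symbol name)
{
    try {
        return cursor->maybeGetAttr(name);
    } catch (CachedEvalError & e) {
        // The cache only recorded *that* the attribute failed; re-evaluate it
        // to throw the real EvalError with its message and trace.
        e.force(); // declared [[noreturn]] in the header above
    }
}
```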
*/ diff --git a/src/libexpr/eval-cache.hh b/src/libexpr/eval-cache.hh index 46c4999c8..cac985829 100644 --- a/src/libexpr/eval-cache.hh +++ b/src/libexpr/eval-cache.hh @@ -10,14 +10,28 @@ namespace nix::eval_cache { -MakeError(CachedEvalError, EvalError); - struct AttrDb; class AttrCursor; +struct CachedEvalError : EvalError +{ + const ref cursor; + const Symbol attr; + + CachedEvalError(ref cursor, Symbol attr); + + /** + * Evaluate this attribute, which should result in a regular + * `EvalError` exception being thrown. + */ + [[noreturn]] + void force(); +}; + class EvalCache : public std::enable_shared_from_this { friend class AttrCursor; + friend class CachedEvalError; std::shared_ptr db; EvalState & state; @@ -73,6 +87,7 @@ typedef std::variant< class AttrCursor : public std::enable_shared_from_this { friend class EvalCache; + friend class CachedEvalError; ref root; typedef std::optional, Symbol>> Parent; @@ -102,11 +117,11 @@ public: Suggestions getSuggestionsForAttr(Symbol name); - std::shared_ptr maybeGetAttr(Symbol name, bool forceErrors = false); + std::shared_ptr maybeGetAttr(Symbol name); std::shared_ptr maybeGetAttr(std::string_view name); - ref getAttr(Symbol name, bool forceErrors = false); + ref getAttr(Symbol name); ref getAttr(std::string_view name); @@ -114,7 +129,7 @@ public: * Get an attribute along a chain of attrsets. Note that this does * not auto-call functors or functions. */ - OrSuggestions> findAlongAttrPath(const std::vector & attrPath, bool force = false); + OrSuggestions> findAlongAttrPath(const std::vector & attrPath); std::string getString(); diff --git a/src/libexpr/eval-error.cc b/src/libexpr/eval-error.cc index 8db03610b..282f5554a 100644 --- a/src/libexpr/eval-error.cc +++ b/src/libexpr/eval-error.cc @@ -27,8 +27,7 @@ EvalErrorBuilder & EvalErrorBuilder::atPos(Value & value, PosIdx fallback) template EvalErrorBuilder & EvalErrorBuilder::withTrace(PosIdx pos, const std::string_view text) { - error.err.traces.push_front( - Trace{.pos = error.state.positions[pos], .hint = HintFmt(std::string(text))}); + error.addTrace(error.state.positions[pos], text); return *this; } @@ -99,7 +98,6 @@ template class EvalErrorBuilder; template class EvalErrorBuilder; template class EvalErrorBuilder; template class EvalErrorBuilder; -template class EvalErrorBuilder; template class EvalErrorBuilder; } diff --git a/src/libexpr/eval-error.hh b/src/libexpr/eval-error.hh index 7e0cbe982..27407eb6e 100644 --- a/src/libexpr/eval-error.hh +++ b/src/libexpr/eval-error.hh @@ -43,7 +43,6 @@ MakeError(Abort, EvalError); MakeError(TypeError, EvalError); MakeError(UndefinedVarError, EvalError); MakeError(MissingArgumentError, EvalError); -MakeError(CachedEvalError, EvalError); MakeError(InfiniteRecursionError, EvalError); struct InvalidPathError : public EvalError diff --git a/src/libexpr/eval-settings.hh b/src/libexpr/eval-settings.hh index 60d3a6f25..dbfc3b2c7 100644 --- a/src/libexpr/eval-settings.hh +++ b/src/libexpr/eval-settings.hh @@ -15,8 +15,24 @@ struct EvalSettings : Config static std::string resolvePseudoUrl(std::string_view url); - Setting enableNativeCode{this, false, "allow-unsafe-native-code-during-evaluation", - "Whether builtin functions that allow executing native code should be enabled."}; + Setting enableNativeCode{this, false, "allow-unsafe-native-code-during-evaluation", R"( + Enable built-in functions that allow executing native code. 
+ + In particular, this adds: + - `builtins.importNative` *path* + + Load a dynamic shared object (DSO) at *path* which exposes a function pointer to a procedure that initialises a Nix language value, and return that value. + The procedure must have the following signature: + ```cpp + extern "C" typedef void (*ValueInitialiser) (EvalState & state, Value & v); + ``` + + The [Nix C++ API documentation](@docroot@/contributing/documentation.md#api-documentation) has more details on evaluator internals. + + - `builtins.exec` *arguments* + + Execute a program, where *arguments* are specified as a list of strings, and parse its output as a Nix expression. + )"}; Setting nixPath{ this, getDefaultNixPath(), "nix-path", diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index d7e3a2cdb..c1dadeee0 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -28,13 +28,13 @@ #include #include #include +#include #include #include #include #include #include #include -#include #include #include diff --git a/src/libexpr/flake/config.cc b/src/libexpr/flake/config.cc index 3c7ed5d8a..e0c5d4512 100644 --- a/src/libexpr/flake/config.cc +++ b/src/libexpr/flake/config.cc @@ -32,7 +32,7 @@ static void writeTrustedList(const TrustedList & trustedList) void ConfigFile::apply() { - std::set whitelist{"bash-prompt", "bash-prompt-prefix", "bash-prompt-suffix", "flake-registry", "commit-lockfile-summary"}; + std::set whitelist{"bash-prompt", "bash-prompt-prefix", "bash-prompt-suffix", "flake-registry", "commit-lock-file-summary", "commit-lockfile-summary"}; for (auto & [name, value] : settings) { diff --git a/src/libexpr/get-drvs.cc b/src/libexpr/get-drvs.cc index cf10ed84a..ed16a51a1 100644 --- a/src/libexpr/get-drvs.cc +++ b/src/libexpr/get-drvs.cc @@ -69,13 +69,21 @@ std::string PackageInfo::querySystem() const std::optional PackageInfo::queryDrvPath() const { if (!drvPath && attrs) { - NixStringContext context; - if (auto i = attrs->get(state->sDrvPath)) - drvPath = {state->coerceToStorePath(i->pos, *i->value, context, "while evaluating the 'drvPath' attribute of a derivation")}; - else + if (auto i = attrs->get(state->sDrvPath)) { + NixStringContext context; + auto found = state->coerceToStorePath(i->pos, *i->value, context, "while evaluating the 'drvPath' attribute of a derivation"); + try { + found.requireDerivation(); + } catch (Error & e) { + e.addTrace(state->positions[i->pos], "while evaluating the 'drvPath' attribute of a derivation"); + throw; + } + drvPath = {std::move(found)}; + } else drvPath = {std::nullopt}; } - return drvPath.value_or(std::nullopt); + drvPath.value_or(std::nullopt); + return *drvPath; } diff --git a/src/libexpr/lexer.l b/src/libexpr/lexer.l index ee2b6b807..8c0f9d1f2 100644 --- a/src/libexpr/lexer.l +++ b/src/libexpr/lexer.l @@ -20,8 +20,6 @@ #pragma clang diagnostic ignored "-Wunneeded-internal-declaration" #endif -#include - #include "nixexpr.hh" #include "parser-tab.hh" @@ -129,9 +127,10 @@ or { return OR_KW; } {ID} { yylval->id = {yytext, (size_t) yyleng}; return ID; } {INT} { errno = 0; - try { - yylval->n = boost::lexical_cast(yytext); - } catch (const boost::bad_lexical_cast &) { + std::optional numMay = string2Int(yytext); + if (numMay.has_value()) { + yylval->n = *numMay; + } else { throw ParseError(ErrorInfo{ .msg = HintFmt("invalid integer '%1%'", yytext), .pos = state->positions[CUR_POS], diff --git a/src/libexpr/nixexpr.cc b/src/libexpr/nixexpr.cc index c1e2b0448..44198a252 100644 --- a/src/libexpr/nixexpr.cc +++ b/src/libexpr/nixexpr.cc @@ -6,6 +6,7 @@ #include 
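To make the `builtins.importNative` description above concrete, here is a minimal sketch of a DSO source file matching the documented `ValueInitialiser` signature. The header names and the `mkString` call follow the Nix C++ API; the exported symbol name and the build/loading details are assumptions for illustration only:

```cpp
// native-value.cc -- compile into a shared object and point
// builtins.importNative at it; the initialiser fills in the returned value.
#include "eval.hh"
#include "value.hh"

extern "C" void initNativeValue(nix::EvalState & state, nix::Value & v)
{
    (void) state;                         // not needed for a constant value
    v.mkString("hello from native code"); // initialise the result to a string
}
```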
"print.hh" #include +#include namespace nix { diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index 6b947b40d..7371bd488 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -26,6 +26,7 @@ #include #include +#include #include #ifndef _WIN32 @@ -2261,7 +2262,7 @@ static void addPath( std::string_view name, SourcePath path, Value * filterFun, - FileIngestionMethod method, + ContentAddressMethod method, const std::optional expectedHash, Value & v, const NixStringContext & context) @@ -2293,11 +2294,10 @@ static void addPath( std::optional expectedStorePath; if (expectedHash) - expectedStorePath = state.store->makeFixedOutputPath(name, FixedOutputInfo { - .method = method, - .hash = *expectedHash, - .references = {}, - }); + expectedStorePath = state.store->makeFixedOutputPathFromCA(name, ContentAddressWithReferences::fromParts( + method, + *expectedHash, + {})); if (!expectedHash || !state.store->isValidPath(*expectedStorePath)) { auto dstPath = fetchToStore( @@ -2393,7 +2393,7 @@ static void prim_path(EvalState & state, const PosIdx pos, Value * * args, Value std::optional path; std::string name; Value * filterFun = nullptr; - auto method = FileIngestionMethod::Recursive; + ContentAddressMethod method = FileIngestionMethod::Recursive; std::optional expectedHash; NixStringContext context; @@ -2408,7 +2408,9 @@ static void prim_path(EvalState & state, const PosIdx pos, Value * * args, Value else if (n == "filter") state.forceFunction(*(filterFun = attr.value), attr.pos, "while evaluating the `filter` parameter passed to builtins.path"); else if (n == "recursive") - method = FileIngestionMethod { state.forceBool(*attr.value, attr.pos, "while evaluating the `recursive` attribute passed to builtins.path") }; + method = state.forceBool(*attr.value, attr.pos, "while evaluating the `recursive` attribute passed to builtins.path") + ? FileIngestionMethod::Recursive + : FileIngestionMethod::Flat; else if (n == "sha256") expectedHash = newHashAllowEmpty(state.forceStringNoCtx(*attr.value, attr.pos, "while evaluating the `sha256` attribute passed to builtins.path"), HashAlgorithm::SHA256); else @@ -4515,7 +4517,7 @@ void EvalState::createBaseEnv() 1683705525 ``` - The [store path](@docroot@/glossary.md#gloss-store-path) of a derivation depending on `currentTime` will differ for each evaluation, unless both evaluate `builtins.currentTime` in the same second. + The [store path](@docroot@/store/store-path.md) of a derivation depending on `currentTime` will differ for each evaluation, unless both evaluate `builtins.currentTime` in the same second. )", .impureOnly = true, }); diff --git a/src/libexpr/primops/fetchTree.cc b/src/libexpr/primops/fetchTree.cc index e27f30512..fa462dc33 100644 --- a/src/libexpr/primops/fetchTree.cc +++ b/src/libexpr/primops/fetchTree.cc @@ -200,8 +200,8 @@ static RegisterPrimOp primop_fetchTree({ .doc = R"( Fetch a file system tree or a plain file using one of the supported backends and return an attribute set with: - - the resulting fixed-output [store path](@docroot@/glossary.md#gloss-store-path) - - the corresponding [NAR](@docroot@/glossary.md#gloss-nar) hash + - the resulting fixed-output [store path](@docroot@/store/store-path.md) + - the corresponding [NAR](@docroot@/store/file-system-object/content-address.md#serial-nix-archive) hash - backend-specific metadata (currently not documented). 
*input* must be an attribute set with the following attributes: diff --git a/src/libexpr/print.cc b/src/libexpr/print.cc index 7799a0bbe..920490cfa 100644 --- a/src/libexpr/print.cc +++ b/src/libexpr/print.cc @@ -1,5 +1,6 @@ #include #include +#include #include "print.hh" #include "ansicolor.hh" @@ -271,16 +272,27 @@ private: void printDerivation(Value & v) { - NixStringContext context; - std::string storePath; - if (auto i = v.attrs()->get(state.sDrvPath)) - storePath = state.store->printStorePath(state.coerceToStorePath(i->pos, *i->value, context, "while evaluating the drvPath of a derivation")); + std::optional storePath; + if (auto i = v.attrs()->get(state.sDrvPath)) { + NixStringContext context; + storePath = state.coerceToStorePath(i->pos, *i->value, context, "while evaluating the drvPath of a derivation"); + } + + /* This unfortunately breaks printing nested values because of + how the pretty printer is used (when pretty printing and warning + to same terminal / std stream). */ +#if 0 + if (storePath && !storePath->isDerivation()) + warn( + "drvPath attribute '%s' is not a valid store path to a derivation, this value will not work properly", + state.store->printStorePath(*storePath)); +#endif if (options.ansiColors) output << ANSI_GREEN; output << "«derivation"; - if (!storePath.empty()) { - output << " " << storePath; + if (storePath) { + output << " " << state.store->printStorePath(*storePath); } output << "»"; if (options.ansiColors) diff --git a/src/libexpr/value.hh b/src/libexpr/value.hh index 61cf2d310..208cab21d 100644 --- a/src/libexpr/value.hh +++ b/src/libexpr/value.hh @@ -449,7 +449,7 @@ public: return std::string_view(payload.string.c_str); } - const char * const c_str() const + const char * c_str() const { assert(internalType == tString); return payload.string.c_str; diff --git a/src/libfetchers/fetch-settings.hh b/src/libfetchers/fetch-settings.hh index d085f0d82..50cd4d161 100644 --- a/src/libfetchers/fetch-settings.hh +++ b/src/libfetchers/fetch-settings.hh @@ -87,12 +87,12 @@ struct FetchSettings : public Config {}, true, Xp::Flakes}; Setting commitLockFileSummary{ - this, "", "commit-lockfile-summary", + this, "", "commit-lock-file-summary", R"( The commit summary to use when committing changed flake lock files. If empty, the summary is generated based on the action performed.
)", - {}, true, Xp::Flakes}; + {"commit-lockfile-summary"}, true, Xp::Flakes}; Setting trustTarballsFromGitForges{ this, true, "trust-tarballs-from-git-forges", diff --git a/src/libfetchers/filtering-source-accessor.cc b/src/libfetchers/filtering-source-accessor.cc index dfd9e536d..d4557b6d4 100644 --- a/src/libfetchers/filtering-source-accessor.cc +++ b/src/libfetchers/filtering-source-accessor.cc @@ -2,6 +2,12 @@ namespace nix { +std::optional FilteringSourceAccessor::getPhysicalPath(const CanonPath & path) +{ + checkAccess(path); + return next->getPhysicalPath(prefix / path); +} + std::string FilteringSourceAccessor::readFile(const CanonPath & path) { checkAccess(path); diff --git a/src/libfetchers/filtering-source-accessor.hh b/src/libfetchers/filtering-source-accessor.hh index 9ec7bc21f..1f8d84e53 100644 --- a/src/libfetchers/filtering-source-accessor.hh +++ b/src/libfetchers/filtering-source-accessor.hh @@ -30,6 +30,8 @@ struct FilteringSourceAccessor : SourceAccessor displayPrefix.clear(); } + std::optional getPhysicalPath(const CanonPath & path) override; + std::string readFile(const CanonPath & path) override; bool pathExists(const CanonPath & path) override; diff --git a/src/libfetchers/unix/git.cc b/src/libfetchers/git.cc similarity index 98% rename from src/libfetchers/unix/git.cc rename to src/libfetchers/git.cc index fa7ef3621..ce80932f6 100644 --- a/src/libfetchers/unix/git.cc +++ b/src/libfetchers/git.cc @@ -19,7 +19,10 @@ #include #include #include -#include + +#ifndef _WIN32 +# include +#endif using namespace std::string_literals; @@ -40,6 +43,7 @@ bool isCacheFileWithinTtl(time_t now, const struct stat & st) bool touchCacheFile(const Path & path, time_t touch_time) { +#ifndef _WIN32 // TODO implement struct timeval times[2]; times[0].tv_sec = touch_time; times[0].tv_usec = 0; @@ -47,6 +51,9 @@ bool touchCacheFile(const Path & path, time_t touch_time) times[1].tv_usec = 0; return lutimes(path.c_str(), times) == 0; +#else + return false; +#endif } Path getCachePath(std::string_view key, bool shallow) @@ -98,7 +105,15 @@ bool storeCachedHead(const std::string & actualUrl, const std::string & headRef) try { runProgram("git", true, { "-C", cacheDir, "--git-dir", ".", "symbolic-ref", "--", "HEAD", headRef }); } catch (ExecError &e) { - if (!WIFEXITED(e.status)) throw; + if ( +#ifndef WIN32 // TODO abstract over exit status handling on Windows + !WIFEXITED(e.status) +#else + e.status != 0 +#endif + ) + throw; + return false; } /* No need to touch refs/HEAD, because `git symbolic-ref` updates the mtime. */ @@ -329,7 +344,13 @@ struct GitInputScheme : InputScheme .program = "git", .args = {"-C", repoInfo.url, "--git-dir", repoInfo.gitDir, "check-ignore", "--quiet", std::string(path.rel())}, }); - auto exitCode = WEXITSTATUS(result.first); + auto exitCode = +#ifndef WIN32 // TODO abstract over exit status handling on Windows + WEXITSTATUS(result.first) +#else + result.first +#endif + ; if (exitCode != 0) { // The path is not `.gitignore`d, we can add the file. 
diff --git a/src/libfetchers/github.cc b/src/libfetchers/github.cc index d62a7482e..267e8607f 100644 --- a/src/libfetchers/github.cc +++ b/src/libfetchers/github.cc @@ -433,9 +433,15 @@ struct GitLabInputScheme : GitArchiveInputScheme store->toRealPath( downloadFile(store, url, "source", headers).storePath))); - return RefInfo { - .rev = Hash::parseAny(std::string(json[0]["id"]), HashAlgorithm::SHA1) - }; + if (json.is_array() && json.size() == 1 && json[0]["id"] != nullptr) { + return RefInfo { + .rev = Hash::parseAny(std::string(json[0]["id"]), HashAlgorithm::SHA1) + }; + } if (json.is_array() && json.size() == 0) { + throw Error("No commits returned by GitLab API -- does the git ref really exist?"); + } else { + throw Error("Unexpected response received from GitLab: %s", json); + } } DownloadUrl getDownloadUrl(const Input & input) const override diff --git a/src/libfetchers/local.mk b/src/libfetchers/local.mk index 0fef1466b..e229a0993 100644 --- a/src/libfetchers/local.mk +++ b/src/libfetchers/local.mk @@ -5,16 +5,10 @@ libfetchers_NAME = libnixfetchers libfetchers_DIR := $(d) libfetchers_SOURCES := $(wildcard $(d)/*.cc) -ifdef HOST_UNIX - libfetchers_SOURCES += $(wildcard $(d)/unix/*.cc) -endif # Not just for this library itself, but also for downstream libraries using this library INCLUDE_libfetchers := -I $(d) -ifdef HOST_UNIX - INCLUDE_libfetchers += -I $(d)/unix -endif libfetchers_CXXFLAGS += $(INCLUDE_libutil) $(INCLUDE_libstore) $(INCLUDE_libfetchers) diff --git a/src/libfetchers/unix/mercurial.cc b/src/libfetchers/mercurial.cc similarity index 100% rename from src/libfetchers/unix/mercurial.cc rename to src/libfetchers/mercurial.cc diff --git a/src/libfetchers/tarball.cc b/src/libfetchers/tarball.cc index e19b18505..5de367052 100644 --- a/src/libfetchers/tarball.cc +++ b/src/libfetchers/tarball.cc @@ -145,9 +145,27 @@ DownloadTarballResult downloadTarball( // TODO: fall back to cached value if download fails. + AutoDelete cleanupTemp; + /* Note: if the download is cached, `importTarball()` will receive no data, which causes it to import an empty tarball. */ - TarArchive archive { *source }; + auto archive = + hasSuffix(toLower(parseURL(url).path), ".zip") + ? ({ + /* In streaming mode, libarchive doesn't handle + symlinks in zip files correctly (#10649). So write + the entire file to disk so libarchive can access it + in random-access mode. 
*/ + auto [fdTemp, path] = createTempFile("nix-zipfile"); + cleanupTemp.reset(path); + debug("downloading '%s' into '%s'...", url, path); + { + FdSink sink(fdTemp.get()); + source->drainInto(sink); + } + TarArchive{path}; + }) + : TarArchive{*source}; auto parseSink = getTarballCache()->getFileSystemObjectSink(); auto lastModified = unpackTarfileToSink(archive, *parseSink); @@ -184,7 +202,7 @@ struct CurlInputScheme : InputScheme { const std::set transportUrlSchemes = {"file", "http", "https"}; - const bool hasTarballExtension(std::string_view path) const + bool hasTarballExtension(std::string_view path) const { return hasSuffix(path, ".zip") || hasSuffix(path, ".tar") || hasSuffix(path, ".tgz") || hasSuffix(path, ".tar.gz") diff --git a/src/libmain/progress-bar.cc b/src/libmain/progress-bar.cc index ce45eae2b..bb4c52ef7 100644 --- a/src/libmain/progress-bar.cc +++ b/src/libmain/progress-bar.cc @@ -7,6 +7,7 @@ #include #include #include +#include #include #include diff --git a/src/libstore-c/nix_api_store.h b/src/libstore-c/nix_api_store.h index 209f91f0d..d3cb8fab8 100644 --- a/src/libstore-c/nix_api_store.h +++ b/src/libstore-c/nix_api_store.h @@ -54,16 +54,20 @@ nix_err nix_libstore_init_no_load_config(nix_c_context * context); nix_err nix_init_plugins(nix_c_context * context); /** - * @brief Open a nix store + * @brief Open a nix store. + * * Store instances may share state and resources behind the scenes. + * * @param[out] context Optional, stores error information - * @param[in] uri URI of the nix store, copied - * @param[in] params optional, array of key-value pairs, {{"endpoint", - * "https://s3.local"}} + * @param[in] uri URI of the Nix store, copied. See [*Store URL format* in the Nix Reference + * Manual](https://nixos.org/manual/nix/stable/store/types/#store-url-format). + * @param[in] params optional, null-terminated array of key-value pairs, e.g. {{"endpoint", + * "https://s3.local"}}. See [*Store Types* in the Nix Reference + * Manual](https://nixos.org/manual/nix/stable/store/types). * @return a Store pointer, NULL in case of errors * @see nix_store_free */ -Store * nix_store_open(nix_c_context *, const char * uri, const char *** params); +Store * nix_store_open(nix_c_context * context, const char * uri, const char *** params); /** * @brief Deallocate a nix store and free any resources if not also held by other Store instances. @@ -155,7 +159,9 @@ nix_err nix_store_realise( /** * @brief get the version of a nix store. + * * If the store doesn't have a version (like the dummy store), returns an empty string. + * * @param[out] context Optional, stores error information * @param[in] store nix store reference * @param[in] callback Called with the version. 
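A short usage sketch for the store C API documented above. It only uses calls declared in `nix_api_store.h` and `nix_api_util.h`; the `"daemon"` store URI and the minimal error handling are illustrative assumptions:

```cpp
#include "nix_api_util.h"
#include "nix_api_store.h"
#include <stdio.h>

int main(void)
{
    nix_c_context * ctx = nix_c_context_create(); // error context from nix_api_util.h
    nix_libstore_init(ctx);                       // load libstore configuration

    /* NULL params: open the store with its default settings. */
    Store * store = nix_store_open(ctx, "daemon", NULL);
    if (!store)
        fprintf(stderr, "could not open the Nix store\n");
    else
        nix_store_free(store);

    nix_c_context_free(ctx);
    return 0;
}
```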
diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc index 67d00f364..95a8d5a7a 100644 --- a/src/libstore/binary-cache-store.cc +++ b/src/libstore/binary-cache-store.cc @@ -18,6 +18,7 @@ #include #include #include +#include #include diff --git a/src/libstore/unix/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc similarity index 97% rename from src/libstore/unix/build/derivation-goal.cc rename to src/libstore/build/derivation-goal.cc index 8d6e35015..4226fb61a 100644 --- a/src/libstore/unix/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -1,5 +1,8 @@ #include "derivation-goal.hh" -#include "hook-instance.hh" +#ifndef _WIN32 // TODO enable build hook on Windows +# include "hook-instance.hh" +#endif +#include "processes.hh" #include "worker.hh" #include "builtins.hh" #include "builtins/buildenv.hh" @@ -19,19 +22,8 @@ #include #include -#include -#include -#include -#include #include -#include #include -#include -#include -#include - -#include -#include #include @@ -101,7 +93,9 @@ std::string DerivationGoal::key() void DerivationGoal::killChild() { +#ifndef _WIN32 // TODO enable build hook on Windows hook.reset(); +#endif } @@ -641,9 +635,17 @@ void DerivationGoal::started() buildMode == bmCheck ? "checking outputs of '%s'" : "building '%s'", worker.store.printStorePath(drvPath)); fmt("building '%s'", worker.store.printStorePath(drvPath)); +#ifndef _WIN32 // TODO enable build hook on Windows if (hook) msg += fmt(" on '%s'", machineName); +#endif act = std::make_unique(*logger, lvlInfo, actBuild, msg, - Logger::Fields{worker.store.printStorePath(drvPath), hook ? machineName : "", 1, 1}); + Logger::Fields{worker.store.printStorePath(drvPath), +#ifndef _WIN32 // TODO enable build hook on Windows + hook ? machineName : +#endif + "", + 1, + 1}); mcRunningBuilds = std::make_unique>(worker.runningBuilds); worker.updateProgress(); } @@ -778,12 +780,18 @@ static void movePath(const Path & src, const Path & dst) { auto st = lstat(src); - bool changePerm = (geteuid() && S_ISDIR(st.st_mode) && !(st.st_mode & S_IWUSR)); + bool changePerm = ( +#ifndef _WIN32 + geteuid() +#else + !isRootUser() +#endif + && S_ISDIR(st.st_mode) && !(st.st_mode & S_IWUSR)); if (changePerm) chmod_(src, st.st_mode | S_IWUSR); - renameFile(src, dst); + std::filesystem::rename(src, dst); if (changePerm) chmod_(dst, st.st_mode); @@ -796,7 +804,7 @@ void replaceValidPath(const Path & storePath, const Path & tmpPath) tmpPath (the replacement), so we have to move it out of the way first. We'd better not be interrupted here, because if we're repairing (say) Glibc, we end up with a broken system. 
*/ - Path oldPath = fmt("%1%.old-%2%-%3%", storePath, getpid(), random()); + Path oldPath = fmt("%1%.old-%2%-%3%", storePath, getpid(), rand()); if (pathExists(storePath)) movePath(storePath, oldPath); @@ -818,14 +826,20 @@ void replaceValidPath(const Path & storePath, const Path & tmpPath) int DerivationGoal::getChildStatus() { +#ifndef _WIN32 // TODO enable build hook on Windows return hook->pid.kill(); +#else + return 0; +#endif } void DerivationGoal::closeReadPipes() { - hook->builderOut.readSide = -1; - hook->fromHook.readSide = -1; +#ifndef _WIN32 // TODO enable build hook on Windows + hook->builderOut.readSide.close(); + hook->fromHook.readSide.close(); +#endif } @@ -1019,13 +1033,16 @@ void DerivationGoal::buildDone() BuildResult::Status st = BuildResult::MiscFailure; +#ifndef _WIN32 if (hook && WIFEXITED(status) && WEXITSTATUS(status) == 101) st = BuildResult::TimedOut; else if (hook && (!WIFEXITED(status) || WEXITSTATUS(status) != 100)) { } - else { + else +#endif + { assert(derivationType); st = dynamic_cast(&e) ? BuildResult::NotDeterministic : @@ -1112,6 +1129,9 @@ void DerivationGoal::resolvedFinished() HookReply DerivationGoal::tryBuildHook() { +#ifdef _WIN32 // TODO enable build hook on Windows + return rpDecline; +#else if (settings.buildHook.get().empty() || !worker.tryBuildHook || !useDerivation) return rpDecline; if (!worker.hook) @@ -1205,17 +1225,18 @@ HookReply DerivationGoal::tryBuildHook() } hook->sink = FdSink(); - hook->toHook.writeSide = -1; + hook->toHook.writeSide.close(); /* Create the log file and pipe. */ Path logFile = openLogFile(); - std::set fds; + std::set fds; fds.insert(hook->fromHook.readSide.get()); fds.insert(hook->builderOut.readSide.get()); worker.childStarted(shared_from_this(), fds, false, false); return rpAccept; +#endif } @@ -1251,7 +1272,11 @@ Path DerivationGoal::openLogFile() Path logFileName = fmt("%s/%s%s", dir, baseName.substr(2), settings.compressLog ? ".bz2" : ""); - fdLogFile = open(logFileName.c_str(), O_CREAT | O_WRONLY | O_TRUNC | O_CLOEXEC, 0666); + fdLogFile = toDescriptor(open(logFileName.c_str(), O_CREAT | O_WRONLY | O_TRUNC +#ifndef _WIN32 + | O_CLOEXEC +#endif + , 0666)); if (!fdLogFile) throw SysError("creating log file '%1%'", logFileName); logFileSink = std::make_shared(fdLogFile.get()); @@ -1271,16 +1296,20 @@ void DerivationGoal::closeLogFile() if (logSink2) logSink2->finish(); if (logFileSink) logFileSink->flush(); logSink = logFileSink = 0; - fdLogFile = -1; + fdLogFile.close(); } -bool DerivationGoal::isReadDesc(int fd) +bool DerivationGoal::isReadDesc(Descriptor fd) { +#ifdef _WIN32 // TODO enable build hook on Windows + return false; +#else return fd == hook->builderOut.readSide.get(); +#endif } -void DerivationGoal::handleChildOutput(int fd, std::string_view data) +void DerivationGoal::handleChildOutput(Descriptor fd, std::string_view data) { // local & `ssh://`-builds are dealt with here. 
auto isWrittenToLog = isReadDesc(fd); @@ -1310,6 +1339,7 @@ void DerivationGoal::handleChildOutput(int fd, std::string_view data) if (logSink) (*logSink)(data); } +#ifndef _WIN32 // TODO enable build hook on Windows if (hook && fd == hook->fromHook.readSide.get()) { for (auto c : data) if (c == '\n') { @@ -1344,10 +1374,11 @@ void DerivationGoal::handleChildOutput(int fd, std::string_view data) } else currentHookLine += c; } +#endif } -void DerivationGoal::handleEOF(int fd) +void DerivationGoal::handleEOF(Descriptor fd) { if (!currentLogLine.empty()) flushLine(); worker.wakeUp(shared_from_this()); diff --git a/src/libstore/unix/build/derivation-goal.hh b/src/libstore/build/derivation-goal.hh similarity index 96% rename from src/libstore/unix/build/derivation-goal.hh rename to src/libstore/build/derivation-goal.hh index ddb5ee1e3..04f13aedd 100644 --- a/src/libstore/unix/build/derivation-goal.hh +++ b/src/libstore/build/derivation-goal.hh @@ -2,7 +2,9 @@ ///@file #include "parsed-derivations.hh" -#include "lock.hh" +#ifndef _WIN32 +# include "user-lock.hh" +#endif #include "outputs-spec.hh" #include "store-api.hh" #include "pathlocks.hh" @@ -12,7 +14,9 @@ namespace nix { using std::map; +#ifndef _WIN32 // TODO enable build hook on Windows struct HookInstance; +#endif typedef enum {rpAccept, rpDecline, rpPostpone} HookReply; @@ -178,10 +182,12 @@ struct DerivationGoal : public Goal std::string currentHookLine; +#ifndef _WIN32 // TODO enable build hook on Windows /** * The build hook. */ std::unique_ptr hook; +#endif /** * The sort of derivation we are building. @@ -287,13 +293,13 @@ struct DerivationGoal : public Goal virtual void cleanupPostOutputsRegisteredModeCheck(); virtual void cleanupPostOutputsRegisteredModeNonCheck(); - virtual bool isReadDesc(int fd); + virtual bool isReadDesc(Descriptor fd); /** * Callback used by the worker to write to the log. */ - void handleChildOutput(int fd, std::string_view data) override; - void handleEOF(int fd) override; + void handleChildOutput(Descriptor fd, std::string_view data) override; + void handleEOF(Descriptor fd) override; void flushLine(); /** diff --git a/src/libstore/unix/build/drv-output-substitution-goal.cc b/src/libstore/build/drv-output-substitution-goal.cc similarity index 93% rename from src/libstore/unix/build/drv-output-substitution-goal.cc rename to src/libstore/build/drv-output-substitution-goal.cc index b30957c84..13a07e4ea 100644 --- a/src/libstore/unix/build/drv-output-substitution-goal.cc +++ b/src/libstore/build/drv-output-substitution-goal.cc @@ -66,7 +66,11 @@ void DrvOutputSubstitutionGoal::tryNext() some other error occurs), so it must not touch `this`. So put the shared state in a separate refcounted object. 
*/ downloadState = std::make_shared(); +#ifndef _WIN32 downloadState->outPipe.create(); +#else + downloadState->outPipe.createAsyncPipe(worker.ioport.get()); +#endif sub->queryRealisation( id, @@ -79,7 +83,13 @@ void DrvOutputSubstitutionGoal::tryNext() } } }); - worker.childStarted(shared_from_this(), {downloadState->outPipe.readSide.get()}, true, false); + worker.childStarted(shared_from_this(), { +#ifndef _WIN32 + downloadState->outPipe.readSide.get() +#else + &downloadState->outPipe +#endif + }, true, false); state = &DrvOutputSubstitutionGoal::realisationFetched; } @@ -158,7 +168,7 @@ void DrvOutputSubstitutionGoal::work() (this->*state)(); } -void DrvOutputSubstitutionGoal::handleEOF(int fd) +void DrvOutputSubstitutionGoal::handleEOF(Descriptor fd) { if (fd == downloadState->outPipe.readSide.get()) worker.wakeUp(shared_from_this()); } diff --git a/src/libstore/unix/build/drv-output-substitution-goal.hh b/src/libstore/build/drv-output-substitution-goal.hh similarity index 94% rename from src/libstore/unix/build/drv-output-substitution-goal.hh rename to src/libstore/build/drv-output-substitution-goal.hh index da2426e5e..6967ca84f 100644 --- a/src/libstore/unix/build/drv-output-substitution-goal.hh +++ b/src/libstore/build/drv-output-substitution-goal.hh @@ -1,11 +1,13 @@ #pragma once ///@file +#include +#include + #include "store-api.hh" #include "goal.hh" #include "realisation.hh" -#include -#include +#include "muxable-pipe.hh" namespace nix { @@ -43,7 +45,7 @@ class DrvOutputSubstitutionGoal : public Goal { struct DownloadState { - Pipe outPipe; + MuxablePipe outPipe; std::promise> promise; }; @@ -71,7 +73,7 @@ public: std::string key() override; void work() override; - void handleEOF(int fd) override; + void handleEOF(Descriptor fd) override; JobCategory jobCategory() const override { return JobCategory::Substitution; diff --git a/src/libstore/unix/build/entry-points.cc b/src/libstore/build/entry-points.cc similarity index 90% rename from src/libstore/unix/build/entry-points.cc rename to src/libstore/build/entry-points.cc index d4bead28e..784f618c1 100644 --- a/src/libstore/unix/build/entry-points.cc +++ b/src/libstore/build/entry-points.cc @@ -1,6 +1,8 @@ #include "worker.hh" #include "substitution-goal.hh" -#include "derivation-goal.hh" +#ifndef _WIN32 // TODO Enable building on Windows +# include "derivation-goal.hh" +#endif #include "local-store.hh" namespace nix { @@ -25,9 +27,12 @@ void Store::buildPaths(const std::vector & reqs, BuildMode buildMod ex = std::move(i->ex); } if (i->exitCode != Goal::ecSuccess) { +#ifndef _WIN32 // TODO Enable building on Windows if (auto i2 = dynamic_cast(i.get())) failed.insert(printStorePath(i2->drvPath)); - else if (auto i2 = dynamic_cast(i.get())) + else +#endif + if (auto i2 = dynamic_cast(i.get())) failed.insert(printStorePath(i2->storePath)); } } @@ -74,7 +79,12 @@ BuildResult Store::buildDerivation(const StorePath & drvPath, const BasicDerivat BuildMode buildMode) { Worker worker(*this, *this); +#ifndef _WIN32 // TODO Enable building on Windows auto goal = worker.makeBasicDerivationGoal(drvPath, drv, OutputsSpec::All {}, buildMode); +#else + std::shared_ptr goal; + throw UnimplementedError("Building derivations not yet implemented on windows."); +#endif try { worker.run(Goals{goal}); diff --git a/src/libstore/unix/build/goal.cc b/src/libstore/build/goal.cc similarity index 100% rename from src/libstore/unix/build/goal.cc rename to src/libstore/build/goal.cc diff --git a/src/libstore/unix/build/goal.hh b/src/libstore/build/goal.hh 
similarity index 97% rename from src/libstore/unix/build/goal.hh rename to src/libstore/build/goal.hh index 9af083230..0d9b828e1 100644 --- a/src/libstore/unix/build/goal.hh +++ b/src/libstore/build/goal.hh @@ -138,12 +138,12 @@ public: virtual void waiteeDone(GoalPtr waitee, ExitCode result); - virtual void handleChildOutput(int fd, std::string_view data) + virtual void handleChildOutput(Descriptor fd, std::string_view data) { abort(); } - virtual void handleEOF(int fd) + virtual void handleEOF(Descriptor fd) { abort(); } diff --git a/src/libstore/unix/build/substitution-goal.cc b/src/libstore/build/substitution-goal.cc similarity index 96% rename from src/libstore/unix/build/substitution-goal.cc rename to src/libstore/build/substitution-goal.cc index c7e8e2825..0be3d1e8d 100644 --- a/src/libstore/unix/build/substitution-goal.cc +++ b/src/libstore/build/substitution-goal.cc @@ -212,7 +212,11 @@ void PathSubstitutionGoal::tryToRun() maintainRunningSubstitutions = std::make_unique>(worker.runningSubstitutions); worker.updateProgress(); +#ifndef _WIN32 outPipe.create(); +#else + outPipe.createAsyncPipe(worker.ioport.get()); +#endif promise = std::promise(); @@ -235,7 +239,13 @@ void PathSubstitutionGoal::tryToRun() } }); - worker.childStarted(shared_from_this(), {outPipe.readSide.get()}, true, false); + worker.childStarted(shared_from_this(), { +#ifndef _WIN32 + outPipe.readSide.get() +#else + &outPipe +#endif + }, true, false); state = &PathSubstitutionGoal::finished; } @@ -294,12 +304,12 @@ void PathSubstitutionGoal::finished() } -void PathSubstitutionGoal::handleChildOutput(int fd, std::string_view data) +void PathSubstitutionGoal::handleChildOutput(Descriptor fd, std::string_view data) { } -void PathSubstitutionGoal::handleEOF(int fd) +void PathSubstitutionGoal::handleEOF(Descriptor fd) { if (fd == outPipe.readSide.get()) worker.wakeUp(shared_from_this()); } diff --git a/src/libstore/unix/build/substitution-goal.hh b/src/libstore/build/substitution-goal.hh similarity index 93% rename from src/libstore/unix/build/substitution-goal.hh rename to src/libstore/build/substitution-goal.hh index 1d389d328..1a051fc1f 100644 --- a/src/libstore/unix/build/substitution-goal.hh +++ b/src/libstore/build/substitution-goal.hh @@ -1,9 +1,9 @@ #pragma once ///@file -#include "lock.hh" #include "store-api.hh" #include "goal.hh" +#include "muxable-pipe.hh" namespace nix { @@ -45,7 +45,7 @@ struct PathSubstitutionGoal : public Goal /** * Pipe for the substituter's standard output. */ - Pipe outPipe; + MuxablePipe outPipe; /** * The substituter thread. @@ -111,8 +111,8 @@ public: /** * Callback used by the worker to write to the log. 
*/ - void handleChildOutput(int fd, std::string_view data) override; - void handleEOF(int fd) override; + void handleChildOutput(Descriptor fd, std::string_view data) override; + void handleEOF(Descriptor fd) override; /* Called by destructor, can't be overridden */ void cleanup() override final; diff --git a/src/libstore/unix/build/worker.cc b/src/libstore/build/worker.cc similarity index 86% rename from src/libstore/unix/build/worker.cc rename to src/libstore/build/worker.cc index 2cca06213..b53dc771a 100644 --- a/src/libstore/unix/build/worker.cc +++ b/src/libstore/build/worker.cc @@ -1,13 +1,15 @@ +#include "local-store.hh" #include "machines.hh" #include "worker.hh" #include "substitution-goal.hh" #include "drv-output-substitution-goal.hh" -#include "local-derivation-goal.hh" -#include "hook-instance.hh" +#include "derivation-goal.hh" +#ifndef _WIN32 // TODO Enable building on Windows +# include "local-derivation-goal.hh" +# include "hook-instance.hh" +#endif #include "signals.hh" -#include - namespace nix { Worker::Worker(Store & store, Store & evalStore) @@ -64,20 +66,27 @@ std::shared_ptr Worker::makeDerivationGoal(const StorePath & drv const OutputsSpec & wantedOutputs, BuildMode buildMode) { return makeDerivationGoalCommon(drvPath, wantedOutputs, [&]() -> std::shared_ptr { - return !dynamic_cast(&store) - ? std::make_shared(drvPath, wantedOutputs, *this, buildMode) - : std::make_shared(drvPath, wantedOutputs, *this, buildMode); + return +#ifndef _WIN32 // TODO Enable building on Windows + dynamic_cast(&store) + ? std::make_shared(drvPath, wantedOutputs, *this, buildMode) + : +#endif + std::make_shared(drvPath, wantedOutputs, *this, buildMode); }); } - std::shared_ptr Worker::makeBasicDerivationGoal(const StorePath & drvPath, const BasicDerivation & drv, const OutputsSpec & wantedOutputs, BuildMode buildMode) { return makeDerivationGoalCommon(drvPath, wantedOutputs, [&]() -> std::shared_ptr { - return !dynamic_cast(&store) - ? std::make_shared(drvPath, drv, wantedOutputs, *this, buildMode) - : std::make_shared(drvPath, drv, wantedOutputs, *this, buildMode); + return +#ifndef _WIN32 // TODO Enable building on Windows + dynamic_cast(&store) + ? 
std::make_shared(drvPath, drv, wantedOutputs, *this, buildMode) + : +#endif + std::make_shared(drvPath, drv, wantedOutputs, *this, buildMode); }); } @@ -143,7 +152,8 @@ void Worker::removeGoal(GoalPtr goal) { if (auto drvGoal = std::dynamic_pointer_cast(goal)) nix::removeGoal(drvGoal, derivationGoals); - else if (auto subGoal = std::dynamic_pointer_cast(goal)) + else + if (auto subGoal = std::dynamic_pointer_cast(goal)) nix::removeGoal(subGoal, substitutionGoals); else if (auto subGoal = std::dynamic_pointer_cast(goal)) nix::removeGoal(subGoal, drvOutputSubstitutionGoals); @@ -187,13 +197,13 @@ unsigned Worker::getNrSubstitutions() } -void Worker::childStarted(GoalPtr goal, const std::set & fds, +void Worker::childStarted(GoalPtr goal, const std::set & channels, bool inBuildSlot, bool respectTimeouts) { Child child; child.goal = goal; child.goal2 = goal.get(); - child.fds = fds; + child.channels = channels; child.timeStarted = child.lastOutput = steady_time_point::clock::now(); child.inBuildSlot = inBuildSlot; child.respectTimeouts = respectTimeouts; @@ -286,7 +296,8 @@ void Worker::run(const Goals & _topGoals) .drvPath = makeConstantStorePathRef(goal->drvPath), .outputs = goal->wantedOutputs, }); - } else if (auto goal = dynamic_cast(i.get())) { + } else + if (auto goal = dynamic_cast(i.get())) { topPaths.push_back(DerivedPath::Opaque{goal->storePath}); } } @@ -408,23 +419,25 @@ void Worker::waitForInput() if (useTimeout) vomit("sleeping %d seconds", timeout); + MuxablePipePollState state; + +#ifndef _WIN32 /* Use select() to wait for the input side of any logger pipe to become `available'. Note that `available' (i.e., non-blocking) includes EOF. */ - std::vector pollStatus; - std::map fdToPollStatus; for (auto & i : children) { - for (auto & j : i.fds) { - pollStatus.push_back((struct pollfd) { .fd = j, .events = POLLIN }); - fdToPollStatus[j] = pollStatus.size() - 1; + for (auto & j : i.channels) { + state.pollStatus.push_back((struct pollfd) { .fd = j, .events = POLLIN }); + state.fdToPollStatus[j] = state.pollStatus.size() - 1; } } +#endif - if (poll(pollStatus.data(), pollStatus.size(), - useTimeout ? timeout * 1000 : -1) == -1) { - if (errno == EINTR) return; - throw SysError("waiting for input"); - } + state.poll( +#ifdef _WIN32 + ioport.get(), +#endif + useTimeout ? (std::optional { timeout * 1000 }) : std::nullopt); auto after = steady_time_point::clock::now(); @@ -439,32 +452,18 @@ void Worker::waitForInput() GoalPtr goal = j->goal.lock(); assert(goal); - std::set fds2(j->fds); - std::vector buffer(4096); - for (auto & k : fds2) { - const auto fdPollStatusId = get(fdToPollStatus, k); - assert(fdPollStatusId); - assert(*fdPollStatusId < pollStatus.size()); - if (pollStatus.at(*fdPollStatusId).revents) { - ssize_t rd = ::read(k, buffer.data(), buffer.size()); - // FIXME: is there a cleaner way to handle pt close - // than EIO? Is this even standard? 
- if (rd == 0 || (rd == -1 && errno == EIO)) { - debug("%1%: got EOF", goal->getName()); - goal->handleEOF(k); - j->fds.erase(k); - } else if (rd == -1) { - if (errno != EINTR) - throw SysError("%s: read failed", goal->getName()); - } else { - printMsg(lvlVomit, "%1%: read %2% bytes", - goal->getName(), rd); - std::string_view data((char *) buffer.data(), rd); - j->lastOutput = after; - goal->handleChildOutput(k, data); - } - } - } + state.iterate( + j->channels, + [&](Descriptor k, std::string_view data) { + printMsg(lvlVomit, "%1%: read %2% bytes", + goal->getName(), data.size()); + j->lastOutput = after; + goal->handleChildOutput(k, data); + }, + [&](Descriptor k) { + debug("%1%: got EOF", goal->getName()); + goal->handleEOF(k); + }); if (goal->exitCode == Goal::ecBusy && 0 != settings.maxSilentTime && diff --git a/src/libstore/unix/build/worker.hh b/src/libstore/build/worker.hh similarity index 95% rename from src/libstore/unix/build/worker.hh rename to src/libstore/build/worker.hh index ced013ddd..7d67030d7 100644 --- a/src/libstore/unix/build/worker.hh +++ b/src/libstore/build/worker.hh @@ -2,10 +2,10 @@ ///@file #include "types.hh" -#include "lock.hh" #include "store-api.hh" #include "goal.hh" #include "realisation.hh" +#include "muxable-pipe.hh" #include #include @@ -36,14 +36,14 @@ typedef std::chrono::time_point steady_time_point; /** * A mapping used to remember for each child process to what goal it - * belongs, and file descriptors for receiving log data and output + * belongs, and comm channels for receiving log data and output * path creation commands. */ struct Child { WeakGoalPtr goal; Goal * goal2; // ugly hackery - std::set fds; + std::set channels; bool respectTimeouts; bool inBuildSlot; /** @@ -53,8 +53,10 @@ struct Child steady_time_point timeStarted; }; +#ifndef _WIN32 // TODO Enable building on Windows /* Forward definition. */ struct HookInstance; +#endif /** * The worker class. @@ -152,10 +154,16 @@ public: */ bool checkMismatch; +#ifdef _WIN32 + AutoCloseFD ioport; +#endif + Store & store; Store & evalStore; +#ifndef _WIN32 // TODO Enable building on Windows std::unique_ptr hook; +#endif uint64_t expectedBuilds = 0; uint64_t doneBuilds = 0; @@ -238,7 +246,7 @@ public: * Registers a running child process. `inBuildSlot` means that * the process counts towards the jobs limit. 
*/ - void childStarted(GoalPtr goal, const std::set & fds, + void childStarted(GoalPtr goal, const std::set & channels, bool inBuildSlot, bool respectTimeouts); /** diff --git a/src/libstore/builtins/buildenv.cc b/src/libstore/builtins/buildenv.cc index 5fcdf6f15..ab35c861d 100644 --- a/src/libstore/builtins/buildenv.cc +++ b/src/libstore/builtins/buildenv.cc @@ -17,10 +17,10 @@ struct State /* For each activated package, create symlinks */ static void createLinks(State & state, const Path & srcDir, const Path & dstDir, int priority) { - std::vector srcFiles; + std::filesystem::directory_iterator srcFiles; try { - srcFiles = readDirectory(srcDir); + srcFiles = std::filesystem::directory_iterator{srcDir}; } catch (std::filesystem::filesystem_error & e) { if (e.code() == std::errc::not_a_directory) { warn("not including '%s' in the user environment because it's not a directory", srcDir); diff --git a/src/libstore/unix/builtins/fetchurl.cc b/src/libstore/builtins/fetchurl.cc similarity index 100% rename from src/libstore/unix/builtins/fetchurl.cc rename to src/libstore/builtins/fetchurl.cc diff --git a/src/libstore/unix/builtins/unpack-channel.cc b/src/libstore/builtins/unpack-channel.cc similarity index 67% rename from src/libstore/unix/builtins/unpack-channel.cc rename to src/libstore/builtins/unpack-channel.cc index 47bf5d49c..a5f2b8e3a 100644 --- a/src/libstore/unix/builtins/unpack-channel.cc +++ b/src/libstore/builtins/unpack-channel.cc @@ -21,10 +21,13 @@ void builtinUnpackChannel( unpackTarfile(src, out); - auto entries = readDirectory(out); - if (entries.size() != 1) + auto entries = std::filesystem::directory_iterator{out}; + auto fileName = entries->path().string(); + auto fileCount = std::distance(std::filesystem::begin(entries), std::filesystem::end(entries)); + + if (fileCount != 1) throw Error("channel tarball '%s' contains more than one file", src); - renameFile(entries[0].path().string(), (out + "/" + channelName)); + std::filesystem::rename(fileName, (out + "/" + channelName)); } } diff --git a/src/libstore/daemon.cc b/src/libstore/daemon.cc index 47d6d5541..fe60cb918 100644 --- a/src/libstore/daemon.cc +++ b/src/libstore/daemon.cc @@ -1,6 +1,7 @@ #include "daemon.hh" #include "signals.hh" #include "worker-protocol.hh" +#include "worker-protocol-connection.hh" #include "worker-protocol-impl.hh" #include "build-result.hh" #include "store-api.hh" @@ -19,6 +20,8 @@ # include "monitor-fd.hh" #endif +#include + namespace nix::daemon { Sink & operator << (Sink & sink, const Logger::Fields & fields) @@ -531,7 +534,7 @@ static void performOp(TunnelLogger * logger, ref store, auto drvs = WorkerProto::Serialise::read(*store, rconn); BuildMode mode = bmNormal; if (GET_PROTOCOL_MINOR(clientVersion) >= 15) { - mode = (BuildMode) readInt(from); + mode = WorkerProto::Serialise::read(*store, rconn); /* Repairing is not atomic, so disallowed for "untrusted" clients. @@ -555,7 +558,7 @@ static void performOp(TunnelLogger * logger, ref store, case WorkerProto::Op::BuildPathsWithResults: { auto drvs = WorkerProto::Serialise::read(*store, rconn); BuildMode mode = bmNormal; - mode = (BuildMode) readInt(from); + mode = WorkerProto::Serialise::read(*store, rconn); /* Repairing is not atomic, so disallowed for "untrusted" clients. @@ -586,7 +589,7 @@ static void performOp(TunnelLogger * logger, ref store, * correctly. 
*/ readDerivation(from, *store, drv, Derivation::nameFromPath(drvPath)); - BuildMode buildMode = (BuildMode) readInt(from); + auto buildMode = WorkerProto::Serialise::read(*store, rconn); logger->startWork(); auto drvType = drv.type(); @@ -1026,11 +1029,9 @@ void processConnection( #endif /* Exchange the greeting. */ - unsigned int magic = readInt(from); - if (magic != WORKER_MAGIC_1) throw Error("protocol mismatch"); - to << WORKER_MAGIC_2 << PROTOCOL_VERSION; - to.flush(); - WorkerProto::Version clientVersion = readInt(from); + WorkerProto::Version clientVersion = + WorkerProto::BasicServerConnection::handshake( + to, from, PROTOCOL_VERSION); if (clientVersion < 0x10a) throw Error("the Nix client version is too old"); @@ -1048,29 +1049,20 @@ void processConnection( printMsgUsing(prevLogger, lvlDebug, "%d operations", opCount); }); - if (GET_PROTOCOL_MINOR(clientVersion) >= 14 && readInt(from)) { - // Obsolete CPU affinity. - readInt(from); - } + WorkerProto::BasicServerConnection conn { + .to = to, + .from = from, + .clientVersion = clientVersion, + }; - if (GET_PROTOCOL_MINOR(clientVersion) >= 11) - readInt(from); // obsolete reserveSpace - - if (GET_PROTOCOL_MINOR(clientVersion) >= 33) - to << nixVersion; - - if (GET_PROTOCOL_MINOR(clientVersion) >= 35) { + conn.postHandshake(*store, { + .daemonNixVersion = nixVersion, // We and the underlying store both need to trust the client for // it to be trusted. - auto temp = trusted + .remoteTrustsUs = trusted ? store->isTrustedClient() - : std::optional { NotTrusted }; - WorkerProto::WriteConn wconn { - .to = to, - .version = clientVersion, - }; - WorkerProto::write(*store, wconn, temp); - } + : std::optional { NotTrusted }, + }); /* Send startup error messages to the client. */ tunnelLogger->startWork(); diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc index fcf813a37..869880112 100644 --- a/src/libstore/derivations.cc +++ b/src/libstore/derivations.cc @@ -930,10 +930,9 @@ DerivationOutputsAndOptPaths BasicDerivation::outputsAndOptPaths(const StoreDirC std::string_view BasicDerivation::nameFromPath(const StorePath & drvPath) { + drvPath.requireDerivation(); auto nameWithSuffix = drvPath.name(); - constexpr std::string_view extension = ".drv"; - assert(hasSuffix(nameWithSuffix, extension)); - nameWithSuffix.remove_suffix(extension.size()); + nameWithSuffix.remove_suffix(drvExtension.size()); return nameWithSuffix; } @@ -1216,16 +1215,19 @@ nlohmann::json DerivationOutput::toJSON( }, [&](const DerivationOutput::CAFixed & dof) { res["path"] = store.printStorePath(dof.path(store, drvName, outputName)); - res["hashAlgo"] = dof.ca.printMethodAlgo(); + res["method"] = std::string { dof.ca.method.render() }; + res["hashAlgo"] = printHashAlgo(dof.ca.hash.algo); res["hash"] = dof.ca.hash.to_string(HashFormat::Base16, false); // FIXME print refs? 
}, [&](const DerivationOutput::CAFloating & dof) { - res["hashAlgo"] = std::string { dof.method.renderPrefix() } + printHashAlgo(dof.hashAlgo); + res["method"] = std::string { dof.method.render() }; + res["hashAlgo"] = printHashAlgo(dof.hashAlgo); }, [&](const DerivationOutput::Deferred &) {}, [&](const DerivationOutput::Impure & doi) { - res["hashAlgo"] = std::string { doi.method.renderPrefix() } + printHashAlgo(doi.hashAlgo); + res["method"] = std::string { doi.method.render() }; + res["hashAlgo"] = printHashAlgo(doi.hashAlgo); res["impure"] = true; }, }, raw); @@ -1245,12 +1247,13 @@ DerivationOutput DerivationOutput::fromJSON( keys.insert(key); auto methodAlgo = [&]() -> std::pair { - auto & str = getString(valueAt(json, "hashAlgo")); - std::string_view s = str; - ContentAddressMethod method = ContentAddressMethod::parsePrefix(s); + auto & method_ = getString(valueAt(json, "method")); + ContentAddressMethod method = ContentAddressMethod::parse(method_); if (method == TextIngestionMethod {}) xpSettings.require(Xp::DynamicDerivations); - auto hashAlgo = parseHashAlgo(s); + + auto & hashAlgo_ = getString(valueAt(json, "hashAlgo")); + auto hashAlgo = parseHashAlgo(hashAlgo_); return { std::move(method), std::move(hashAlgo) }; }; @@ -1260,7 +1263,7 @@ DerivationOutput DerivationOutput::fromJSON( }; } - else if (keys == (std::set { "path", "hashAlgo", "hash" })) { + else if (keys == (std::set { "path", "method", "hashAlgo", "hash" })) { auto [method, hashAlgo] = methodAlgo(); auto dof = DerivationOutput::CAFixed { .ca = ContentAddress { @@ -1273,7 +1276,7 @@ DerivationOutput DerivationOutput::fromJSON( return dof; } - else if (keys == (std::set { "hashAlgo" })) { + else if (keys == (std::set { "method", "hashAlgo" })) { xpSettings.require(Xp::CaDerivations); auto [method, hashAlgo] = methodAlgo(); return DerivationOutput::CAFloating { @@ -1286,7 +1289,7 @@ DerivationOutput DerivationOutput::fromJSON( return DerivationOutput::Deferred {}; } - else if (keys == (std::set { "hashAlgo", "impure" })) { + else if (keys == (std::set { "method", "hashAlgo", "impure" })) { xpSettings.require(Xp::ImpureDerivations); auto [method, hashAlgo] = methodAlgo(); return DerivationOutput::Impure { diff --git a/src/libstore/dummy-store.cc b/src/libstore/dummy-store.cc index 30f23cff9..0d5d03091 100644 --- a/src/libstore/dummy-store.cc +++ b/src/libstore/dummy-store.cc @@ -18,9 +18,12 @@ struct DummyStoreConfig : virtual StoreConfig { struct DummyStore : public virtual DummyStoreConfig, public virtual Store { - DummyStore(const std::string scheme, const std::string uri, const Params & params) + DummyStore(std::string_view scheme, std::string_view authority, const Params & params) : DummyStore(params) - { } + { + if (!authority.empty()) + throw UsageError("`%s` store URIs must not contain an authority part %s", scheme, authority); + } DummyStore(const Params & params) : StoreConfig(params) diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index 219b60c44..a54ebdcf3 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -580,7 +580,12 @@ struct curlFileTransfer : public FileTransfer #endif #if __linux__ - unshareFilesystem(); + try { + tryUnshareFilesystem(); + } catch (nix::Error & e) { + e.addTrace({}, "in download thread"); + throw; + } #endif std::map> items; diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc index 3cd4fb839..d3fa88f59 100644 --- a/src/libstore/gc.cc +++ b/src/libstore/gc.cc @@ -161,7 +161,7 @@ void LocalStore::findTempRoots(Roots & tempRoots, 
bool censor) { /* Read the `temproots' directory for per-process temporary root files. */ - for (auto & i : readDirectory(tempRootsDir)) { + for (auto & i : std::filesystem::directory_iterator{tempRootsDir}) { auto name = i.path().filename().string(); if (name[0] == '.') { // Ignore hidden files. Some package managers (notably portage) create @@ -225,10 +225,10 @@ void LocalStore::findRoots(const Path & path, std::filesystem::file_type type, R try { if (type == std::filesystem::file_type::unknown) - type = getFileType(path); + type = std::filesystem::symlink_status(path).type(); if (type == std::filesystem::file_type::directory) { - for (auto & i : readDirectory(path)) + for (auto & i : std::filesystem::directory_iterator{path}) findRoots(i.path().string(), i.symlink_status().type(), roots); } @@ -781,7 +781,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) throw Error( "Cannot delete path '%1%' since it is still alive. " "To find out why, use: " - "nix-store --query --roots", + "nix-store --query --roots and nix-store --query --referrers", printStorePath(i)); } diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index 4df2880e6..d9cab2fb8 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -345,7 +345,7 @@ void initPlugins() for (const auto & pluginFile : settings.pluginFiles.get()) { std::vector pluginFiles; try { - auto ents = readDirectory(pluginFile); + auto ents = std::filesystem::directory_iterator{pluginFile}; for (const auto & ent : ents) pluginFiles.emplace_back(ent.path()); } catch (std::filesystem::filesystem_error & e) { diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh index dc53a07f1..1f3548497 100644 --- a/src/libstore/globals.hh +++ b/src/libstore/globals.hh @@ -910,7 +910,7 @@ public: "substituters", R"( A list of [URLs of Nix stores](@docroot@/store/types/index.md#store-url-format) to be used as substituters, separated by whitespace. - A substituter is an additional [store](@docroot@/glossary.md#gloss-store) from which Nix can obtain [store objects](@docroot@/glossary.md#gloss-store-object) instead of building them. + A substituter is an additional [store](@docroot@/glossary.md#gloss-store) from which Nix can obtain [store objects](@docroot@/store/store-object.md) instead of building them. Substituters are tried based on their priority value, which each substituter can set independently. Lower value means higher priority. diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc index 5da87e935..3328caef9 100644 --- a/src/libstore/http-binary-cache-store.cc +++ b/src/libstore/http-binary-cache-store.cc @@ -39,15 +39,20 @@ private: public: HttpBinaryCacheStore( - const std::string & scheme, - const Path & _cacheUri, + std::string_view scheme, + PathView _cacheUri, const Params & params) : StoreConfig(params) , BinaryCacheStoreConfig(params) , HttpBinaryCacheStoreConfig(params) , Store(params) , BinaryCacheStore(params) - , cacheUri(scheme + "://" + _cacheUri) + , cacheUri( + std::string { scheme } + + "://" + + (!_cacheUri.empty() + ? 
_cacheUri + : throw UsageError("`%s` Store requires a non-empty authority in Store URL", scheme))) { while (!cacheUri.empty() && cacheUri.back() == '/') cacheUri.pop_back(); diff --git a/src/libstore/indirect-root-store.cc b/src/libstore/indirect-root-store.cc index 082a458ab..844d0d6ed 100644 --- a/src/libstore/indirect-root-store.cc +++ b/src/libstore/indirect-root-store.cc @@ -12,7 +12,7 @@ void IndirectRootStore::makeSymlink(const Path & link, const Path & target) createSymlink(target, tempLink); /* Atomically replace the old one. */ - renameFile(tempLink, link); + std::filesystem::rename(tempLink, link); } Path IndirectRootStore::addPermRoot(const StorePath & storePath, const Path & _gcRoot) diff --git a/src/libstore/legacy-ssh-store.cc b/src/libstore/legacy-ssh-store.cc index e422adeec..9664b126e 100644 --- a/src/libstore/legacy-ssh-store.cc +++ b/src/libstore/legacy-ssh-store.cc @@ -4,6 +4,7 @@ #include "pool.hh" #include "remote-store.hh" #include "serve-protocol.hh" +#include "serve-protocol-connection.hh" #include "serve-protocol-impl.hh" #include "build-result.hh" #include "store-api.hh" @@ -28,26 +29,23 @@ struct LegacySSHStore::Connection : public ServeProto::BasicClientConnection bool good = true; }; - -LegacySSHStore::LegacySSHStore(const std::string & scheme, const std::string & host, const Params & params) +LegacySSHStore::LegacySSHStore( + std::string_view scheme, + std::string_view host, + const Params & params) : StoreConfig(params) - , CommonSSHStoreConfig(params) + , CommonSSHStoreConfig(scheme, host, params) , LegacySSHStoreConfig(params) , Store(params) - , host(host) , connections(make_ref>( std::max(1, (int) maxConnections), [this]() { return openConnection(); }, [](const ref & r) { return r->good; } )) - , master( - host, - sshKey, - sshPublicHostKey, + , master(createSSHMaster( // Use SSH master only if using more than 1 connection. connections->capacity() > 1, - compress, - logFD) + logFD)) { } @@ -76,7 +74,7 @@ ref LegacySSHStore::openConnection() conn->sshConn->in.close(); { NullSink nullSink; - conn->from.drainInto(nullSink); + tee.drainInto(nullSink); } throw Error("'nix-store --serve' protocol mismatch from '%s', got '%s'", host, chomp(saved.s)); @@ -105,24 +103,26 @@ void LegacySSHStore::queryPathInfoUncached(const StorePath & path, debug("querying remote host '%s' for info on '%s'", host, printStorePath(path)); - conn->to << ServeProto::Command::QueryPathInfos << PathSet{printStorePath(path)}; - conn->to.flush(); + auto infos = conn->queryPathInfos(*this, {path}); - auto p = readString(conn->from); - if (p.empty()) return callback(nullptr); - auto path2 = parseStorePath(p); - assert(path == path2); - auto info = std::make_shared( - path, - ServeProto::Serialise::read(*this, *conn)); + switch (infos.size()) { + case 0: + return callback(nullptr); + case 1: { + auto & [path2, info] = *infos.begin(); - if (info->narHash == Hash::dummy) - throw Error("NAR hash is now mandatory"); + if (info.narHash == Hash::dummy) + throw Error("NAR hash is now mandatory"); - auto s = readString(conn->from); - assert(s == ""); - - callback(std::move(info)); + assert(path == path2); + return callback(std::make_shared( + std::move(path), + std::move(info) + )); + } + default: + throw Error("More path infos returned than queried"); + } } catch (...) 
{ callback.rethrow(); } } @@ -156,41 +156,38 @@ void LegacySSHStore::addToStore(const ValidPathInfo & info, Source & source, } conn->to.flush(); + if (readInt(conn->from) != 1) + throw Error("failed to add path '%s' to remote host '%s'", printStorePath(info.path), host); + } else { - conn->to - << ServeProto::Command::ImportPaths - << 1; - try { - copyNAR(source, conn->to); - } catch (...) { - conn->good = false; - throw; - } - conn->to - << exportMagic - << printStorePath(info.path); - ServeProto::write(*this, *conn, info.references); - conn->to - << (info.deriver ? printStorePath(*info.deriver) : "") - << 0 - << 0; - conn->to.flush(); + conn->importPaths(*this, [&](Sink & sink) { + try { + copyNAR(source, sink); + } catch (...) { + conn->good = false; + throw; + } + sink + << exportMagic + << printStorePath(info.path); + ServeProto::write(*this, *conn, info.references); + sink + << (info.deriver ? printStorePath(*info.deriver) : "") + << 0 + << 0; + }); } - - if (readInt(conn->from) != 1) - throw Error("failed to add path '%s' to remote host '%s'", printStorePath(info.path), host); } void LegacySSHStore::narFromPath(const StorePath & path, Sink & sink) { auto conn(connections->get()); - - conn->to << ServeProto::Command::DumpStorePath << printStorePath(path); - conn->to.flush(); - copyNAR(conn->from, sink); + conn->narFromPath(*this, path, [&](auto & source) { + copyNAR(source, sink); + }); } @@ -214,7 +211,7 @@ BuildResult LegacySSHStore::buildDerivation(const StorePath & drvPath, const Bas conn->putBuildDerivationRequest(*this, drvPath, drv, buildSettings()); - return ServeProto::Serialise::read(*this, *conn); + return conn->getBuildDerivationResponse(*this); } diff --git a/src/libstore/legacy-ssh-store.hh b/src/libstore/legacy-ssh-store.hh index 343823693..b683ed580 100644 --- a/src/libstore/legacy-ssh-store.hh +++ b/src/libstore/legacy-ssh-store.hh @@ -26,22 +26,27 @@ struct LegacySSHStoreConfig : virtual CommonSSHStoreConfig struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Store { +#ifndef _WIN32 // Hack for getting remote build log output. // Intentionally not in `LegacySSHStoreConfig` so that it doesn't appear in // the documentation - const Setting logFD{this, -1, "log-fd", "file descriptor to which SSH's stderr is connected"}; + const Setting logFD{this, INVALID_DESCRIPTOR, "log-fd", "file descriptor to which SSH's stderr is connected"}; +#else + Descriptor logFD = INVALID_DESCRIPTOR; +#endif struct Connection; - std::string host; - ref> connections; SSHMaster master; static std::set uriSchemes() { return {"ssh"}; } - LegacySSHStore(const std::string & scheme, const std::string & host, const Params & params); + LegacySSHStore( + std::string_view scheme, + std::string_view host, + const Params & params); ref openConnection(); diff --git a/src/libstore/local-binary-cache-store.cc b/src/libstore/local-binary-cache-store.cc index 3a48f4480..3e25ab8a4 100644 --- a/src/libstore/local-binary-cache-store.cc +++ b/src/libstore/local-binary-cache-store.cc @@ -28,9 +28,13 @@ private: public: + /** + * @param binaryCacheDir `file://` is a short-hand for `file:///` + * for now. 
+ */ LocalBinaryCacheStore( - const std::string scheme, - const Path & binaryCacheDir, + std::string_view scheme, + PathView binaryCacheDir, const Params & params) : StoreConfig(params) , BinaryCacheStoreConfig(params) @@ -64,7 +68,7 @@ protected: AutoDelete del(tmp, false); StreamToSourceAdapter source(istream); writeFile(tmp, source); - renameFile(tmp, path2); + std::filesystem::rename(tmp, path2); del.cancel(); } @@ -83,7 +87,7 @@ protected: { StorePathSet paths; - for (auto & entry : readDirectory(binaryCacheDir)) { + for (auto & entry : std::filesystem::directory_iterator{binaryCacheDir}) { auto name = entry.path().filename().string(); if (name.size() != 40 || !hasSuffix(name, ".narinfo")) diff --git a/src/libstore/unix/local-overlay-store.cc b/src/libstore/local-overlay-store.cc similarity index 100% rename from src/libstore/unix/local-overlay-store.cc rename to src/libstore/local-overlay-store.cc diff --git a/src/libstore/unix/local-overlay-store.hh b/src/libstore/local-overlay-store.hh similarity index 98% rename from src/libstore/unix/local-overlay-store.hh rename to src/libstore/local-overlay-store.hh index 2c24285dd..35a301013 100644 --- a/src/libstore/unix/local-overlay-store.hh +++ b/src/libstore/local-overlay-store.hh @@ -92,7 +92,7 @@ class LocalOverlayStore : public virtual LocalOverlayStoreConfig, public virtual public: LocalOverlayStore(const Params & params); - LocalOverlayStore(std::string scheme, std::string path, const Params & params) + LocalOverlayStore(std::string_view scheme, PathView path, const Params & params) : LocalOverlayStore(params) { if (!path.empty()) diff --git a/src/libstore/unix/local-overlay-store.md b/src/libstore/local-overlay-store.md similarity index 100% rename from src/libstore/unix/local-overlay-store.md rename to src/libstore/local-overlay-store.md diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index 33c4d7372..0c333bc31 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -463,10 +463,20 @@ LocalStore::LocalStore(const Params & params) } -LocalStore::LocalStore(std::string scheme, std::string path, const Params & params) - : LocalStore(params) +LocalStore::LocalStore( + std::string_view scheme, + PathView path, + const Params & _params) + : LocalStore([&]{ + // Default `?root` from `path` if non set + if (!path.empty() && _params.count("root") == 0) { + auto params = _params; + params.insert_or_assign("root", std::string { path }); + return params; + } + return _params; + }()) { - throw UnimplementedError("LocalStore"); } @@ -1406,7 +1416,7 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair) printInfo("checking link hashes..."); - for (auto & link : readDirectory(linksDir)) { + for (auto & link : std::filesystem::directory_iterator{linksDir}) { auto name = link.path().filename(); printMsg(lvlTalkative, "checking contents of '%s'", name); PosixSourceAccessor accessor; @@ -1498,7 +1508,7 @@ LocalStore::VerificationResult LocalStore::verifyAllValidPaths(RepairFlag repair database and the filesystem) in the loop below, in order to catch invalid states. 
*/ - for (auto & i : readDirectory(realStoreDir)) { + for (auto & i : std::filesystem::directory_iterator{realStoreDir.to_string()}) { try { storePathsInStoreDir.insert({i.path().filename().string()}); } catch (BadStorePath &) { } @@ -1779,7 +1789,7 @@ void LocalStore::addBuildLog(const StorePath & drvPath, std::string_view log) writeFile(tmpFile, compress("bzip2", log)); - renameFile(tmpFile, logPath); + std::filesystem::rename(tmpFile, logPath); } std::optional LocalStore::getVersion() diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh index b3d7bd6d0..b0a0def9a 100644 --- a/src/libstore/local-store.hh +++ b/src/libstore/local-store.hh @@ -137,12 +137,15 @@ public: * necessary. */ LocalStore(const Params & params); - LocalStore(std::string scheme, std::string path, const Params & params); + LocalStore( + std::string_view scheme, + PathView path, + const Params & params); ~LocalStore(); static std::set uriSchemes() - { return {}; } + { return {"local"}; } /** * Implementations of abstract store API methods. diff --git a/src/libstore/local.mk b/src/libstore/local.mk index cc67da786..5dc8f3370 100644 --- a/src/libstore/local.mk +++ b/src/libstore/local.mk @@ -4,9 +4,9 @@ libstore_NAME = libnixstore libstore_DIR := $(d) -libstore_SOURCES := $(wildcard $(d)/*.cc $(d)/builtins/*.cc) +libstore_SOURCES := $(wildcard $(d)/*.cc $(d)/builtins/*.cc $(d)/build/*.cc) ifdef HOST_UNIX - libstore_SOURCES += $(wildcard $(d)/unix/*.cc $(d)/unix/builtins/*.cc $(d)/unix/build/*.cc) + libstore_SOURCES += $(wildcard $(d)/unix/*.cc $(d)/unix/build/*.cc) endif ifdef HOST_LINUX libstore_SOURCES += $(wildcard $(d)/linux/*.cc) @@ -43,7 +43,7 @@ endif INCLUDE_libstore := -I $(d) -I $(d)/build ifdef HOST_UNIX - INCLUDE_libstore += -I $(d)/unix + INCLUDE_libstore += -I $(d)/unix -I $(d)/unix/build endif ifdef HOST_LINUX INCLUDE_libstore += -I $(d)/linux diff --git a/src/libstore/machines.cc b/src/libstore/machines.cc index 2d461c63a..256cf9188 100644 --- a/src/libstore/machines.cc +++ b/src/libstore/machines.cc @@ -6,7 +6,8 @@ namespace nix { -Machine::Machine(decltype(storeUri) storeUri, +Machine::Machine( + const std::string & storeUri, decltype(systemTypes) systemTypes, decltype(sshKey) sshKey, decltype(maxJobs) maxJobs, @@ -14,7 +15,7 @@ Machine::Machine(decltype(storeUri) storeUri, decltype(supportedFeatures) supportedFeatures, decltype(mandatoryFeatures) mandatoryFeatures, decltype(sshPublicHostKey) sshPublicHostKey) : - storeUri( + storeUri(StoreReference::parse( // Backwards compatibility: if the URI is schemeless, is not a path, // and is not one of the special store connection words, prepend // ssh://. @@ -28,7 +29,7 @@ Machine::Machine(decltype(storeUri) storeUri, || hasPrefix(storeUri, "local?") || hasPrefix(storeUri, "?") ? 
storeUri - : "ssh://" + storeUri), + : "ssh://" + storeUri)), systemTypes(systemTypes), sshKey(sshKey), maxJobs(maxJobs), @@ -63,23 +64,26 @@ bool Machine::mandatoryMet(const std::set & features) const }); } -ref Machine::openStore() const +StoreReference Machine::completeStoreReference() const { - Store::Params storeParams; - if (hasPrefix(storeUri, "ssh://")) { - storeParams["max-connections"] = "1"; - storeParams["log-fd"] = "4"; + auto storeUri = this->storeUri; + + auto * generic = std::get_if(&storeUri.variant); + + if (generic && generic->scheme == "ssh") { + storeUri.params["max-connections"] = "1"; + storeUri.params["log-fd"] = "4"; } - if (hasPrefix(storeUri, "ssh://") || hasPrefix(storeUri, "ssh-ng://")) { + if (generic && (generic->scheme == "ssh" || generic->scheme == "ssh-ng")) { if (sshKey != "") - storeParams["ssh-key"] = sshKey; + storeUri.params["ssh-key"] = sshKey; if (sshPublicHostKey != "") - storeParams["base64-ssh-public-host-key"] = sshPublicHostKey; + storeUri.params["base64-ssh-public-host-key"] = sshPublicHostKey; } { - auto & fs = storeParams["system-features"]; + auto & fs = storeUri.params["system-features"]; auto append = [&](auto feats) { for (auto & f : feats) { if (fs.size() > 0) fs += ' '; @@ -90,7 +94,12 @@ ref Machine::openStore() const append(mandatoryFeatures); } - return nix::openStore(storeUri, storeParams); + return storeUri; +} + +ref Machine::openStore() const +{ + return nix::openStore(completeStoreReference()); } static std::vector expandBuilderLines(const std::string & builders) @@ -122,7 +131,7 @@ static std::vector expandBuilderLines(const std::string & builders) return result; } -static Machine parseBuilderLine(const std::string & line) +static Machine parseBuilderLine(const std::set & defaultSystems, const std::string & line) { const auto tokens = tokenizeString>(line); @@ -139,7 +148,7 @@ static Machine parseBuilderLine(const std::string & line) }; auto parseFloatField = [&](size_t fieldIndex) { - const auto result = string2Int(tokens[fieldIndex]); + const auto result = string2Float(tokens[fieldIndex]); if (!result) { throw FormatError("bad machine specification: failed to convert column #%lu in a row: '%s' to 'float'", fieldIndex, line); } @@ -159,29 +168,46 @@ static Machine parseBuilderLine(const std::string & line) if (!isSet(0)) throw FormatError("bad machine specification: store URL was not found at the first column of a row: '%s'", line); + // TODO use designated initializers, once C++ supports those with + // custom constructors. return { + // `storeUri` tokens[0], - isSet(1) ? tokenizeString>(tokens[1], ",") : std::set{settings.thisSystem}, + // `systemTypes` + isSet(1) ? tokenizeString>(tokens[1], ",") : defaultSystems, + // `sshKey` isSet(2) ? tokens[2] : "", + // `maxJobs` isSet(3) ? parseUnsignedIntField(3) : 1U, + // `speedFactor` isSet(4) ? parseFloatField(4) : 1.0f, + // `supportedFeatures` isSet(5) ? tokenizeString>(tokens[5], ",") : std::set{}, + // `mandatoryFeatures` isSet(6) ? tokenizeString>(tokens[6], ",") : std::set{}, + // `sshPublicHostKey` isSet(7) ? 
ensureBase64(7) : "" }; } -static Machines parseBuilderLines(const std::vector & builders) +static Machines parseBuilderLines(const std::set & defaultSystems, const std::vector & builders) { Machines result; - std::transform(builders.begin(), builders.end(), std::back_inserter(result), parseBuilderLine); + std::transform( + builders.begin(), builders.end(), std::back_inserter(result), + [&](auto && line) { return parseBuilderLine(defaultSystems, line); }); return result; } +Machines Machine::parseConfig(const std::set & defaultSystems, const std::string & s) +{ + const auto builderLines = expandBuilderLines(s); + return parseBuilderLines(defaultSystems, builderLines); +} + Machines getMachines() { - const auto builderLines = expandBuilderLines(settings.builders); - return parseBuilderLines(builderLines); + return Machine::parseConfig({settings.thisSystem}, settings.builders); } } diff --git a/src/libstore/machines.hh b/src/libstore/machines.hh index 8516409d4..2a55c4456 100644 --- a/src/libstore/machines.hh +++ b/src/libstore/machines.hh @@ -2,14 +2,19 @@ ///@file #include "types.hh" +#include "store-reference.hh" namespace nix { class Store; +struct Machine; + +typedef std::vector Machines; + struct Machine { - const std::string storeUri; + const StoreReference storeUri; const std::set systemTypes; const std::string sshKey; const unsigned int maxJobs; @@ -36,7 +41,8 @@ struct Machine { */ bool mandatoryMet(const std::set & features) const; - Machine(decltype(storeUri) storeUri, + Machine( + const std::string & storeUri, decltype(systemTypes) systemTypes, decltype(sshKey) sshKey, decltype(maxJobs) maxJobs, @@ -45,13 +51,38 @@ struct Machine { decltype(mandatoryFeatures) mandatoryFeatures, decltype(sshPublicHostKey) sshPublicHostKey); + /** + * Elaborate `storeUri` into a complete store reference, + * incorporating information from the other fields of the `Machine` + * as applicable. + */ + StoreReference completeStoreReference() const; + + /** + * Open a `Store` for this machine. + * + * Just a simple function composition: + * ```c++ + * nix::openStore(completeStoreReference()) + * ``` + */ ref openStore() const; + + /** + * Parse a machine configuration. + * + * Every machine is specified on its own line, and lines beginning + * with `@` are interpreted as paths to other configuration files in + * the same format. + */ + static Machines parseConfig(const std::set & defaultSystems, const std::string & config); }; -typedef std::vector Machines; - -void parseMachines(const std::string & s, Machines & machines); - +/** + * Parse machines from the global config + * + * @todo Remove, globals are bad. + */ Machines getMachines(); } diff --git a/src/libstore/parsed-derivations.cc b/src/libstore/parsed-derivations.cc index a29281953..d8459d4d7 100644 --- a/src/libstore/parsed-derivations.cc +++ b/src/libstore/parsed-derivations.cc @@ -135,18 +135,37 @@ static std::regex shVarName("[A-Za-z_][A-Za-z0-9_]*"); /** * Write a JSON representation of store object metadata, such as the * hash and the references. + * + * @note Do *not* use `ValidPathInfo::toJSON` because this function is + * subject to stronger stability requirements since it is used to + * prepare build environments. Perhaps someday we'll have a versionining + * mechanism to allow this to evolve again and get back in sync, but for + * now we must not change - not even extend - the behavior. 
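+ *
+ * For orientation, each entry in the array produced below has roughly this
+ * shape (values are hypothetical; `ca` is emitted only for content-addressed
+ * paths):
+ *
+ *     {
+ *       "narHash": "sha256:…",
+ *       "narSize": 4096,
+ *       "references": [ "/nix/store/…-dep" ],
+ *       "ca": "…",
+ *       "path": "/nix/store/…-example"
+ *     }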
*/ static nlohmann::json pathInfoToJSON( Store & store, const StorePathSet & storePaths) { - nlohmann::json::array_t jsonList = nlohmann::json::array(); + using nlohmann::json; + + nlohmann::json::array_t jsonList = json::array(); for (auto & storePath : storePaths) { auto info = store.queryPathInfo(storePath); - auto & jsonPath = jsonList.emplace_back( - info->toJSON(store, false, HashFormat::Nix32)); + auto & jsonPath = jsonList.emplace_back(json::object()); + + jsonPath["narHash"] = info->narHash.to_string(HashFormat::Nix32, true); + jsonPath["narSize"] = info->narSize; + + { + auto & jsonRefs = jsonPath["references"] = json::array(); + for (auto & ref : info->references) + jsonRefs.emplace_back(store.printStorePath(ref)); + } + + if (info->ca) + jsonPath["ca"] = renderContentAddress(info->ca); // Add the path to the object whose metadata we are including. jsonPath["path"] = store.printStorePath(storePath); diff --git a/src/libstore/path-info.cc b/src/libstore/path-info.cc index 6523cb425..ddd7f50d9 100644 --- a/src/libstore/path-info.cc +++ b/src/libstore/path-info.cc @@ -161,28 +161,23 @@ nlohmann::json UnkeyedValidPathInfo::toJSON( jsonObject["narSize"] = narSize; { - auto& jsonRefs = (jsonObject["references"] = json::array()); + auto & jsonRefs = jsonObject["references"] = json::array(); for (auto & ref : references) jsonRefs.emplace_back(store.printStorePath(ref)); } - if (ca) - jsonObject["ca"] = renderContentAddress(ca); + jsonObject["ca"] = ca ? (std::optional { renderContentAddress(*ca) }) : std::nullopt; if (includeImpureInfo) { - if (deriver) - jsonObject["deriver"] = store.printStorePath(*deriver); + jsonObject["deriver"] = deriver ? (std::optional { store.printStorePath(*deriver) }) : std::nullopt; - if (registrationTime) - jsonObject["registrationTime"] = registrationTime; + jsonObject["registrationTime"] = registrationTime ? (std::optional { registrationTime }) : std::nullopt; - if (ultimate) - jsonObject["ultimate"] = ultimate; + jsonObject["ultimate"] = ultimate; - if (!sigs.empty()) { - for (auto & sig : sigs) - jsonObject["signatures"].push_back(sig); - } + auto & sigsObj = jsonObject["signatures"] = json::array(); + for (auto & sig : sigs) + sigsObj.push_back(sig); } return jsonObject; @@ -210,20 +205,25 @@ UnkeyedValidPathInfo UnkeyedValidPathInfo::fromJSON( throw; } + // The new format has this as a nullable but mandatory field; handling a + // missing key is for back-compat.
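+ // Both a literal `null` and an absent key leave the field unset; e.g.
+ // `"ca": null` and a missing `"ca"` are treated the same (this describes
+ // the intent of the `getNullable` checks below).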
if (json.contains("ca")) - res.ca = ContentAddress::parse(getString(valueAt(json, "ca"))); + if (auto * rawCa = getNullable(valueAt(json, "ca"))) + res.ca = ContentAddress::parse(getString(*rawCa)); if (json.contains("deriver")) - res.deriver = store.parseStorePath(getString(valueAt(json, "deriver"))); + if (auto * rawDeriver = getNullable(valueAt(json, "deriver"))) + res.deriver = store.parseStorePath(getString(*rawDeriver)); if (json.contains("registrationTime")) - res.registrationTime = getInteger(valueAt(json, "registrationTime")); + if (auto * rawRegistrationTime = getNullable(valueAt(json, "registrationTime"))) + res.registrationTime = getInteger(*rawRegistrationTime); if (json.contains("ultimate")) res.ultimate = getBoolean(valueAt(json, "ultimate")); if (json.contains("signatures")) - res.sigs = valueAt(json, "signatures"); + res.sigs = getStringSet(valueAt(json, "signatures")); return res; } diff --git a/src/libstore/path.cc b/src/libstore/path.cc index 4b806e408..8d9726722 100644 --- a/src/libstore/path.cc +++ b/src/libstore/path.cc @@ -49,11 +49,17 @@ StorePath::StorePath(const Hash & hash, std::string_view _name) checkName(baseName, name()); } -bool StorePath::isDerivation() const +bool StorePath::isDerivation() const noexcept { return hasSuffix(name(), drvExtension); } +void StorePath::requireDerivation() const +{ + if (!isDerivation()) + throw FormatError("store path '%s' is not a valid derivation path", to_string()); +} + StorePath StorePath::dummy("ffffffffffffffffffffffffffffffff-x"); StorePath StorePath::random(std::string_view name) diff --git a/src/libstore/path.hh b/src/libstore/path.hh index 4ca6747b3..4abbfcd7c 100644 --- a/src/libstore/path.hh +++ b/src/libstore/path.hh @@ -13,7 +13,7 @@ struct Hash; * \ref StorePath "Store path" is the fundamental reference type of Nix. * A store paths refers to a Store object. * - * See glossary.html#gloss-store-path for more information on a + * See store/store-path.html for more information on a * conceptual level. */ class StorePath @@ -35,30 +35,23 @@ public: StorePath(const Hash & hash, std::string_view name); - std::string_view to_string() const + std::string_view to_string() const noexcept { return baseName; } - bool operator < (const StorePath & other) const - { - return baseName < other.baseName; - } - - bool operator == (const StorePath & other) const - { - return baseName == other.baseName; - } - - bool operator != (const StorePath & other) const - { - return baseName != other.baseName; - } + bool operator == (const StorePath & other) const noexcept = default; + auto operator <=> (const StorePath & other) const noexcept = default; /** * Check whether a file name ends with the extension for derivations. */ - bool isDerivation() const; + bool isDerivation() const noexcept; + + /** + * Throw an exception if `isDerivation` is false. + */ + void requireDerivation() const; std::string_view name() const { @@ -82,7 +75,7 @@ typedef std::vector StorePaths; * The file extension of \ref Derivation derivations when serialized * into store objects. 
*/ -const std::string drvExtension = ".drv"; +constexpr std::string_view drvExtension = ".drv"; } diff --git a/src/libstore/posix-fs-canonicalise.cc b/src/libstore/posix-fs-canonicalise.cc index d70ef6fae..d8bae13f5 100644 --- a/src/libstore/posix-fs-canonicalise.cc +++ b/src/libstore/posix-fs-canonicalise.cc @@ -144,8 +144,7 @@ static void canonicalisePathMetaData_( #endif if (S_ISDIR(st.st_mode)) { - std::vector entries = readDirectory(path); - for (auto & i : entries) + for (auto & i : std::filesystem::directory_iterator{path}) canonicalisePathMetaData_( i.path().string(), #ifndef _WIN32 diff --git a/src/libstore/profiles.cc b/src/libstore/profiles.cc index fa8026703..d0da96262 100644 --- a/src/libstore/profiles.cc +++ b/src/libstore/profiles.cc @@ -37,7 +37,7 @@ std::pair> findGenerations(Path pro std::filesystem::path profileDir = dirOf(profile); auto profileName = std::string(baseNameOf(profile)); - for (auto & i : readDirectory(profileDir.string())) { + for (auto & i : std::filesystem::directory_iterator{profileDir}) { if (auto n = parseName(profileName, i.path().filename().string())) { auto path = i.path().string(); gens.push_back({ diff --git a/src/libstore/remote-store-connection.hh b/src/libstore/remote-store-connection.hh index 44328b06b..405120ee9 100644 --- a/src/libstore/remote-store-connection.hh +++ b/src/libstore/remote-store-connection.hh @@ -3,6 +3,7 @@ #include "remote-store.hh" #include "worker-protocol.hh" +#include "worker-protocol-connection.hh" #include "pool.hh" namespace nix { @@ -14,90 +15,13 @@ namespace nix { * Contains `Source` and `Sink` for actual communication, along with * other information learned when negotiating the connection. */ -struct RemoteStore::Connection +struct RemoteStore::Connection : WorkerProto::BasicClientConnection, + WorkerProto::ClientHandshakeInfo { - /** - * Send with this. - */ - FdSink to; - - /** - * Receive with this. - */ - FdSource from; - - /** - * Worker protocol version used for the connection. - * - * Despite its name, I think it is actually the maximum version both - * sides support. (If the maximum doesn't exist, we would fail to - * establish a connection and produce a value of this type.) - */ - WorkerProto::Version daemonVersion; - - /** - * Whether the remote side trusts us or not. - * - * 3 values: "yes", "no", or `std::nullopt` for "unknown". - * - * Note that the "remote side" might not be just the end daemon, but - * also an intermediary forwarder that can make its own trusting - * decisions. This would be the intersection of all their trust - * decisions, since it takes only one link in the chain to start - * denying operations. - */ - std::optional remoteTrustsUs; - - /** - * The version of the Nix daemon that is processing our requests. - * - * Do note, it may or may not communicating with another daemon, - * rather than being an "end" `LocalStore` or similar. - */ - std::optional daemonNixVersion; - /** * Time this connection was established. */ std::chrono::time_point startTime; - - /** - * Coercion to `WorkerProto::ReadConn`. This makes it easy to use the - * factored out worker protocol searlizers with a - * `RemoteStore::Connection`. - * - * The worker protocol connection types are unidirectional, unlike - * this type. - */ - operator WorkerProto::ReadConn () - { - return WorkerProto::ReadConn { - .from = from, - .version = daemonVersion, - }; - } - - /** - * Coercion to `WorkerProto::WriteConn`. 
This makes it easy to use the - * factored out worker protocol searlizers with a - * `RemoteStore::Connection`. - * - * The worker protocol connection types are unidirectional, unlike - * this type. - */ - operator WorkerProto::WriteConn () - { - return WorkerProto::WriteConn { - .to = to, - .version = daemonVersion, - }; - } - - virtual ~Connection(); - - virtual void closeWrite() = 0; - - std::exception_ptr processStderr(Sink * sink = 0, Source * source = 0, bool flush = true); }; /** diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index 09196481b..d6efc14f9 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -69,50 +69,26 @@ void RemoteStore::initConnection(Connection & conn) /* Send the magic greeting, check for the reply. */ try { conn.from.endOfFileError = "Nix daemon disconnected unexpectedly (maybe it crashed?)"; - conn.to << WORKER_MAGIC_1; - conn.to.flush(); + StringSink saved; + TeeSource tee(conn.from, saved); try { - TeeSource tee(conn.from, saved); - unsigned int magic = readInt(tee); - if (magic != WORKER_MAGIC_2) - throw Error("protocol mismatch"); + conn.daemonVersion = WorkerProto::BasicClientConnection::handshake( + conn.to, tee, PROTOCOL_VERSION); } catch (SerialisationError & e) { /* In case the other side is waiting for our input, close it. */ conn.closeWrite(); - auto msg = conn.from.drain(); - throw Error("protocol mismatch, got '%s'", chomp(saved.s + msg)); + { + NullSink nullSink; + tee.drainInto(nullSink); + } + throw Error("protocol mismatch, got '%s'", chomp(saved.s)); } - conn.from >> conn.daemonVersion; - if (GET_PROTOCOL_MAJOR(conn.daemonVersion) != GET_PROTOCOL_MAJOR(PROTOCOL_VERSION)) - throw Error("Nix daemon protocol version not supported"); - if (GET_PROTOCOL_MINOR(conn.daemonVersion) < 10) - throw Error("the Nix daemon version is too old"); - conn.to << PROTOCOL_VERSION; + static_cast(conn) = conn.postHandshake(*this); - if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 14) { - // Obsolete CPU affinity. - conn.to << 0; - } - - if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 11) - conn.to << false; // obsolete reserveSpace - - if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 33) { - conn.to.flush(); - conn.daemonNixVersion = readString(conn.from); - } - - if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 35) { - conn.remoteTrustsUs = WorkerProto::Serialise>::read(*this, conn); - } else { - // We don't know the answer; protocol to old. - conn.remoteTrustsUs = std::nullopt; - } - - auto ex = conn.processStderr(); + auto ex = conn.processStderrReturn(); if (ex) std::rethrow_exception(ex); } catch (Error & e) { @@ -158,7 +134,7 @@ void RemoteStore::setOptions(Connection & conn) conn.to << i.first << i.second.value; } - auto ex = conn.processStderr(); + auto ex = conn.processStderrReturn(); if (ex) std::rethrow_exception(ex); } @@ -173,28 +149,7 @@ RemoteStore::ConnectionHandle::~ConnectionHandle() void RemoteStore::ConnectionHandle::processStderr(Sink * sink, Source * source, bool flush) { - auto ex = handle->processStderr(sink, source, flush); - if (ex) { - daemonException = true; - try { - std::rethrow_exception(ex); - } catch (const Error & e) { - // Nix versions before #4628 did not have an adequate behavior for reporting that the derivation format was upgraded. - // To avoid having to add compatibility logic in many places, we expect to catch almost all occurrences of the - // old incomprehensible error here, so that we can explain to users what's going on when their daemon is - // older than #4628 (2023). 
- if (experimentalFeatureSettings.isEnabled(Xp::DynamicDerivations) && - GET_PROTOCOL_MINOR(handle->daemonVersion) <= 35) - { - auto m = e.msg(); - if (m.find("parsing derivation") != std::string::npos && - m.find("expected string") != std::string::npos && - m.find("Derive([") != std::string::npos) - throw Error("%s, this might be because the daemon is too old to understand dependencies on dynamic derivations. Check to see if the raw derivation is in the form '%s'", std::move(m), "DrvWithVersion(..)"); - } - throw; - } - } + handle->processStderr(&daemonException, sink, source, flush); } @@ -226,13 +181,7 @@ StorePathSet RemoteStore::queryValidPaths(const StorePathSet & paths, Substitute if (isValidPath(i)) res.insert(i); return res; } else { - conn->to << WorkerProto::Op::QueryValidPaths; - WorkerProto::write(*this, *conn, paths); - if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 27) { - conn->to << maybeSubstitute; - } - conn.processStderr(); - return WorkerProto::Serialise::read(*this, *conn); + return conn->queryValidPaths(*this, &conn.daemonException, paths, maybeSubstitute); } } @@ -322,22 +271,10 @@ void RemoteStore::queryPathInfoUncached(const StorePath & path, std::shared_ptr info; { auto conn(getConnection()); - conn->to << WorkerProto::Op::QueryPathInfo << printStorePath(path); - try { - conn.processStderr(); - } catch (Error & e) { - // Ugly backwards compatibility hack. - if (e.msg().find("is not valid") != std::string::npos) - throw InvalidPath(std::move(e.info())); - throw; - } - if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 17) { - bool valid; conn->from >> valid; - if (!valid) throw InvalidPath("path '%s' is not valid", printStorePath(path)); - } info = std::make_shared( StorePath{path}, - WorkerProto::Serialise::read(*this, *conn)); + conn->queryPathInfo(*this, &conn.daemonException, path)); + } callback(std::move(info)); } catch (...) { callback.rethrow(); } @@ -542,8 +479,6 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source, auto conn(getConnection()); if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 18) { - conn->to << WorkerProto::Op::ImportPaths; - auto source2 = sinkToSource([&](Sink & sink) { sink << 1 // == path follows ; @@ -558,11 +493,7 @@ void RemoteStore::addToStore(const ValidPathInfo & info, Source & source, << 0 // == no path follows ; }); - - conn.processStderr(0, source2.get()); - - auto importedPaths = WorkerProto::Serialise::read(*this, *conn); - assert(importedPaths.size() <= 1); + conn->importPaths(*this, &conn.daemonException, *source2); } else { @@ -807,9 +738,7 @@ BuildResult RemoteStore::buildDerivation(const StorePath & drvPath, const BasicD BuildMode buildMode) { auto conn(getConnection()); - conn->to << WorkerProto::Op::BuildDerivation << printStorePath(drvPath); - writeDerivation(conn->to, *this, drv); - conn->to << buildMode; + conn->putBuildDerivationRequest(*this, &conn.daemonException, drvPath, drv, buildMode); conn.processStderr(); return WorkerProto::Serialise::read(*this, *conn); } @@ -827,9 +756,7 @@ void RemoteStore::ensurePath(const StorePath & path) void RemoteStore::addTempRoot(const StorePath & path) { auto conn(getConnection()); - conn->to << WorkerProto::Op::AddTempRoot << printStorePath(path); - conn.processStderr(); - readInt(conn->from); + conn->addTempRoot(*this, &conn.daemonException, path); } @@ -969,22 +896,12 @@ void RemoteStore::flushBadConnections() connections->flushBad(); } - -RemoteStore::Connection::~Connection() -{ - try { - to.flush(); - } catch (...) 
{ - ignoreException(); - } -} - void RemoteStore::narFromPath(const StorePath & path, Sink & sink) { - auto conn(connections->get()); - conn->to << WorkerProto::Op::NarFromPath << printStorePath(path); - conn->processStderr(); - copyNAR(conn->from, sink); + auto conn(getConnection()); + conn->narFromPath(*this, &conn.daemonException, path, [&](Source & source) { + copyNAR(conn->from, sink); + }); } ref RemoteStore::getFSAccessor(bool requireValidPath) @@ -992,91 +909,6 @@ ref RemoteStore::getFSAccessor(bool requireValidPath) return make_ref(ref(shared_from_this())); } -static Logger::Fields readFields(Source & from) -{ - Logger::Fields fields; - size_t size = readInt(from); - for (size_t n = 0; n < size; n++) { - auto type = (decltype(Logger::Field::type)) readInt(from); - if (type == Logger::Field::tInt) - fields.push_back(readNum(from)); - else if (type == Logger::Field::tString) - fields.push_back(readString(from)); - else - throw Error("got unsupported field type %x from Nix daemon", (int) type); - } - return fields; -} - - -std::exception_ptr RemoteStore::Connection::processStderr(Sink * sink, Source * source, bool flush) -{ - if (flush) - to.flush(); - - while (true) { - - auto msg = readNum(from); - - if (msg == STDERR_WRITE) { - auto s = readString(from); - if (!sink) throw Error("no sink"); - (*sink)(s); - } - - else if (msg == STDERR_READ) { - if (!source) throw Error("no source"); - size_t len = readNum(from); - auto buf = std::make_unique(len); - writeString({(const char *) buf.get(), source->read(buf.get(), len)}, to); - to.flush(); - } - - else if (msg == STDERR_ERROR) { - if (GET_PROTOCOL_MINOR(daemonVersion) >= 26) { - return std::make_exception_ptr(readError(from)); - } else { - auto error = readString(from); - unsigned int status = readInt(from); - return std::make_exception_ptr(Error(status, error)); - } - } - - else if (msg == STDERR_NEXT) - printError(chomp(readString(from))); - - else if (msg == STDERR_START_ACTIVITY) { - auto act = readNum(from); - auto lvl = (Verbosity) readInt(from); - auto type = (ActivityType) readInt(from); - auto s = readString(from); - auto fields = readFields(from); - auto parent = readNum(from); - logger->startActivity(act, lvl, type, s, fields, parent); - } - - else if (msg == STDERR_STOP_ACTIVITY) { - auto act = readNum(from); - logger->stopActivity(act); - } - - else if (msg == STDERR_RESULT) { - auto act = readNum(from); - auto type = (ResultType) readInt(from); - auto fields = readFields(from); - logger->result(act, type, fields); - } - - else if (msg == STDERR_LAST) - break; - - else - throw Error("got unknown message type %x from Nix daemon", msg); - } - - return nullptr; -} - void RemoteStore::ConnectionHandle::withFramedSink(std::function fun) { (*this)->to.flush(); diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc index 1a62d92d4..e9850dce6 100644 --- a/src/libstore/s3-binary-cache-store.cc +++ b/src/libstore/s3-binary-cache-store.cc @@ -213,7 +213,7 @@ struct S3BinaryCacheStoreConfig : virtual BinaryCacheStoreConfig support it. > **Note** - > + > > HTTPS should be used if the cache might contain sensitive > information. )"}; @@ -224,7 +224,7 @@ struct S3BinaryCacheStoreConfig : virtual BinaryCacheStoreConfig Do not specify this setting if you're using Amazon S3. > **Note** - > + > > This endpoint must support HTTPS and will use path-based > addressing instead of virtual host based addressing. 
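      > **Example**
      >
      > An S3-compatible endpoint might be selected like this (illustrative
      > bucket and host names):
      > `s3://my-bucket?endpoint=https://minio.example.com&region=eu-west-1`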
)"}; @@ -269,8 +269,8 @@ struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStoreConfig, public virtual S3Helper s3Helper; S3BinaryCacheStoreImpl( - const std::string & uriScheme, - const std::string & bucketName, + std::string_view uriScheme, + std::string_view bucketName, const Params & params) : StoreConfig(params) , BinaryCacheStoreConfig(params) @@ -281,6 +281,8 @@ struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStoreConfig, public virtual , bucketName(bucketName) , s3Helper(profile, region, scheme, endpoint) { + if (bucketName.empty()) + throw UsageError("`%s` store requires a bucket name in its Store URI", uriScheme); diskCache = getNarInfoDiskCache(); } diff --git a/src/libstore/serve-protocol-connection.cc b/src/libstore/serve-protocol-connection.cc new file mode 100644 index 000000000..07379999b --- /dev/null +++ b/src/libstore/serve-protocol-connection.cc @@ -0,0 +1,106 @@ +#include "serve-protocol-connection.hh" +#include "serve-protocol-impl.hh" +#include "build-result.hh" +#include "derivations.hh" + +namespace nix { + +ServeProto::Version ServeProto::BasicClientConnection::handshake( + BufferedSink & to, Source & from, ServeProto::Version localVersion, std::string_view host) +{ + to << SERVE_MAGIC_1 << localVersion; + to.flush(); + + unsigned int magic = readInt(from); + if (magic != SERVE_MAGIC_2) + throw Error("'nix-store --serve' protocol mismatch from '%s'", host); + auto remoteVersion = readInt(from); + if (GET_PROTOCOL_MAJOR(remoteVersion) != 0x200) + throw Error("unsupported 'nix-store --serve' protocol version on '%s'", host); + return std::min(remoteVersion, localVersion); +} + +ServeProto::Version +ServeProto::BasicServerConnection::handshake(BufferedSink & to, Source & from, ServeProto::Version localVersion) +{ + unsigned int magic = readInt(from); + if (magic != SERVE_MAGIC_1) + throw Error("protocol mismatch"); + to << SERVE_MAGIC_2 << localVersion; + to.flush(); + auto remoteVersion = readInt(from); + return std::min(remoteVersion, localVersion); +} + +StorePathSet ServeProto::BasicClientConnection::queryValidPaths( + const StoreDirConfig & store, bool lock, const StorePathSet & paths, SubstituteFlag maybeSubstitute) +{ + to << ServeProto::Command::QueryValidPaths << lock << maybeSubstitute; + write(store, *this, paths); + to.flush(); + + return Serialise::read(store, *this); +} + +std::map +ServeProto::BasicClientConnection::queryPathInfos(const StoreDirConfig & store, const StorePathSet & paths) +{ + std::map infos; + + to << ServeProto::Command::QueryPathInfos; + ServeProto::write(store, *this, paths); + to.flush(); + + while (true) { + auto storePathS = readString(from); + if (storePathS == "") + break; + + auto storePath = store.parseStorePath(storePathS); + assert(paths.count(storePath) == 1); + auto info = ServeProto::Serialise::read(store, *this); + infos.insert_or_assign(std::move(storePath), std::move(info)); + } + + return infos; +} + +void ServeProto::BasicClientConnection::putBuildDerivationRequest( + const StoreDirConfig & store, + const StorePath & drvPath, + const BasicDerivation & drv, + const ServeProto::BuildOptions & options) +{ + to << ServeProto::Command::BuildDerivation << store.printStorePath(drvPath); + writeDerivation(to, store, drv); + + ServeProto::write(store, *this, options); + + to.flush(); +} + +BuildResult ServeProto::BasicClientConnection::getBuildDerivationResponse(const StoreDirConfig & store) +{ + return ServeProto::Serialise::read(store, *this); +} + +void ServeProto::BasicClientConnection::narFromPath( + const 
StoreDirConfig & store, const StorePath & path, std::function fun) +{ + to << ServeProto::Command::DumpStorePath << store.printStorePath(path); + to.flush(); + + fun(from); +} + +void ServeProto::BasicClientConnection::importPaths(const StoreDirConfig & store, std::function fun) +{ + to << ServeProto::Command::ImportPaths; + fun(to); + to.flush(); + + if (readInt(from) != 1) + throw Error("remote machine failed to import closure"); +} + +} diff --git a/src/libstore/serve-protocol-connection.hh b/src/libstore/serve-protocol-connection.hh new file mode 100644 index 000000000..73bf71443 --- /dev/null +++ b/src/libstore/serve-protocol-connection.hh @@ -0,0 +1,108 @@ +#pragma once +///@file + +#include "serve-protocol.hh" +#include "store-api.hh" + +namespace nix { + +struct ServeProto::BasicClientConnection +{ + FdSink to; + FdSource from; + ServeProto::Version remoteVersion; + + /** + * Establishes connection, negotiating version. + * + * @return the version provided by the other side of the + * connection. + * + * @param to Taken by reference to allow for various error handling + * mechanisms. + * + * @param from Taken by reference to allow for various error + * handling mechanisms. + * + * @param localVersion Our version which is sent over + * + * @param host Just used to add context to thrown exceptions. + */ + static ServeProto::Version + handshake(BufferedSink & to, Source & from, ServeProto::Version localVersion, std::string_view host); + + /** + * Coercion to `ServeProto::ReadConn`. This makes it easy to use the + * factored out serve protocol serializers with a + * `LegacySSHStore::Connection`. + * + * The serve protocol connection types are unidirectional, unlike + * this type. + */ + operator ServeProto::ReadConn() + { + return ServeProto::ReadConn{ + .from = from, + .version = remoteVersion, + }; + } + + /** + * Coercion to `ServeProto::WriteConn`. This makes it easy to use the + * factored out serve protocol serializers with a + * `LegacySSHStore::Connection`. + * + * The serve protocol connection types are unidirectional, unlike + * this type. + */ + operator ServeProto::WriteConn() + { + return ServeProto::WriteConn{ + .to = to, + .version = remoteVersion, + }; + } + + StorePathSet queryValidPaths( + const StoreDirConfig & remoteStore, bool lock, const StorePathSet & paths, SubstituteFlag maybeSubstitute); + + std::map queryPathInfos(const StoreDirConfig & store, const StorePathSet & paths); + ; + + void putBuildDerivationRequest( + const StoreDirConfig & store, + const StorePath & drvPath, + const BasicDerivation & drv, + const ServeProto::BuildOptions & options); + + /** + * Get the response, must be paired with + * `putBuildDerivationRequest`. + */ + BuildResult getBuildDerivationResponse(const StoreDirConfig & store); + + void narFromPath(const StoreDirConfig & store, const StorePath & path, std::function fun); + + void importPaths(const StoreDirConfig & store, std::function fun); +}; + +struct ServeProto::BasicServerConnection +{ + /** + * Establishes connection, negotiating version. + * + * @return the version provided by the other side of the + * connection. + * + * @param to Taken by reference to allow for various error handling + * mechanisms. + * + * @param from Taken by reference to allow for various error + * handling mechanisms. 
+ * + * @param localVersion Our version which is sent over + */ + static ServeProto::Version handshake(BufferedSink & to, Source & from, ServeProto::Version localVersion); +}; + +} diff --git a/src/libstore/serve-protocol-impl.cc b/src/libstore/serve-protocol-impl.cc deleted file mode 100644 index b39212884..000000000 --- a/src/libstore/serve-protocol-impl.cc +++ /dev/null @@ -1,69 +0,0 @@ -#include "serve-protocol-impl.hh" -#include "build-result.hh" -#include "derivations.hh" - -namespace nix { - -ServeProto::Version ServeProto::BasicClientConnection::handshake( - BufferedSink & to, - Source & from, - ServeProto::Version localVersion, - std::string_view host) -{ - to << SERVE_MAGIC_1 << localVersion; - to.flush(); - - unsigned int magic = readInt(from); - if (magic != SERVE_MAGIC_2) - throw Error("'nix-store --serve' protocol mismatch from '%s'", host); - auto remoteVersion = readInt(from); - if (GET_PROTOCOL_MAJOR(remoteVersion) != 0x200) - throw Error("unsupported 'nix-store --serve' protocol version on '%s'", host); - return remoteVersion; -} - -ServeProto::Version ServeProto::BasicServerConnection::handshake( - BufferedSink & to, - Source & from, - ServeProto::Version localVersion) -{ - unsigned int magic = readInt(from); - if (magic != SERVE_MAGIC_1) throw Error("protocol mismatch"); - to << SERVE_MAGIC_2 << localVersion; - to.flush(); - return readInt(from); -} - - -StorePathSet ServeProto::BasicClientConnection::queryValidPaths( - const Store & store, - bool lock, const StorePathSet & paths, - SubstituteFlag maybeSubstitute) -{ - to - << ServeProto::Command::QueryValidPaths - << lock - << maybeSubstitute; - write(store, *this, paths); - to.flush(); - - return Serialise::read(store, *this); -} - - -void ServeProto::BasicClientConnection::putBuildDerivationRequest( - const Store & store, - const StorePath & drvPath, const BasicDerivation & drv, - const ServeProto::BuildOptions & options) -{ - to - << ServeProto::Command::BuildDerivation - << store.printStorePath(drvPath); - writeDerivation(to, store, drv); - - ServeProto::write(store, *this, options); - - to.flush(); -} - -} diff --git a/src/libstore/serve-protocol-impl.hh b/src/libstore/serve-protocol-impl.hh index fd8d94697..67bc5dc6e 100644 --- a/src/libstore/serve-protocol-impl.hh +++ b/src/libstore/serve-protocol-impl.hh @@ -57,101 +57,4 @@ struct ServeProto::Serialise /* protocol-specific templates */ -struct ServeProto::BasicClientConnection -{ - FdSink to; - FdSource from; - ServeProto::Version remoteVersion; - - /** - * Establishes connection, negotiating version. - * - * @return the version provided by the other side of the - * connection. - * - * @param to Taken by reference to allow for various error handling - * mechanisms. - * - * @param from Taken by reference to allow for various error - * handling mechanisms. - * - * @param localVersion Our version which is sent over - * - * @param host Just used to add context to thrown exceptions. - */ - static ServeProto::Version handshake( - BufferedSink & to, - Source & from, - ServeProto::Version localVersion, - std::string_view host); - - /** - * Coercion to `ServeProto::ReadConn`. This makes it easy to use the - * factored out serve protocol serializers with a - * `LegacySSHStore::Connection`. - * - * The serve protocol connection types are unidirectional, unlike - * this type. - */ - operator ServeProto::ReadConn () - { - return ServeProto::ReadConn { - .from = from, - .version = remoteVersion, - }; - } - - /** - * Coercion to `ServeProto::WriteConn`. 
This makes it easy to use the - * factored out serve protocol serializers with a - * `LegacySSHStore::Connection`. - * - * The serve protocol connection types are unidirectional, unlike - * this type. - */ - operator ServeProto::WriteConn () - { - return ServeProto::WriteConn { - .to = to, - .version = remoteVersion, - }; - } - - StorePathSet queryValidPaths( - const Store & remoteStore, - bool lock, const StorePathSet & paths, - SubstituteFlag maybeSubstitute); - - /** - * Just the request half, because Hydra may do other things between - * issuing the request and reading the `BuildResult` response. - */ - void putBuildDerivationRequest( - const Store & store, - const StorePath & drvPath, const BasicDerivation & drv, - const ServeProto::BuildOptions & options); -}; - -struct ServeProto::BasicServerConnection -{ - /** - * Establishes connection, negotiating version. - * - * @return the version provided by the other side of the - * connection. - * - * @param to Taken by reference to allow for various error handling - * mechanisms. - * - * @param from Taken by reference to allow for various error - * handling mechanisms. - * - * @param localVersion Our version which is sent over - */ - static ServeProto::Version handshake( - BufferedSink & to, - Source & from, - ServeProto::Version localVersion); -}; - } diff --git a/src/libstore/ssh-store-config.cc b/src/libstore/ssh-store-config.cc new file mode 100644 index 000000000..e81a94874 --- /dev/null +++ b/src/libstore/ssh-store-config.cc @@ -0,0 +1,43 @@ +#include + +#include "ssh-store-config.hh" +#include "ssh.hh" + +namespace nix { + +static std::string extractConnStr(std::string_view scheme, std::string_view _connStr) +{ + if (_connStr.empty()) + throw UsageError("`%s` store requires a valid SSH host as the authority part in Store URI", scheme); + + std::string connStr{_connStr}; + + std::smatch result; + static std::regex v6AddrRegex("^((.*)@)?\\[(.*)\\]$"); + + if (std::regex_match(connStr, result, v6AddrRegex)) { + connStr = result[1].matched ? result.str(1) + result.str(3) : result.str(3); + } + + return connStr; +} + +CommonSSHStoreConfig::CommonSSHStoreConfig(std::string_view scheme, std::string_view host, const Params & params) + : StoreConfig(params) + , host(extractConnStr(scheme, host)) +{ +} + +SSHMaster CommonSSHStoreConfig::createSSHMaster(bool useMaster, Descriptor logFD) +{ + return { + host, + sshKey.get(), + sshPublicHostKey.get(), + useMaster, + compress, + logFD, + }; +} + +} diff --git a/src/libstore/ssh-store-config.hh b/src/libstore/ssh-store-config.hh index 4ce4ffc4c..5deb6f4c9 100644 --- a/src/libstore/ssh-store-config.hh +++ b/src/libstore/ssh-store-config.hh @@ -5,10 +5,14 @@ namespace nix { +class SSHMaster; + struct CommonSSHStoreConfig : virtual StoreConfig { using StoreConfig::StoreConfig; + CommonSSHStoreConfig(std::string_view scheme, std::string_view host, const Params & params); + const Setting sshKey{this, "", "ssh-key", "Path to the SSH private key used to authenticate to the remote machine."}; @@ -24,6 +28,35 @@ struct CommonSSHStoreConfig : virtual StoreConfig to be used on the remote machine. The default is `auto` (i.e. use the Nix daemon or `/nix/store` directly). )"}; + + /** + * The `parseURL` function supports both IPv6 URIs as defined in + * RFC2732, but also pure addresses. The latter one is needed here to + * connect to a remote store via SSH (it's possible to do e.g. `ssh root@::1`). 
+ * + * When initialized, the following adjustments are made: + * + * - If the URL looks like `root@[::1]` (which is allowed by the URL parser and probably + * needed to pass further flags), it + * will be transformed into `root@::1` for SSH (same for `[::1]` -> `::1`). + * + * - If the URL looks like `root@::1` it will be left as-is. + * + * - In any other case, the string will be left as-is. + * + * Will throw an error if `connStr` is empty too. + */ + std::string host; + + /** + * Small wrapper around `SSHMaster::SSHMaster` that gets most + * arguments from this configuration. + * + * See that constructor for details on the remaining two arguments. + */ + SSHMaster createSSHMaster( + bool useMaster, + Descriptor logFD = INVALID_DESCRIPTOR); }; } diff --git a/src/libstore/ssh-store.cc b/src/libstore/ssh-store.cc index 220d5d31b..7ad934b73 100644 --- a/src/libstore/ssh-store.cc +++ b/src/libstore/ssh-store.cc @@ -34,21 +34,19 @@ class SSHStore : public virtual SSHStoreConfig, public virtual RemoteStore { public: - SSHStore(const std::string & scheme, const std::string & host, const Params & params) + SSHStore( + std::string_view scheme, + std::string_view host, + const Params & params) : StoreConfig(params) , RemoteStoreConfig(params) - , CommonSSHStoreConfig(params) + , CommonSSHStoreConfig(scheme, host, params) , SSHStoreConfig(params) , Store(params) , RemoteStore(params) - , host(host) - , master( - host, - sshKey, - sshPublicHostKey, + , master(createSSHMaster( // Use SSH master only if using more than 1 connection. - connections->capacity() > 1, - compress) + connections->capacity() > 1)) { } @@ -108,6 +106,15 @@ struct MountedSSHStoreConfig : virtual SSHStoreConfig, virtual LocalFSStoreConfi { } + MountedSSHStoreConfig(std::string_view scheme, std::string_view host, StringMap params) + : StoreConfig(params) + , RemoteStoreConfig(params) + , CommonSSHStoreConfig(scheme, host, params) + , SSHStoreConfig(params) + , LocalFSStoreConfig(params) + { + } + const std::string name() override { return "Experimental SSH Store with filesystem mounted"; } std::string doc() override @@ -141,10 +148,13 @@ class MountedSSHStore : public virtual MountedSSHStoreConfig, public virtual SSH { public: - MountedSSHStore(const std::string & scheme, const std::string & host, const Params & params) + MountedSSHStore( + std::string_view scheme, + std::string_view host, + const Params & params) : StoreConfig(params) , RemoteStoreConfig(params) - , CommonSSHStoreConfig(params) + , CommonSSHStoreConfig(scheme, host, params) , SSHStoreConfig(params) , LocalFSStoreConfig(params) , MountedSSHStoreConfig(params) diff --git a/src/libstore/ssh.cc b/src/libstore/ssh.cc index 7e730299a..e5d623adf 100644 --- a/src/libstore/ssh.cc +++ b/src/libstore/ssh.cc @@ -6,7 +6,11 @@ namespace nix { -SSHMaster::SSHMaster(const std::string & host, const std::string & keyFile, const std::string & sshPublicHostKey, bool useMaster, bool compress, int logFD) +SSHMaster::SSHMaster( + std::string_view host, + std::string_view keyFile, + std::string_view sshPublicHostKey, + bool useMaster, bool compress, Descriptor logFD) : host(host) , fakeSSH(host == "localhost") , keyFile(keyFile) diff --git a/src/libstore/ssh.hh b/src/libstore/ssh.hh index 3b1a0827a..19b30e883 100644 --- a/src/libstore/ssh.hh +++ b/src/libstore/ssh.hh @@ -17,7 +17,7 @@ private: const std::string sshPublicHostKey; const bool useMaster; const bool compress; - const int logFD; + const Descriptor logFD; struct State { @@ -39,7 +39,11 @@ private: public: - SSHMaster(const 
std::string & host, const std::string & keyFile, const std::string & sshPublicHostKey, bool useMaster, bool compress, int logFD = -1); + SSHMaster( + std::string_view host, + std::string_view keyFile, + std::string_view sshPublicHostKey, + bool useMaster, bool compress, Descriptor logFD = INVALID_DESCRIPTOR); struct Connection { diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index 0b78f999e..9b519dd84 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -8,7 +8,6 @@ #include "util.hh" #include "nar-info-disk-cache.hh" #include "thread-pool.hh" -#include "url.hh" #include "references.hh" #include "archive.hh" #include "callback.hh" @@ -21,7 +20,6 @@ #include "users.hh" #include -#include using json = nlohmann::json; @@ -1274,144 +1272,63 @@ Derivation Store::readInvalidDerivation(const StorePath & drvPath) namespace nix { -/* Split URI into protocol+hierarchy part and its parameter set. */ -std::pair splitUriAndParams(const std::string & uri_) -{ - auto uri(uri_); - Store::Params params; - auto q = uri.find('?'); - if (q != std::string::npos) { - params = decodeQuery(uri.substr(q + 1)); - uri = uri_.substr(0, q); - } - return {uri, params}; -} - -static bool isNonUriPath(const std::string & spec) -{ - return - // is not a URL - spec.find("://") == std::string::npos - // Has at least one path separator, and so isn't a single word that - // might be special like "auto" - && spec.find("/") != std::string::npos; -} - -std::shared_ptr openFromNonUri(const std::string & uri, const Store::Params & params) -{ - // TODO reenable on Windows once we have `LocalStore` and - // `UDSRemoteStore`. - if (uri == "" || uri == "auto") { - auto stateDir = getOr(params, "state", settings.nixStateDir); - if (access(stateDir.c_str(), R_OK | W_OK) == 0) - return std::make_shared(params); - else if (pathExists(settings.nixDaemonSocketFile)) - return std::make_shared(params); - #if __linux__ - else if (!pathExists(stateDir) - && params.empty() - && !isRootUser() - && !getEnv("NIX_STORE_DIR").has_value() - && !getEnv("NIX_STATE_DIR").has_value()) - { - /* If /nix doesn't exist, there is no daemon socket, and - we're not root, then automatically set up a chroot - store in ~/.local/share/nix/root. */ - auto chrootStore = getDataDir() + "/nix/root"; - if (!pathExists(chrootStore)) { - try { - createDirs(chrootStore); - } catch (Error & e) { - return std::make_shared(params); - } - warn("'%s' does not exist, so Nix will use '%s' as a chroot store", stateDir, chrootStore); - } else - debug("'%s' does not exist, so Nix will use '%s' as a chroot store", stateDir, chrootStore); - Store::Params params2; - params2["root"] = chrootStore; - return std::make_shared(params2); - } - #endif - else - return std::make_shared(params); - } else if (uri == "daemon") { - return std::make_shared(params); - } else if (uri == "local") { - return std::make_shared(params); - } else if (isNonUriPath(uri)) { - Store::Params params2 = params; - params2["root"] = absPath(uri); - return std::make_shared(params2); - } else { - return nullptr; - } -} - -// The `parseURL` function supports both IPv6 URIs as defined in -// RFC2732, but also pure addresses. The latter one is needed here to -// connect to a remote store via SSH (it's possible to do e.g. `ssh root@::1`). -// -// This function now ensures that a usable connection string is available: -// * If the store to be opened is not an SSH store, nothing will be done. 
-// * If the URL looks like `root@[::1]` (which is allowed by the URL parser and probably -// needed to pass further flags), it -// will be transformed into `root@::1` for SSH (same for `[::1]` -> `::1`). -// * If the URL looks like `root@::1` it will be left as-is. -// * In any other case, the string will be left as-is. -static std::string extractConnStr(const std::string &proto, const std::string &connStr) -{ - if (proto.rfind("ssh") != std::string::npos) { - std::smatch result; - std::regex v6AddrRegex("^((.*)@)?\\[(.*)\\]$"); - - if (std::regex_match(connStr, result, v6AddrRegex)) { - if (result[1].matched) { - return result.str(1) + result.str(3); - } - return result.str(3); - } - } - - return connStr; -} - -ref openStore(const std::string & uri_, +ref openStore(const std::string & uri, const Store::Params & extraParams) { - auto params = extraParams; - try { - auto parsedUri = parseURL(uri_); - params.insert(parsedUri.query.begin(), parsedUri.query.end()); + return openStore(StoreReference::parse(uri, extraParams)); +} - auto baseURI = extractConnStr( - parsedUri.scheme, - parsedUri.authority.value_or("") + parsedUri.path - ); +ref openStore(StoreReference && storeURI) +{ + auto & params = storeURI.params; - for (auto implem : *Implementations::registered) { - if (implem.uriSchemes.count(parsedUri.scheme)) { - auto store = implem.create(parsedUri.scheme, baseURI, params); - if (store) { - experimentalFeatureSettings.require(store->experimentalFeature()); - store->init(); - store->warnUnknownSettings(); - return ref(store); - } + auto store = std::visit(overloaded { + [&](const StoreReference::Auto &) -> std::shared_ptr { + auto stateDir = getOr(params, "state", settings.nixStateDir); + if (access(stateDir.c_str(), R_OK | W_OK) == 0) + return std::make_shared(params); + else if (pathExists(settings.nixDaemonSocketFile)) + return std::make_shared(params); + #if __linux__ + else if (!pathExists(stateDir) + && params.empty() + && !isRootUser() + && !getEnv("NIX_STORE_DIR").has_value() + && !getEnv("NIX_STATE_DIR").has_value()) + { + /* If /nix doesn't exist, there is no daemon socket, and + we're not root, then automatically set up a chroot + store in ~/.local/share/nix/root. 
*/ + auto chrootStore = getDataDir() + "/nix/root"; + if (!pathExists(chrootStore)) { + try { + createDirs(chrootStore); + } catch (Error & e) { + return std::make_shared(params); + } + warn("'%s' does not exist, so Nix will use '%s' as a chroot store", stateDir, chrootStore); + } else + debug("'%s' does not exist, so Nix will use '%s' as a chroot store", stateDir, chrootStore); + return std::make_shared("local", chrootStore, params); } - } - } - catch (BadURL &) { - auto [uri, uriParams] = splitUriAndParams(uri_); - params.insert(uriParams.begin(), uriParams.end()); + #endif + else + return std::make_shared(params); + }, + [&](const StoreReference::Specified & g) { + for (auto implem : *Implementations::registered) + if (implem.uriSchemes.count(g.scheme)) + return implem.create(g.scheme, g.authority, params); - if (auto store = openFromNonUri(uri, params)) { - experimentalFeatureSettings.require(store->experimentalFeature()); - store->warnUnknownSettings(); - return ref(store); - } - } + throw Error("don't know how to open Nix store with scheme '%s'", g.scheme); + }, + }, storeURI.variant); - throw Error("don't know how to open Nix store '%s'", uri_); + experimentalFeatureSettings.require(store->experimentalFeature()); + store->warnUnknownSettings(); + store->init(); + + return ref { store }; } std::list> getDefaultSubstituters() diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh index ae8c22437..15712458c 100644 --- a/src/libstore/store-api.hh +++ b/src/libstore/store-api.hh @@ -13,6 +13,7 @@ #include "path-info.hh" #include "repair-flag.hh" #include "store-dir-config.hh" +#include "store-reference.hh" #include "source-path.hh" #include @@ -65,7 +66,7 @@ MakeError(Unsupported, Error); MakeError(SubstituteGone, Error); MakeError(SubstituterDisabled, Error); -MakeError(InvalidStoreURI, Error); +MakeError(InvalidStoreReference, Error); struct Realisation; struct RealisedPath; @@ -91,7 +92,7 @@ enum SubstituteFlag : bool { NoSubstitute = false, Substitute = true }; const uint32_t exportMagic = 0x4558494e; -enum BuildMode { bmNormal, bmRepair, bmCheck }; +enum BuildMode : uint8_t { bmNormal, bmRepair, bmCheck }; enum TrustedFlag : bool { NotTrusted = false, Trusted = true }; struct BuildResult; @@ -102,7 +103,7 @@ typedef std::map> StorePathCAMap; struct StoreConfig : public StoreDirConfig { - typedef std::map Params; + using Params = StoreReference::Params; using StoreDirConfig::StoreDirConfig; @@ -859,34 +860,13 @@ OutputPathMap resolveDerivedPath(Store &, const DerivedPath::Built &, Store * ev /** * @return a Store object to access the Nix store denoted by * ‘uri’ (slight misnomer...). - * - * @param uri Supported values are: - * - * - ‘local’: The Nix store in /nix/store and database in - * /nix/var/nix/db, accessed directly. - * - * - ‘daemon’: The Nix store accessed via a Unix domain socket - * connection to nix-daemon. - * - * - ‘unix://’: The Nix store accessed via a Unix domain socket - * connection to nix-daemon, with the socket located at . - * - * - ‘auto’ or ‘’: Equivalent to ‘local’ or ‘daemon’ depending on - * whether the user has write access to the local Nix - * store/database. - * - * - ‘file://’: A binary cache stored in . - * - * - ‘https://’: A binary cache accessed via HTTP. - * - * - ‘s3://’: A writable binary cache stored on Amazon's Simple - * Storage Service. - * - * - ‘ssh://[user@]’: A remote Nix store accessed by running - * ‘nix-store --serve’ via SSH. 
- * - * You can pass parameters to the store type by appending - * ‘?key=value&key=value&...’ to the URI. + */ +ref openStore(StoreReference && storeURI); + + +/** + * Opens the store at `uri`, where `uri` is in the format expected by `StoreReference::parse` + */ ref openStore(const std::string & uri = settings.storeUri.get(), const Store::Params & extraParams = Store::Params()); @@ -901,7 +881,14 @@ std::list> getDefaultSubstituters(); struct StoreFactory { std::set uriSchemes; - std::function (const std::string & scheme, const std::string & uri, const Store::Params & params)> create; + /** + * The `authorityPath` parameter is `/`, or really + * whatever comes after `://` and before `?`. + */ + std::function ( + std::string_view scheme, + std::string_view authorityPath, + const Store::Params & params)> create; std::function ()> getConfig; }; @@ -916,7 +903,7 @@ struct Implementations StoreFactory factory{ .uriSchemes = T::uriSchemes(), .create = - ([](const std::string & scheme, const std::string & uri, const Store::Params & params) + ([](auto scheme, auto uri, auto & params) -> std::shared_ptr { return std::make_shared(scheme, uri, params); }), .getConfig = @@ -950,11 +937,6 @@ std::optional decodeValidPathInfo( std::istream & str, std::optional hashGiven = std::nullopt); -/** - * Split URI into protocol+hierarchy part and its parameter set. - */ -std::pair splitUriAndParams(const std::string & uri); - const ContentAddress * getDerivationCA(const BasicDerivation & drv); std::map drvOutputReferences( diff --git a/src/libstore/store-reference.cc b/src/libstore/store-reference.cc new file mode 100644 index 000000000..b4968dfad --- /dev/null +++ b/src/libstore/store-reference.cc @@ -0,0 +1,116 @@ +#include + +#include "error.hh" +#include "url.hh" +#include "store-reference.hh" +#include "file-system.hh" +#include "util.hh" + +namespace nix { + +static bool isNonUriPath(const std::string & spec) +{ + return + // is not a URL + spec.find("://") == std::string::npos + // Has at least one path separator, and so isn't a single word that + // might be special like "auto" + && spec.find("/") != std::string::npos; +} + +std::string StoreReference::render() const +{ + std::string res; + + std::visit( + overloaded{ + [&](const StoreReference::Auto &) { res = "auto"; }, + [&](const StoreReference::Specified & g) { + res = g.scheme; + res += "://"; + res += g.authority; + }, + }, + variant); + + if (!params.empty()) { + res += "?"; + res += encodeQuery(params); + } + + return res; +} + +StoreReference StoreReference::parse(const std::string & uri, const StoreReference::Params & extraParams) +{ + auto params = extraParams; + try { + auto parsedUri = parseURL(uri); + params.insert(parsedUri.query.begin(), parsedUri.query.end()); + + auto baseURI = parsedUri.authority.value_or("") + parsedUri.path; + + return { + .variant = + Specified{ + .scheme = std::move(parsedUri.scheme), + .authority = std::move(baseURI), + }, + .params = std::move(params), + }; + } catch (BadURL &) { + auto [baseURI, uriParams] = splitUriAndParams(uri); + params.insert(uriParams.begin(), uriParams.end()); + + if (baseURI == "" || baseURI == "auto") { + return { + .variant = Auto{}, + .params = std::move(params), + }; + } else if (baseURI == "daemon") { + return { + .variant = + Specified{ + .scheme = "unix", + .authority = "", + }, + .params = std::move(params), + }; + } else if (baseURI == "local") { + return { + .variant = + Specified{ + .scheme = "local", + .authority = "", + }, + .params = std::move(params), + }; + } else if 
(isNonUriPath(baseURI)) { + return { + .variant = + Specified{ + .scheme = "local", + .authority = absPath(baseURI), + }, + .params = std::move(params), + }; + } + } + + throw UsageError("Cannot parse Nix store '%s'", uri); +} + +/* Split URI into protocol+hierarchy part and its parameter set. */ +std::pair splitUriAndParams(const std::string & uri_) +{ + auto uri(uri_); + StoreReference::Params params; + auto q = uri.find('?'); + if (q != std::string::npos) { + params = decodeQuery(uri.substr(q + 1)); + uri = uri_.substr(0, q); + } + return {uri, params}; +} + +} diff --git a/src/libstore/store-reference.hh b/src/libstore/store-reference.hh new file mode 100644 index 000000000..e99335c0d --- /dev/null +++ b/src/libstore/store-reference.hh @@ -0,0 +1,92 @@ +#pragma once +///@file + +#include + +#include "types.hh" + +namespace nix { + +/** + * A parsed Store URI (URI is a slight misnomer...), parsed but not yet + * resolved to a specific instance, and with its query params not yet validated. + * + * Supported values are: + * + * - ‘local’: The Nix store in /nix/store and database in + * /nix/var/nix/db, accessed directly. + * + * - ‘daemon’: The Nix store accessed via a Unix domain socket + * connection to nix-daemon. + * + * - ‘unix://<path>’: The Nix store accessed via a Unix domain socket + * connection to nix-daemon, with the socket located at <path>. + * + * - ‘auto’ or ‘’: Equivalent to ‘local’ or ‘daemon’ depending on + * whether the user has write access to the local Nix + * store/database. + * + * - ‘file://<path>’: A binary cache stored in <path>. + * + * - ‘https://<path>’: A binary cache accessed via HTTP. + * + * - ‘s3://<bucket-name>’: A writable binary cache stored on Amazon's Simple + * Storage Service. + * + * - ‘ssh://[user@]<host>’: A remote Nix store accessed by running + * ‘nix-store --serve’ via SSH. + * + * You can pass parameters to the store type by appending + * ‘?key=value&key=value&...’ to the URI. + */ +struct StoreReference +{ + using Params = std::map; + + /** + * Special store reference `""` or `"auto"` + */ + struct Auto + { + inline bool operator==(const Auto & rhs) const = default; + inline auto operator<=>(const Auto & rhs) const = default; + }; + + /** + * General case, a regular `scheme://authority` URL. + */ + struct Specified + { + std::string scheme; + std::string authority = ""; + + bool operator==(const Specified & rhs) const = default; + auto operator<=>(const Specified & rhs) const = default; + }; + + typedef std::variant Variant; + + Variant variant; + + Params params; + + bool operator==(const StoreReference & rhs) const = default; + auto operator<=>(const StoreReference & rhs) const = default; + + /** + * Render the whole store reference as a URI, including parameters. + */ + std::string render() const; + + /** + * Parse a URI into a store reference. + */ + static StoreReference parse(const std::string & uri, const Params & extraParams = Params{}); +}; + +/** + * Split URI into protocol+hierarchy part and its parameter set.
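+ *
+ * For example (illustrative):
+ *
+ *     splitUriAndParams("ssh://mac?priority=1")
+ *     // -> {"ssh://mac", {{"priority", "1"}}}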
+ */ +std::pair splitUriAndParams(const std::string & uri); + +} diff --git a/src/libstore/uds-remote-store.cc b/src/libstore/uds-remote-store.cc index 649644146..499f76967 100644 --- a/src/libstore/uds-remote-store.cc +++ b/src/libstore/uds-remote-store.cc @@ -40,12 +40,13 @@ UDSRemoteStore::UDSRemoteStore(const Params & params) UDSRemoteStore::UDSRemoteStore( - const std::string scheme, - std::string socket_path, + std::string_view scheme, + PathView socket_path, const Params & params) : UDSRemoteStore(params) { - path.emplace(socket_path); + if (!socket_path.empty()) + path.emplace(socket_path); } @@ -54,6 +55,7 @@ std::string UDSRemoteStore::getUri() if (path) { return std::string("unix://") + *path; } else { + // unix:// with no path also works. Change what we return? return "daemon"; } } diff --git a/src/libstore/uds-remote-store.hh b/src/libstore/uds-remote-store.hh index 8bce8994a..6f0494bb6 100644 --- a/src/libstore/uds-remote-store.hh +++ b/src/libstore/uds-remote-store.hh @@ -28,7 +28,10 @@ class UDSRemoteStore : public virtual UDSRemoteStoreConfig public: UDSRemoteStore(const Params & params); - UDSRemoteStore(const std::string scheme, std::string path, const Params & params); + UDSRemoteStore( + std::string_view scheme, + PathView path, + const Params & params); std::string getUri() override; diff --git a/src/libstore/unix/build/local-derivation-goal.cc b/src/libstore/unix/build/local-derivation-goal.cc index 3b010350d..16095cf5d 100644 --- a/src/libstore/unix/build/local-derivation-goal.cc +++ b/src/libstore/unix/build/local-derivation-goal.cc @@ -285,7 +285,7 @@ static void movePath(const Path & src, const Path & dst) if (changePerm) chmod_(src, st.st_mode | S_IWUSR); - renameFile(src, dst); + std::filesystem::rename(src, dst); if (changePerm) chmod_(dst, st.st_mode); @@ -372,7 +372,7 @@ bool LocalDerivationGoal::cleanupDecideWhetherDiskFull() if (buildMode != bmCheck && status.known->isValid()) continue; auto p = worker.store.toRealPath(status.known->path); if (pathExists(chrootRootDir + p)) - renameFile((chrootRootDir + p), p); + std::filesystem::rename((chrootRootDir + p), p); } return diskFull; @@ -421,7 +421,9 @@ static void doBind(const Path & source, const Path & target, bool optional = fal } else if (S_ISLNK(st.st_mode)) { // Symlinks can (apparently) not be bind-mounted, so just copy it createDirs(dirOf(target)); - copyFile(source, target, /* andDelete */ false); + copyFile( + std::filesystem::path(source), + std::filesystem::path(target), false); } else { createDirs(dirOf(target)); writeFile(target, ""); @@ -2568,8 +2570,11 @@ SingleDrvOutputs LocalDerivationGoal::registerOutputs() // Replace the output by a fresh copy of itself to make sure // that there's no stale file descriptor pointing to it Path tmpOutput = actualPath + ".tmp"; - copyFile(actualPath, tmpOutput, true); - renameFile(tmpOutput, actualPath); + copyFile( + std::filesystem::path(actualPath), + std::filesystem::path(tmpOutput), true); + + std::filesystem::rename(tmpOutput, actualPath); auto newInfo0 = newInfoFromCA(DerivationOutput::CAFloating { .method = dof.ca.method, diff --git a/src/libstore/unix/lock.cc b/src/libstore/unix/user-lock.cc similarity index 99% rename from src/libstore/unix/lock.cc rename to src/libstore/unix/user-lock.cc index 023c74e34..8057aa13e 100644 --- a/src/libstore/unix/lock.cc +++ b/src/libstore/unix/user-lock.cc @@ -1,4 +1,4 @@ -#include "lock.hh" +#include "user-lock.hh" #include "file-system.hh" #include "globals.hh" #include "pathlocks.hh" diff --git 
a/src/libstore/unix/lock.hh b/src/libstore/unix/user-lock.hh similarity index 100% rename from src/libstore/unix/lock.hh rename to src/libstore/unix/user-lock.hh diff --git a/src/libstore/windows/build.cc b/src/libstore/windows/build.cc deleted file mode 100644 index 3eadc5bda..000000000 --- a/src/libstore/windows/build.cc +++ /dev/null @@ -1,37 +0,0 @@ -#include "store-api.hh" -#include "build-result.hh" - -namespace nix { - -void Store::buildPaths(const std::vector & reqs, BuildMode buildMode, std::shared_ptr evalStore) -{ - unsupported("buildPaths"); -} - -std::vector Store::buildPathsWithResults( - const std::vector & reqs, - BuildMode buildMode, - std::shared_ptr evalStore) -{ - unsupported("buildPathsWithResults"); -} - -BuildResult Store::buildDerivation(const StorePath & drvPath, const BasicDerivation & drv, - BuildMode buildMode) -{ - unsupported("buildDerivation"); -} - - -void Store::ensurePath(const StorePath & path) -{ - unsupported("ensurePath"); -} - - -void Store::repairPath(const StorePath & path) -{ - unsupported("repairPath"); -} - -} diff --git a/src/libstore/windows/pathlocks.cc b/src/libstore/windows/pathlocks.cc index 738057f68..00761a8c3 100644 --- a/src/libstore/windows/pathlocks.cc +++ b/src/libstore/windows/pathlocks.cc @@ -9,6 +9,8 @@ namespace nix { +using namespace nix::windows; + void deleteLockFile(const Path & path, Descriptor desc) { @@ -35,8 +37,13 @@ void PathLocks::unlock() AutoCloseFD openLockFile(const Path & path, bool create) { AutoCloseFD desc = CreateFileA( - path.c_str(), GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, - create ? OPEN_ALWAYS : OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL | FILE_FLAG_POSIX_SEMANTICS, NULL); + path.c_str(), + GENERIC_READ | GENERIC_WRITE, + FILE_SHARE_READ | FILE_SHARE_WRITE, + NULL, + create ? OPEN_ALWAYS : OPEN_EXISTING, + FILE_ATTRIBUTE_NORMAL | FILE_FLAG_POSIX_SEMANTICS, + NULL); if (desc.get() == INVALID_HANDLE_VALUE) warn("%s: %s", path, std::to_string(GetLastError())); diff --git a/src/libstore/worker-protocol-connection.cc b/src/libstore/worker-protocol-connection.cc new file mode 100644 index 000000000..072bae8da --- /dev/null +++ b/src/libstore/worker-protocol-connection.cc @@ -0,0 +1,280 @@ +#include "worker-protocol-connection.hh" +#include "worker-protocol-impl.hh" +#include "build-result.hh" +#include "derivations.hh" + +namespace nix { + +WorkerProto::BasicClientConnection::~BasicClientConnection() +{ + try { + to.flush(); + } catch (...) 
{ + ignoreException(); + } +} + +static Logger::Fields readFields(Source & from) +{ + Logger::Fields fields; + size_t size = readInt(from); + for (size_t n = 0; n < size; n++) { + auto type = (decltype(Logger::Field::type)) readInt(from); + if (type == Logger::Field::tInt) + fields.push_back(readNum(from)); + else if (type == Logger::Field::tString) + fields.push_back(readString(from)); + else + throw Error("got unsupported field type %x from Nix daemon", (int) type); + } + return fields; +} + +std::exception_ptr WorkerProto::BasicClientConnection::processStderrReturn(Sink * sink, Source * source, bool flush) +{ + if (flush) + to.flush(); + + std::exception_ptr ex; + + while (true) { + + auto msg = readNum(from); + + if (msg == STDERR_WRITE) { + auto s = readString(from); + if (!sink) + throw Error("no sink"); + (*sink)(s); + } + + else if (msg == STDERR_READ) { + if (!source) + throw Error("no source"); + size_t len = readNum(from); + auto buf = std::make_unique(len); + writeString({(const char *) buf.get(), source->read(buf.get(), len)}, to); + to.flush(); + } + + else if (msg == STDERR_ERROR) { + if (GET_PROTOCOL_MINOR(daemonVersion) >= 26) { + ex = std::make_exception_ptr(readError(from)); + } else { + auto error = readString(from); + unsigned int status = readInt(from); + ex = std::make_exception_ptr(Error(status, error)); + } + break; + } + + else if (msg == STDERR_NEXT) + printError(chomp(readString(from))); + + else if (msg == STDERR_START_ACTIVITY) { + auto act = readNum(from); + auto lvl = (Verbosity) readInt(from); + auto type = (ActivityType) readInt(from); + auto s = readString(from); + auto fields = readFields(from); + auto parent = readNum(from); + logger->startActivity(act, lvl, type, s, fields, parent); + } + + else if (msg == STDERR_STOP_ACTIVITY) { + auto act = readNum(from); + logger->stopActivity(act); + } + + else if (msg == STDERR_RESULT) { + auto act = readNum(from); + auto type = (ResultType) readInt(from); + auto fields = readFields(from); + logger->result(act, type, fields); + } + + else if (msg == STDERR_LAST) + break; + + else + throw Error("got unknown message type %x from Nix daemon", msg); + } + + if (!ex) { + return ex; + } else { + try { + std::rethrow_exception(ex); + } catch (const Error & e) { + // Nix versions before #4628 did not have an adequate + // behavior for reporting that the derivation format was + // upgraded. To avoid having to add compatibility logic in + // many places, we expect to catch almost all occurrences of + // the old incomprehensible error here, so that we can + // explain to users what's going on when their daemon is + // older than #4628 (2023). + if (experimentalFeatureSettings.isEnabled(Xp::DynamicDerivations) + && GET_PROTOCOL_MINOR(daemonVersion) <= 35) { + auto m = e.msg(); + if (m.find("parsing derivation") != std::string::npos && m.find("expected string") != std::string::npos + && m.find("Derive([") != std::string::npos) + return std::make_exception_ptr(Error( + "%s, this might be because the daemon is too old to understand dependencies on dynamic derivations. 
Check to see if the raw derivation is in the form '%s'", + std::move(m), + "Drv WithVersion(..)")); + } + return std::current_exception(); + } + } +} + +void WorkerProto::BasicClientConnection::processStderr(bool * daemonException, Sink * sink, Source * source, bool flush) +{ + auto ex = processStderrReturn(sink, source, flush); + if (ex) { + *daemonException = true; + std::rethrow_exception(ex); + } +} + +WorkerProto::Version +WorkerProto::BasicClientConnection::handshake(BufferedSink & to, Source & from, WorkerProto::Version localVersion) +{ + to << WORKER_MAGIC_1 << localVersion; + to.flush(); + + unsigned int magic = readInt(from); + if (magic != WORKER_MAGIC_2) + throw Error("nix-daemon protocol mismatch from"); + auto daemonVersion = readInt(from); + + if (GET_PROTOCOL_MAJOR(daemonVersion) != GET_PROTOCOL_MAJOR(PROTOCOL_VERSION)) + throw Error("Nix daemon protocol version not supported"); + if (GET_PROTOCOL_MINOR(daemonVersion) < 10) + throw Error("the Nix daemon version is too old"); + to << localVersion; + + return std::min(daemonVersion, localVersion); +} + +WorkerProto::Version +WorkerProto::BasicServerConnection::handshake(BufferedSink & to, Source & from, WorkerProto::Version localVersion) +{ + unsigned int magic = readInt(from); + if (magic != WORKER_MAGIC_1) + throw Error("protocol mismatch"); + to << WORKER_MAGIC_2 << localVersion; + to.flush(); + auto clientVersion = readInt(from); + return std::min(clientVersion, localVersion); +} + +WorkerProto::ClientHandshakeInfo WorkerProto::BasicClientConnection::postHandshake(const StoreDirConfig & store) +{ + WorkerProto::ClientHandshakeInfo res; + + if (GET_PROTOCOL_MINOR(daemonVersion) >= 14) { + // Obsolete CPU affinity. + to << 0; + } + + if (GET_PROTOCOL_MINOR(daemonVersion) >= 11) + to << false; // obsolete reserveSpace + + if (GET_PROTOCOL_MINOR(daemonVersion) >= 33) + to.flush(); + + return WorkerProto::Serialise::read(store, *this); +} + +void WorkerProto::BasicServerConnection::postHandshake(const StoreDirConfig & store, const ClientHandshakeInfo & info) +{ + if (GET_PROTOCOL_MINOR(clientVersion) >= 14 && readInt(from)) { + // Obsolete CPU affinity. + readInt(from); + } + + if (GET_PROTOCOL_MINOR(clientVersion) >= 11) + readInt(from); // obsolete reserveSpace + + WorkerProto::write(store, *this, info); +} + +UnkeyedValidPathInfo WorkerProto::BasicClientConnection::queryPathInfo( + const StoreDirConfig & store, bool * daemonException, const StorePath & path) +{ + to << WorkerProto::Op::QueryPathInfo << store.printStorePath(path); + try { + processStderr(daemonException); + } catch (Error & e) { + // Ugly backwards compatibility hack. 
+ if (e.msg().find("is not valid") != std::string::npos) + throw InvalidPath(std::move(e.info())); + throw; + } + if (GET_PROTOCOL_MINOR(daemonVersion) >= 17) { + bool valid; + from >> valid; + if (!valid) + throw InvalidPath("path '%s' is not valid", store.printStorePath(path)); + } + return WorkerProto::Serialise::read(store, *this); +} + +StorePathSet WorkerProto::BasicClientConnection::queryValidPaths( + const StoreDirConfig & store, bool * daemonException, const StorePathSet & paths, SubstituteFlag maybeSubstitute) +{ + assert(GET_PROTOCOL_MINOR(daemonVersion) >= 12); + to << WorkerProto::Op::QueryValidPaths; + WorkerProto::write(store, *this, paths); + if (GET_PROTOCOL_MINOR(daemonVersion) >= 27) { + to << maybeSubstitute; + } + processStderr(daemonException); + return WorkerProto::Serialise::read(store, *this); +} + +void WorkerProto::BasicClientConnection::addTempRoot( + const StoreDirConfig & store, bool * daemonException, const StorePath & path) +{ + to << WorkerProto::Op::AddTempRoot << store.printStorePath(path); + processStderr(daemonException); + readInt(from); +} + +void WorkerProto::BasicClientConnection::putBuildDerivationRequest( + const StoreDirConfig & store, + bool * daemonException, + const StorePath & drvPath, + const BasicDerivation & drv, + BuildMode buildMode) +{ + to << WorkerProto::Op::BuildDerivation << store.printStorePath(drvPath); + writeDerivation(to, store, drv); + to << buildMode; +} + +BuildResult +WorkerProto::BasicClientConnection::getBuildDerivationResponse(const StoreDirConfig & store, bool * daemonException) +{ + return WorkerProto::Serialise::read(store, *this); +} + +void WorkerProto::BasicClientConnection::narFromPath( + const StoreDirConfig & store, bool * daemonException, const StorePath & path, std::function fun) +{ + to << WorkerProto::Op::NarFromPath << store.printStorePath(path); + processStderr(daemonException); + + fun(from); +} + +void WorkerProto::BasicClientConnection::importPaths( + const StoreDirConfig & store, bool * daemonException, Source & source) +{ + to << WorkerProto::Op::ImportPaths; + processStderr(daemonException, 0, &source); + auto importedPaths = WorkerProto::Serialise::read(store, *this); + assert(importedPaths.size() <= importedPaths.size()); +} +} diff --git a/src/libstore/worker-protocol-connection.hh b/src/libstore/worker-protocol-connection.hh new file mode 100644 index 000000000..9dd723fd0 --- /dev/null +++ b/src/libstore/worker-protocol-connection.hh @@ -0,0 +1,187 @@ +#pragma once +///@file + +#include "worker-protocol.hh" +#include "store-api.hh" + +namespace nix { + +struct WorkerProto::BasicClientConnection +{ + /** + * Send with this. + */ + FdSink to; + + /** + * Receive with this. + */ + FdSource from; + + /** + * Worker protocol version used for the connection. + * + * Despite its name, it is actually the maximum version both + * sides support. (If the maximum doesn't exist, we would fail to + * establish a connection and produce a value of this type.) + */ + WorkerProto::Version daemonVersion; + + /** + * Flush to direction + */ + virtual ~BasicClientConnection(); + + virtual void closeWrite() = 0; + + std::exception_ptr processStderrReturn(Sink * sink = 0, Source * source = 0, bool flush = true); + + void processStderr(bool * daemonException, Sink * sink = 0, Source * source = 0, bool flush = true); + + /** + * Establishes connection, negotiating version. + * + * @return the version provided by the other side of the + * connection. 
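+ *
+ * A minimal client-side sketch (illustrative; assumes a `BasicClientConnection conn`
+ * whose `to`/`from` are already connected to a daemon):
+ *
+ *     conn.daemonVersion = BasicClientConnection::handshake(
+ *         conn.to, conn.from, PROTOCOL_VERSION);
+ *     // the result is the minimum of what both sides support, so newer
+ *     // features are gated on it, e.g.:
+ *     bool canQueryTrust = GET_PROTOCOL_MINOR(conn.daemonVersion) >= 35;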
+ * + * @param to Taken by reference to allow for various error handling + * mechanisms. + * + * @param from Taken by reference to allow for various error + * handling mechanisms. + * + * @param localVersion Our version which is sent over + */ + static Version handshake(BufferedSink & to, Source & from, WorkerProto::Version localVersion); + + /** + * After calling handshake, must call this to exchange some basic + * information about the connection. + */ + ClientHandshakeInfo postHandshake(const StoreDirConfig & store); + + /** + * Coercion to `WorkerProto::ReadConn`. This makes it easy to use the + * factored out serve protocol serializers with a + * `LegacySSHStore::Connection`. + * + * The serve protocol connection types are unidirectional, unlike + * this type. + */ + operator WorkerProto::ReadConn() + { + return WorkerProto::ReadConn{ + .from = from, + .version = daemonVersion, + }; + } + + /** + * Coercion to `WorkerProto::WriteConn`. This makes it easy to use the + * factored out serve protocol serializers with a + * `LegacySSHStore::Connection`. + * + * The serve protocol connection types are unidirectional, unlike + * this type. + */ + operator WorkerProto::WriteConn() + { + return WorkerProto::WriteConn{ + .to = to, + .version = daemonVersion, + }; + } + + void addTempRoot(const StoreDirConfig & remoteStore, bool * daemonException, const StorePath & path); + + StorePathSet queryValidPaths( + const StoreDirConfig & remoteStore, + bool * daemonException, + const StorePathSet & paths, + SubstituteFlag maybeSubstitute); + + UnkeyedValidPathInfo queryPathInfo(const StoreDirConfig & store, bool * daemonException, const StorePath & path); + + void putBuildDerivationRequest( + const StoreDirConfig & store, + bool * daemonException, + const StorePath & drvPath, + const BasicDerivation & drv, + BuildMode buildMode); + + /** + * Get the response, must be paired with + * `putBuildDerivationRequest`. + */ + BuildResult getBuildDerivationResponse(const StoreDirConfig & store, bool * daemonException); + + void narFromPath( + const StoreDirConfig & store, + bool * daemonException, + const StorePath & path, + std::function fun); + + void importPaths(const StoreDirConfig & store, bool * daemonException, Source & source); +}; + +struct WorkerProto::BasicServerConnection +{ + /** + * Send with this. + */ + FdSink & to; + + /** + * Receive with this. + */ + FdSource & from; + + /** + * Worker protocol version used for the connection. + * + * Despite its name, it is actually the maximum version both + * sides support. (If the maximum doesn't exist, we would fail to + * establish a connection and produce a value of this type.) + */ + WorkerProto::Version clientVersion; + + operator WorkerProto::ReadConn() + { + return WorkerProto::ReadConn{ + .from = from, + .version = clientVersion, + }; + } + + operator WorkerProto::WriteConn() + { + return WorkerProto::WriteConn{ + .to = to, + .version = clientVersion, + }; + } + + /** + * Establishes connection, negotiating version. + * + * @return the version provided by the other side of the + * connection. + * + * @param to Taken by reference to allow for various error handling + * mechanisms. + * + * @param from Taken by reference to allow for various error + * handling mechanisms. + * + * @param localVersion Our version which is sent over + */ + static WorkerProto::Version handshake(BufferedSink & to, Source & from, WorkerProto::Version localVersion); + + /** + * After calling handshake, must call this to exchange some basic + * information about the connection.
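+ *
+ * A server-side sketch (illustrative; `trusted` is a hypothetical `TrustedFlag`
+ * value, and `nixVersion` is assumed to be the usual version constant):
+ *
+ *     conn.postHandshake(store, WorkerProto::ClientHandshakeInfo {
+ *         .daemonNixVersion = nixVersion,
+ *         .remoteTrustsUs = trusted,
+ *     });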
+ */ + void postHandshake(const StoreDirConfig & store, const ClientHandshakeInfo & info); +}; + +} diff --git a/src/libstore/worker-protocol.cc b/src/libstore/worker-protocol.cc index a50259d24..f06fb2893 100644 --- a/src/libstore/worker-protocol.cc +++ b/src/libstore/worker-protocol.cc @@ -14,6 +14,34 @@ namespace nix { /* protocol-specific definitions */ +BuildMode WorkerProto::Serialise::read(const StoreDirConfig & store, WorkerProto::ReadConn conn) +{ + auto temp = readNum(conn.from); + switch (temp) { + case 0: return bmNormal; + case 1: return bmRepair; + case 2: return bmCheck; + default: throw Error("Invalid build mode"); + } +} + +void WorkerProto::Serialise::write(const StoreDirConfig & store, WorkerProto::WriteConn conn, const BuildMode & buildMode) +{ + switch (buildMode) { + case bmNormal: + conn.to << uint8_t{0}; + break; + case bmRepair: + conn.to << uint8_t{1}; + break; + case bmCheck: + conn.to << uint8_t{2}; + break; + default: + assert(false); + }; +} + std::optional WorkerProto::Serialise>::read(const StoreDirConfig & store, WorkerProto::ReadConn conn) { auto temp = readNum(conn.from); @@ -222,4 +250,35 @@ void WorkerProto::Serialise::write(const StoreDirConfig & } } + +WorkerProto::ClientHandshakeInfo WorkerProto::Serialise::read(const StoreDirConfig & store, ReadConn conn) +{ + WorkerProto::ClientHandshakeInfo res; + + if (GET_PROTOCOL_MINOR(conn.version) >= 33) { + res.daemonNixVersion = readString(conn.from); + } + + if (GET_PROTOCOL_MINOR(conn.version) >= 35) { + res.remoteTrustsUs = WorkerProto::Serialise>::read(store, conn); + } else { + // We don't know the answer; protocol too old. + res.remoteTrustsUs = std::nullopt; + } + + return res; +} + +void WorkerProto::Serialise::write(const StoreDirConfig & store, WriteConn conn, const WorkerProto::ClientHandshakeInfo & info) +{ + if (GET_PROTOCOL_MINOR(conn.version) >= 33) { + assert(info.daemonNixVersion); + conn.to << *info.daemonNixVersion; + } + + if (GET_PROTOCOL_MINOR(conn.version) >= 35) { + WorkerProto::write(store, conn, info.remoteTrustsUs); + } +} + } diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh index 91d277b77..62a12d182 100644 --- a/src/libstore/worker-protocol.hh +++ b/src/libstore/worker-protocol.hh @@ -35,6 +35,7 @@ struct BuildResult; struct KeyedBuildResult; struct ValidPathInfo; struct UnkeyedValidPathInfo; +enum BuildMode : uint8_t; enum TrustedFlag : bool; @@ -76,6 +77,19 @@ struct WorkerProto Version version; }; + /** + * Stripped down serialization logic suitable for sharing with Hydra. + * + * @todo remove once Hydra uses Store abstraction consistently. + */ + struct BasicClientConnection; + struct BasicServerConnection; + + /** + * Extra information provided as part of protocol negotiation. + */ + struct ClientHandshakeInfo; + /** * Data type for canonical pairs of serialisers for the worker protocol. * @@ -166,6 +180,33 @@ enum struct WorkerProto::Op : uint64_t AddPermRoot = 47, }; +struct WorkerProto::ClientHandshakeInfo +{ + /** + * The version of the Nix daemon that is processing our requests. + * + * Do note, it may or may not be communicating with another daemon, + * rather than being an "end" `LocalStore` or similar. + */ + std::optional daemonNixVersion; + + /** + * Whether the remote side trusts us or not. + * + * 3 values: "yes", "no", or `std::nullopt` for "unknown". + * + * Note that the "remote side" might not be just the end daemon, but + * also an intermediary forwarder that can make its own trusting + * decisions.
This would be the intersection of all their trust + * decisions, since it takes only one link in the chain to start + * denying operations. + */ + std::optional remoteTrustsUs; + + bool operator == (const ClientHandshakeInfo &) const = default; +}; + /** * Convenience for sending operation codes. * @@ -215,9 +256,13 @@ DECLARE_WORKER_SERIALISER(ValidPathInfo); template<> DECLARE_WORKER_SERIALISER(UnkeyedValidPathInfo); template<> +DECLARE_WORKER_SERIALISER(BuildMode); +template<> DECLARE_WORKER_SERIALISER(std::optional); template<> DECLARE_WORKER_SERIALISER(std::optional); +template<> +DECLARE_WORKER_SERIALISER(WorkerProto::ClientHandshakeInfo); template DECLARE_WORKER_SERIALISER(std::vector); diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc index 04f777d00..d20936de4 100644 --- a/src/libutil/archive.cc +++ b/src/libutil/archive.cc @@ -315,13 +315,4 @@ void copyNAR(Source & source, Sink & sink) } -void copyPath(const Path & from, const Path & to) -{ - auto source = sinkToSource([&](Sink & sink) { - dumpPath(from, sink); - }); - restorePath(to, *source); -} - - } diff --git a/src/libutil/archive.hh b/src/libutil/archive.hh index 28c63bb85..bd70072ce 100644 --- a/src/libutil/archive.hh +++ b/src/libutil/archive.hh @@ -82,8 +82,6 @@ void restorePath(const Path & path, Source & source); */ void copyNAR(Source & source, Sink & sink); -void copyPath(const Path & from, const Path & to); - inline constexpr std::string_view narVersionMagic1 = "nix-archive-1"; diff --git a/src/libutil/args.cc b/src/libutil/args.cc index 243e3a5a6..c202facdf 100644 --- a/src/libutil/args.cc +++ b/src/libutil/args.cc @@ -268,8 +268,6 @@ void RootArgs::parseCmdline(const Strings & _cmdline, bool allowShebang) verbosity = lvlError; } - bool argsSeen = false; - // Heuristic to see if we're invoked as a shebang script, namely, // if we have at least one argument, it's the name of an // executable file, and it starts with "#!". @@ -336,10 +334,6 @@ void RootArgs::parseCmdline(const Strings & _cmdline, bool allowShebang) throw UsageError("unrecognised flag '%1%'", arg); } else { - if (!argsSeen) { - argsSeen = true; - initialFlagsProcessed(); - } pos = rewriteArgs(cmdline, pos); pendingArgs.push_back(*pos++); if (processArgs(pendingArgs, false)) @@ -349,8 +343,7 @@ void RootArgs::parseCmdline(const Strings & _cmdline, bool allowShebang) processArgs(pendingArgs, true); - if (!argsSeen) - initialFlagsProcessed(); + initialFlagsProcessed(); /* Now that we are done parsing, make sure that any experimental * feature required by the flags is enabled */ diff --git a/src/libutil/compression.cc b/src/libutil/compression.cc index d17401f27..d27028565 100644 --- a/src/libutil/compression.cc +++ b/src/libutil/compression.cc @@ -263,8 +263,13 @@ struct BrotliCompressionSink : ChunkedCompressionSink checkInterrupt(); if (!BrotliEncoderCompressStream( - state, data.data() ? BROTLI_OPERATION_PROCESS : BROTLI_OPERATION_FINISH, &avail_in, &next_in, - &avail_out, &next_out, nullptr)) + state, + data.data() ? 
BROTLI_OPERATION_PROCESS : BROTLI_OPERATION_FINISH, + &avail_in, + &next_in, + &avail_out, + &next_out, + nullptr)) throw CompressionError("error while compressing brotli compression"); if (avail_out < sizeof(outbuf) || avail_in == 0) { @@ -280,8 +285,8 @@ struct BrotliCompressionSink : ChunkedCompressionSink ref makeCompressionSink(const std::string & method, Sink & nextSink, const bool parallel, int level) { - std::vector la_supports = {"bzip2", "compress", "grzip", "gzip", "lrzip", "lz4", - "lzip", "lzma", "lzop", "xz", "zstd"}; + std::vector la_supports = { + "bzip2", "compress", "grzip", "gzip", "lrzip", "lz4", "lzip", "lzma", "lzop", "xz", "zstd"}; if (std::find(la_supports.begin(), la_supports.end(), method) != la_supports.end()) { return make_ref(nextSink, method, parallel, level); } diff --git a/src/libutil/current-process.cc b/src/libutil/current-process.cc index c88013b3c..6ca48220d 100644 --- a/src/libutil/current-process.cc +++ b/src/libutil/current-process.cc @@ -7,6 +7,7 @@ #include "file-system.hh" #include "processes.hh" #include "signals.hh" +#include #ifdef __APPLE__ # include @@ -59,15 +60,15 @@ unsigned int getMaxCPU() ////////////////////////////////////////////////////////////////////// -#ifndef _WIN32 -rlim_t savedStackSize = 0; +size_t savedStackSize = 0; -void setStackSize(rlim_t stackSize) +void setStackSize(size_t stackSize) { + #ifndef _WIN32 struct rlimit limit; if (getrlimit(RLIMIT_STACK, &limit) == 0 && limit.rlim_cur < stackSize) { savedStackSize = limit.rlim_cur; - limit.rlim_cur = std::min(stackSize, limit.rlim_max); + limit.rlim_cur = std::min(static_cast(stackSize), limit.rlim_max); if (setrlimit(RLIMIT_STACK, &limit) != 0) { logger->log( lvlError, @@ -81,8 +82,31 @@ void setStackSize(rlim_t stackSize) ); } } + #else + ULONG_PTR stackLow, stackHigh; + GetCurrentThreadStackLimits(&stackLow, &stackHigh); + ULONG maxStackSize = stackHigh - stackLow; + ULONG currStackSize = 0; + // This retrieves the current promised stack size + SetThreadStackGuarantee(&currStackSize); + if (currStackSize < stackSize) { + savedStackSize = currStackSize; + ULONG newStackSize = std::min(static_cast(stackSize), maxStackSize); + if (SetThreadStackGuarantee(&newStackSize) == 0) { + logger->log( + lvlError, + HintFmt( + "Failed to increase stack size from %1% to %2% (maximum allowed stack size: %3%): %4%", + savedStackSize, + stackSize, + maxStackSize, + std::to_string(GetLastError()) + ).str() + ); + } + } + #endif } -#endif void restoreProcessContext(bool restoreMounts) { diff --git a/src/libutil/current-process.hh b/src/libutil/current-process.hh index a5adb70cf..8286bf89d 100644 --- a/src/libutil/current-process.hh +++ b/src/libutil/current-process.hh @@ -17,12 +17,10 @@ namespace nix { */ unsigned int getMaxCPU(); -#ifndef _WIN32 // TODO implement on Windows, if needed. /** * Change the stack size. */ -void setStackSize(rlim_t stackSize); -#endif +void setStackSize(size_t stackSize); /** * Restore the original inherited Unix process context (such as signal diff --git a/src/libutil/error.hh b/src/libutil/error.hh index 0419f36d6..87d181c94 100644 --- a/src/libutil/error.hh +++ b/src/libutil/error.hh @@ -206,11 +206,11 @@ MakeError(SystemError, Error); * * Throw this, but prefer not to catch this, and catch `SystemError` * instead. This allows implementations to freely switch between this - * and `WinError` without breaking catch blocks. + * and `windows::WinError` without breaking catch blocks. 
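+ *
+ * A portable sketch (illustrative; `fd` and `data` are placeholders): code that
+ * must build on both platforms can catch the `NativeSysError` alias defined below:
+ *
+ *     try {
+ *         writeFull(fd, data);
+ *     } catch (NativeSysError & e) {
+ *         // SysError on Unix, windows::WinError on Windows
+ *     }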
* * However, it is permissible to catch this and rethrow so long as * certain conditions are not met (e.g. to catch only if `errNo = - * EFooBar`). In that case, try to also catch the equivalent `WinError` + * EFooBar`). In that case, try to also catch the equivalent `windows::WinError` * code. * * @todo Rename this to `PosixError` or similar. At this point Windows @@ -248,7 +248,9 @@ public: }; #ifdef _WIN32 -class WinError; +namespace windows { + class WinError; +} #endif /** @@ -258,7 +260,7 @@ class WinError; */ using NativeSysError = #ifdef _WIN32 - WinError + windows::WinError #else SysError #endif diff --git a/src/libutil/file-content-address.hh b/src/libutil/file-content-address.hh index cd63be551..e216ee4a7 100644 --- a/src/libutil/file-content-address.hh +++ b/src/libutil/file-content-address.hh @@ -12,16 +12,28 @@ struct SourcePath; /** * An enumeration of the ways we can serialize file system * objects. + * + * See `file-system-object/content-address.md#serial` in the manual for + * a user-facing description of this concept, but note that this type is also + * used for storing or sending copies; not just for addressing. + * Note also that there are other content addressing methods that don't + * correspond to a serialisation method. */ enum struct FileSerialisationMethod : uint8_t { /** * Flat-file. The contents of a single file exactly. + * + * See `file-system-object/content-address.md#serial-flat` in the + * manual. */ Flat, /** * Nix Archive. Serializes the file-system object in * Nix Archive format. + * + * See `file-system-object/content-address.md#serial-nix-archive` in + * the manual. */ Recursive, }; @@ -81,33 +93,32 @@ HashResult hashPath( /** * An enumeration of the ways we can ingest file system * objects, producing a hash or digest. + * + * See `file-system-object/content-address.md` in the manual for a + * user-facing description of this concept. */ enum struct FileIngestionMethod : uint8_t { /** * Hash `FileSerialisationMethod::Flat` serialisation. + * + * See `file-system-object/content-address.md#serial-flat` in the + * manual. */ Flat, /** - * Hash `FileSerialisationMethod::Git` serialisation. + * Hash `FileSerialisationMethod::Recursive` serialisation. + * + * See `file-system-object/content-address.md#serial-flat` in the + * manual. */ Recursive, /** - * Git hashing. In particular files are hashed as git "blobs", and - * directories are hashed as git "trees". + * Git hashing. * - * Unlike `Flat` and `Recursive`, this is not a hash of a single - * serialisation but a [Merkle - * DAG](https://en.wikipedia.org/wiki/Merkle_tree) of multiple - * rounds of serialisation and hashing. - * - * @note Git's data model is slightly different, in that a plain - * file doesn't have an executable bit, directory entries do - * instead. We decide treat a bare file as non-executable by fiat, - * as we do with `FileIngestionMethod::Flat` which also lacks this - * information. Thus, Git can encode some but all of Nix's "File - * System Objects", and this sort of hashing is likewise partial. + * See `file-system-object/content-address.md#serial-git` in the + * manual. 
*/ Git, }; diff --git a/src/libutil/file-system.cc b/src/libutil/file-system.cc index 39efa19fe..919bf5d50 100644 --- a/src/libutil/file-system.cc +++ b/src/libutil/file-system.cc @@ -222,26 +222,6 @@ Path readLink(const Path & path) } -std::vector readDirectory(const Path & path) -{ - std::vector entries; - entries.reserve(64); - - for (auto & entry : fs::directory_iterator{path}) { - checkInterrupt(); - entries.push_back(std::move(entry)); - } - - return entries; -} - - -fs::file_type getFileType(const Path & path) -{ - return fs::symlink_status(path).type(); -} - - std::string readFile(const Path & path) { AutoCloseFD fd = toDescriptor(open(path.c_str(), O_RDONLY @@ -588,7 +568,7 @@ void replaceSymlink(const Path & target, const Path & link) throw; } - renameFile(tmp, link); + std::filesystem::rename(tmp, link); break; } @@ -611,29 +591,29 @@ static void setWriteTime(const fs::path & p, const struct stat & st) } #endif -void copy(const fs::directory_entry & from, const fs::path & to, bool andDelete) +void copyFile(const fs::path & from, const fs::path & to, bool andDelete) { #ifndef _WIN32 // TODO: Rewrite the `is_*` to use `symlink_status()` - auto statOfFrom = lstat(from.path().c_str()); + auto statOfFrom = lstat(from.c_str()); #endif - auto fromStatus = from.symlink_status(); + auto fromStatus = fs::symlink_status(from); // Mark the directory as writable so that we can delete its children if (andDelete && fs::is_directory(fromStatus)) { - fs::permissions(from.path(), fs::perms::owner_write, fs::perm_options::add | fs::perm_options::nofollow); + fs::permissions(from, fs::perms::owner_write, fs::perm_options::add | fs::perm_options::nofollow); } if (fs::is_symlink(fromStatus) || fs::is_regular_file(fromStatus)) { - fs::copy(from.path(), to, fs::copy_options::copy_symlinks | fs::copy_options::overwrite_existing); + fs::copy(from, to, fs::copy_options::copy_symlinks | fs::copy_options::overwrite_existing); } else if (fs::is_directory(fromStatus)) { fs::create_directory(to); - for (auto & entry : fs::directory_iterator(from.path())) { - copy(entry, to / entry.path().filename(), andDelete); + for (auto & entry : fs::directory_iterator(from)) { + copyFile(entry, to / entry.path().filename(), andDelete); } } else { - throw Error("file '%s' has an unsupported type", from.path()); + throw Error("file '%s' has an unsupported type", from); } #ifndef _WIN32 @@ -641,25 +621,15 @@ void copy(const fs::directory_entry & from, const fs::path & to, bool andDelete) #endif if (andDelete) { if (!fs::is_symlink(fromStatus)) - fs::permissions(from.path(), fs::perms::owner_write, fs::perm_options::add | fs::perm_options::nofollow); - fs::remove(from.path()); + fs::permissions(from, fs::perms::owner_write, fs::perm_options::add | fs::perm_options::nofollow); + fs::remove(from); } } -void copyFile(const Path & oldPath, const Path & newPath, bool andDelete) -{ - return copy(fs::directory_entry(fs::path(oldPath)), fs::path(newPath), andDelete); -} - -void renameFile(const Path & oldName, const Path & newName) -{ - fs::rename(oldName, newName); -} - void moveFile(const Path & oldName, const Path & newName) { try { - renameFile(oldName, newName); + std::filesystem::rename(oldName, newName); } catch (fs::filesystem_error & e) { auto oldPath = fs::path(oldName); auto newPath = fs::path(newName); @@ -673,8 +643,8 @@ void moveFile(const Path & oldName, const Path & newName) if (e.code().value() == EXDEV) { fs::remove(newPath); warn("Can’t rename %s as %s, copying instead", oldName, newName); - 
copy(fs::directory_entry(oldPath), tempCopyTarget, true); - renameFile( + copyFile(oldPath, tempCopyTarget, true); + std::filesystem::rename( os_string_to_string(PathViewNG { tempCopyTarget }), os_string_to_string(PathViewNG { newPath })); } diff --git a/src/libutil/file-system.hh b/src/libutil/file-system.hh index 4536accc3..c6b6ecedb 100644 --- a/src/libutil/file-system.hh +++ b/src/libutil/file-system.hh @@ -20,8 +20,6 @@ #endif #include -#include - #include #include #include @@ -122,14 +120,6 @@ Path readLink(const Path & path); */ Descriptor openDirectory(const std::filesystem::path & path); -/** - * Read the contents of a directory. The entries `.` and `..` are - * removed. - */ -std::vector readDirectory(const Path & path); - -std::filesystem::file_type getFileType(const Path & path); - /** * Read the contents of a file into a string. */ @@ -177,8 +167,6 @@ void createSymlink(const Path & target, const Path & link); */ void replaceSymlink(const Path & target, const Path & link); -void renameFile(const Path & src, const Path & dst); - /** * Similar to 'renameFile', but fallback to a copy+remove if `src` and `dst` * are on a different filesystem. @@ -194,7 +182,7 @@ void moveFile(const Path & src, const Path & dst); * with the guaranty that the destination will be “fresh”, with no stale inode * or file descriptor pointing to it). */ -void copyFile(const Path & oldPath, const Path & newPath, bool andDelete); +void copyFile(const std::filesystem::path & from, const std::filesystem::path & to, bool andDelete); /** * Automatic cleanup of resources. diff --git a/src/libutil/fmt.hh b/src/libutil/fmt.hh index c178257d4..ef44a8409 100644 --- a/src/libutil/fmt.hh +++ b/src/libutil/fmt.hh @@ -182,6 +182,8 @@ public: return *this; } + HintFmt & operator=(HintFmt const & rhs) = default; + std::string str() const { return fmt.str(); diff --git a/src/libutil/json-utils.cc b/src/libutil/json-utils.cc index 1b911bf75..dff068e07 100644 --- a/src/libutil/json-utils.cc +++ b/src/libutil/json-utils.cc @@ -39,12 +39,9 @@ std::optional optionalValueAt(const nlohmann::json::object_t & m } -std::optional getNullable(const nlohmann::json & value) +const nlohmann::json * getNullable(const nlohmann::json & value) { - if (value.is_null()) - return std::nullopt; - - return value.get(); + return value.is_null() ? nullptr : &value; } /** diff --git a/src/libutil/json-utils.hh b/src/libutil/json-utils.hh index 08c98cc8c..fe7a406cf 100644 --- a/src/libutil/json-utils.hh +++ b/src/libutil/json-utils.hh @@ -29,7 +29,7 @@ std::optional optionalValueAt(const nlohmann::json::object_t & v * Downcast the json object, failing with a nice error if the conversion fails. * See https://json.nlohmann.me/features/types/ */ -std::optional getNullable(const nlohmann::json & value); +const nlohmann::json * getNullable(const nlohmann::json & value); const nlohmann::json::object_t & getObject(const nlohmann::json & value); const nlohmann::json::array_t & getArray(const nlohmann::json & value); const nlohmann::json::string_t & getString(const nlohmann::json & value); diff --git a/src/libutil/linux/cgroup.cc b/src/libutil/linux/cgroup.cc index 619ef7764..ec4077478 100644 --- a/src/libutil/linux/cgroup.cc +++ b/src/libutil/linux/cgroup.cc @@ -64,7 +64,7 @@ static CgroupStats destroyCgroup(const std::filesystem::path & cgroup, bool retu /* Otherwise, manually kill every process in the subcgroups and this cgroup. 
*/ - for (auto & entry : readDirectory(cgroup)) { + for (auto & entry : std::filesystem::directory_iterator{cgroup}) { if (entry.symlink_status().type() != std::filesystem::file_type::directory) continue; destroyCgroup(cgroup / entry.path().filename(), false); } diff --git a/src/libutil/linux/namespaces.cc b/src/libutil/linux/namespaces.cc index f8289ef39..d4766cbba 100644 --- a/src/libutil/linux/namespaces.cc +++ b/src/libutil/linux/namespaces.cc @@ -137,10 +137,10 @@ void restoreMountNamespace() } } -void unshareFilesystem() +void tryUnshareFilesystem() { - if (unshare(CLONE_FS) != 0 && errno != EPERM) - throw SysError("unsharing filesystem state in download thread"); + if (unshare(CLONE_FS) != 0 && errno != EPERM && errno != ENOSYS) + throw SysError("unsharing filesystem state"); } } diff --git a/src/libutil/linux/namespaces.hh b/src/libutil/linux/namespaces.hh index ef3c9123f..208920b80 100644 --- a/src/libutil/linux/namespaces.hh +++ b/src/libutil/linux/namespaces.hh @@ -20,11 +20,13 @@ void saveMountNamespace(); void restoreMountNamespace(); /** - * Cause this thread to not share any FS attributes with the main + * Cause this thread to try to not share any FS attributes with the main * thread, because this causes setns() in restoreMountNamespace() to * fail. + * + * This is best effort -- EPERM and ENOSYS failures are just ignored. */ -void unshareFilesystem(); +void tryUnshareFilesystem(); bool userNamespacesSupported(); diff --git a/src/libutil/logging.cc b/src/libutil/logging.cc index 2511c8849..5fa01f0d9 100644 --- a/src/libutil/logging.cc +++ b/src/libutil/logging.cc @@ -8,6 +8,7 @@ #include "position.hh" #include +#include #include #include diff --git a/src/libutil/muxable-pipe.hh b/src/libutil/muxable-pipe.hh new file mode 100644 index 000000000..53ac39170 --- /dev/null +++ b/src/libutil/muxable-pipe.hh @@ -0,0 +1,82 @@ +#pragma once +///@file + +#include "file-descriptor.hh" +#ifdef _WIN32 +# include "windows-async-pipe.hh" +#endif + +#ifndef _WIN32 +# include +#else +# include +# include "windows-error.hh" +#endif + +namespace nix { + +/** + * A "muxable pipe" is a type of pipe supporting endpoints that wait + * for events on multiple pipes at once. + * + * On Unix, this is just a regular anonymous pipe. On Windows, this has + * to be a named pipe because we need I/O Completion Ports to wait on + * multiple pipes. + */ +using MuxablePipe = +#ifndef _WIN32 + Pipe +#else + windows::AsyncPipe +#endif + ; + +/** + * Use poll() (Unix) / I/O Completion Ports (Windows) to wait for the + * input side of any logger pipe to become `available'. Note that + * `available' (i.e., non-blocking) includes EOF. + */ +struct MuxablePipePollState +{ +#ifndef _WIN32 + std::vector pollStatus; + std::map fdToPollStatus; +#else + OVERLAPPED_ENTRY oentries[0x20] = {0}; + ULONG removed; + bool gotEOF = false; + +#endif + + /** + * Check for ready (Unix) / completed (Windows) operations + */ + void poll( +#ifdef _WIN32 + HANDLE ioport, +#endif + std::optional timeout); + + using CommChannel = +#ifndef _WIN32 + Descriptor +#else + windows::AsyncPipe * +#endif + ; + + /** + * Process ready (Unix) / completed (Windows) operations, + * calling the callbacks as needed. + * + * @param handleRead callback to be passed read data. + * + * @param handleEOF callback for when the `MuxablePipe` has closed.
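+ *
+ * A minimal Unix-side usage sketch (illustrative; assumes the usual
+ * `pollfd`-based members and a `std::set<Descriptor> channels` of read
+ * descriptors; `consume` and `markClosed` are placeholders):
+ *
+ *     MuxablePipePollState state;
+ *     for (auto & fd : channels) {
+ *         state.fdToPollStatus[fd] = state.pollStatus.size();
+ *         state.pollStatus.push_back({.fd = fd, .events = POLLIN});
+ *     }
+ *     state.poll(std::nullopt);
+ *     state.iterate(
+ *         channels,
+ *         [&](Descriptor fd, std::string_view data) { consume(data); },
+ *         [&](Descriptor fd) { markClosed(fd); });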
+ */ + void iterate( + std::set & channels, + std::function handleRead, + std::function handleEOF); +}; + +} diff --git a/src/libutil/posix-source-accessor.cc b/src/libutil/posix-source-accessor.cc index aa13f4c56..225fc852c 100644 --- a/src/libutil/posix-source-accessor.cc +++ b/src/libutil/posix-source-accessor.cc @@ -132,7 +132,7 @@ SourceAccessor::DirEntries PosixSourceAccessor::readDirectory(const CanonPath & { assertNoSymlinks(path); DirEntries res; - for (auto & entry : nix::readDirectory(makeAbsPath(path).string())) { + for (auto & entry : std::filesystem::directory_iterator{makeAbsPath(path)}) { auto type = [&]() -> std::optional { std::filesystem::file_type nativeType; try { diff --git a/src/libutil/processes.hh b/src/libutil/processes.hh index e319f79e0..168fcaa55 100644 --- a/src/libutil/processes.hh +++ b/src/libutil/processes.hh @@ -12,8 +12,6 @@ #include #include -#include - #include #include #include @@ -118,8 +116,6 @@ public: { } }; -#ifndef _WIN32 - /** * Convert the exit status of a child as returned by wait() into an * error string. @@ -128,6 +124,4 @@ std::string statusToString(int status); bool statusOk(int status); -#endif - } diff --git a/src/libutil/ref.hh b/src/libutil/ref.hh index 5d0c3696d..03aa64273 100644 --- a/src/libutil/ref.hh +++ b/src/libutil/ref.hh @@ -77,6 +77,8 @@ public: return ref((std::shared_ptr) p); } + ref & operator=(ref const & rhs) = default; + bool operator == (const ref & other) const { return p == other.p; diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc index 5ea27ccbe..36b99905a 100644 --- a/src/libutil/serialise.cc +++ b/src/libutil/serialise.cc @@ -136,7 +136,7 @@ size_t FdSource::readUnbuffered(char * data, size_t len) checkInterrupt(); if (!::ReadFile(fd, data, len, &n, NULL)) { _good = false; - throw WinError("ReadFile when FdSource::readUnbuffered"); + throw windows::WinError("ReadFile when FdSource::readUnbuffered"); } #else ssize_t n; diff --git a/src/libutil/source-accessor.cc b/src/libutil/source-accessor.cc index 66093d2cc..e797951c7 100644 --- a/src/libutil/source-accessor.cc +++ b/src/libutil/source-accessor.cc @@ -53,7 +53,7 @@ SourceAccessor::Stat SourceAccessor::lstat(const CanonPath & path) if (auto st = maybeLstat(path)) return *st; else - throw Error("path '%s' does not exist", showPath(path)); + throw FileNotFound("path '%s' does not exist", showPath(path)); } void SourceAccessor::setPathDisplay(std::string displayPrefix, std::string displaySuffix) diff --git a/src/libutil/source-accessor.hh b/src/libutil/source-accessor.hh index d7fb0af5f..cc8db01f5 100644 --- a/src/libutil/source-accessor.hh +++ b/src/libutil/source-accessor.hh @@ -29,6 +29,8 @@ enum class SymlinkResolution { Full, }; +MakeError(FileNotFound, Error); + /** * A read-only filesystem abstraction. 
This is used by the Nix * evaluator and elsewhere for accessing sources in various diff --git a/src/libutil/terminal.cc b/src/libutil/terminal.cc index 4dc280f8c..5d5ff7dcb 100644 --- a/src/libutil/terminal.cc +++ b/src/libutil/terminal.cc @@ -4,6 +4,8 @@ #if _WIN32 # include +# define WIN32_LEAN_AND_MEAN +# include # define isatty _isatty #else # include @@ -97,17 +99,26 @@ std::string filterANSIEscapes(std::string_view s, bool filterAll, unsigned int w static Sync> windowSize{{0, 0}}; -#ifndef _WIN32 void updateWindowSize() { + #ifndef _WIN32 struct winsize ws; if (ioctl(2, TIOCGWINSZ, &ws) == 0) { auto windowSize_(windowSize.lock()); windowSize_->first = ws.ws_row; windowSize_->second = ws.ws_col; } + #else + CONSOLE_SCREEN_BUFFER_INFO info; + // From https://stackoverflow.com/a/12642749 + if (GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &info) != 0) { + auto windowSize_(windowSize.lock()); + // From https://github.com/libuv/libuv/blob/v1.48.0/src/win/tty.c#L1130 + windowSize_->first = info.srWindow.Bottom - info.srWindow.Top + 1; + windowSize_->second = info.dwSize.X; + } + #endif } -#endif std::pair getWindowSize() diff --git a/src/libutil/terminal.hh b/src/libutil/terminal.hh index 628833283..902e75945 100644 --- a/src/libutil/terminal.hh +++ b/src/libutil/terminal.hh @@ -21,16 +21,13 @@ std::string filterANSIEscapes(std::string_view s, bool filterAll = false, unsigned int width = std::numeric_limits::max()); -#ifndef _WIN32 - /** - * Recalculate the window size, updating a global variable. Used in the - * `SIGWINCH` signal handler. + * Recalculate the window size, updating a global variable. + * + * Used in the `SIGWINCH` signal handler on Unix, for example. */ void updateWindowSize(); -#endif - /** * @return the number of rows and columns of the terminal. * diff --git a/src/libutil/unix/file-descriptor.cc b/src/libutil/unix/file-descriptor.cc index 222d077e5..84a33af81 100644 --- a/src/libutil/unix/file-descriptor.cc +++ b/src/libutil/unix/file-descriptor.cc @@ -124,7 +124,7 @@ void closeMostFDs(const std::set & exceptions) { #if __linux__ try { - for (auto & s : readDirectory("/proc/self/fd")) { + for (auto & s : std::filesystem::directory_iterator{"/proc/self/fd"}) { auto fd = std::stoi(s.path().filename()); if (!exceptions.count(fd)) { debug("closing leaked FD %d", fd); diff --git a/src/libutil/unix/muxable-pipe.cc b/src/libutil/unix/muxable-pipe.cc new file mode 100644 index 000000000..0104663c3 --- /dev/null +++ b/src/libutil/unix/muxable-pipe.cc @@ -0,0 +1,47 @@ +#include + +#include "logging.hh" +#include "util.hh" +#include "muxable-pipe.hh" + +namespace nix { + +void MuxablePipePollState::poll(std::optional timeout) +{ + if (::poll(pollStatus.data(), pollStatus.size(), timeout ? *timeout : -1) == -1) { + if (errno == EINTR) + return; + throw SysError("waiting for input"); + } +} + +void MuxablePipePollState::iterate( + std::set & channels, + std::function handleRead, + std::function handleEOF) +{ + std::set fds2(channels); + std::vector buffer(4096); + for (auto & k : fds2) { + const auto fdPollStatusId = get(fdToPollStatus, k); + assert(fdPollStatusId); + assert(*fdPollStatusId < pollStatus.size()); + if (pollStatus.at(*fdPollStatusId).revents) { + ssize_t rd = ::read(fromDescriptorReadOnly(k), buffer.data(), buffer.size()); + // FIXME: is there a cleaner way to handle pt close + // than EIO? Is this even standard? 
+ if (rd == 0 || (rd == -1 && errno == EIO)) { + handleEOF(k); + channels.erase(k); + } else if (rd == -1) { + if (errno != EINTR) + throw SysError("read failed"); + } else { + std::string_view data((char *) buffer.data(), rd); + handleRead(k, data); + } + } + } +} + +} diff --git a/src/libutil/url.hh b/src/libutil/url.hh index 24806bbff..6cd06e53d 100644 --- a/src/libutil/url.hh +++ b/src/libutil/url.hh @@ -33,6 +33,8 @@ std::string percentEncode(std::string_view s, std::string_view keep=""); std::map decodeQuery(const std::string & query); +std::string encodeQuery(const std::map & query); + ParsedURL parseURL(const std::string & url); /** diff --git a/src/libutil/util.cc b/src/libutil/util.cc index 16bca093c..698e181a1 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -7,6 +7,8 @@ #include #include +#include +#include #ifdef NDEBUG #error "Nix may not be built with assertions disabled (i.e. with -DNDEBUG)." @@ -111,6 +113,43 @@ std::string rewriteStrings(std::string s, const StringMap & rewrites) return s; } +template +std::optional string2Int(const std::string_view s) +{ + if (s.substr(0, 1) == "-" && !std::numeric_limits::is_signed) + return std::nullopt; + try { + return boost::lexical_cast(s.data(), s.size()); + } catch (const boost::bad_lexical_cast &) { + return std::nullopt; + } +} + +// Explicitly instantiated in one place for faster compilation +template std::optional string2Int(const std::string_view s); +template std::optional string2Int(const std::string_view s); +template std::optional string2Int(const std::string_view s); +template std::optional string2Int(const std::string_view s); +template std::optional string2Int(const std::string_view s); +template std::optional string2Int(const std::string_view s); +template std::optional string2Int(const std::string_view s); +template std::optional string2Int(const std::string_view s); +template std::optional string2Int(const std::string_view s); +template std::optional string2Int(const std::string_view s); + +template +std::optional string2Float(const std::string_view s) +{ + try { + return boost::lexical_cast(s.data(), s.size()); + } catch (const boost::bad_lexical_cast &) { + return std::nullopt; + } +} + +template std::optional string2Float(const std::string_view s); +template std::optional string2Float(const std::string_view s); + std::string renderSize(uint64_t value, bool align) { diff --git a/src/libutil/util.hh b/src/libutil/util.hh index 6db59ef20..23682ff7c 100644 --- a/src/libutil/util.hh +++ b/src/libutil/util.hh @@ -5,7 +5,6 @@ #include "error.hh" #include "logging.hh" -#include #include #include @@ -102,16 +101,7 @@ std::string rewriteStrings(std::string s, const StringMap & rewrites); * Parse a string into an integer. */ template -std::optional string2Int(const std::string_view s) -{ - if (s.substr(0, 1) == "-" && !std::numeric_limits::is_signed) - return std::nullopt; - try { - return boost::lexical_cast(s.data(), s.size()); - } catch (const boost::bad_lexical_cast &) { - return std::nullopt; - } -} +std::optional string2Int(const std::string_view s); /** * Like string2Int(), but support an optional suffix 'K', 'M', 'G' or @@ -148,14 +138,7 @@ std::string renderSize(uint64_t value, bool align = false); * Parse a string into a float. 
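 *
 * For example (illustrative):
 *
 *     string2Float<double>("1.5")          // yields std::optional containing 1.5
 *     string2Float<double>("not a float")  // yields std::nullopt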
*/ template -std::optional string2Float(const std::string_view s) -{ - try { - return boost::lexical_cast(s.data(), s.size()); - } catch (const boost::bad_lexical_cast &) { - return std::nullopt; - } -} +std::optional string2Float(const std::string_view s); /** diff --git a/src/libutil/windows/file-descriptor.cc b/src/libutil/windows/file-descriptor.cc index 26f769b66..b5c21ad32 100644 --- a/src/libutil/windows/file-descriptor.cc +++ b/src/libutil/windows/file-descriptor.cc @@ -14,6 +14,8 @@ namespace nix { +using namespace nix::windows; + std::string readFile(HANDLE handle) { LARGE_INTEGER li; diff --git a/src/libutil/windows/file-system.cc b/src/libutil/windows/file-system.cc index 8002dd75e..b15355efe 100644 --- a/src/libutil/windows/file-system.cc +++ b/src/libutil/windows/file-system.cc @@ -5,8 +5,13 @@ namespace nix { Descriptor openDirectory(const std::filesystem::path & path) { return CreateFileW( - path.c_str(), GENERIC_READ, FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, NULL, OPEN_EXISTING, - FILE_FLAG_BACKUP_SEMANTICS, NULL); + path.c_str(), + GENERIC_READ, + FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, + NULL, + OPEN_EXISTING, + FILE_FLAG_BACKUP_SEMANTICS, + NULL); } } diff --git a/src/libutil/windows/muxable-pipe.cc b/src/libutil/windows/muxable-pipe.cc new file mode 100644 index 000000000..91a321f7c --- /dev/null +++ b/src/libutil/windows/muxable-pipe.cc @@ -0,0 +1,70 @@ +#include +#include "windows-error.hh" + +#include "logging.hh" +#include "util.hh" +#include "muxable-pipe.hh" + +namespace nix { + +void MuxablePipePollState::poll(HANDLE ioport, std::optional timeout) +{ + /* We are on at least Windows Vista / Server 2008 and can get many + (countof(oentries)) statuses in one API call. */ + if (!GetQueuedCompletionStatusEx( + ioport, oentries, sizeof(oentries) / sizeof(*oentries), &removed, timeout ? *timeout : INFINITE, false)) { + windows::WinError winError("GetQueuedCompletionStatusEx"); + if (winError.lastError != WAIT_TIMEOUT) + throw winError; + assert(removed == 0); + } else { + assert(0 < removed && removed <= sizeof(oentries) / sizeof(*oentries)); + } +} + +void MuxablePipePollState::iterate( + std::set & channels, + std::function handleRead, + std::function handleEOF) +{ + auto p = channels.begin(); + while (p != channels.end()) { + decltype(p) nextp = p; + ++nextp; + for (ULONG i = 0; i < removed; i++) { + if (oentries[i].lpCompletionKey == ((ULONG_PTR) ((*p)->readSide.get()) ^ 0x5555)) { + printMsg(lvlVomit, "read %s bytes", oentries[i].dwNumberOfBytesTransferred); + if (oentries[i].dwNumberOfBytesTransferred > 0) { + std::string data{ + (char *) (*p)->buffer.data(), + oentries[i].dwNumberOfBytesTransferred, + }; + handleRead((*p)->readSide.get(), data); + } + + if (gotEOF) { + handleEOF((*p)->readSide.get()); + nextp = channels.erase(p); // no need to maintain `channels`? + } else { + BOOL rc = ReadFile( + (*p)->readSide.get(), (*p)->buffer.data(), (*p)->buffer.size(), &(*p)->got, &(*p)->overlapped); + if (rc) { + // here is possible (but not obligatory) to call + // `handleRead` and repeat ReadFile immediately + } else { + windows::WinError winError("ReadFile(%s, ..)", (*p)->readSide.get()); + if (winError.lastError == ERROR_BROKEN_PIPE) { + handleEOF((*p)->readSide.get()); + nextp = channels.erase(p); // no need to maintain `channels` ? 
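The util.cc / util.hh hunks above move the bodies of `string2Int()` and `string2Float()` out of the header and explicitly instantiate them for the supported numeric types, so each template is compiled once instead of in every including translation unit. A compressed single-file sketch of that declaration / explicit-instantiation pattern, assuming Boost is available (`parseInt` is a hypothetical stand-in, not Nix's helper):

```cpp
#include <cstdint>
#include <iostream>
#include <limits>
#include <optional>
#include <string_view>
#include <boost/lexical_cast.hpp>

// "Header" part: other translation units only see the declaration.
template<typename N>
std::optional<N> parseInt(std::string_view s);

// "Source file" part: the definition...
template<typename N>
std::optional<N> parseInt(std::string_view s)
{
    if (s.substr(0, 1) == "-" && !std::numeric_limits<N>::is_signed)
        return std::nullopt;
    try {
        return boost::lexical_cast<N>(s.data(), s.size());
    } catch (const boost::bad_lexical_cast &) {
        return std::nullopt;
    }
}

// ...plus one explicit instantiation per supported type.
template std::optional<int> parseInt<int>(std::string_view);
template std::optional<uint64_t> parseInt<uint64_t>(std::string_view);

int main()
{
    std::cout << parseInt<int>("42").value_or(-1) << "\n";     // 42
    std::cout << parseInt<uint64_t>("-1").has_value() << "\n"; // 0 (negative rejected)
}
```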
+ } else if (winError.lastError != ERROR_IO_PENDING) + throw winError; + } + } + break; + } + } + p = nextp; + } +} + +} diff --git a/src/libutil/windows/processes.cc b/src/libutil/windows/processes.cc index 5ef4ed1e4..44a32f6a1 100644 --- a/src/libutil/windows/processes.cc +++ b/src/libutil/windows/processes.cc @@ -16,16 +16,6 @@ #include #include -#ifdef __APPLE__ -# include -#endif - -#ifdef __linux__ -# include -# include -#endif - - namespace nix { std::string runProgram(Path program, bool lookupPath, const Strings & args, @@ -34,15 +24,31 @@ std::string runProgram(Path program, bool lookupPath, const Strings & args, throw UnimplementedError("Cannot shell out to git on Windows yet"); } + // Output = error code + "standard out" output stream std::pair runProgram(RunOptions && options) { throw UnimplementedError("Cannot shell out to git on Windows yet"); } + void runProgram2(const RunOptions & options) { throw UnimplementedError("Cannot shell out to git on Windows yet"); } +std::string statusToString(int status) +{ + if (status != 0) + return fmt("with exit code %d", status); + else + return "succeeded"; +} + + +bool statusOk(int status) +{ + return status == 0; +} + } diff --git a/src/libutil/windows/users.cc b/src/libutil/windows/users.cc index 1792ff1a1..db6c42df3 100644 --- a/src/libutil/windows/users.cc +++ b/src/libutil/windows/users.cc @@ -9,6 +9,8 @@ namespace nix { +using namespace nix::windows; + std::string getUserName() { // Get the required buffer size diff --git a/src/libutil/windows/windows-async-pipe.cc b/src/libutil/windows/windows-async-pipe.cc new file mode 100644 index 000000000..4fa57ca36 --- /dev/null +++ b/src/libutil/windows/windows-async-pipe.cc @@ -0,0 +1,49 @@ +#include "windows-async-pipe.hh" +#include "windows-error.hh" + +namespace nix::windows { + +void AsyncPipe::createAsyncPipe(HANDLE iocp) +{ + // std::cerr << (format("-----AsyncPipe::createAsyncPipe(%x)") % iocp) << std::endl; + + buffer.resize(0x1000); + memset(&overlapped, 0, sizeof(overlapped)); + + std::string pipeName = fmt("\\\\.\\pipe\\nix-%d-%p", GetCurrentProcessId(), (void *) this); + + readSide = CreateNamedPipeA( + pipeName.c_str(), + PIPE_ACCESS_INBOUND | FILE_FLAG_OVERLAPPED, + PIPE_TYPE_BYTE, + PIPE_UNLIMITED_INSTANCES, + 0, + 0, + INFINITE, + NULL); + if (!readSide) + throw WinError("CreateNamedPipeA(%s)", pipeName); + + HANDLE hIocp = CreateIoCompletionPort(readSide.get(), iocp, (ULONG_PTR) (readSide.get()) ^ 0x5555, 0); + if (hIocp != iocp) + throw WinError("CreateIoCompletionPort(%x[%s], %x, ...) returned %x", readSide.get(), pipeName, iocp, hIocp); + + if (!ConnectNamedPipe(readSide.get(), &overlapped) && GetLastError() != ERROR_IO_PENDING) + throw WinError("ConnectNamedPipe(%s)", pipeName); + + SECURITY_ATTRIBUTES psa2 = {0}; + psa2.nLength = sizeof(SECURITY_ATTRIBUTES); + psa2.bInheritHandle = TRUE; + + writeSide = CreateFileA(pipeName.c_str(), GENERIC_WRITE, 0, &psa2, OPEN_EXISTING, 0, NULL); + if (!writeSide) + throw WinError("CreateFileA(%s)", pipeName); +} + +void AsyncPipe::close() +{ + readSide.close(); + writeSide.close(); +} + +} diff --git a/src/libutil/windows/windows-async-pipe.hh b/src/libutil/windows/windows-async-pipe.hh new file mode 100644 index 000000000..8f554e403 --- /dev/null +++ b/src/libutil/windows/windows-async-pipe.hh @@ -0,0 +1,27 @@ +#pragma once +///@file + +#include "file-descriptor.hh" + +namespace nix::windows { + +/*** + * An "async pipe" is a pipe that supports I/O Completion Ports so + * multiple pipes can be listened to.
+ * + * Unfortunately, only named pipes support that on windows, so we use + * those with randomized temp file names. + */ +class AsyncPipe +{ +public: + AutoCloseFD writeSide, readSide; + OVERLAPPED overlapped; + DWORD got; + std::vector buffer; + + void createAsyncPipe(HANDLE iocp); + void close(); +}; + +} diff --git a/src/libutil/windows/windows-error.cc b/src/libutil/windows/windows-error.cc index 26faaae6d..aead4af23 100644 --- a/src/libutil/windows/windows-error.cc +++ b/src/libutil/windows/windows-error.cc @@ -4,7 +4,7 @@ #define WIN32_LEAN_AND_MEAN #include -namespace nix { +namespace nix::windows { std::string WinError::renderError(DWORD lastError) { diff --git a/src/libutil/windows/windows-error.hh b/src/libutil/windows/windows-error.hh index fdfd0f52c..624b4c4cb 100644 --- a/src/libutil/windows/windows-error.hh +++ b/src/libutil/windows/windows-error.hh @@ -5,7 +5,7 @@ #include "error.hh" -namespace nix { +namespace nix::windows { /** * Windows Error type. diff --git a/src/nix-collect-garbage/nix-collect-garbage.cc b/src/nix-collect-garbage/nix-collect-garbage.cc index 02a3a4b83..91209c978 100644 --- a/src/nix-collect-garbage/nix-collect-garbage.cc +++ b/src/nix-collect-garbage/nix-collect-garbage.cc @@ -27,7 +27,7 @@ void removeOldGenerations(std::string dir) bool canWrite = access(dir.c_str(), W_OK) == 0; - for (auto & i : readDirectory(dir)) { + for (auto & i : std::filesystem::directory_iterator{dir}) { checkInterrupt(); auto path = i.path().string(); diff --git a/src/nix-env/user-env.cc b/src/nix-env/user-env.cc index 6cbbacb15..5246b03e4 100644 --- a/src/nix-env/user-env.cc +++ b/src/nix-env/user-env.cc @@ -9,8 +9,9 @@ #include "eval-inline.hh" #include "profiles.hh" #include "print-ambiguous.hh" -#include +#include +#include namespace nix { @@ -140,6 +141,7 @@ bool createUserEnv(EvalState & state, PackageInfos & elems, NixStringContext context; auto & aDrvPath(*topLevel.attrs()->find(state.sDrvPath)); auto topLevelDrv = state.coerceToStorePath(aDrvPath.pos, *aDrvPath.value, context, ""); + topLevelDrv.requireDerivation(); auto & aOutPath(*topLevel.attrs()->find(state.sOutPath)); auto topLevelOut = state.coerceToStorePath(aOutPath.pos, *aOutPath.value, context, ""); diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc index b23d99ad6..6d028e0a7 100644 --- a/src/nix-store/nix-store.cc +++ b/src/nix-store/nix-store.cc @@ -7,6 +7,7 @@ #include "local-fs-store.hh" #include "log-store.hh" #include "serve-protocol.hh" +#include "serve-protocol-connection.hh" #include "serve-protocol-impl.hh" #include "shared.hh" #include "graphml.hh" diff --git a/src/nix/bundle.cc b/src/nix/bundle.cc index 2e50392f7..554c36540 100644 --- a/src/nix/bundle.cc +++ b/src/nix/bundle.cc @@ -100,6 +100,8 @@ struct CmdBundle : InstallableValueCommand NixStringContext context2; auto drvPath = evalState->coerceToStorePath(attr1->pos, *attr1->value, context2, ""); + drvPath.requireDerivation(); + auto attr2 = vRes->attrs()->get(evalState->sOutPath); if (!attr2) throw Error("the bundler '%s' does not produce a derivation", bundler.what()); diff --git a/src/nix/derivation-show.md b/src/nix/derivation-show.md index 2437ea08f..9fff58ef9 100644 --- a/src/nix/derivation-show.md +++ b/src/nix/derivation-show.md @@ -50,7 +50,7 @@ By default, this command only shows top-level derivations, but with `nix derivation show` outputs a JSON map of [store path]s to derivations in the following format: -[store path]: @docroot@/glossary.md#gloss-store-path +[store path]: @docroot@/store/store-path.md 
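The new Windows `MuxablePipePollState` (windows/muxable-pipe.cc above) and `AsyncPipe` (windows-async-pipe.cc) multiplex pipe reads through an I/O completion port, since only named pipes support overlapped I/O there. A self-contained sketch of just the completion-port mechanism they rely on — no pipes involved, one packet is posted by hand so the example runs on its own; this is not the Nix implementation:

```cpp
// Create an I/O completion port, post one completion packet, and drain it
// with GetQueuedCompletionStatusEx (the call used by poll() above). Win32 only.
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <cstdio>

int main()
{
    // A fresh port, not yet associated with any file handle.
    HANDLE port = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 1);
    if (!port) return 1;

    // Normally completions come from overlapped ReadFile() on a named pipe;
    // here one is posted manually. The key value is arbitrary.
    const ULONG_PTR key = 0x5555;
    if (!PostQueuedCompletionStatus(port, 42 /* bytes */, key, NULL)) return 1;

    OVERLAPPED_ENTRY entries[8];
    ULONG removed = 0;
    if (GetQueuedCompletionStatusEx(port, entries, 8, &removed, 1000 /* ms */, FALSE)) {
        for (ULONG i = 0; i < removed; i++)
            std::printf("key=%llx, bytes=%lu\n",
                        (unsigned long long) entries[i].lpCompletionKey,
                        (unsigned long) entries[i].dwNumberOfBytesTransferred);
    }

    CloseHandle(port);
    return 0;
}
```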
{{#include ../../protocols/json/derivation.md}} diff --git a/src/nix/develop.cc b/src/nix/develop.cc index 08d44d7aa..27287a1a8 100644 --- a/src/nix/develop.cc +++ b/src/nix/develop.cc @@ -14,6 +14,7 @@ #include #include +#include #include #include @@ -610,7 +611,7 @@ struct CmdDevelop : Common, MixEnvironment } else { - script = "[ -n \"$PS1\" ] && [ -e ~/.bashrc ] && source ~/.bashrc;\n" + script; + script = "[ -n \"$PS1\" ] && [ -e ~/.bashrc ] && source ~/.bashrc;\nshopt -u expand_aliases\n" + script + "\nshopt -s expand_aliases\n"; if (developSettings.bashPrompt != "") script += fmt("[ -n \"$PS1\" ] && PS1=%s;\n", shellEscape(developSettings.bashPrompt.get())); diff --git a/src/nix/env.cc b/src/nix/env.cc new file mode 100644 index 000000000..021c47cbb --- /dev/null +++ b/src/nix/env.cc @@ -0,0 +1,106 @@ +#include "command.hh" +#include "run.hh" +#include + +using namespace nix; + +struct CmdEnv : NixMultiCommand +{ + CmdEnv() + : NixMultiCommand("env", RegisterCommand::getCommandsFor({"env"})) + { + } + + std::string description() override + { + return "manipulate the process environment"; + } + + Category category() override + { + return catUtility; + } +}; + +static auto rCmdEnv = registerCommand("env"); + +struct CmdShell : InstallablesCommand, MixEnvironment +{ + + using InstallablesCommand::run; + + std::vector command = {getEnv("SHELL").value_or("bash")}; + + CmdShell() + { + addFlag( + {.longName = "command", + .shortName = 'c', + .description = "Command and arguments to be executed, defaulting to `$SHELL`", + .labels = {"command", "args"}, + .handler = {[&](std::vector ss) { + if (ss.empty()) + throw UsageError("--command requires at least one argument"); + command = ss; + }}}); + } + + std::string description() override + { + return "run a shell in which the specified packages are available"; + } + + std::string doc() override + { + return +#include "shell.md" + ; + } + + void run(ref store, Installables && installables) override + { + auto outPaths = + Installable::toStorePaths(getEvalStore(), store, Realise::Outputs, OperateOn::Output, installables); + + auto accessor = store->getFSAccessor(); + + std::unordered_set done; + std::queue todo; + for (auto & path : outPaths) + todo.push(path); + + setEnviron(); + + std::vector pathAdditions; + + while (!todo.empty()) { + auto path = todo.front(); + todo.pop(); + if (!done.insert(path).second) + continue; + + if (true) + pathAdditions.push_back(store->printStorePath(path) + "/bin"); + + auto propPath = accessor->resolveSymlinks( + CanonPath(store->printStorePath(path)) / "nix-support" / "propagated-user-env-packages"); + if (auto st = accessor->maybeLstat(propPath); st && st->type == SourceAccessor::tRegular) { + for (auto & p : tokenizeString(accessor->readFile(propPath))) + todo.push(store->parseStorePath(p)); + } + } + + auto unixPath = tokenizeString(getEnv("PATH").value_or(""), ":"); + unixPath.insert(unixPath.begin(), pathAdditions.begin(), pathAdditions.end()); + auto unixPathString = concatStringsSep(":", unixPath); + setEnv("PATH", unixPathString.c_str()); + + Strings args; + for (auto & arg : command) + args.push_back(arg); + + runProgramInStore(store, UseLookupPath::Use, *command.begin(), args); + } +}; + +static auto rCmdShell = registerCommand2({"env", "shell"}); diff --git a/src/nix/flake-metadata.md b/src/nix/flake-metadata.md index 5a009409b..adfd3dc96 100644 --- a/src/nix/flake-metadata.md +++ b/src/nix/flake-metadata.md @@ -2,10 +2,10 @@ R""( # Examples -* Show what `nixpkgs` resolves to: +* Show what `dwarffs` 
resolves to: ```console - # nix flake metadata nixpkgs + # nix flake metadata dwarffs Resolved URL: github:edolstra/dwarffs Locked URL: github:edolstra/dwarffs/f691e2c991e75edb22836f1dbe632c40324215c5 Description: A filesystem that fetches DWARF debug info from the Internet on demand diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 9c1888aa0..6bf694e08 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -866,7 +866,7 @@ struct CmdFlakeInitCommon : virtual Args, EvalCommand { createDirs(to); - for (auto & entry : readDirectory(from)) { + for (auto & entry : std::filesystem::directory_iterator{from}) { auto from2 = entry.path().string(); auto to2 = to + "/" + entry.path().filename().string(); auto st = lstat(from2); @@ -1176,7 +1176,7 @@ struct CmdFlakeShow : FlakeCommand, MixJSON // If we don't recognize it, it's probably content return true; } catch (EvalError & e) { - // Some attrs may contain errors, eg. legacyPackages of + // Some attrs may contain errors, e.g. legacyPackages of // nixpkgs. We still want to recurse into it, instead of // skipping it at all. return true; diff --git a/src/nix/flake.md b/src/nix/flake.md index d8b5bf435..2f43d0264 100644 --- a/src/nix/flake.md +++ b/src/nix/flake.md @@ -134,7 +134,9 @@ The following generic flake reference attributes are supported: repository or tarball. The default is the root directory of the flake. -* `narHash`: The hash of the NAR serialisation (in SRI format) of the +* `narHash`: The hash of the + [Nix Archive (NAR) serialisation][Nix Archive] + (in SRI format) of the contents of the flake. This is useful for flake types such as tarballs that lack a unique content identifier such as a Git commit hash. @@ -423,8 +425,9 @@ The following attributes are supported in `flake.nix`: * `lastModified`: The commit time of the revision `rev` as an integer denoting the number of seconds since 1970. - * `narHash`: The SHA-256 (in SRI format) of the NAR serialization of - the flake's source tree. + * `narHash`: The SHA-256 (in SRI format) of the + [Nix Archive (NAR) serialisation][Nix Archive] + of the flake's source tree. The value returned by the `outputs` function must be an attribute set. The attributes can have arbitrary values; however, various @@ -439,7 +442,7 @@ The following attributes are supported in `flake.nix`: - [`bash-prompt-prefix`](@docroot@/command-ref/conf-file.md#conf-bash-prompt-prefix) - [`bash-prompt-suffix`](@docroot@/command-ref/conf-file.md#conf-bash-prompt-suffix) - [`flake-registry`](@docroot@/command-ref/conf-file.md#conf-flake-registry) - - [`commit-lockfile-summary`](@docroot@/command-ref/conf-file.md#conf-commit-lockfile-summary) + - [`commit-lock-file-summary`](@docroot@/command-ref/conf-file.md#conf-commit-lock-file-summary) ## Flake inputs @@ -703,4 +706,5 @@ will not look at the lock files of dependencies. However, lock file generation itself *does* use the lock files of dependencies by default.
+[Nix Archive]: @docroot@/store/file-system-object/content-address.md#serial-nix-archive )"" diff --git a/src/nix/hash.cc b/src/nix/hash.cc index f969886ea..f57b224d2 100644 --- a/src/nix/hash.cc +++ b/src/nix/hash.cc @@ -181,7 +181,7 @@ struct CmdToBase : Command void run() override { - warn("The old format conversion sub commands of `nix hash` where deprecated in favor of `nix hash convert`."); + warn("The old format conversion sub commands of `nix hash` were deprecated in favor of `nix hash convert`."); for (auto s : args) logger->cout(Hash::parseAny(s, hashAlgo).to_string(hashFormat, hashFormat == HashFormat::SRI)); } diff --git a/src/nix/local.mk b/src/nix/local.mk index 305b0e9df..9883509fb 100644 --- a/src/nix/local.mk +++ b/src/nix/local.mk @@ -30,6 +30,11 @@ nix_LIBS = libexpr libmain libfetchers libstore libutil libcmd nix_LDFLAGS = $(THREAD_LDFLAGS) $(SODIUM_LIBS) $(EDITLINE_LIBS) $(BOOST_LDFLAGS) $(LOWDOWN_LIBS) +ifdef HOST_WINDOWS + # Increase the default reserved stack size to 65 MB so Nix doesn't run out of space + nix_LDFLAGS += -Wl,--stack,$(shell echo $$((65 * 1024 * 1024))) +endif + $(foreach name, \ nix-build nix-channel nix-collect-garbage nix-copy-closure nix-daemon nix-env nix-hash nix-instantiate nix-prefetch-url nix-shell nix-store, \ $(eval $(call install-symlink, nix, $(bindir)/$(name)))) diff --git a/src/nix/main.cc b/src/nix/main.cc index bc13a4df5..81d368d6a 100644 --- a/src/nix/main.cc +++ b/src/nix/main.cc @@ -18,6 +18,7 @@ #include "terminal.hh" #include "users.hh" #include "network-proxy.hh" +#include "eval-cache.hh" #include #include @@ -42,6 +43,19 @@ void chrootHelper(int argc, char * * argv); namespace nix { +enum struct AliasStatus { + /** Aliases that don't go away */ + AcceptedShorthand, + /** Aliases that will go away */ + Deprecated, +}; + +/** An alias, except for the original syntax, which is in the map key. */ +struct AliasInfo { + AliasStatus status; + std::vector replacement; +}; + /* Check if we have a non-loopback/link-local network interface. 
*/ static bool haveInternet() { @@ -134,29 +148,30 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs, virtual RootArgs }); } - std::map> aliases = { - {"add-to-store", {"store", "add-path"}}, - {"cat-nar", {"nar", "cat"}}, - {"cat-store", {"store", "cat"}}, - {"copy-sigs", {"store", "copy-sigs"}}, - {"dev-shell", {"develop"}}, - {"diff-closures", {"store", "diff-closures"}}, - {"dump-path", {"store", "dump-path"}}, - {"hash-file", {"hash", "file"}}, - {"hash-path", {"hash", "path"}}, - {"ls-nar", {"nar", "ls"}}, - {"ls-store", {"store", "ls"}}, - {"make-content-addressable", {"store", "make-content-addressed"}}, - {"optimise-store", {"store", "optimise"}}, - {"ping-store", {"store", "ping"}}, - {"sign-paths", {"store", "sign"}}, - {"show-derivation", {"derivation", "show"}}, - {"show-config", {"config", "show"}}, - {"to-base16", {"hash", "to-base16"}}, - {"to-base32", {"hash", "to-base32"}}, - {"to-base64", {"hash", "to-base64"}}, - {"verify", {"store", "verify"}}, - {"doctor", {"config", "check"}}, + std::map aliases = { + {"add-to-store", { AliasStatus::Deprecated, {"store", "add-path"}}}, + {"cat-nar", { AliasStatus::Deprecated, {"nar", "cat"}}}, + {"cat-store", { AliasStatus::Deprecated, {"store", "cat"}}}, + {"copy-sigs", { AliasStatus::Deprecated, {"store", "copy-sigs"}}}, + {"dev-shell", { AliasStatus::Deprecated, {"develop"}}}, + {"diff-closures", { AliasStatus::Deprecated, {"store", "diff-closures"}}}, + {"dump-path", { AliasStatus::Deprecated, {"store", "dump-path"}}}, + {"hash-file", { AliasStatus::Deprecated, {"hash", "file"}}}, + {"hash-path", { AliasStatus::Deprecated, {"hash", "path"}}}, + {"ls-nar", { AliasStatus::Deprecated, {"nar", "ls"}}}, + {"ls-store", { AliasStatus::Deprecated, {"store", "ls"}}}, + {"make-content-addressable", { AliasStatus::Deprecated, {"store", "make-content-addressed"}}}, + {"optimise-store", { AliasStatus::Deprecated, {"store", "optimise"}}}, + {"ping-store", { AliasStatus::Deprecated, {"store", "ping"}}}, + {"sign-paths", { AliasStatus::Deprecated, {"store", "sign"}}}, + {"shell", { AliasStatus::AcceptedShorthand, {"env", "shell"}}}, + {"show-derivation", { AliasStatus::Deprecated, {"derivation", "show"}}}, + {"show-config", { AliasStatus::Deprecated, {"config", "show"}}}, + {"to-base16", { AliasStatus::Deprecated, {"hash", "to-base16"}}}, + {"to-base32", { AliasStatus::Deprecated, {"hash", "to-base32"}}}, + {"to-base64", { AliasStatus::Deprecated, {"hash", "to-base64"}}}, + {"verify", { AliasStatus::Deprecated, {"store", "verify"}}}, + {"doctor", { AliasStatus::Deprecated, {"config", "check"}}}, }; bool aliasUsed = false; @@ -167,10 +182,13 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs, virtual RootArgs auto arg = *pos; auto i = aliases.find(arg); if (i == aliases.end()) return pos; - warn("'%s' is a deprecated alias for '%s'", - arg, concatStringsSep(" ", i->second)); + auto & info = i->second; + if (info.status == AliasStatus::Deprecated) { + warn("'%s' is a deprecated alias for '%s'", + arg, concatStringsSep(" ", info.replacement)); + } pos = args.erase(pos); - for (auto j = i->second.rbegin(); j != i->second.rend(); ++j) + for (auto j = info.replacement.rbegin(); j != info.replacement.rend(); ++j) pos = args.insert(pos, *j); aliasUsed = true; return pos; @@ -515,18 +533,24 @@ void mainWrapped(int argc, char * * argv) if (args.command->second->forceImpureByDefault() && !evalSettings.pureEval.overridden) { evalSettings.pureEval = false; } - args.command->second->run(); + + try { + args.command->second->run(); + 
} catch (eval_cache::CachedEvalError & e) { + /* Evaluate the original attribute that resulted in this + cached error so that we can show the original error to the + user. */ + e.force(); + } } } int main(int argc, char * * argv) { -#ifndef _WIN32 // TODO implement on Windows // Increase the default stack size for the evaluator and for // libstdc++'s std::regex. nix::setStackSize(64 * 1024 * 1024); -#endif return nix::handleExceptions(argv[0], [&]() { nix::mainWrapped(argc, argv); diff --git a/src/nix/nar-cat.md b/src/nix/nar-cat.md index 55c481a28..1131eb2bf 100644 --- a/src/nix/nar-cat.md +++ b/src/nix/nar-cat.md @@ -2,7 +2,7 @@ R""( # Examples -* List a file in a NAR and pipe it through `gunzip`: +* List a file in a [Nix Archive (NAR)][Nix Archive] and pipe it through `gunzip`: ```console # nix nar cat ./hello.nar /share/man/man1/hello.1.gz | gunzip @@ -16,4 +16,5 @@ R""( This command prints on standard output the contents of the regular file *path* inside the NAR file *nar*. +[Nix Archive]: @docroot@/store/file-system-object/content-address.md#serial-nix-archive )"" diff --git a/src/nix/nar-dump-path.md b/src/nix/nar-dump-path.md index de82202de..4676e4fef 100644 --- a/src/nix/nar-dump-path.md +++ b/src/nix/nar-dump-path.md @@ -2,7 +2,7 @@ R""( # Examples -* To serialise directory `foo` as a NAR: +* To serialise directory `foo` as a [Nix Archive (NAR)][Nix Archive]: ```console # nix nar pack ./foo > foo.nar @@ -10,8 +10,10 @@ R""( # Description -This command generates a NAR file containing the serialisation of +This command generates a [Nix Archive (NAR)][Nix Archive] file containing the serialisation of *path*, which must contain only regular files, directories and symbolic links. The NAR is written to standard output. +[Nix Archive]: @docroot@/store/file-system-object/content-address.md#serial-nix-archive + )"" diff --git a/src/nix/nar-ls.md b/src/nix/nar-ls.md index 5a03c5d82..27c4b97e6 100644 --- a/src/nix/nar-ls.md +++ b/src/nix/nar-ls.md @@ -2,7 +2,7 @@ R""( # Examples -* To list a specific file in a NAR: +* To list a specific file in a [NAR][Nix Archive]: ```console # nix nar ls --long ./hello.nar /bin/hello @@ -19,6 +19,8 @@ R""( # Description -This command shows information about a *path* inside NAR file *nar*. +This command shows information about a *path* inside [Nix Archive (NAR)][Nix Archive] file *nar*. + +[Nix Archive]: @docroot@/store/file-system-object/content-address.md#serial-nix-archive )"" diff --git a/src/nix/nar.md b/src/nix/nar.md index a83b5c764..b0f70ce93 100644 --- a/src/nix/nar.md +++ b/src/nix/nar.md @@ -3,11 +3,14 @@ R""( # Description `nix nar` provides several subcommands for creating and inspecting -*Nix Archives* (NARs). +[*Nix Archives* (NARs)][Nix Archive]. # File format -For the definition of the NAR file format, see Figure 5.2 in -https://edolstra.github.io/pubs/phd-thesis.pdf. +For the definition of the Nix Archive file format, see +[within the protocols chapter](@docroot@/protocols/nix-archive.md) +of the manual. + +[Nix Archive]: @docroot@/store/file-system-object/content-address.md#serial-nix-archive )"" diff --git a/src/nix/prefetch.cc b/src/nix/prefetch.cc index e932170cf..3ce52acc5 100644 --- a/src/nix/prefetch.cc +++ b/src/nix/prefetch.cc @@ -115,9 +115,10 @@ std::tuple prefetchFile( /* If the archive unpacks to a single file/directory, then use that as the top-level. 
*/ - auto entries = readDirectory(unpacked); - if (entries.size() == 1) - tmpFile = entries[0].path(); + auto entries = std::filesystem::directory_iterator{unpacked}; + auto file_count = std::distance(entries, std::filesystem::directory_iterator{}); + if (file_count == 1) + tmpFile = entries->path(); else tmpFile = unpacked; } diff --git a/src/nix/run.cc b/src/nix/run.cc index 9c559bdf6..7d3122470 100644 --- a/src/nix/run.cc +++ b/src/nix/run.cc @@ -71,83 +71,6 @@ void runProgramInStore(ref store, } -struct CmdShell : InstallablesCommand, MixEnvironment -{ - - using InstallablesCommand::run; - - std::vector command = { getEnv("SHELL").value_or("bash") }; - - CmdShell() - { - addFlag({ - .longName = "command", - .shortName = 'c', - .description = "Command and arguments to be executed, defaulting to `$SHELL`", - .labels = {"command", "args"}, - .handler = {[&](std::vector ss) { - if (ss.empty()) throw UsageError("--command requires at least one argument"); - command = ss; - }} - }); - } - - std::string description() override - { - return "run a shell in which the specified packages are available"; - } - - std::string doc() override - { - return - #include "shell.md" - ; - } - - void run(ref store, Installables && installables) override - { - auto outPaths = Installable::toStorePaths(getEvalStore(), store, Realise::Outputs, OperateOn::Output, installables); - - auto accessor = store->getFSAccessor(); - - std::unordered_set done; - std::queue todo; - for (auto & path : outPaths) todo.push(path); - - setEnviron(); - - std::vector pathAdditions; - - while (!todo.empty()) { - auto path = todo.front(); - todo.pop(); - if (!done.insert(path).second) continue; - - if (true) - pathAdditions.push_back(store->printStorePath(path) + "/bin"); - - auto propPath = accessor->resolveSymlinks( - CanonPath(store->printStorePath(path)) / "nix-support" / "propagated-user-env-packages"); - if (auto st = accessor->maybeLstat(propPath); st && st->type == SourceAccessor::tRegular) { - for (auto & p : tokenizeString(accessor->readFile(propPath))) - todo.push(store->parseStorePath(p)); - } - } - - auto unixPath = tokenizeString(getEnv("PATH").value_or(""), ":"); - unixPath.insert(unixPath.begin(), pathAdditions.begin(), pathAdditions.end()); - auto unixPathString = concatStringsSep(":", unixPath); - setEnv("PATH", unixPathString.c_str()); - - Strings args; - for (auto & arg : command) args.push_back(arg); - - runProgramInStore(store, UseLookupPath::Use, *command.begin(), args); - } -}; - -static auto rCmdShell = registerCommand("shell"); - struct CmdRun : InstallableValueCommand { using InstallableCommand::run; @@ -248,7 +171,7 @@ void chrootHelper(int argc, char * * argv) if (mount(realStoreDir.c_str(), (tmpDir + storeDir).c_str(), "", MS_BIND, 0) == -1) throw SysError("mounting '%s' on '%s'", realStoreDir, storeDir); - for (auto entry : readDirectory("/")) { + for (auto entry : std::filesystem::directory_iterator{"/"}) { auto src = entry.path().string(); Path dst = tmpDir + "/" + entry.path().filename().string(); if (pathExists(dst)) continue; diff --git a/src/nix/store-dump-path.md b/src/nix/store-dump-path.md index 56e2174b6..21467ff32 100644 --- a/src/nix/store-dump-path.md +++ b/src/nix/store-dump-path.md @@ -17,7 +17,9 @@ R""( # Description -This command generates a NAR file containing the serialisation of the +This command generates a [Nix Archive (NAR)][Nix Archive] file containing the serialisation of the store path [*installable*](./nix.md#installables). The NAR is written to standard output. 
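Several hunks above (file-descriptor.cc, nix-collect-garbage.cc, flake.cc, prefetch.cc, run.cc) replace the old `readDirectory()` helper with `std::filesystem::directory_iterator`. A minimal sketch of the pattern, including prefetch.cc's "archive unpacks to a single entry" case; entries are collected into a vector so they can be counted and revisited without re-iterating, and the path used is a placeholder:

```cpp
#include <filesystem>
#include <iostream>
#include <vector>

namespace fs = std::filesystem;

int main()
{
    fs::path unpacked = "."; // stand-in for the unpacked tarball directory

    // Iterate the directory once, keeping the entries.
    std::vector<fs::directory_entry> entries;
    for (const auto & entry : fs::directory_iterator{unpacked})
        entries.push_back(entry);

    // If the archive unpacked to a single file/directory, use that as the
    // top level; otherwise use the unpack directory itself.
    fs::path topLevel = (entries.size() == 1) ? entries.front().path() : unpacked;
    std::cout << "top level: " << topLevel << "\n";
}
```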
+[Nix Archive]: @docroot@/store/file-system-object/content-address.md#serial-nix-archive + )"" diff --git a/src/nix/unix/daemon.cc b/src/nix/unix/daemon.cc index 8afcbe982..de77a7b6b 100644 --- a/src/nix/unix/daemon.cc +++ b/src/nix/unix/daemon.cc @@ -58,7 +58,7 @@ struct AuthorizationSettings : Config { this, {"root"}, "trusted-users", R"( A list of user names, separated by whitespace. - These users will have additional rights when connecting to the Nix daemon, such as the ability to specify additional [substituters](#conf-substituters), or to import unsigned [NARs](@docroot@/glossary.md#gloss-nar). + These users will have additional rights when connecting to the Nix daemon, such as the ability to specify additional [substituters](#conf-substituters), or to import unsigned realisations or unsigned input-addressed store objects. You can also specify groups by prefixing names with `@`. For instance, `@wheel` means all users in the `wheel` group. diff --git a/src/nix/verify.md b/src/nix/verify.md index e1d55eab4..ae0b0acd6 100644 --- a/src/nix/verify.md +++ b/src/nix/verify.md @@ -46,4 +46,6 @@ The exit status of this command is the sum of the following values: * **4** if any path couldn't be verified for any other reason (such as an I/O error). +[Nix Archive]: @docroot@/store/file-system-object/content-address.md#serial-nix-archive + )"" diff --git a/tests/functional/add.sh b/tests/functional/add.sh old mode 100644 new mode 100755 index a4bb0e225..a6cf88e1a --- a/tests/functional/add.sh +++ b/tests/functional/add.sh @@ -1,10 +1,12 @@ +#!/usr/bin/env bash + source common.sh path1=$(nix-store --add ./dummy) -echo $path1 +echo "$path1" path2=$(nix-store --add-fixed sha256 --recursive ./dummy) -echo $path2 +echo "$path2" if test "$path1" != "$path2"; then echo "nix-store --add and --add-fixed mismatch" @@ -12,18 +14,18 @@ if test "$path1" != "$path2"; then fi path3=$(nix-store --add-fixed sha256 ./dummy) -echo $path3 +echo "$path3" test "$path1" != "$path3" || exit 1 path4=$(nix-store --add-fixed sha1 --recursive ./dummy) -echo $path4 +echo "$path4" test "$path1" != "$path4" || exit 1 -hash1=$(nix-store -q --hash $path1) -echo $hash1 +hash1=$(nix-store -q --hash "$path1") +echo "$hash1" hash2=$(nix-hash --type sha256 --base32 ./dummy) -echo $hash2 +echo "$hash2" test "$hash1" = "sha256:$hash2" diff --git a/tests/functional/bash-profile.sh b/tests/functional/bash-profile.sh old mode 100644 new mode 100755 index 3faeaaba1..6cfa5bd9c --- a/tests/functional/bash-profile.sh +++ b/tests/functional/bash-profile.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh sed -e "s|@localstatedir@|$TEST_ROOT/profile-var|g" -e "s|@coreutils@|$coreutils|g" < ../../scripts/nix-profile.sh.in > $TEST_ROOT/nix-profile.sh diff --git a/tests/functional/binary-cache-build-remote.sh b/tests/functional/binary-cache-build-remote.sh old mode 100644 new mode 100755 index 81cd21a4a..0303e9410 --- a/tests/functional/binary-cache-build-remote.sh +++ b/tests/functional/binary-cache-build-remote.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/binary-cache.sh b/tests/functional/binary-cache.sh old mode 100644 new mode 100755 index 2a8d5ccdb..54a3687ca --- a/tests/functional/binary-cache.sh +++ b/tests/functional/binary-cache.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh needLocalStore "'--no-require-sigs' can’t be used with the daemon" diff --git a/tests/functional/brotli.sh b/tests/functional/brotli.sh old mode 100644 new mode 100755 index 
dc9bbdb66..02a2a0875 --- a/tests/functional/brotli.sh +++ b/tests/functional/brotli.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/build-delete.sh b/tests/functional/build-delete.sh old mode 100644 new mode 100755 index 9c56b00e8..2ef3008f6 --- a/tests/functional/build-delete.sh +++ b/tests/functional/build-delete.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/build-dry.sh b/tests/functional/build-dry.sh old mode 100644 new mode 100755 index 6d1754af5..9336cf745 --- a/tests/functional/build-dry.sh +++ b/tests/functional/build-dry.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh ################################################### diff --git a/tests/functional/build-remote-content-addressed-fixed.sh b/tests/functional/build-remote-content-addressed-fixed.sh old mode 100644 new mode 100755 index ae7441591..61a1f4a46 --- a/tests/functional/build-remote-content-addressed-fixed.sh +++ b/tests/functional/build-remote-content-addressed-fixed.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh file=build-hook-ca-fixed.nix diff --git a/tests/functional/build-remote-content-addressed-floating.sh b/tests/functional/build-remote-content-addressed-floating.sh old mode 100644 new mode 100755 index e83b42b41..33d667f92 --- a/tests/functional/build-remote-content-addressed-floating.sh +++ b/tests/functional/build-remote-content-addressed-floating.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh file=build-hook-ca-floating.nix diff --git a/tests/functional/build-remote-input-addressed.sh b/tests/functional/build-remote-input-addressed.sh old mode 100644 new mode 100755 index 49d15c389..986692dbc --- a/tests/functional/build-remote-input-addressed.sh +++ b/tests/functional/build-remote-input-addressed.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh file=build-hook.nix @@ -11,17 +13,17 @@ registerBuildHook () { # Dummy post-build-hook just to ensure that it's executed correctly. 
# (we can't reuse the one from `$PWD/push-to-store.sh` because of # https://github.com/NixOS/nix/issues/4341) - cat < $TEST_ROOT/post-build-hook.sh + cat < "$TEST_ROOT/post-build-hook.sh" #!/bin/sh echo "Post hook ran successfully" # Add an empty line to a counter file, just to check that this hook ran properly echo "" >> $TEST_ROOT/post-hook-counter EOF - chmod +x $TEST_ROOT/post-build-hook.sh - rm -f $TEST_ROOT/post-hook-counter + chmod +x "$TEST_ROOT/post-build-hook.sh" + rm -f "$TEST_ROOT/post-hook-counter" - echo "post-build-hook = $TEST_ROOT/post-build-hook.sh" >> $NIX_CONF_DIR/nix.conf + echo "post-build-hook = $TEST_ROOT/post-build-hook.sh" >> "$NIX_CONF_DIR/nix.conf" } registerBuildHook @@ -30,4 +32,4 @@ source build-remote.sh # `build-hook.nix` has four derivations to build, and the hook runs twice for # each derivation (once on the builder and once on the host), so the counter # should contain eight lines now -[[ $(cat $TEST_ROOT/post-hook-counter | wc -l) -eq 8 ]] +[[ $(wc -l < "$TEST_ROOT/post-hook-counter") -eq 8 ]] diff --git a/tests/functional/build-remote-trustless-after.sh b/tests/functional/build-remote-trustless-after.sh index 19f59e6ae..2fcdbf10a 100644 --- a/tests/functional/build-remote-trustless-after.sh +++ b/tests/functional/build-remote-trustless-after.sh @@ -1,2 +1,7 @@ -outPath=$(readlink -f $TEST_ROOT/result) -grep 'FOO BAR BAZ' ${remoteDir}/${outPath} +# shellcheck shell=bash + +# Variables must be defined by caller, so +# shellcheck disable=SC2154 + +outPath=$(readlink -f "$TEST_ROOT/result") +grep 'FOO BAR BAZ' "${remoteDir}/${outPath}" diff --git a/tests/functional/build-remote-trustless-should-fail-0.sh b/tests/functional/build-remote-trustless-should-fail-0.sh old mode 100644 new mode 100755 index 3d4a4b097..269f7f112 --- a/tests/functional/build-remote-trustless-should-fail-0.sh +++ b/tests/functional/build-remote-trustless-should-fail-0.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh enableFeatures "daemon-trust-override" @@ -22,8 +24,12 @@ nix-build build-hook.nix -A passthru.input2 \ # copy our already-build `input2` to the remote store. That store object # is input-addressed, so this will fail. 
+# For script below +# shellcheck disable=SC2034 file=build-hook.nix +# shellcheck disable=SC2034 prog=$(readlink -e ./nix-daemon-untrusting.sh) +# shellcheck disable=SC2034 proto=ssh-ng expectStderr 1 source build-remote-trustless.sh \ diff --git a/tests/functional/build-remote-trustless-should-pass-0.sh b/tests/functional/build-remote-trustless-should-pass-0.sh old mode 100644 new mode 100755 index 2a7ebd8c6..b81060907 --- a/tests/functional/build-remote-trustless-should-pass-0.sh +++ b/tests/functional/build-remote-trustless-should-pass-0.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh # Remote trusts us diff --git a/tests/functional/build-remote-trustless-should-pass-1.sh b/tests/functional/build-remote-trustless-should-pass-1.sh old mode 100644 new mode 100755 index 516bdf092..b8dc038bf --- a/tests/functional/build-remote-trustless-should-pass-1.sh +++ b/tests/functional/build-remote-trustless-should-pass-1.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh # Remote trusts us diff --git a/tests/functional/build-remote-trustless-should-pass-2.sh b/tests/functional/build-remote-trustless-should-pass-2.sh old mode 100644 new mode 100755 index b769a88f0..ba5d1ff7a --- a/tests/functional/build-remote-trustless-should-pass-2.sh +++ b/tests/functional/build-remote-trustless-should-pass-2.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh enableFeatures "daemon-trust-override" diff --git a/tests/functional/build-remote-trustless-should-pass-3.sh b/tests/functional/build-remote-trustless-should-pass-3.sh old mode 100644 new mode 100755 index 40f81da5a..187b89948 --- a/tests/functional/build-remote-trustless-should-pass-3.sh +++ b/tests/functional/build-remote-trustless-should-pass-3.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh enableFeatures "daemon-trust-override" diff --git a/tests/functional/build-remote-trustless.sh b/tests/functional/build-remote-trustless.sh index 81e5253bf..c498d46c3 100644 --- a/tests/functional/build-remote-trustless.sh +++ b/tests/functional/build-remote-trustless.sh @@ -1,5 +1,11 @@ +# shellcheck shell=bash + +# All variables should be defined externally by the scripts that source +# this, `set -u` will catch any that are forgotten. +# shellcheck disable=SC2154 + requireSandboxSupport -[[ $busybox =~ busybox ]] || skipTest "no busybox" +[[ "$busybox" =~ busybox ]] || skipTest "no busybox" unset NIX_STORE_DIR unset NIX_STATE_DIR @@ -8,7 +14,7 @@ remoteDir=$TEST_ROOT/remote # Note: ssh{-ng}://localhost bypasses ssh. See tests/functional/build-remote.sh for # more details. 
-nix-build $file -o $TEST_ROOT/result --max-jobs 0 \ - --arg busybox $busybox \ - --store $TEST_ROOT/local \ +nix-build "$file" -o "$TEST_ROOT/result" --max-jobs 0 \ + --arg busybox "$busybox" \ + --store "$TEST_ROOT/local" \ --builders "$proto://localhost?remote-program=$prog&remote-store=${remoteDir}%3Fsystem-features=foo%20bar%20baz - - 1 1 foo,bar,baz" diff --git a/tests/functional/build-remote-with-mounted-ssh-ng.sh b/tests/functional/build-remote-with-mounted-ssh-ng.sh old mode 100644 new mode 100755 index 443acb6ca..e2627af39 --- a/tests/functional/build-remote-with-mounted-ssh-ng.sh +++ b/tests/functional/build-remote-with-mounted-ssh-ng.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh requireSandboxSupport @@ -6,17 +8,17 @@ requireSandboxSupport enableFeatures mounted-ssh-store nix build -Lvf simple.nix \ - --arg busybox $busybox \ - --out-link $TEST_ROOT/result-from-remote \ + --arg busybox "$busybox" \ + --out-link "$TEST_ROOT/result-from-remote" \ --store mounted-ssh-ng://localhost nix build -Lvf simple.nix \ - --arg busybox $busybox \ - --out-link $TEST_ROOT/result-from-remote-new-cli \ + --arg busybox "$busybox" \ + --out-link "$TEST_ROOT/result-from-remote-new-cli" \ --store 'mounted-ssh-ng://localhost?remote-program=nix daemon' # This verifies that the out link was actually created and valid. The ability # to create out links (permanent gc roots) is the distinguishing feature of # the mounted-ssh-ng store. -cat $TEST_ROOT/result-from-remote/hello | grepQuiet 'Hello World!' -cat $TEST_ROOT/result-from-remote-new-cli/hello | grepQuiet 'Hello World!' +grepQuiet 'Hello World!' < "$TEST_ROOT/result-from-remote/hello" +grepQuiet 'Hello World!' < "$TEST_ROOT/result-from-remote-new-cli/hello" diff --git a/tests/functional/build-remote.sh b/tests/functional/build-remote.sh index d2a2132c1..1a5334577 100644 --- a/tests/functional/build-remote.sh +++ b/tests/functional/build-remote.sh @@ -1,5 +1,9 @@ +# shellcheck shell=bash + +: "${file?must be defined by caller (remote building test case using this)}" + requireSandboxSupport -[[ $busybox =~ busybox ]] || skipTest "no busybox" +[[ "${busybox-}" =~ busybox ]] || skipTest "no busybox" # Avoid store dir being inside sandbox build-dir unset NIX_STORE_DIR @@ -15,50 +19,50 @@ fi builders=( # system-features will automatically be added to the outer URL, but not inner # remote-store URL. - "ssh://localhost?remote-store=$TEST_ROOT/machine1?system-features=$(join_by "%20" foo ${EXTRA_SYSTEM_FEATURES[@]}) - - 1 1 $(join_by "," foo ${EXTRA_SYSTEM_FEATURES[@]})" - "$TEST_ROOT/machine2 - - 1 1 $(join_by "," bar ${EXTRA_SYSTEM_FEATURES[@]})" - "ssh-ng://localhost?remote-store=$TEST_ROOT/machine3?system-features=$(join_by "%20" baz ${EXTRA_SYSTEM_FEATURES[@]}) - - 1 1 $(join_by "," baz ${EXTRA_SYSTEM_FEATURES[@]})" + "ssh://localhost?remote-store=$TEST_ROOT/machine1?system-features=$(join_by "%20" foo "${EXTRA_SYSTEM_FEATURES[@]}") - - 1 1 $(join_by "," foo "${EXTRA_SYSTEM_FEATURES[@]}")" + "$TEST_ROOT/machine2 - - 1 1 $(join_by "," bar "${EXTRA_SYSTEM_FEATURES[@]}")" + "ssh-ng://localhost?remote-store=$TEST_ROOT/machine3?system-features=$(join_by "%20" baz "${EXTRA_SYSTEM_FEATURES[@]}") - - 1 1 $(join_by "," baz "${EXTRA_SYSTEM_FEATURES[@]}")" ) -chmod -R +w $TEST_ROOT/machine* || true -rm -rf $TEST_ROOT/machine* || true +chmod -R +w "$TEST_ROOT/machine"* || true +rm -rf "$TEST_ROOT/machine"* || true # Note: ssh://localhost bypasses ssh, directly invoking nix-store as a # child process. 
This allows us to test LegacySSHStore::buildDerivation(). # ssh-ng://... likewise allows us to test RemoteStore::buildDerivation(). -nix build -L -v -f $file -o $TEST_ROOT/result --max-jobs 0 \ - --arg busybox $busybox \ - --store $TEST_ROOT/machine0 \ +nix build -L -v -f "$file" -o "$TEST_ROOT/result" --max-jobs 0 \ + --arg busybox "$busybox" \ + --store "$TEST_ROOT/machine0" \ --builders "$(join_by '; ' "${builders[@]}")" -outPath=$(readlink -f $TEST_ROOT/result) +outPath=$(readlink -f "$TEST_ROOT/result") -grep 'FOO BAR BAZ' $TEST_ROOT/machine0/$outPath +grep 'FOO BAR BAZ' "$TEST_ROOT/machine0/$outPath" -testPrintOutPath=$(nix build -L -v -f $file --no-link --print-out-paths --max-jobs 0 \ - --arg busybox $busybox \ - --store $TEST_ROOT/machine0 \ +testPrintOutPath=$(nix build -L -v -f "$file" --no-link --print-out-paths --max-jobs 0 \ + --arg busybox "$busybox" \ + --store "$TEST_ROOT/machine0" \ --builders "$(join_by '; ' "${builders[@]}")" ) [[ $testPrintOutPath =~ store.*build-remote ]] # Ensure that input1 was built on store1 due to the required feature. -output=$(nix path-info --store $TEST_ROOT/machine1 --all) +output=$(nix path-info --store "$TEST_ROOT/machine1" --all) echo "$output" | grepQuiet builder-build-remote-input-1.sh echo "$output" | grepQuietInverse builder-build-remote-input-2.sh echo "$output" | grepQuietInverse builder-build-remote-input-3.sh unset output # Ensure that input2 was built on store2 due to the required feature. -output=$(nix path-info --store $TEST_ROOT/machine2 --all) +output=$(nix path-info --store "$TEST_ROOT/machine2" --all) echo "$output" | grepQuietInverse builder-build-remote-input-1.sh echo "$output" | grepQuiet builder-build-remote-input-2.sh echo "$output" | grepQuietInverse builder-build-remote-input-3.sh unset output # Ensure that input3 was built on store3 due to the required feature. -output=$(nix path-info --store $TEST_ROOT/machine3 --all) +output=$(nix path-info --store "$TEST_ROOT/machine3" --all) echo "$output" | grepQuietInverse builder-build-remote-input-1.sh echo "$output" | grepQuietInverse builder-build-remote-input-2.sh echo "$output" | grepQuiet builder-build-remote-input-3.sh @@ -66,7 +70,7 @@ unset output for i in input1 input3; do -nix log --store $TEST_ROOT/machine0 --file "$file" --arg busybox $busybox passthru."$i" | grep hi-$i +nix log --store "$TEST_ROOT/machine0" --file "$file" --arg busybox "$busybox" "passthru.$i" | grep hi-$i done # Behavior of keep-failed @@ -74,9 +78,9 @@ out="$(nix-build 2>&1 failing.nix \ --no-out-link \ --builders "$(join_by '; ' "${builders[@]}")" \ --keep-failed \ - --store $TEST_ROOT/machine0 \ + --store "$TEST_ROOT/machine0" \ -j0 \ - --arg busybox $busybox)" || true + --arg busybox "$busybox")" || true [[ "$out" =~ .*"note: keeping build directory".* ]] diff --git a/tests/functional/build.sh b/tests/functional/build.sh old mode 100644 new mode 100755 index 7fbdb0f07..a14e6d672 --- a/tests/functional/build.sh +++ b/tests/functional/build.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore @@ -43,6 +45,14 @@ nix build -f multiple-outputs.nix --json e --no-link | jq --exit-status ' (.outputs | keys == ["a_a", "b"])) ' +# Tests that we can handle empty 'outputsToInstall' (assuming that default +# output "out" exists). +nix build -f multiple-outputs.nix --json nothing-to-install --no-link | jq --exit-status ' + (.[0] | + (.drvPath | match(".*nothing-to-install.drv")) and + (.outputs | keys == ["out"])) +' + # But not when it's overriden. 
nix build -f multiple-outputs.nix --json e^a_a --no-link nix build -f multiple-outputs.nix --json e^a_a --no-link | jq --exit-status ' @@ -133,3 +143,35 @@ nix build --impure -f multiple-outputs.nix --json e --no-link | jq --exit-status # Make sure that `--stdin` works and does not apply any defaults printf "" | nix build --no-link --stdin --json | jq --exit-status '. == []' printf "%s\n" "$drv^*" | nix build --no-link --stdin --json | jq --exit-status '.[0]|has("drvPath")' + +# --keep-going and FOD +out="$(nix build -f fod-failing.nix -L 2>&1)" && status=0 || status=$? +test "$status" = 1 +# one "hash mismatch" error, one "build of ... failed" +test "$(<<<"$out" grep -E '^error:' | wc -l)" = 2 +<<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x1\\.drv'" +<<<"$out" grepQuiet -vE "hash mismatch in fixed-output derivation '.*-x3\\.drv'" +<<<"$out" grepQuiet -vE "hash mismatch in fixed-output derivation '.*-x2\\.drv'" +<<<"$out" grepQuiet -E "error: build of '.*-x[1-4]\\.drv\\^out', '.*-x[1-4]\\.drv\\^out', '.*-x[1-4]\\.drv\\^out', '.*-x[1-4]\\.drv\\^out' failed" + +out="$(nix build -f fod-failing.nix -L x1 x2 x3 --keep-going 2>&1)" && status=0 || status=$? +test "$status" = 1 +# three "hash mismatch" errors - for each failing fod, one "build of ... failed" +test "$(<<<"$out" grep -E '^error:' | wc -l)" = 4 +<<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x1\\.drv'" +<<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x3\\.drv'" +<<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x2\\.drv'" +<<<"$out" grepQuiet -E "error: build of '.*-x[1-3]\\.drv\\^out', '.*-x[1-3]\\.drv\\^out', '.*-x[1-3]\\.drv\\^out' failed" + +out="$(nix build -f fod-failing.nix -L x4 2>&1)" && status=0 || status=$? +test "$status" = 1 +test "$(<<<"$out" grep -E '^error:' | wc -l)" = 2 +<<<"$out" grepQuiet -E "error: 1 dependencies of derivation '.*-x4\\.drv' failed to build" +<<<"$out" grepQuiet -E "hash mismatch in fixed-output derivation '.*-x2\\.drv'" + +out="$(nix build -f fod-failing.nix -L x4 --keep-going 2>&1)" && status=0 || status=$? 
+test "$status" = 1 +test "$(<<<"$out" grep -E '^error:' | wc -l)" = 3 +<<<"$out" grepQuiet -E "error: 2 dependencies of derivation '.*-x4\\.drv' failed to build" +<<<"$out" grepQuiet -vE "hash mismatch in fixed-output derivation '.*-x3\\.drv'" +<<<"$out" grepQuiet -vE "hash mismatch in fixed-output derivation '.*-x2\\.drv'" diff --git a/tests/functional/case-hack.sh b/tests/functional/case-hack.sh old mode 100644 new mode 100755 index 61bf9b94b..fbc8242ff --- a/tests/functional/case-hack.sh +++ b/tests/functional/case-hack.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/check-refs.sh b/tests/functional/check-refs.sh old mode 100644 new mode 100755 index 3b587d1e5..2cebdd84d --- a/tests/functional/check-refs.sh +++ b/tests/functional/check-refs.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/check-reqs.sh b/tests/functional/check-reqs.sh old mode 100644 new mode 100755 index 856c94cec..2bcd558fd --- a/tests/functional/check-reqs.sh +++ b/tests/functional/check-reqs.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/check.sh b/tests/functional/check.sh old mode 100644 new mode 100755 index 38883c5d7..efb93eeb0 --- a/tests/functional/check.sh +++ b/tests/functional/check.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh # XXX: This shouldn’t be, but #4813 cause this test to fail diff --git a/tests/functional/chroot-store.sh b/tests/functional/chroot-store.sh old mode 100644 new mode 100755 index 9e589d04b..60b9c50a7 --- a/tests/functional/chroot-store.sh +++ b/tests/functional/chroot-store.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh echo example > $TEST_ROOT/example.txt diff --git a/tests/functional/common.sh b/tests/functional/common.sh index 7b0922c9f..d038aaf59 100644 --- a/tests/functional/common.sh +++ b/tests/functional/common.sh @@ -1,10 +1,16 @@ +# shellcheck shell=bash + set -eu -o pipefail if [[ -z "${COMMON_SH_SOURCED-}" ]]; then COMMON_SH_SOURCED=1 -source "$(readlink -f "$(dirname "${BASH_SOURCE[0]-$0}")")/common/vars-and-functions.sh" +functionalTestsDir="$(readlink -f "$(dirname "${BASH_SOURCE[0]-$0}")")" + +source "$functionalTestsDir/common/vars-and-functions.sh" +source "$functionalTestsDir/common/init.sh" + if [[ -n "${NIX_DAEMON_PACKAGE:-}" ]]; then startDaemon fi diff --git a/tests/functional/init.sh b/tests/functional/common/init.sh similarity index 65% rename from tests/functional/init.sh rename to tests/functional/common/init.sh index 97b1b0587..dda1ecd41 100755 --- a/tests/functional/init.sh +++ b/tests/functional/common/init.sh @@ -1,14 +1,15 @@ -# Don't start the daemon -source common/vars-and-functions.sh +# shellcheck shell=bash test -n "$TEST_ROOT" -if test -d "$TEST_ROOT"; then - chmod -R u+rw "$TEST_ROOT" - # We would delete any daemon socket, so let's stop the daemon first. - killDaemon +# We would delete any daemon socket, so let's stop the daemon first. +killDaemon +# Destroy the test directory that may have persisted from previous runs +if [[ -e "$TEST_ROOT" ]]; then + chmod -R u+w "$TEST_ROOT" rm -rf "$TEST_ROOT" fi -mkdir "$TEST_ROOT" +mkdir -p "$TEST_ROOT" +mkdir "$TEST_HOME" mkdir "$NIX_STORE_DIR" mkdir "$NIX_LOCALSTATE_DIR" @@ -36,7 +37,7 @@ extra-experimental-features = flakes EOF # Initialise the database. +# The flag itself does nothing, but running the command touches the store nix-store --init - -# Did anything happen? 
+# Sanity check test -e "$NIX_STATE_DIR"/db/db.sqlite diff --git a/tests/functional/common/subst-vars.sh.in b/tests/functional/common/subst-vars.sh.in new file mode 100644 index 000000000..4105e9f35 --- /dev/null +++ b/tests/functional/common/subst-vars.sh.in @@ -0,0 +1,15 @@ +# NOTE: instances of @variable@ are substituted as defined in /mk/templates.mk + +export PATH=@bindir@:$PATH +export coreutils=@coreutils@ +#lsof=@lsof@ + +export dot=@dot@ +export SHELL="@bash@" +export PAGER=cat +export busybox="@sandbox_shell@" + +export version=@PACKAGE_VERSION@ +export system=@system@ + +export BUILD_SHARED_LIBS=@BUILD_SHARED_LIBS@ diff --git a/tests/functional/common/vars-and-functions.sh.in b/tests/functional/common/vars-and-functions.sh similarity index 95% rename from tests/functional/common/vars-and-functions.sh.in rename to tests/functional/common/vars-and-functions.sh index e7e2fc770..ad5b29a94 100644 --- a/tests/functional/common/vars-and-functions.sh.in +++ b/tests/functional/common/vars-and-functions.sh @@ -1,3 +1,5 @@ +# NOTE: instances of @variable@ are substituted as defined in /mk/templates.mk + set -eu -o pipefail if [[ -z "${COMMON_VARS_AND_FUNCTIONS_SH_SOURCED-}" ]]; then @@ -6,6 +8,12 @@ COMMON_VARS_AND_FUNCTIONS_SH_SOURCED=1 set +x +commonDir="$(readlink -f "$(dirname "${BASH_SOURCE[0]-$0}")")" + +source "$commonDir/subst-vars.sh" +# Make sure shellcheck knows all these will be defined by the above generated snippet +: "${PATH?} ${coreutils?} ${dot?} ${SHELL?} ${PAGER?} ${busybox?} ${version?} ${system?} ${BUILD_SHARED_LIBS?}" + export TEST_ROOT=$(realpath ${TMPDIR:-/tmp}/nix-test)/${TEST_NAME:-default/tests\/functional//} export NIX_STORE_DIR if ! NIX_STORE_DIR=$(readlink -f $TEST_ROOT/store 2> /dev/null); then @@ -34,9 +42,7 @@ unset XDG_DATA_HOME unset XDG_CONFIG_HOME unset XDG_CONFIG_DIRS unset XDG_CACHE_HOME -mkdir -p $TEST_HOME -export PATH=@bindir@:$PATH if [[ -n "${NIX_CLIENT_PACKAGE:-}" ]]; then export PATH="$NIX_CLIENT_PACKAGE/bin":$PATH fi @@ -44,18 +50,6 @@ DAEMON_PATH="$PATH" if [[ -n "${NIX_DAEMON_PACKAGE:-}" ]]; then DAEMON_PATH="${NIX_DAEMON_PACKAGE}/bin:$DAEMON_PATH" fi -coreutils=@coreutils@ -lsof=@lsof@ - -export dot=@dot@ -export SHELL="@bash@" -export PAGER=cat -export busybox="@sandbox_shell@" - -export version=@PACKAGE_VERSION@ -export system=@system@ - -export BUILD_SHARED_LIBS=@BUILD_SHARED_LIBS@ export IMPURE_VAR1=foo export IMPURE_VAR2=bar diff --git a/tests/functional/completions.sh b/tests/functional/completions.sh old mode 100644 new mode 100755 index d3d5bbd48..9164c5013 --- a/tests/functional/completions.sh +++ b/tests/functional/completions.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh cd "$TEST_ROOT" diff --git a/tests/functional/compression-levels.sh b/tests/functional/compression-levels.sh old mode 100644 new mode 100755 index 85f12974a..34f66c531 --- a/tests/functional/compression-levels.sh +++ b/tests/functional/compression-levels.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/compute-levels.sh b/tests/functional/compute-levels.sh old mode 100644 new mode 100755 index de3da2ebd..a8bd27610 --- a/tests/functional/compute-levels.sh +++ b/tests/functional/compute-levels.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh if [[ $(uname -ms) = "Linux x86_64" ]]; then diff --git a/tests/functional/config.sh b/tests/functional/config.sh old mode 100644 new mode 100755 index efdafa8ca..1811b755c --- a/tests/functional/config.sh +++ b/tests/functional/config.sh @@ -1,3 +1,5 @@ 
+#!/usr/bin/env bash + source common.sh # Isolate the home for this test. diff --git a/tests/functional/db-migration.sh b/tests/functional/db-migration.sh old mode 100644 new mode 100755 index 44cd16bc0..a6a5c7744 --- a/tests/functional/db-migration.sh +++ b/tests/functional/db-migration.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + # Test that we can successfully migrate from an older db schema source common.sh diff --git a/tests/functional/debugger.sh b/tests/functional/debugger.sh old mode 100644 new mode 100755 index 63d88cbf3..47e644bb7 --- a/tests/functional/debugger.sh +++ b/tests/functional/debugger.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/dependencies.sh b/tests/functional/dependencies.sh old mode 100644 new mode 100755 index b93dacac0..5922a1f98 --- a/tests/functional/dependencies.sh +++ b/tests/functional/dependencies.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/derivation-json.sh b/tests/functional/derivation-json.sh old mode 100644 new mode 100755 index b6be5d977..59c77e6c5 --- a/tests/functional/derivation-json.sh +++ b/tests/functional/derivation-json.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh drvPath=$(nix-instantiate simple.nix) diff --git a/tests/functional/dump-db.sh b/tests/functional/dump-db.sh old mode 100644 new mode 100755 index 48647f403..2d0460275 --- a/tests/functional/dump-db.sh +++ b/tests/functional/dump-db.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh needLocalStore "--dump-db requires a local store" diff --git a/tests/functional/eval-store.sh b/tests/functional/eval-store.sh old mode 100644 new mode 100755 index 9937ecbce..0ab608acc --- a/tests/functional/eval-store.sh +++ b/tests/functional/eval-store.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh # Using `--eval-store` with the daemon will eventually copy everything diff --git a/tests/functional/eval.sh b/tests/functional/eval.sh old mode 100644 new mode 100755 index c6a475cd0..acd6e2915 --- a/tests/functional/eval.sh +++ b/tests/functional/eval.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore @@ -52,3 +54,7 @@ fi # Test --arg-from-stdin. 
[[ "$(echo bla | nix eval --raw --arg-from-stdin foo --expr '{ foo }: { inherit foo; }' foo)" = bla ]] + +# Test that unknown settings are warned about +out="$(expectStderr 0 nix eval --option foobar baz --expr '""' --raw)" +[[ "$(echo "$out" | grep foobar | wc -l)" = 1 ]] diff --git a/tests/functional/experimental-features.sh b/tests/functional/experimental-features.sh old mode 100644 new mode 100755 index 12112b293..38f198eee --- a/tests/functional/experimental-features.sh +++ b/tests/functional/experimental-features.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh # Skipping these two for now, because we actually *do* want flags and diff --git a/tests/functional/export-graph.sh b/tests/functional/export-graph.sh old mode 100644 new mode 100755 index 1f6232a40..281b5b05b --- a/tests/functional/export-graph.sh +++ b/tests/functional/export-graph.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/export.sh b/tests/functional/export.sh old mode 100644 new mode 100755 index 2238539bc..fce2e333a --- a/tests/functional/export.sh +++ b/tests/functional/export.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/extra-sandbox-profile.sh b/tests/functional/extra-sandbox-profile.sh old mode 100644 new mode 100755 index ac3ca036f..672e5779d --- a/tests/functional/extra-sandbox-profile.sh +++ b/tests/functional/extra-sandbox-profile.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh if [[ $(uname) != Darwin ]]; then skipTest "Need Darwin"; fi diff --git a/tests/functional/fetchClosure.sh b/tests/functional/fetchClosure.sh old mode 100644 new mode 100755 index a02d1ce7a..4026b0790 --- a/tests/functional/fetchClosure.sh +++ b/tests/functional/fetchClosure.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh enableFeatures "fetch-closure" diff --git a/tests/functional/fetchGit.sh b/tests/functional/fetchGit.sh old mode 100644 new mode 100755 index 74d6de4e3..d89d59a81 --- a/tests/functional/fetchGit.sh +++ b/tests/functional/fetchGit.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh requireGit diff --git a/tests/functional/fetchGitRefs.sh b/tests/functional/fetchGitRefs.sh old mode 100644 new mode 100755 index d643fea04..b17cc2090 --- a/tests/functional/fetchGitRefs.sh +++ b/tests/functional/fetchGitRefs.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh requireGit diff --git a/tests/functional/fetchGitSubmodules.sh b/tests/functional/fetchGitSubmodules.sh old mode 100644 new mode 100755 index bd82a0a17..7c5f7b0d6 --- a/tests/functional/fetchGitSubmodules.sh +++ b/tests/functional/fetchGitSubmodules.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh set -u diff --git a/tests/functional/fetchGitVerification.sh b/tests/functional/fetchGitVerification.sh old mode 100644 new mode 100755 index b80e061b5..27f9a8cf5 --- a/tests/functional/fetchGitVerification.sh +++ b/tests/functional/fetchGitVerification.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh requireGit diff --git a/tests/functional/fetchMercurial.sh b/tests/functional/fetchMercurial.sh old mode 100644 new mode 100755 index 9f7cef7b2..f3774fc74 --- a/tests/functional/fetchMercurial.sh +++ b/tests/functional/fetchMercurial.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh [[ $(type -p hg) ]] || skipTest "Mercurial not installed" diff --git a/tests/functional/fetchPath.sh b/tests/functional/fetchPath.sh old mode 100644 new mode 100755 index 29be38ce2..e466e4494 --- 
a/tests/functional/fetchPath.sh +++ b/tests/functional/fetchPath.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh touch $TEST_ROOT/foo -t 202211111111 diff --git a/tests/functional/fetchTree-file.sh b/tests/functional/fetchTree-file.sh old mode 100644 new mode 100755 index be698ea35..9c9532876 --- a/tests/functional/fetchTree-file.sh +++ b/tests/functional/fetchTree-file.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/fetchurl.sh b/tests/functional/fetchurl.sh old mode 100644 new mode 100755 index a3620f52b..2255dbbdc --- a/tests/functional/fetchurl.sh +++ b/tests/functional/fetchurl.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/filter-source.sh b/tests/functional/filter-source.sh old mode 100644 new mode 100755 index ba34d2eac..c5e10be93 --- a/tests/functional/filter-source.sh +++ b/tests/functional/filter-source.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh rm -rf $TEST_ROOT/filterin diff --git a/tests/functional/fixed.sh b/tests/functional/fixed.sh old mode 100644 new mode 100755 index 7bbecda91..4d07d00cd --- a/tests/functional/fixed.sh +++ b/tests/functional/fixed.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/flakes/absolute-attr-paths.sh b/tests/functional/flakes/absolute-attr-paths.sh old mode 100644 new mode 100755 index 491adceb7..8ed1755c4 --- a/tests/functional/flakes/absolute-attr-paths.sh +++ b/tests/functional/flakes/absolute-attr-paths.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source ./common.sh flake1Dir=$TEST_ROOT/flake1 diff --git a/tests/functional/flakes/absolute-paths.sh b/tests/functional/flakes/absolute-paths.sh old mode 100644 new mode 100755 index e7bfba12d..a355a7a1c --- a/tests/functional/flakes/absolute-paths.sh +++ b/tests/functional/flakes/absolute-paths.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source ./common.sh requireGit diff --git a/tests/functional/flakes/build-paths.sh b/tests/functional/flakes/build-paths.sh old mode 100644 new mode 100755 index 4e5c68095..a336471f0 --- a/tests/functional/flakes/build-paths.sh +++ b/tests/functional/flakes/build-paths.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source ./common.sh flake1Dir=$TEST_ROOT/flake1 diff --git a/tests/functional/flakes/bundle.sh b/tests/functional/flakes/bundle.sh old mode 100644 new mode 100755 index 67bbb05ac..711691e0b --- a/tests/functional/flakes/bundle.sh +++ b/tests/functional/flakes/bundle.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh cp ../simple.nix ../simple.builder.sh ../config.nix $TEST_HOME diff --git a/tests/functional/flakes/check.sh b/tests/functional/flakes/check.sh old mode 100644 new mode 100755 index 0433e5335..3b83dcafe --- a/tests/functional/flakes/check.sh +++ b/tests/functional/flakes/check.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh flakeDir=$TEST_ROOT/flake3 diff --git a/tests/functional/flakes/circular.sh b/tests/functional/flakes/circular.sh old mode 100644 new mode 100755 index d3bb8e8a3..6cab3a72b --- a/tests/functional/flakes/circular.sh +++ b/tests/functional/flakes/circular.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + # Test circular flake dependencies. 
source ./common.sh diff --git a/tests/functional/flakes/config.sh b/tests/functional/flakes/config.sh old mode 100644 new mode 100755 index d1941a6be..66b917457 --- a/tests/functional/flakes/config.sh +++ b/tests/functional/flakes/config.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh cp ../simple.nix ../simple.builder.sh ../config.nix $TEST_HOME diff --git a/tests/functional/flakes/develop.sh b/tests/functional/flakes/develop.sh old mode 100644 new mode 100755 index e1e53d364..83be2e308 --- a/tests/functional/flakes/develop.sh +++ b/tests/functional/flakes/develop.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source ../common.sh clearStore diff --git a/tests/functional/flakes/edit.sh b/tests/functional/flakes/edit.sh new file mode 100755 index 000000000..0fdf8b95a --- /dev/null +++ b/tests/functional/flakes/edit.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +source ./common.sh + +requireGit + +flake1Dir=$TEST_ROOT/flake1 + +createGitRepo "$flake1Dir" +createSimpleGitFlake "$flake1Dir" + +export EDITOR=cat +nix edit "$flake1Dir#" | grepQuiet simple.builder.sh diff --git a/tests/functional/flakes/eval-cache.sh b/tests/functional/flakes/eval-cache.sh new file mode 100644 index 000000000..90c7abd3c --- /dev/null +++ b/tests/functional/flakes/eval-cache.sh @@ -0,0 +1,22 @@ +source ./common.sh + +requireGit + +flake1Dir="$TEST_ROOT/eval-cache-flake" + +createGitRepo "$flake1Dir" "" + +cat >"$flake1Dir/flake.nix" <&1 | grepQuiet 'error: breaks' +expect 1 nix build "$flake1Dir#foo.bar" 2>&1 | grepQuiet 'error: breaks' diff --git a/tests/functional/flakes/flake-in-submodule.sh b/tests/functional/flakes/flake-in-submodule.sh old mode 100644 new mode 100755 index 85a4d3389..2988352a9 --- a/tests/functional/flakes/flake-in-submodule.sh +++ b/tests/functional/flakes/flake-in-submodule.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh # Tests that: diff --git a/tests/functional/flakes/flakes.sh b/tests/functional/flakes/flakes.sh old mode 100644 new mode 100755 index 35b0c5d84..3057c0293 --- a/tests/functional/flakes/flakes.sh +++ b/tests/functional/flakes/flakes.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source ./common.sh requireGit diff --git a/tests/functional/flakes/follow-paths.sh b/tests/functional/flakes/follow-paths.sh old mode 100644 new mode 100755 index 1afd91bd2..ea56b9503 --- a/tests/functional/flakes/follow-paths.sh +++ b/tests/functional/flakes/follow-paths.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source ./common.sh requireGit diff --git a/tests/functional/flakes/init.sh b/tests/functional/flakes/init.sh old mode 100644 new mode 100755 index 2d4c77ba1..f8d51e819 --- a/tests/functional/flakes/init.sh +++ b/tests/functional/flakes/init.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source ./common.sh requireGit diff --git a/tests/functional/flakes/inputs.sh b/tests/functional/flakes/inputs.sh old mode 100644 new mode 100755 index 80620488a..0327a3e9e --- a/tests/functional/flakes/inputs.sh +++ b/tests/functional/flakes/inputs.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source ./common.sh requireGit diff --git a/tests/functional/flakes/mercurial.sh b/tests/functional/flakes/mercurial.sh old mode 100644 new mode 100755 index 7074af6f7..0e9f2d626 --- a/tests/functional/flakes/mercurial.sh +++ b/tests/functional/flakes/mercurial.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source ./common.sh [[ $(type -p hg) ]] || skipTest "Mercurial not installed" diff --git a/tests/functional/flakes/prefetch.sh b/tests/functional/flakes/prefetch.sh new file mode 100755 index 000000000..a451b7120 --- 
/dev/null +++ b/tests/functional/flakes/prefetch.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +source common.sh + +# Test symlinks in zip files (#10649). +path=$(nix flake prefetch --json file://$(pwd)/tree.zip | jq -r .storePath) +[[ $(cat $path/foo) = foo ]] +[[ $(readlink $path/bar) = foo ]] diff --git a/tests/functional/flakes/run.sh b/tests/functional/flakes/run.sh old mode 100644 new mode 100755 index 9fa51d1c7..4d8b512b9 --- a/tests/functional/flakes/run.sh +++ b/tests/functional/flakes/run.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source ../common.sh clearStore diff --git a/tests/functional/flakes/search-root.sh b/tests/functional/flakes/search-root.sh old mode 100644 new mode 100755 index 6b137aa86..c2337edc0 --- a/tests/functional/flakes/search-root.sh +++ b/tests/functional/flakes/search-root.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/flakes/show.sh b/tests/functional/flakes/show.sh old mode 100644 new mode 100755 index a3d300552..22e1f4193 --- a/tests/functional/flakes/show.sh +++ b/tests/functional/flakes/show.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source ./common.sh flakeDir=$TEST_ROOT/flake diff --git a/tests/functional/flakes/tree.zip b/tests/functional/flakes/tree.zip new file mode 100644 index 000000000..f9e4d225f Binary files /dev/null and b/tests/functional/flakes/tree.zip differ diff --git a/tests/functional/flakes/unlocked-override.sh b/tests/functional/flakes/unlocked-override.sh old mode 100644 new mode 100755 index 8abc8b7d3..680a1505c --- a/tests/functional/flakes/unlocked-override.sh +++ b/tests/functional/flakes/unlocked-override.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source ./common.sh requireGit diff --git a/tests/functional/fmt.sh b/tests/functional/fmt.sh old mode 100644 new mode 100755 index 3c1bd9989..8fc9a3979 --- a/tests/functional/fmt.sh +++ b/tests/functional/fmt.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/fod-failing.nix b/tests/functional/fod-failing.nix new file mode 100644 index 000000000..37c04fe12 --- /dev/null +++ b/tests/functional/fod-failing.nix @@ -0,0 +1,39 @@ +with import ./config.nix; +rec { + x1 = mkDerivation { + name = "x1"; + builder = builtins.toFile "builder.sh" + '' + echo $name > $out + ''; + outputHashMode = "recursive"; + outputHash = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA="; + }; + x2 = mkDerivation { + name = "x2"; + builder = builtins.toFile "builder.sh" + '' + echo $name > $out + ''; + outputHashMode = "recursive"; + outputHash = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA="; + }; + x3 = mkDerivation { + name = "x3"; + builder = builtins.toFile "builder.sh" + '' + echo $name > $out + ''; + outputHashMode = "recursive"; + outputHash = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA="; + }; + x4 = mkDerivation { + name = "x4"; + inherit x2 x3; + builder = builtins.toFile "builder.sh" + '' + echo $x2 $x3 + exit 1 + ''; + }; +} diff --git a/tests/functional/function-trace.sh b/tests/functional/function-trace.sh index bd804bf18..71f18b67f 100755 --- a/tests/functional/function-trace.sh +++ b/tests/functional/function-trace.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh set +x diff --git a/tests/functional/gc-auto.sh b/tests/functional/gc-auto.sh old mode 100644 new mode 100755 index 281eef20d..98bb7e60e --- a/tests/functional/gc-auto.sh +++ b/tests/functional/gc-auto.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh needLocalStore "“min-free” and “max-free” are daemon 
options" diff --git a/tests/functional/gc-concurrent.sh b/tests/functional/gc-concurrent.sh old mode 100644 new mode 100755 index 2c6622c62..67ea3dc74 --- a/tests/functional/gc-concurrent.sh +++ b/tests/functional/gc-concurrent.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/gc-non-blocking.sh b/tests/functional/gc-non-blocking.sh old mode 100644 new mode 100755 index ec280badb..ecfa421fb --- a/tests/functional/gc-non-blocking.sh +++ b/tests/functional/gc-non-blocking.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + # Test whether the collector is non-blocking, i.e. a build can run in # parallel with it. source common.sh diff --git a/tests/functional/gc-runtime.sh b/tests/functional/gc-runtime.sh old mode 100644 new mode 100755 index dc1826a55..2ee72b61e --- a/tests/functional/gc-runtime.sh +++ b/tests/functional/gc-runtime.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh case $system in diff --git a/tests/functional/gc.sh b/tests/functional/gc.sh old mode 100644 new mode 100755 index ad09a8b39..1f216ebc7 --- a/tests/functional/gc.sh +++ b/tests/functional/gc.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/hash-convert.sh b/tests/functional/hash-convert.sh old mode 100644 new mode 100755 index 9b3afc10b..3a099950f --- a/tests/functional/hash-convert.sh +++ b/tests/functional/hash-convert.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh # Conversion with `nix hash` `nix-hash` and `nix hash convert` diff --git a/tests/functional/hash-path.sh b/tests/functional/hash-path.sh old mode 100644 new mode 100755 index 4ad9f8ff2..12605ef71 --- a/tests/functional/hash-path.sh +++ b/tests/functional/hash-path.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh try () { diff --git a/tests/functional/help.sh b/tests/functional/help.sh old mode 100644 new mode 100755 index 868f5d2e9..6436fb500 --- a/tests/functional/help.sh +++ b/tests/functional/help.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/import-derivation.sh b/tests/functional/import-derivation.sh old mode 100644 new mode 100755 index 98d61ef49..53efa1f5d --- a/tests/functional/import-derivation.sh +++ b/tests/functional/import-derivation.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/impure-derivations.sh b/tests/functional/impure-derivations.sh old mode 100644 new mode 100755 index 54ed6f5dd..b59f73c77 --- a/tests/functional/impure-derivations.sh +++ b/tests/functional/impure-derivations.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh requireDaemonNewerThan "2.8pre20220311" diff --git a/tests/functional/impure-env.sh b/tests/functional/impure-env.sh old mode 100644 new mode 100755 index cfea4cae9..3c7df169e --- a/tests/functional/impure-env.sh +++ b/tests/functional/impure-env.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh # Needs the config option 'impure-env' to work diff --git a/tests/functional/impure-eval.sh b/tests/functional/impure-eval.sh old mode 100644 new mode 100755 index 6c72f01d7..33a5ea409 --- a/tests/functional/impure-eval.sh +++ b/tests/functional/impure-eval.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh export REMOTE_STORE="dummy://" diff --git a/tests/functional/lang-test-infra.sh b/tests/functional/lang-test-infra.sh old mode 100644 new mode 100755 index 30da8977b..f32ccef05 --- a/tests/functional/lang-test-infra.sh +++ b/tests/functional/lang-test-infra.sh @@ 
-1,3 +1,5 @@ +#!/usr/bin/env bash + # Test the function for lang.sh source common.sh diff --git a/tests/functional/lang.sh b/tests/functional/lang.sh index e35795a7a..a853cfd81 100755 --- a/tests/functional/lang.sh +++ b/tests/functional/lang.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh set -o pipefail @@ -24,6 +26,9 @@ nix-instantiate --eval -E 'builtins.traceVerbose "Hello" 123' 2>&1 | grepQuietIn nix-instantiate --show-trace --eval -E 'builtins.addErrorContext "Hello" 123' 2>&1 | grepQuietInverse Hello expectStderr 1 nix-instantiate --show-trace --eval -E 'builtins.addErrorContext "Hello" (throw "Foo")' | grepQuiet Hello expectStderr 1 nix-instantiate --show-trace --eval -E 'builtins.addErrorContext "Hello %" (throw "Foo")' | grepQuiet 'Hello %' +# Relies on parsing the expression derivation as a derivation, can't use --eval +expectStderr 1 nix-instantiate --show-trace lang/non-eval-fail-bad-drvPath.nix | grepQuiet "store path '8qlfcic10lw5304gqm8q45nr7g7jl62b-cachix-1.7.3-bin' is not a valid derivation path" + nix-instantiate --eval -E 'let x = builtins.trace { x = x; } true; in x' \ 2>&1 | grepQuiet -E 'trace: { x = «potential infinite recursion»; }' @@ -72,7 +77,7 @@ for i in lang/eval-fail-*.nix; do if [[ -e "lang/$i.flags" ]]; then sed -e 's/#.*//' < "lang/$i.flags" else - # note that show-trace is also set by init.sh + # note that show-trace is also set by common/init.sh echo "--eval --strict --show-trace" fi )" diff --git a/tests/functional/lang/non-eval-fail-bad-drvPath.nix b/tests/functional/lang/non-eval-fail-bad-drvPath.nix new file mode 100644 index 000000000..23639bc54 --- /dev/null +++ b/tests/functional/lang/non-eval-fail-bad-drvPath.nix @@ -0,0 +1,14 @@ +let + package = { + type = "derivation"; + name = "cachix-1.7.3"; + system = builtins.currentSystem; + outputs = [ "out" ]; + # Illegal, because does not end in `.drv` + drvPath = "${builtins.storeDir}/8qlfcic10lw5304gqm8q45nr7g7jl62b-cachix-1.7.3-bin"; + outputName = "out"; + outPath = "${builtins.storeDir}/8qlfcic10lw5304gqm8q45nr7g7jl62b-cachix-1.7.3-bin"; + out = package; + }; +in +package diff --git a/tests/functional/legacy-ssh-store.sh b/tests/functional/legacy-ssh-store.sh old mode 100644 new mode 100755 index 56b4c2d20..3a1a7b022 --- a/tests/functional/legacy-ssh-store.sh +++ b/tests/functional/legacy-ssh-store.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh store_uri="ssh://localhost?remote-store=$TEST_ROOT/other-store" diff --git a/tests/functional/linux-sandbox.sh b/tests/functional/linux-sandbox.sh old mode 100644 new mode 100755 index e553791d9..e71224f5e --- a/tests/functional/linux-sandbox.sh +++ b/tests/functional/linux-sandbox.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh needLocalStore "the sandbox only runs on the builder side, so it makes no sense to test it with the daemon" diff --git a/tests/functional/local-overlay-store/add-lower.sh b/tests/functional/local-overlay-store/add-lower.sh index f0ac46a91..33bf20ebd 100755 --- a/tests/functional/local-overlay-store/add-lower.sh +++ b/tests/functional/local-overlay-store/add-lower.sh @@ -1,4 +1,5 @@ source common.sh +source ../common/init.sh requireEnvironment setupConfig diff --git a/tests/functional/local-overlay-store/bad-uris.sh b/tests/functional/local-overlay-store/bad-uris.sh index 2517681dd..42a6d47f7 100644 --- a/tests/functional/local-overlay-store/bad-uris.sh +++ b/tests/functional/local-overlay-store/bad-uris.sh @@ -1,4 +1,5 @@ source common.sh +source ../common/init.sh requireEnvironment setupConfig 
diff --git a/tests/functional/local-overlay-store/build.sh b/tests/functional/local-overlay-store/build.sh index 758585400..2251be7e7 100755 --- a/tests/functional/local-overlay-store/build.sh +++ b/tests/functional/local-overlay-store/build.sh @@ -1,4 +1,5 @@ source common.sh +source ../common/init.sh requireEnvironment setupConfig diff --git a/tests/functional/local-overlay-store/check-post-init.sh b/tests/functional/local-overlay-store/check-post-init.sh index 985bf978e..e0c260276 100755 --- a/tests/functional/local-overlay-store/check-post-init.sh +++ b/tests/functional/local-overlay-store/check-post-init.sh @@ -1,4 +1,5 @@ source common.sh +source ../common/init.sh requireEnvironment setupConfig diff --git a/tests/functional/local-overlay-store/common.sh b/tests/functional/local-overlay-store/common.sh index 2634f8c8f..0e6097861 100644 --- a/tests/functional/local-overlay-store/common.sh +++ b/tests/functional/local-overlay-store/common.sh @@ -1,4 +1,4 @@ -source ../common.sh +source ../common/vars-and-functions.sh # The new Linux mount interface does not seem to support remounting # OverlayFS mount points. @@ -37,10 +37,9 @@ addConfig () { setupConfig () { addConfig "require-drop-supplementary-groups = false" addConfig "build-users-group = " + enableFeatures "local-overlay-store" } -enableFeatures "local-overlay-store" - setupStoreDirs () { # Attempt to create store dirs on tmpfs volume. # This ensures lowerdir, upperdir and workdir will be on diff --git a/tests/functional/local-overlay-store/delete-duplicate.sh b/tests/functional/local-overlay-store/delete-duplicate.sh index 0c0b1a3b2..e3b94e1cb 100644 --- a/tests/functional/local-overlay-store/delete-duplicate.sh +++ b/tests/functional/local-overlay-store/delete-duplicate.sh @@ -1,4 +1,5 @@ source common.sh +source ../common/init.sh requireEnvironment setupConfig diff --git a/tests/functional/local-overlay-store/delete-refs.sh b/tests/functional/local-overlay-store/delete-refs.sh index 942d7fbdc..62295aaa1 100755 --- a/tests/functional/local-overlay-store/delete-refs.sh +++ b/tests/functional/local-overlay-store/delete-refs.sh @@ -1,4 +1,5 @@ source common.sh +source ../common/init.sh requireEnvironment setupConfig diff --git a/tests/functional/local-overlay-store/gc.sh b/tests/functional/local-overlay-store/gc.sh index 1e1fb203e..f3420d0b8 100755 --- a/tests/functional/local-overlay-store/gc.sh +++ b/tests/functional/local-overlay-store/gc.sh @@ -1,4 +1,5 @@ source common.sh +source ../common/init.sh requireEnvironment setupConfig diff --git a/tests/functional/local-overlay-store/optimise.sh b/tests/functional/local-overlay-store/optimise.sh index 569afa248..a524a675e 100755 --- a/tests/functional/local-overlay-store/optimise.sh +++ b/tests/functional/local-overlay-store/optimise.sh @@ -1,4 +1,5 @@ source common.sh +source ../common/init.sh requireEnvironment setupConfig diff --git a/tests/functional/local-overlay-store/redundant-add.sh b/tests/functional/local-overlay-store/redundant-add.sh index fbd4799e7..b4f04b2e1 100755 --- a/tests/functional/local-overlay-store/redundant-add.sh +++ b/tests/functional/local-overlay-store/redundant-add.sh @@ -1,4 +1,5 @@ source common.sh +source ../common/init.sh requireEnvironment setupConfig diff --git a/tests/functional/local-overlay-store/stale-file-handle.sh b/tests/functional/local-overlay-store/stale-file-handle.sh index 5e75628ca..684b8ce23 100755 --- a/tests/functional/local-overlay-store/stale-file-handle.sh +++ b/tests/functional/local-overlay-store/stale-file-handle.sh @@ -1,4 +1,5 
@@ source common.sh +source ../common/init.sh requireEnvironment setupConfig diff --git a/tests/functional/local-overlay-store/verify.sh b/tests/functional/local-overlay-store/verify.sh index 8b44603ff..d73d1a57d 100755 --- a/tests/functional/local-overlay-store/verify.sh +++ b/tests/functional/local-overlay-store/verify.sh @@ -1,4 +1,5 @@ source common.sh +source ../common/init.sh requireEnvironment setupConfig diff --git a/tests/functional/local.mk b/tests/functional/local.mk index 65ab20f9a..b379eeefe 100644 --- a/tests/functional/local.mk +++ b/tests/functional/local.mk @@ -1,8 +1,8 @@ nix_tests = \ test-infra.sh \ - init.sh \ flakes/flakes.sh \ flakes/develop.sh \ + flakes/edit.sh \ flakes/run.sh \ flakes/mercurial.sh \ flakes/circular.sh \ @@ -16,6 +16,8 @@ nix_tests = \ flakes/absolute-attr-paths.sh \ flakes/build-paths.sh \ flakes/flake-in-submodule.sh \ + flakes/prefetch.sh \ + flakes/eval-cache.sh \ gc.sh \ nix-collect-garbage-d.sh \ remote-store.sh \ @@ -153,7 +155,7 @@ $(d)/plugins.sh.test $(d)/plugins.sh.test-debug: \ install-tests += $(foreach x, $(nix_tests), $(d)/$(x)) test-clean-files := \ - $(d)/common/vars-and-functions.sh \ + $(d)/common/subst-vars.sh \ $(d)/config.nix clean-files += $(test-clean-files) diff --git a/tests/functional/logging.sh b/tests/functional/logging.sh old mode 100644 new mode 100755 index 1ccc21d0b..63752f6db --- a/tests/functional/logging.sh +++ b/tests/functional/logging.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/misc.sh b/tests/functional/misc.sh old mode 100644 new mode 100755 index af96d20bd..9eb80ad22 --- a/tests/functional/misc.sh +++ b/tests/functional/misc.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh # Tests miscellaneous commands. @@ -30,3 +32,12 @@ expectStderr 1 nix-instantiate --eval -E '[]' -A 'x' | grepQuiet "should be a se expectStderr 1 nix-instantiate --eval -E '{}' -A '1' | grepQuiet "should be a list" expectStderr 1 nix-instantiate --eval -E '{}' -A '.' | grepQuiet "empty attribute name" expectStderr 1 nix-instantiate --eval -E '[]' -A '1' | grepQuiet "out of range" + +# Unknown setting warning +# NOTE(cole-h): behavior is different depending on the order, which is why we test an unknown option +# before and after the `'{}'`! 
+out="$(expectStderr 0 nix-instantiate --option foobar baz --expr '{}')" +[[ "$(echo "$out" | grep foobar | wc -l)" = 1 ]] + +out="$(expectStderr 0 nix-instantiate '{}' --option foobar baz --expr )" +[[ "$(echo "$out" | grep foobar | wc -l)" = 1 ]] diff --git a/tests/functional/multiple-outputs.nix b/tests/functional/multiple-outputs.nix index 413d392e4..6ba7c523d 100644 --- a/tests/functional/multiple-outputs.nix +++ b/tests/functional/multiple-outputs.nix @@ -96,6 +96,12 @@ rec { buildCommand = "mkdir $a_a $b $c"; }; + nothing-to-install = mkDerivation { + name = "nothing-to-install"; + meta.outputsToInstall = [ ]; + buildCommand = "mkdir $out"; + }; + independent = mkDerivation { name = "multiple-outputs-independent"; outputs = [ "first" "second" ]; diff --git a/tests/functional/multiple-outputs.sh b/tests/functional/multiple-outputs.sh old mode 100644 new mode 100755 index 330600d08..af9f8af72 --- a/tests/functional/multiple-outputs.sh +++ b/tests/functional/multiple-outputs.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/nar-access.sh b/tests/functional/nar-access.sh old mode 100644 new mode 100755 index 87981e7d9..8839fd043 --- a/tests/functional/nar-access.sh +++ b/tests/functional/nar-access.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh echo "building test path" diff --git a/tests/functional/nested-sandboxing.sh b/tests/functional/nested-sandboxing.sh old mode 100644 new mode 100755 index 61fe043c6..44c3bb2bc --- a/tests/functional/nested-sandboxing.sh +++ b/tests/functional/nested-sandboxing.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh # This test is run by `tests/functional/nested-sandboxing/runner.nix` in an extra layer of sandboxing. [[ -d /nix/store ]] || skipTest "running this test without Nix's deps being drawn from /nix/store is not yet supported" diff --git a/tests/functional/nix-build.sh b/tests/functional/nix-build.sh old mode 100644 new mode 100755 index 44a5a14cd..45ff314c7 --- a/tests/functional/nix-build.sh +++ b/tests/functional/nix-build.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/nix-channel.sh b/tests/functional/nix-channel.sh old mode 100644 new mode 100755 index ca5df3bdd..a4870e7a8 --- a/tests/functional/nix-channel.sh +++ b/tests/functional/nix-channel.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearProfiles diff --git a/tests/functional/nix-collect-garbage-d.sh b/tests/functional/nix-collect-garbage-d.sh old mode 100644 new mode 100755 index bf30f8938..07aaf61e9 --- a/tests/functional/nix-collect-garbage-d.sh +++ b/tests/functional/nix-collect-garbage-d.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/nix-copy-ssh-ng.sh b/tests/functional/nix-copy-ssh-ng.sh old mode 100644 new mode 100755 index 62e99cd24..1fd735b9d --- a/tests/functional/nix-copy-ssh-ng.sh +++ b/tests/functional/nix-copy-ssh-ng.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh source nix-copy-ssh-common.sh "ssh-ng" diff --git a/tests/functional/nix-copy-ssh.sh b/tests/functional/nix-copy-ssh.sh old mode 100644 new mode 100755 index 12e8346bc..1dc256e49 --- a/tests/functional/nix-copy-ssh.sh +++ b/tests/functional/nix-copy-ssh.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh source nix-copy-ssh-common.sh "ssh" diff --git a/tests/functional/nix-profile.sh b/tests/functional/nix-profile.sh old mode 100644 new mode 100755 index 7c4da6283..3e5846cf2 --- 
a/tests/functional/nix-profile.sh +++ b/tests/functional/nix-profile.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/nix-shell.sh b/tests/functional/nix-shell.sh old mode 100644 new mode 100755 index 04c83138e..c38107e64 --- a/tests/functional/nix-shell.sh +++ b/tests/functional/nix-shell.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/nix_path.sh b/tests/functional/nix_path.sh old mode 100644 new mode 100755 index 2b222b4a1..e6a2193f3 --- a/tests/functional/nix_path.sh +++ b/tests/functional/nix_path.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + # Regression for https://github.com/NixOS/nix/issues/5998 and https://github.com/NixOS/nix/issues/5980 source common.sh diff --git a/tests/functional/optimise-store.sh b/tests/functional/optimise-store.sh old mode 100644 new mode 100755 index 8c2d05cd5..70ce954f9 --- a/tests/functional/optimise-store.sh +++ b/tests/functional/optimise-store.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/output-normalization.sh b/tests/functional/output-normalization.sh old mode 100644 new mode 100755 index 0f6df5e31..2b319201a --- a/tests/functional/output-normalization.sh +++ b/tests/functional/output-normalization.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh testNormalization () { diff --git a/tests/functional/pass-as-file.sh b/tests/functional/pass-as-file.sh old mode 100644 new mode 100755 index 2c0bc5031..21d9ffc6d --- a/tests/functional/pass-as-file.sh +++ b/tests/functional/pass-as-file.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/path-from-hash-part.sh b/tests/functional/path-from-hash-part.sh old mode 100644 new mode 100755 index bdd104434..41d1b7410 --- a/tests/functional/path-from-hash-part.sh +++ b/tests/functional/path-from-hash-part.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh path=$(nix build --no-link --print-out-paths -f simple.nix) diff --git a/tests/functional/path-info.sh b/tests/functional/path-info.sh old mode 100644 new mode 100755 index 763935eb7..8597de683 --- a/tests/functional/path-info.sh +++ b/tests/functional/path-info.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh echo foo > $TEST_ROOT/foo diff --git a/tests/functional/placeholders.sh b/tests/functional/placeholders.sh old mode 100644 new mode 100755 index cd1bb7bc2..f2b8bf6bf --- a/tests/functional/placeholders.sh +++ b/tests/functional/placeholders.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/plugins.sh b/tests/functional/plugins.sh old mode 100644 new mode 100755 index baf71a362..ab4876df9 --- a/tests/functional/plugins.sh +++ b/tests/functional/plugins.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh if [[ $BUILD_SHARED_LIBS != 1 ]]; then diff --git a/tests/functional/post-hook.sh b/tests/functional/post-hook.sh old mode 100644 new mode 100755 index 752f8220c..c0b1ab3aa --- a/tests/functional/post-hook.sh +++ b/tests/functional/post-hook.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/pure-eval.sh b/tests/functional/pure-eval.sh old mode 100644 new mode 100755 index 5334bf28e..6d8aa35ec --- a/tests/functional/pure-eval.sh +++ b/tests/functional/pure-eval.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/read-only-store.sh b/tests/functional/read-only-store.sh old mode 
100644 new mode 100755 index d63920c19..ecc57642d --- a/tests/functional/read-only-store.sh +++ b/tests/functional/read-only-store.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh enableFeatures "read-only-local-store" @@ -9,7 +11,10 @@ clearStore happy () { # We can do a read-only query just fine with a read-only store nix --store local?read-only=true path-info $dummyPath - + + # `local://` also works. + nix --store local://?read-only=true path-info $dummyPath + # We can "write" an already-present store-path a read-only store, because no IO is actually required nix-store --store local?read-only=true --add dummy } diff --git a/tests/functional/readfile-context.sh b/tests/functional/readfile-context.sh old mode 100644 new mode 100755 index 31e70ddb1..76fad9349 --- a/tests/functional/readfile-context.sh +++ b/tests/functional/readfile-context.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/recursive.sh b/tests/functional/recursive.sh old mode 100644 new mode 100755 index 0bf00f8fa..a9966aabd --- a/tests/functional/recursive.sh +++ b/tests/functional/recursive.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh enableFeatures 'recursive-nix' diff --git a/tests/functional/referrers.sh b/tests/functional/referrers.sh old mode 100644 new mode 100755 index 81323c280..898032e42 --- a/tests/functional/referrers.sh +++ b/tests/functional/referrers.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh needLocalStore "uses some low-level store manipulations that aren’t available through the daemon" diff --git a/tests/functional/remote-store.sh b/tests/functional/remote-store.sh old mode 100644 new mode 100755 index cc5dd1833..171a5d391 --- a/tests/functional/remote-store.sh +++ b/tests/functional/remote-store.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore @@ -23,7 +25,7 @@ fi # Test import-from-derivation through the daemon. 
[[ $(nix eval --impure --raw --file ./ifd.nix) = hi ]] -storeCleared=1 NIX_REMOTE_=$NIX_REMOTE $SHELL ./user-envs.sh +NIX_REMOTE_=$NIX_REMOTE $SHELL ./user-envs-test-case.sh nix-store --gc --max-freed 1K diff --git a/tests/functional/repair.sh b/tests/functional/repair.sh old mode 100644 new mode 100755 index c8f07b1c6..552e04280 --- a/tests/functional/repair.sh +++ b/tests/functional/repair.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh needLocalStore "--repair needs a local store" diff --git a/tests/functional/repl.sh b/tests/functional/repl.sh old mode 100644 new mode 100755 index f11fa7140..fca982807 --- a/tests/functional/repl.sh +++ b/tests/functional/repl.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh testDir="$PWD" @@ -21,11 +23,14 @@ import $testDir/undefined-variable.nix " testRepl () { - local nixArgs=("$@") + local nixArgs + nixArgs=("$@") rm -rf repl-result-out || true # cleanup from other runs backed by a foreign nix store - local replOutput="$(nix repl "${nixArgs[@]}" <<< "$replCmds")" + local replOutput + replOutput="$(nix repl "${nixArgs[@]}" <<< "$replCmds")" echo "$replOutput" - local outPath=$(echo "$replOutput" |& + local outPath + outPath=$(echo "$replOutput" |& grep -o -E "$NIX_STORE_DIR/\w*-simple") nix path-info "${nixArgs[@]}" "$outPath" [ "$(realpath ./repl-result-out)" == "$outPath" ] || fail "nix repl :bl doesn't make a symlink" @@ -34,11 +39,11 @@ testRepl () { # simple.nix prints a PATH during build echo "$replOutput" | grepQuiet -s 'PATH=' || fail "nix repl :log doesn't output logs" - local replOutput="$(nix repl "${nixArgs[@]}" <<< "$replFailingCmds" 2>&1)" + replOutput="$(nix repl "${nixArgs[@]}" <<< "$replFailingCmds" 2>&1)" echo "$replOutput" echo "$replOutput" | grepQuiet -s 'This should fail' \ || fail "nix repl :log doesn't output logs for a failed derivation" - local replOutput="$(nix repl --show-trace "${nixArgs[@]}" <<< "$replUndefinedVariable" 2>&1)" + replOutput="$(nix repl --show-trace "${nixArgs[@]}" <<< "$replUndefinedVariable" 2>&1)" echo "$replOutput" echo "$replOutput" | grepQuiet -s "while evaluating the file" \ || fail "nix repl --show-trace doesn't show the trace" @@ -48,7 +53,7 @@ testRepl () { nix repl "${nixArgs[@]}" 2>&1 <<< "builtins.currentSystem" \ | grep "$(nix-instantiate --eval -E 'builtins.currentSystem')" - expectStderr 1 nix repl ${testDir}/simple.nix \ + expectStderr 1 nix repl "${testDir}/simple.nix" \ | grepQuiet -s "error: path '$testDir/simple.nix' is not a flake" } @@ -63,10 +68,11 @@ stripColors () { } testReplResponseGeneral () { - local grepMode="$1"; shift - local commands="$1"; shift - local expectedResponse="$1"; shift - local response="$(nix repl "$@" <<< "$commands" | stripColors)" + local grepMode commands expectedResponse response + grepMode="$1"; shift + commands="$1"; shift + expectedResponse="$1"; shift + response="$(nix repl "$@" <<< "$commands" | stripColors)" echo "$response" | grepQuiet "$grepMode" -s "$expectedResponse" \ || fail "repl command set: @@ -91,6 +97,8 @@ testReplResponseNoRegex () { } # :a uses the newest version of a symbol +# +# shellcheck disable=SC2016 testReplResponse ' :a { a = "1"; } :a { a = "2"; } @@ -101,6 +109,8 @@ testReplResponse ' # note the escaped \, # \\ # because the second argument is a regex +# +# shellcheck disable=SC2016 testReplResponseNoRegex ' "$" + "{hi}" ' '"\${hi}"' @@ -108,12 +118,12 @@ testReplResponseNoRegex ' testReplResponse ' drvPath ' '".*-simple.drv"' \ ---file $testDir/simple.nix +--file "$testDir/simple.nix" testReplResponse ' 
drvPath ' '".*-simple.drv"' \ ---file $testDir/simple.nix --experimental-features 'ca-derivations' +--file "$testDir/simple.nix" --experimental-features 'ca-derivations' mkdir -p flake && cat < flake/flake.nix { diff --git a/tests/functional/restricted.sh b/tests/functional/restricted.sh old mode 100644 new mode 100755 index 3de26eb36..ab4cad5cf --- a/tests/functional/restricted.sh +++ b/tests/functional/restricted.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/search.sh b/tests/functional/search.sh old mode 100644 new mode 100755 index d9c7a75da..ce17411d2 --- a/tests/functional/search.sh +++ b/tests/functional/search.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/secure-drv-outputs.sh b/tests/functional/secure-drv-outputs.sh old mode 100644 new mode 100755 index 50a9c4428..7d81db58b --- a/tests/functional/secure-drv-outputs.sh +++ b/tests/functional/secure-drv-outputs.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + # Test that users cannot register specially-crafted derivations that # produce output paths belonging to other derivations. This could be # used to inject malware into the store. diff --git a/tests/functional/selfref-gc.sh b/tests/functional/selfref-gc.sh old mode 100644 new mode 100755 index 3f1f50eea..37ce33089 --- a/tests/functional/selfref-gc.sh +++ b/tests/functional/selfref-gc.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh requireDaemonNewerThan "2.6.0pre20211215" diff --git a/tests/functional/shell.sh b/tests/functional/shell.sh old mode 100644 new mode 100755 index 8a3fef3e7..1760eefff --- a/tests/functional/shell.sh +++ b/tests/functional/shell.sh @@ -1,8 +1,13 @@ +#!/usr/bin/env bash + source common.sh clearStore clearCache +# nix shell is an alias for nix env shell. We'll use the shorter form in the rest of the test. +nix env shell -f shell-hello.nix hello -c hello | grep 'Hello World' + nix shell -f shell-hello.nix hello -c hello | grep 'Hello World' nix shell -f shell-hello.nix hello -c hello NixOS | grep 'Hello NixOS' diff --git a/tests/functional/signing.sh b/tests/functional/signing.sh old mode 100644 new mode 100755 index 942b51630..cf84ab377 --- a/tests/functional/signing.sh +++ b/tests/functional/signing.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore @@ -13,9 +15,9 @@ outPath=$(nix-build dependencies.nix --no-out-link --secret-key-files "$TEST_ROO # Verify that the path got signed. info=$(nix path-info --json $outPath) -[[ $info =~ '"ultimate":true' ]] -[[ $info =~ 'cache1.example.org' ]] -[[ $info =~ 'cache2.example.org' ]] +echo $info | jq -e '.[] | .ultimate == true' +echo $info | jq -e '.[] | .signatures.[] | select(startswith("cache1.example.org"))' +echo $info | jq -e '.[] | .signatures.[] | select(startswith("cache2.example.org"))' # Test "nix store verify". nix store verify -r $outPath @@ -37,8 +39,8 @@ nix store verify -r $outPath # Verify that the path did not get signed but does have the ultimate bit. info=$(nix path-info --json $outPath2) -[[ $info =~ '"ultimate":true' ]] -(! [[ $info =~ 'signatures' ]]) +echo $info | jq -e '.[] | .ultimate == true' +echo $info | jq -e '.[] | .signatures == []' # Test "nix store verify". nix store verify -r $outPath2 @@ -55,7 +57,7 @@ nix store verify -r $outPath2 --sigs-needed 1 --trusted-public-keys $pk1 # Build something content-addressed. 
outPathCA=$(IMPURE_VAR1=foo IMPURE_VAR2=bar nix-build ./fixed.nix -A good.0 --no-out-link) -[[ $(nix path-info --json $outPathCA) =~ '"ca":"fixed:md5:' ]] +nix path-info --json $outPathCA | jq -e '.[] | .ca | startswith("fixed:md5:")' # Content-addressed paths don't need signatures, so they verify # regardless of --sigs-needed. @@ -71,15 +73,15 @@ nix copy --to file://$cacheDir $outPath2 # Verify that signatures got copied. info=$(nix path-info --store file://$cacheDir --json $outPath2) -(! [[ $info =~ '"ultimate":true' ]]) -[[ $info =~ 'cache1.example.org' ]] -(! [[ $info =~ 'cache2.example.org' ]]) +echo $info | jq -e '.[] | .ultimate == false' +echo $info | jq -e '.[] | .signatures.[] | select(startswith("cache1.example.org"))' +echo $info | expect 4 jq -e '.[] | .signatures.[] | select(startswith("cache2.example.org"))' # Verify that adding a signature to a path in a binary cache works. nix store sign --store file://$cacheDir --key-file $TEST_ROOT/sk2 $outPath2 info=$(nix path-info --store file://$cacheDir --json $outPath2) -[[ $info =~ 'cache1.example.org' ]] -[[ $info =~ 'cache2.example.org' ]] +echo $info | jq -e '.[] | .signatures.[] | select(startswith("cache1.example.org"))' +echo $info | jq -e '.[] | .signatures.[] | select(startswith("cache2.example.org"))' # Copying to a diverted store should fail due to a lack of signatures by trusted keys. chmod -R u+w $TEST_ROOT/store0 || true diff --git a/tests/functional/simple.nix b/tests/functional/simple.nix index 4223c0f23..2035ca294 100644 --- a/tests/functional/simple.nix +++ b/tests/functional/simple.nix @@ -5,4 +5,5 @@ mkDerivation { builder = ./simple.builder.sh; PATH = ""; goodPath = path; + meta.position = "${__curPos.file}:${toString __curPos.line}"; } diff --git a/tests/functional/simple.sh b/tests/functional/simple.sh old mode 100644 new mode 100755 index 50d44f93f..846738cbd --- a/tests/functional/simple.sh +++ b/tests/functional/simple.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh drvPath=$(nix-instantiate simple.nix) diff --git a/tests/functional/ssh-relay.sh b/tests/functional/ssh-relay.sh old mode 100644 new mode 100755 index 053b2f00d..059c66434 --- a/tests/functional/ssh-relay.sh +++ b/tests/functional/ssh-relay.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh echo foo > $TEST_ROOT/hello.sh diff --git a/tests/functional/store-info.sh b/tests/functional/store-info.sh old mode 100644 new mode 100755 index 18a8131a9..2398f5beb --- a/tests/functional/store-info.sh +++ b/tests/functional/store-info.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh STORE_INFO=$(nix store info 2>&1) diff --git a/tests/functional/structured-attrs.sh b/tests/functional/structured-attrs.sh old mode 100644 new mode 100755 index 6711efbb4..ba7f5967e --- a/tests/functional/structured-attrs.sh +++ b/tests/functional/structured-attrs.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh # 27ce722638 required some incompatible changes to the nix file, so skip this diff --git a/tests/functional/substitute-with-invalid-ca.sh b/tests/functional/substitute-with-invalid-ca.sh old mode 100644 new mode 100755 index 4d0b01e0f..d8af67237 --- a/tests/functional/substitute-with-invalid-ca.sh +++ b/tests/functional/substitute-with-invalid-ca.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh BINARY_CACHE=file://$cacheDir diff --git a/tests/functional/suggestions.sh b/tests/functional/suggestions.sh old mode 100644 new mode 100755 index f18fefef9..6ec1cd322 --- a/tests/functional/suggestions.sh +++ 
b/tests/functional/suggestions.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/supplementary-groups.sh b/tests/functional/supplementary-groups.sh old mode 100644 new mode 100755 index d18fb2414..9d474219f --- a/tests/functional/supplementary-groups.sh +++ b/tests/functional/supplementary-groups.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh requireSandboxSupport diff --git a/tests/functional/tarball.sh b/tests/functional/tarball.sh old mode 100644 new mode 100755 index 062f27ad6..ce162ddce --- a/tests/functional/tarball.sh +++ b/tests/functional/tarball.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/test-infra.sh b/tests/functional/test-infra.sh old mode 100644 new mode 100755 index 54ae120e7..37322b356 --- a/tests/functional/test-infra.sh +++ b/tests/functional/test-infra.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + # Test the functions for testing themselves! # Also test some assumptions on how bash works that they rely on. source common.sh diff --git a/tests/functional/test-libstoreconsumer.sh b/tests/functional/test-libstoreconsumer.sh old mode 100644 new mode 100755 index 8a77cf5a1..d1a1accb6 --- a/tests/functional/test-libstoreconsumer.sh +++ b/tests/functional/test-libstoreconsumer.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh drv="$(nix-instantiate simple.nix)" diff --git a/tests/functional/timeout.sh b/tests/functional/timeout.sh old mode 100644 new mode 100755 index b179b79a2..441c83b0e --- a/tests/functional/timeout.sh +++ b/tests/functional/timeout.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + # Test the `--timeout' option. source common.sh diff --git a/tests/functional/toString-path.sh b/tests/functional/toString-path.sh old mode 100644 new mode 100755 index 07eb87465..d790109f4 --- a/tests/functional/toString-path.sh +++ b/tests/functional/toString-path.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh mkdir -p $TEST_ROOT/foo diff --git a/tests/functional/user-envs-migration.sh b/tests/functional/user-envs-migration.sh old mode 100644 new mode 100755 index 187372b16..992586b95 --- a/tests/functional/user-envs-migration.sh +++ b/tests/functional/user-envs-migration.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + # Test that the migration of user environments # (https://github.com/NixOS/nix/pull/5226) does preserve everything diff --git a/tests/functional/user-envs-test-case.sh b/tests/functional/user-envs-test-case.sh new file mode 100644 index 000000000..f4a90a675 --- /dev/null +++ b/tests/functional/user-envs-test-case.sh @@ -0,0 +1,191 @@ +clearProfiles + +# Query installed: should be empty. +test "$(nix-env -p $profiles/test -q '*' | wc -l)" -eq 0 + +nix-env --switch-profile $profiles/test + +# Query available: should contain several. 
+test "$(nix-env -f ./user-envs.nix -qa '*' | wc -l)" -eq 6 +outPath10=$(nix-env -f ./user-envs.nix -qa --out-path --no-name '*' | grep foo-1.0) +drvPath10=$(nix-env -f ./user-envs.nix -qa --drv-path --no-name '*' | grep foo-1.0) +[ -n "$outPath10" -a -n "$drvPath10" ] + +# Query with json +nix-env -f ./user-envs.nix -qa --json | jq -e '.[] | select(.name == "bar-0.1") | [ + .outputName == "out", + .outputs.out == null +] | all' +nix-env -f ./user-envs.nix -qa --json --out-path | jq -e '.[] | select(.name == "bar-0.1") | [ + .outputName == "out", + (.outputs.out | test("'$NIX_STORE_DIR'.*-0\\.1")) +] | all' +nix-env -f ./user-envs.nix -qa --json --drv-path | jq -e '.[] | select(.name == "bar-0.1") | (.drvPath | test("'$NIX_STORE_DIR'.*-0\\.1\\.drv"))' + +# Query descriptions. +nix-env -f ./user-envs.nix -qa '*' --description | grepQuiet silly +rm -rf $HOME/.nix-defexpr +ln -s $(pwd)/user-envs.nix $HOME/.nix-defexpr +nix-env -qa '*' --description | grepQuiet silly + +# Query the system. +nix-env -qa '*' --system | grepQuiet $system + +# Install "foo-1.0". +nix-env -i foo-1.0 + +# Query installed: should contain foo-1.0 now (which should be +# executable). +test "$(nix-env -q '*' | wc -l)" -eq 1 +nix-env -q '*' | grepQuiet foo-1.0 +test "$($profiles/test/bin/foo)" = "foo-1.0" + +# Test nix-env -qc to compare installed against available packages, and vice versa. +nix-env -qc '*' | grepQuiet '< 2.0' +nix-env -qac '*' | grepQuiet '> 1.0' + +# Test the -b flag to filter out source-only packages. +[ "$(nix-env -qab | wc -l)" -eq 1 ] + +# Test the -s flag to get package status. +nix-env -qas | grepQuiet 'IP- foo-1.0' +nix-env -qas | grepQuiet -- '--- bar-0.1' + +# Disable foo. +nix-env --set-flag active false foo +(! [ -e "$profiles/test/bin/foo" ]) + +# Enable foo. +nix-env --set-flag active true foo +[ -e "$profiles/test/bin/foo" ] + +# Store the path of foo-1.0. +outPath10_=$(nix-env -q --out-path --no-name '*' | grep foo-1.0) +echo "foo-1.0 = $outPath10" +[ "$outPath10" = "$outPath10_" ] + +# Install "foo-2.0pre1": should remove foo-1.0. +nix-env -i foo-2.0pre1 + +# Query installed: should contain foo-2.0pre1 now. +test "$(nix-env -q '*' | wc -l)" -eq 1 +nix-env -q '*' | grepQuiet foo-2.0pre1 +test "$($profiles/test/bin/foo)" = "foo-2.0pre1" + +# Upgrade "foo": should install foo-2.0. +NIX_PATH=nixpkgs=./user-envs.nix:${NIX_PATH-} nix-env -f '' -u foo + +# Query installed: should contain foo-2.0 now. +test "$(nix-env -q '*' | wc -l)" -eq 1 +nix-env -q '*' | grepQuiet foo-2.0 +test "$($profiles/test/bin/foo)" = "foo-2.0" + +# Store the path of foo-2.0. +outPath20=$(nix-env -q --out-path --no-name '*' | grep foo-2.0) +test -n "$outPath20" + +# Install bar-0.1, uninstall foo. +nix-env -i bar-0.1 +nix-env -e foo + +# Query installed: should only contain bar-0.1 now. +if nix-env -q '*' | grepQuiet foo; then false; fi +nix-env -q '*' | grepQuiet bar + +# Rollback: should bring "foo" back. +oldGen="$(nix-store -q --resolve $profiles/test)" +nix-env --rollback +[ "$(nix-store -q --resolve $profiles/test)" != "$oldGen" ] +nix-env -q '*' | grepQuiet foo-2.0 +nix-env -q '*' | grepQuiet bar + +# Rollback again: should remove "bar". +nix-env --rollback +nix-env -q '*' | grepQuiet foo-2.0 +if nix-env -q '*' | grepQuiet bar; then false; fi + +# Count generations. +nix-env --list-generations +test "$(nix-env --list-generations | wc -l)" -eq 7 + +# Doing the same operation twice results in the same generation, which triggers +# "lazy" behaviour and does not create a new symlink. 
+ +nix-env -i foo +nix-env -i foo + +# Count generations. +nix-env --list-generations +test "$(nix-env --list-generations | wc -l)" -eq 8 + +# Switch to a specified generation. +nix-env --switch-generation 7 +[ "$(nix-store -q --resolve $profiles/test)" = "$oldGen" ] + +# Install foo-1.0, now using its store path. +nix-env -i "$outPath10" +nix-env -q '*' | grepQuiet foo-1.0 +nix-store -qR $profiles/test | grep "$outPath10" +nix-store -q --referrers-closure $profiles/test | grep "$(nix-store -q --resolve $profiles/test)" +[ "$(nix-store -q --deriver "$outPath10")" = $drvPath10 ] + +# Uninstall foo-1.0, using a symlink to its store path. +ln -sfn $outPath10/bin/foo $TEST_ROOT/symlink +nix-env -e $TEST_ROOT/symlink +if nix-env -q '*' | grepQuiet foo; then false; fi +nix-store -qR $profiles/test | grepInverse "$outPath10" + +# Install foo-1.0, now using a symlink to its store path. +nix-env -i $TEST_ROOT/symlink +nix-env -q '*' | grepQuiet foo + +# Delete all old generations. +nix-env --delete-generations old + +# Run the garbage collector. This should get rid of foo-2.0 but not +# foo-1.0. +nix-collect-garbage +test -e "$outPath10" +(! [ -e "$outPath20" ]) + +# Uninstall everything +nix-env -e '*' +test "$(nix-env -q '*' | wc -l)" -eq 0 + +# Installing "foo" should only install the newest foo. +nix-env -i foo +test "$(nix-env -q '*' | grep foo- | wc -l)" -eq 1 +nix-env -q '*' | grepQuiet foo-2.0 + +# On the other hand, this should install both (and should fail due to +# a collision). +nix-env -e '*' +(! nix-env -i foo-1.0 foo-2.0) + +# Installing "*" should install one foo and one bar. +nix-env -e '*' +nix-env -i '*' +test "$(nix-env -q '*' | wc -l)" -eq 2 +nix-env -q '*' | grepQuiet foo-2.0 +nix-env -q '*' | grepQuiet bar-0.1.1 + +# Test priorities: foo-0.1 has a lower priority than foo-1.0, so it +# should be possible to install both without a collision. Also test +# ‘--set-flag priority’ to manually override the declared priorities. +nix-env -e '*' +nix-env -i foo-0.1 foo-1.0 +[ "$($profiles/test/bin/foo)" = "foo-1.0" ] +nix-env --set-flag priority 1 foo-0.1 +[ "$($profiles/test/bin/foo)" = "foo-0.1" ] + +# Test nix-env --set. +nix-env --set $outPath10 +[ "$(nix-store -q --resolve $profiles/test)" = $outPath10 ] +nix-env --set $drvPath10 +[ "$(nix-store -q --resolve $profiles/test)" = $outPath10 ] + +# Test the case where $HOME contains a symlink. +mkdir -p $TEST_ROOT/real-home/alice/.nix-defexpr/channels +ln -sfn $TEST_ROOT/real-home $TEST_ROOT/home +ln -sfn $(pwd)/user-envs.nix $TEST_ROOT/home/alice/.nix-defexpr/channels/foo +HOME=$TEST_ROOT/home/alice nix-env -i foo-0.1 diff --git a/tests/functional/user-envs.sh b/tests/functional/user-envs.sh old mode 100644 new mode 100755 index 7c643f355..ec9d036f8 --- a/tests/functional/user-envs.sh +++ b/tests/functional/user-envs.sh @@ -1,197 +1,5 @@ -source common.sh +#!/usr/bin/env bash -if [ -z "${storeCleared-}" ]; then - clearStore -fi +source ./common.sh -clearProfiles - -# Query installed: should be empty. -test "$(nix-env -p $profiles/test -q '*' | wc -l)" -eq 0 - -nix-env --switch-profile $profiles/test - -# Query available: should contain several. 
-test "$(nix-env -f ./user-envs.nix -qa '*' | wc -l)" -eq 6 -outPath10=$(nix-env -f ./user-envs.nix -qa --out-path --no-name '*' | grep foo-1.0) -drvPath10=$(nix-env -f ./user-envs.nix -qa --drv-path --no-name '*' | grep foo-1.0) -[ -n "$outPath10" -a -n "$drvPath10" ] - -# Query with json -nix-env -f ./user-envs.nix -qa --json | jq -e '.[] | select(.name == "bar-0.1") | [ - .outputName == "out", - .outputs.out == null -] | all' -nix-env -f ./user-envs.nix -qa --json --out-path | jq -e '.[] | select(.name == "bar-0.1") | [ - .outputName == "out", - (.outputs.out | test("'$NIX_STORE_DIR'.*-0\\.1")) -] | all' -nix-env -f ./user-envs.nix -qa --json --drv-path | jq -e '.[] | select(.name == "bar-0.1") | (.drvPath | test("'$NIX_STORE_DIR'.*-0\\.1\\.drv"))' - -# Query descriptions. -nix-env -f ./user-envs.nix -qa '*' --description | grepQuiet silly -rm -rf $HOME/.nix-defexpr -ln -s $(pwd)/user-envs.nix $HOME/.nix-defexpr -nix-env -qa '*' --description | grepQuiet silly - -# Query the system. -nix-env -qa '*' --system | grepQuiet $system - -# Install "foo-1.0". -nix-env -i foo-1.0 - -# Query installed: should contain foo-1.0 now (which should be -# executable). -test "$(nix-env -q '*' | wc -l)" -eq 1 -nix-env -q '*' | grepQuiet foo-1.0 -test "$($profiles/test/bin/foo)" = "foo-1.0" - -# Test nix-env -qc to compare installed against available packages, and vice versa. -nix-env -qc '*' | grepQuiet '< 2.0' -nix-env -qac '*' | grepQuiet '> 1.0' - -# Test the -b flag to filter out source-only packages. -[ "$(nix-env -qab | wc -l)" -eq 1 ] - -# Test the -s flag to get package status. -nix-env -qas | grepQuiet 'IP- foo-1.0' -nix-env -qas | grepQuiet -- '--- bar-0.1' - -# Disable foo. -nix-env --set-flag active false foo -(! [ -e "$profiles/test/bin/foo" ]) - -# Enable foo. -nix-env --set-flag active true foo -[ -e "$profiles/test/bin/foo" ] - -# Store the path of foo-1.0. -outPath10_=$(nix-env -q --out-path --no-name '*' | grep foo-1.0) -echo "foo-1.0 = $outPath10" -[ "$outPath10" = "$outPath10_" ] - -# Install "foo-2.0pre1": should remove foo-1.0. -nix-env -i foo-2.0pre1 - -# Query installed: should contain foo-2.0pre1 now. -test "$(nix-env -q '*' | wc -l)" -eq 1 -nix-env -q '*' | grepQuiet foo-2.0pre1 -test "$($profiles/test/bin/foo)" = "foo-2.0pre1" - -# Upgrade "foo": should install foo-2.0. -NIX_PATH=nixpkgs=./user-envs.nix:${NIX_PATH-} nix-env -f '' -u foo - -# Query installed: should contain foo-2.0 now. -test "$(nix-env -q '*' | wc -l)" -eq 1 -nix-env -q '*' | grepQuiet foo-2.0 -test "$($profiles/test/bin/foo)" = "foo-2.0" - -# Store the path of foo-2.0. -outPath20=$(nix-env -q --out-path --no-name '*' | grep foo-2.0) -test -n "$outPath20" - -# Install bar-0.1, uninstall foo. -nix-env -i bar-0.1 -nix-env -e foo - -# Query installed: should only contain bar-0.1 now. -if nix-env -q '*' | grepQuiet foo; then false; fi -nix-env -q '*' | grepQuiet bar - -# Rollback: should bring "foo" back. -oldGen="$(nix-store -q --resolve $profiles/test)" -nix-env --rollback -[ "$(nix-store -q --resolve $profiles/test)" != "$oldGen" ] -nix-env -q '*' | grepQuiet foo-2.0 -nix-env -q '*' | grepQuiet bar - -# Rollback again: should remove "bar". -nix-env --rollback -nix-env -q '*' | grepQuiet foo-2.0 -if nix-env -q '*' | grepQuiet bar; then false; fi - -# Count generations. -nix-env --list-generations -test "$(nix-env --list-generations | wc -l)" -eq 7 - -# Doing the same operation twice results in the same generation, which triggers -# "lazy" behaviour and does not create a new symlink. 
- -nix-env -i foo -nix-env -i foo - -# Count generations. -nix-env --list-generations -test "$(nix-env --list-generations | wc -l)" -eq 8 - -# Switch to a specified generation. -nix-env --switch-generation 7 -[ "$(nix-store -q --resolve $profiles/test)" = "$oldGen" ] - -# Install foo-1.0, now using its store path. -nix-env -i "$outPath10" -nix-env -q '*' | grepQuiet foo-1.0 -nix-store -qR $profiles/test | grep "$outPath10" -nix-store -q --referrers-closure $profiles/test | grep "$(nix-store -q --resolve $profiles/test)" -[ "$(nix-store -q --deriver "$outPath10")" = $drvPath10 ] - -# Uninstall foo-1.0, using a symlink to its store path. -ln -sfn $outPath10/bin/foo $TEST_ROOT/symlink -nix-env -e $TEST_ROOT/symlink -if nix-env -q '*' | grepQuiet foo; then false; fi -nix-store -qR $profiles/test | grepInverse "$outPath10" - -# Install foo-1.0, now using a symlink to its store path. -nix-env -i $TEST_ROOT/symlink -nix-env -q '*' | grepQuiet foo - -# Delete all old generations. -nix-env --delete-generations old - -# Run the garbage collector. This should get rid of foo-2.0 but not -# foo-1.0. -nix-collect-garbage -test -e "$outPath10" -(! [ -e "$outPath20" ]) - -# Uninstall everything -nix-env -e '*' -test "$(nix-env -q '*' | wc -l)" -eq 0 - -# Installing "foo" should only install the newest foo. -nix-env -i foo -test "$(nix-env -q '*' | grep foo- | wc -l)" -eq 1 -nix-env -q '*' | grepQuiet foo-2.0 - -# On the other hand, this should install both (and should fail due to -# a collision). -nix-env -e '*' -(! nix-env -i foo-1.0 foo-2.0) - -# Installing "*" should install one foo and one bar. -nix-env -e '*' -nix-env -i '*' -test "$(nix-env -q '*' | wc -l)" -eq 2 -nix-env -q '*' | grepQuiet foo-2.0 -nix-env -q '*' | grepQuiet bar-0.1.1 - -# Test priorities: foo-0.1 has a lower priority than foo-1.0, so it -# should be possible to install both without a collision. Also test -# ‘--set-flag priority’ to manually override the declared priorities. -nix-env -e '*' -nix-env -i foo-0.1 foo-1.0 -[ "$($profiles/test/bin/foo)" = "foo-1.0" ] -nix-env --set-flag priority 1 foo-0.1 -[ "$($profiles/test/bin/foo)" = "foo-0.1" ] - -# Test nix-env --set. -nix-env --set $outPath10 -[ "$(nix-store -q --resolve $profiles/test)" = $outPath10 ] -nix-env --set $drvPath10 -[ "$(nix-store -q --resolve $profiles/test)" = $outPath10 ] - -# Test the case where $HOME contains a symlink. 
-mkdir -p $TEST_ROOT/real-home/alice/.nix-defexpr/channels -ln -sfn $TEST_ROOT/real-home $TEST_ROOT/home -ln -sfn $(pwd)/user-envs.nix $TEST_ROOT/home/alice/.nix-defexpr/channels/foo -HOME=$TEST_ROOT/home/alice nix-env -i foo-0.1 +source ./user-envs-test-case.sh diff --git a/tests/functional/why-depends.sh b/tests/functional/why-depends.sh old mode 100644 new mode 100755 index 9680bf80e..69b365069 --- a/tests/functional/why-depends.sh +++ b/tests/functional/why-depends.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/functional/zstd.sh b/tests/functional/zstd.sh old mode 100644 new mode 100755 index ba7c20501..3bf9d5601 --- a/tests/functional/zstd.sh +++ b/tests/functional/zstd.sh @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + source common.sh clearStore diff --git a/tests/installer/default.nix b/tests/installer/default.nix index 238c6ac8e..4aed6eae4 100644 --- a/tests/installer/default.nix +++ b/tests/installer/default.nix @@ -13,6 +13,17 @@ let ''; }; + install-both-profile-links = { + script = '' + tar -xf ./nix.tar.xz + mv ./nix-* nix + ln -s $HOME/.local/state/nix/profiles/a-profile $HOME/.nix-profile + mkdir -p $HOME/.local/state/nix + ln -s $HOME/.local/state/nix/profiles/b-profile $HOME/.local/state/nix/profile + ./nix/install --no-channel-add + ''; + }; + install-force-no-daemon = { script = '' tar -xf ./nix.tar.xz diff --git a/tests/nixos/containers/containers.nix b/tests/nixos/containers/containers.nix index c8ee78a4a..6773f5628 100644 --- a/tests/nixos/containers/containers.nix +++ b/tests/nixos/containers/containers.nix @@ -33,30 +33,30 @@ # Test that 'id' gives the expected result in various configurations. # Existing UIDs, sandbox. - host.succeed("nix build -v --no-auto-allocate-uids --sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-1") + host.succeed("nix build --no-auto-allocate-uids --sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-1") host.succeed("[[ $(cat ./result) = 'uid=1000(nixbld) gid=100(nixbld) groups=100(nixbld)' ]]") # Existing UIDs, no sandbox. - host.succeed("nix build -v --no-auto-allocate-uids --no-sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-2") + host.succeed("nix build --no-auto-allocate-uids --no-sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-2") host.succeed("[[ $(cat ./result) = 'uid=30001(nixbld1) gid=30000(nixbld) groups=30000(nixbld)' ]]") # Auto-allocated UIDs, sandbox. - host.succeed("nix build -v --auto-allocate-uids --sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-3") + host.succeed("nix build --auto-allocate-uids --sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-3") host.succeed("[[ $(cat ./result) = 'uid=1000(nixbld) gid=100(nixbld) groups=100(nixbld)' ]]") # Auto-allocated UIDs, no sandbox. - host.succeed("nix build -v --auto-allocate-uids --no-sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-4") + host.succeed("nix build --auto-allocate-uids --no-sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-4") host.succeed("[[ $(cat ./result) = 'uid=872415232 gid=30000(nixbld) groups=30000(nixbld)' ]]") # Auto-allocated UIDs, UID range, sandbox. 
- host.succeed("nix build -v --auto-allocate-uids --sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-5 --arg uidRange true") + host.succeed("nix build --auto-allocate-uids --sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-5 --arg uidRange true") host.succeed("[[ $(cat ./result) = 'uid=0(root) gid=0(root) groups=0(root)' ]]") # Auto-allocated UIDs, UID range, no sandbox. - host.fail("nix build -v --auto-allocate-uids --no-sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-6 --arg uidRange true") + host.fail("nix build --auto-allocate-uids --no-sandbox -L --offline --impure --file ${./id-test.nix} --argstr name id-test-6 --arg uidRange true") # Run systemd-nspawn in a Nix build. - host.succeed("nix build -v --auto-allocate-uids --sandbox -L --offline --impure --file ${./systemd-nspawn.nix} --argstr nixpkgs ${nixpkgs}") + host.succeed("nix build --auto-allocate-uids --sandbox -L --offline --impure --file ${./systemd-nspawn.nix} --argstr nixpkgs ${nixpkgs}") host.succeed("[[ $(cat ./result/msg) = 'Hello World' ]]") ''; diff --git a/tests/repl-completion.nix b/tests/repl-completion.nix new file mode 100644 index 000000000..3ba198a98 --- /dev/null +++ b/tests/repl-completion.nix @@ -0,0 +1,40 @@ +{ runCommand, nix, expect }: + +# We only use expect when necessary, e.g. for testing tab completion in nix repl. +# See also tests/functional/repl.sh + +runCommand "repl-completion" { + nativeBuildInputs = [ + expect + nix + ]; + expectScript = '' + # Regression https://github.com/NixOS/nix/pull/10778 + spawn nix repl --offline --extra-experimental-features nix-command + expect "nix-repl>" + send "foo = import ./does-not-exist.nix\n" + expect "nix-repl>" + send "foo.\t" + expect { + "nix-repl>" { + puts "Got another prompt. Good." + } + eof { + puts "Got EOF. Bad." 
+ exit 1 + } + } + exit 0 + ''; + passAsFile = [ "expectScript" ]; +} +'' + export NIX_STORE=$TMPDIR/store + export NIX_STATE_DIR=$TMPDIR/state + export HOME=$TMPDIR/home + mkdir $HOME + + nix-store --init + expect $expectScriptPath + touch $out +'' \ No newline at end of file diff --git a/tests/unit/libexpr/local.mk b/tests/unit/libexpr/local.mk index c59191db4..09a7dfca1 100644 --- a/tests/unit/libexpr/local.mk +++ b/tests/unit/libexpr/local.mk @@ -38,3 +38,8 @@ libexpr-tests_LIBS = \ libexpr libexprc libfetchers libstore libstorec libutil libutilc libexpr-tests_LDFLAGS := -lrapidcheck $(GTEST_LIBS) -lgmock + +ifdef HOST_WINDOWS + # Increase the default reserved stack size to 65 MB so Nix doesn't run out of space + libexpr-tests_LDFLAGS += -Wl,--stack,$(shell echo $$((65 * 1024 * 1024))) +endif diff --git a/tests/unit/libexpr/nix_api_expr.cc b/tests/unit/libexpr/nix_api_expr.cc index 0818f1cab..92a6a1175 100644 --- a/tests/unit/libexpr/nix_api_expr.cc +++ b/tests/unit/libexpr/nix_api_expr.cc @@ -191,4 +191,212 @@ TEST_F(nix_api_expr_test, nix_expr_realise_context) nix_realised_string_free(r); } +const char * SAMPLE_USER_DATA = "whatever"; + +static void primop_square(void * user_data, nix_c_context * context, EvalState * state, Value ** args, Value * ret) +{ + assert(context); + assert(state); + assert(user_data == SAMPLE_USER_DATA); + auto i = nix_get_int(context, args[0]); + nix_init_int(context, ret, i * i); +} + +TEST_F(nix_api_expr_test, nix_expr_primop) +{ + PrimOp * primop = + nix_alloc_primop(ctx, primop_square, 1, "square", nullptr, "square an integer", (void *) SAMPLE_USER_DATA); + assert_ctx_ok(); + Value * primopValue = nix_alloc_value(ctx, state); + assert_ctx_ok(); + nix_init_primop(ctx, primopValue, primop); + assert_ctx_ok(); + + Value * three = nix_alloc_value(ctx, state); + assert_ctx_ok(); + nix_init_int(ctx, three, 3); + assert_ctx_ok(); + + Value * result = nix_alloc_value(ctx, state); + assert_ctx_ok(); + nix_value_call(ctx, state, primopValue, three, result); + assert_ctx_ok(); + + auto r = nix_get_int(ctx, result); + ASSERT_EQ(9, r); +} + +static void primop_repeat(void * user_data, nix_c_context * context, EvalState * state, Value ** args, Value * ret) +{ + assert(context); + assert(state); + assert(user_data == SAMPLE_USER_DATA); + + // Get the string to repeat + std::string s; + if (nix_get_string(context, args[0], OBSERVE_STRING(s)) != NIX_OK) + return; + + // Get the number of times to repeat + auto n = nix_get_int(context, args[1]); + if (nix_err_code(context) != NIX_OK) + return; + + // Repeat the string + std::string result; + for (int i = 0; i < n; ++i) + result += s; + + nix_init_string(context, ret, result.c_str()); +} + +TEST_F(nix_api_expr_test, nix_expr_primop_arity_2_multiple_calls) +{ + PrimOp * primop = + nix_alloc_primop(ctx, primop_repeat, 2, "repeat", nullptr, "repeat a string", (void *) SAMPLE_USER_DATA); + assert_ctx_ok(); + Value * primopValue = nix_alloc_value(ctx, state); + assert_ctx_ok(); + nix_init_primop(ctx, primopValue, primop); + assert_ctx_ok(); + + Value * hello = nix_alloc_value(ctx, state); + assert_ctx_ok(); + nix_init_string(ctx, hello, "hello"); + assert_ctx_ok(); + + Value * three = nix_alloc_value(ctx, state); + assert_ctx_ok(); + nix_init_int(ctx, three, 3); + assert_ctx_ok(); + + Value * partial = nix_alloc_value(ctx, state); + assert_ctx_ok(); + nix_value_call(ctx, state, primopValue, hello, partial); + assert_ctx_ok(); + + Value * result = nix_alloc_value(ctx, state); + assert_ctx_ok(); + nix_value_call(ctx, state, 
partial, three, result); + assert_ctx_ok(); + + std::string r; + nix_get_string(ctx, result, OBSERVE_STRING(r)); + ASSERT_STREQ("hellohellohello", r.c_str()); +} + +TEST_F(nix_api_expr_test, nix_expr_primop_arity_2_single_call) +{ + PrimOp * primop = + nix_alloc_primop(ctx, primop_repeat, 2, "repeat", nullptr, "repeat a string", (void *) SAMPLE_USER_DATA); + assert_ctx_ok(); + Value * primopValue = nix_alloc_value(ctx, state); + assert_ctx_ok(); + nix_init_primop(ctx, primopValue, primop); + assert_ctx_ok(); + + Value * hello = nix_alloc_value(ctx, state); + assert_ctx_ok(); + nix_init_string(ctx, hello, "hello"); + assert_ctx_ok(); + + Value * three = nix_alloc_value(ctx, state); + assert_ctx_ok(); + nix_init_int(ctx, three, 3); + assert_ctx_ok(); + + Value * result = nix_alloc_value(ctx, state); + assert_ctx_ok(); + NIX_VALUE_CALL(ctx, state, result, primopValue, hello, three); + assert_ctx_ok(); + + std::string r; + nix_get_string(ctx, result, OBSERVE_STRING(r)); + assert_ctx_ok(); + + ASSERT_STREQ("hellohellohello", r.c_str()); +} + +static void +primop_bad_no_return(void * user_data, nix_c_context * context, EvalState * state, Value ** args, Value * ret) +{ +} + +TEST_F(nix_api_expr_test, nix_expr_primop_bad_no_return) +{ + PrimOp * primop = + nix_alloc_primop(ctx, primop_bad_no_return, 1, "badNoReturn", nullptr, "a broken primop", nullptr); + assert_ctx_ok(); + Value * primopValue = nix_alloc_value(ctx, state); + assert_ctx_ok(); + nix_init_primop(ctx, primopValue, primop); + assert_ctx_ok(); + + Value * three = nix_alloc_value(ctx, state); + assert_ctx_ok(); + nix_init_int(ctx, three, 3); + assert_ctx_ok(); + + Value * result = nix_alloc_value(ctx, state); + assert_ctx_ok(); + nix_value_call(ctx, state, primopValue, three, result); + ASSERT_EQ(ctx->last_err_code, NIX_ERR_NIX_ERROR); + ASSERT_THAT( + ctx->last_err, + testing::Optional( + testing::HasSubstr("Implementation error in custom function: return value was not initialized"))); + ASSERT_THAT(ctx->last_err, testing::Optional(testing::HasSubstr("badNoReturn"))); +} + +static void +primop_bad_return_thunk(void * user_data, nix_c_context * context, EvalState * state, Value ** args, Value * ret) +{ + nix_init_apply(context, ret, args[0], args[1]); +} +TEST_F(nix_api_expr_test, nix_expr_primop_bad_return_thunk) +{ + PrimOp * primop = + nix_alloc_primop(ctx, primop_bad_return_thunk, 2, "badReturnThunk", nullptr, "a broken primop", nullptr); + assert_ctx_ok(); + Value * primopValue = nix_alloc_value(ctx, state); + assert_ctx_ok(); + nix_init_primop(ctx, primopValue, primop); + assert_ctx_ok(); + + Value * toString = nix_alloc_value(ctx, state); + assert_ctx_ok(); + nix_expr_eval_from_string(ctx, state, "builtins.toString", ".", toString); + assert_ctx_ok(); + + Value * four = nix_alloc_value(ctx, state); + assert_ctx_ok(); + nix_init_int(ctx, four, 4); + assert_ctx_ok(); + + Value * result = nix_alloc_value(ctx, state); + assert_ctx_ok(); + NIX_VALUE_CALL(ctx, state, result, primopValue, toString, four); + + ASSERT_EQ(ctx->last_err_code, NIX_ERR_NIX_ERROR); + ASSERT_THAT( + ctx->last_err, + testing::Optional( + testing::HasSubstr("Implementation error in custom function: return value must not be a thunk"))); + ASSERT_THAT(ctx->last_err, testing::Optional(testing::HasSubstr("badReturnThunk"))); +} + +TEST_F(nix_api_expr_test, nix_value_call_multi_no_args) +{ + Value * n = nix_alloc_value(ctx, state); + nix_init_int(ctx, n, 3); + assert_ctx_ok(); + + Value * r = nix_alloc_value(ctx, state); + nix_value_call_multi(ctx, state, n, 0, 
nullptr, r); + assert_ctx_ok(); + + auto rInt = nix_get_int(ctx, r); + assert_ctx_ok(); + ASSERT_EQ(3, rInt); +} } // namespace nixC diff --git a/tests/unit/libexpr/nix_api_value.cc b/tests/unit/libexpr/nix_api_value.cc index 6e1131e10..c71593c85 100644 --- a/tests/unit/libexpr/nix_api_value.cc +++ b/tests/unit/libexpr/nix_api_value.cc @@ -256,10 +256,13 @@ TEST_F(nix_api_expr_test, nix_value_init) Value * f = nix_alloc_value(ctx, state); nix_expr_eval_from_string( - ctx, state, R"( + ctx, + state, + R"( a: a * a )", - "", f); + "", + f); // Test @@ -325,20 +328,26 @@ TEST_F(nix_api_expr_test, nix_value_init_apply_lazy_arg) Value * f = nix_alloc_value(ctx, state); nix_expr_eval_from_string( - ctx, state, R"( + ctx, + state, + R"( a: { foo = a; } )", - "", f); + "", + f); assert_ctx_ok(); Value * e = nix_alloc_value(ctx, state); { Value * g = nix_alloc_value(ctx, state); nix_expr_eval_from_string( - ctx, state, R"( + ctx, + state, + R"( _ignore: throw "error message for test case nix_value_init_apply_lazy_arg" )", - "", g); + "", + g); assert_ctx_ok(); nix_init_apply(ctx, e, g, g); diff --git a/tests/unit/libfetchers/local.mk b/tests/unit/libfetchers/local.mk index e9f659fd7..d576d28f3 100644 --- a/tests/unit/libfetchers/local.mk +++ b/tests/unit/libfetchers/local.mk @@ -30,3 +30,8 @@ libfetchers-tests_LIBS = \ libfetchers libstore libutil libfetchers-tests_LDFLAGS := -lrapidcheck $(GTEST_LIBS) + +ifdef HOST_WINDOWS + # Increase the default reserved stack size to 65 MB so Nix doesn't run out of space + libfetchers-tests_LDFLAGS += -Wl,--stack,$(shell echo $$((65 * 1024 * 1024))) +endif diff --git a/tests/unit/libstore/data/derivation/output-caFixedFlat.json b/tests/unit/libstore/data/derivation/output-caFixedFlat.json index fe000ea36..7001ea0a9 100644 --- a/tests/unit/libstore/data/derivation/output-caFixedFlat.json +++ b/tests/unit/libstore/data/derivation/output-caFixedFlat.json @@ -1,5 +1,6 @@ { "hash": "894517c9163c896ec31a2adbd33c0681fd5f45b2c0ef08a64c92a03fb97f390f", "hashAlgo": "sha256", + "method": "flat", "path": "/nix/store/rhcg9h16sqvlbpsa6dqm57sbr2al6nzg-drv-name-output-name" } diff --git a/tests/unit/libstore/data/derivation/output-caFixedNAR.json b/tests/unit/libstore/data/derivation/output-caFixedNAR.json index 1afd60223..54eb306e6 100644 --- a/tests/unit/libstore/data/derivation/output-caFixedNAR.json +++ b/tests/unit/libstore/data/derivation/output-caFixedNAR.json @@ -1,5 +1,6 @@ { "hash": "894517c9163c896ec31a2adbd33c0681fd5f45b2c0ef08a64c92a03fb97f390f", - "hashAlgo": "r:sha256", + "hashAlgo": "sha256", + "method": "nar", "path": "/nix/store/c015dhfh5l0lp6wxyvdn7bmwhbbr6hr9-drv-name-output-name" } diff --git a/tests/unit/libstore/data/derivation/output-caFixedText.json b/tests/unit/libstore/data/derivation/output-caFixedText.json index 0b2cc8bbc..e8a651860 100644 --- a/tests/unit/libstore/data/derivation/output-caFixedText.json +++ b/tests/unit/libstore/data/derivation/output-caFixedText.json @@ -1,5 +1,6 @@ { "hash": "894517c9163c896ec31a2adbd33c0681fd5f45b2c0ef08a64c92a03fb97f390f", - "hashAlgo": "text:sha256", + "hashAlgo": "sha256", + "method": "text", "path": "/nix/store/6s1zwabh956jvhv4w9xcdb5jiyanyxg1-drv-name-output-name" } diff --git a/tests/unit/libstore/data/derivation/output-caFloating.json b/tests/unit/libstore/data/derivation/output-caFloating.json index 9115de851..8b9b5f681 100644 --- a/tests/unit/libstore/data/derivation/output-caFloating.json +++ b/tests/unit/libstore/data/derivation/output-caFloating.json @@ -1,3 +1,4 @@ { - "hashAlgo": "r:sha256" + 
"hashAlgo": "sha256", + "method": "nar" } diff --git a/tests/unit/libstore/data/derivation/output-impure.json b/tests/unit/libstore/data/derivation/output-impure.json index 62b61cdca..bec03702b 100644 --- a/tests/unit/libstore/data/derivation/output-impure.json +++ b/tests/unit/libstore/data/derivation/output-impure.json @@ -1,4 +1,5 @@ { - "hashAlgo": "r:sha256", - "impure": true + "hashAlgo": "sha256", + "impure": true, + "method": "nar" } diff --git a/tests/unit/libstore/test-data/machines.bad_format b/tests/unit/libstore/data/machines/bad_format similarity index 100% rename from tests/unit/libstore/test-data/machines.bad_format rename to tests/unit/libstore/data/machines/bad_format diff --git a/tests/unit/libstore/test-data/machines.valid b/tests/unit/libstore/data/machines/valid similarity index 100% rename from tests/unit/libstore/test-data/machines.valid rename to tests/unit/libstore/data/machines/valid diff --git a/tests/unit/libstore/data/path-info/empty_impure.json b/tests/unit/libstore/data/path-info/empty_impure.json new file mode 100644 index 000000000..be982dcef --- /dev/null +++ b/tests/unit/libstore/data/path-info/empty_impure.json @@ -0,0 +1,10 @@ +{ + "ca": null, + "deriver": null, + "narHash": "sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc=", + "narSize": 0, + "references": [], + "registrationTime": null, + "signatures": [], + "ultimate": false +} diff --git a/tests/unit/libstore/data/path-info/empty_pure.json b/tests/unit/libstore/data/path-info/empty_pure.json new file mode 100644 index 000000000..10d9f508a --- /dev/null +++ b/tests/unit/libstore/data/path-info/empty_pure.json @@ -0,0 +1,6 @@ +{ + "ca": null, + "narHash": "sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc=", + "narSize": 0, + "references": [] +} diff --git a/tests/unit/libstore/data/store-reference/auto.txt b/tests/unit/libstore/data/store-reference/auto.txt new file mode 100644 index 000000000..4d18c3e59 --- /dev/null +++ b/tests/unit/libstore/data/store-reference/auto.txt @@ -0,0 +1 @@ +auto \ No newline at end of file diff --git a/tests/unit/libstore/data/store-reference/auto_param.txt b/tests/unit/libstore/data/store-reference/auto_param.txt new file mode 100644 index 000000000..54adabb25 --- /dev/null +++ b/tests/unit/libstore/data/store-reference/auto_param.txt @@ -0,0 +1 @@ +auto?root=/foo/bar/baz \ No newline at end of file diff --git a/tests/unit/libstore/data/store-reference/local_1.txt b/tests/unit/libstore/data/store-reference/local_1.txt new file mode 100644 index 000000000..74b1b9677 --- /dev/null +++ b/tests/unit/libstore/data/store-reference/local_1.txt @@ -0,0 +1 @@ +local://?root=/foo/bar/baz \ No newline at end of file diff --git a/tests/unit/libstore/data/store-reference/local_2.txt b/tests/unit/libstore/data/store-reference/local_2.txt new file mode 100644 index 000000000..8b5593fb1 --- /dev/null +++ b/tests/unit/libstore/data/store-reference/local_2.txt @@ -0,0 +1 @@ +local:///foo/bar/baz?trusted=true \ No newline at end of file diff --git a/tests/unit/libstore/data/store-reference/local_shorthand_1.txt b/tests/unit/libstore/data/store-reference/local_shorthand_1.txt new file mode 100644 index 000000000..896189be9 --- /dev/null +++ b/tests/unit/libstore/data/store-reference/local_shorthand_1.txt @@ -0,0 +1 @@ +local?root=/foo/bar/baz \ No newline at end of file diff --git a/tests/unit/libstore/data/store-reference/local_shorthand_2.txt b/tests/unit/libstore/data/store-reference/local_shorthand_2.txt new file mode 100644 index 000000000..7a9dad3b3 --- /dev/null +++ 
b/tests/unit/libstore/data/store-reference/local_shorthand_2.txt @@ -0,0 +1 @@ +/foo/bar/baz?trusted=true \ No newline at end of file diff --git a/tests/unit/libstore/data/store-reference/ssh.txt b/tests/unit/libstore/data/store-reference/ssh.txt new file mode 100644 index 000000000..8c61010ec --- /dev/null +++ b/tests/unit/libstore/data/store-reference/ssh.txt @@ -0,0 +1 @@ +ssh://localhost \ No newline at end of file diff --git a/tests/unit/libstore/data/store-reference/unix.txt b/tests/unit/libstore/data/store-reference/unix.txt new file mode 100644 index 000000000..195489048 --- /dev/null +++ b/tests/unit/libstore/data/store-reference/unix.txt @@ -0,0 +1 @@ +unix://?max-connections=7&trusted=true \ No newline at end of file diff --git a/tests/unit/libstore/data/store-reference/unix_shorthand.txt b/tests/unit/libstore/data/store-reference/unix_shorthand.txt new file mode 100644 index 000000000..0300337e9 --- /dev/null +++ b/tests/unit/libstore/data/store-reference/unix_shorthand.txt @@ -0,0 +1 @@ +daemon?max-connections=7&trusted=true \ No newline at end of file diff --git a/tests/unit/libstore/data/worker-protocol/build-mode.bin b/tests/unit/libstore/data/worker-protocol/build-mode.bin new file mode 100644 index 000000000..51b239409 Binary files /dev/null and b/tests/unit/libstore/data/worker-protocol/build-mode.bin differ diff --git a/tests/unit/libstore/data/worker-protocol/client-handshake-info_1_30.bin b/tests/unit/libstore/data/worker-protocol/client-handshake-info_1_30.bin new file mode 100644 index 000000000..e69de29bb diff --git a/tests/unit/libstore/data/worker-protocol/client-handshake-info_1_33.bin b/tests/unit/libstore/data/worker-protocol/client-handshake-info_1_33.bin new file mode 100644 index 000000000..96c6efafc Binary files /dev/null and b/tests/unit/libstore/data/worker-protocol/client-handshake-info_1_33.bin differ diff --git a/tests/unit/libstore/data/worker-protocol/client-handshake-info_1_35.bin b/tests/unit/libstore/data/worker-protocol/client-handshake-info_1_35.bin new file mode 100644 index 000000000..e877159aa Binary files /dev/null and b/tests/unit/libstore/data/worker-protocol/client-handshake-info_1_35.bin differ diff --git a/tests/unit/libstore/data/worker-protocol/handshake-to-client.bin b/tests/unit/libstore/data/worker-protocol/handshake-to-client.bin new file mode 100644 index 000000000..bee94fbe5 Binary files /dev/null and b/tests/unit/libstore/data/worker-protocol/handshake-to-client.bin differ diff --git a/tests/unit/libstore/local.mk b/tests/unit/libstore/local.mk index b8f895fad..0af1d2622 100644 --- a/tests/unit/libstore/local.mk +++ b/tests/unit/libstore/local.mk @@ -31,3 +31,8 @@ libstore-tests_LIBS = \ libstore libstorec libutil libutilc libstore-tests_LDFLAGS := -lrapidcheck $(GTEST_LIBS) + +ifdef HOST_WINDOWS + # Increase the default reserved stack size to 65 MB so Nix doesn't run out of space + libstore-tests_LDFLAGS += -Wl,--stack,$(shell echo $$((65 * 1024 * 1024))) +endif diff --git a/tests/unit/libstore/machines.cc b/tests/unit/libstore/machines.cc index 9fd7fda54..2307f4d62 100644 --- a/tests/unit/libstore/machines.cc +++ b/tests/unit/libstore/machines.cc @@ -1,46 +1,29 @@ #include "machines.hh" -#include "globals.hh" #include "file-system.hh" #include "util.hh" +#include "tests/characterization.hh" + +#include #include using testing::Contains; using testing::ElementsAre; -using testing::EndsWith; using testing::Eq; using testing::Field; using testing::SizeIs; -using nix::absPath; -using nix::FormatError; -using nix::UsageError; 
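// Editorial note on the machines.cc refactor in this hunk: the tests move from
// mutating the global `settings.builders` and calling getMachines() to the pure
// Machine::parseConfig(defaultSystems, config) API. A minimal usage sketch,
// assuming only the signatures exercised in these tests (parseConfig returning an
// iterable collection of Machine, and StoreReference::render()); the concrete
// values below are illustrative:
//
//     #include "machines.hh"
//     // First argument: system types substituted when a builder line leaves the
//     // system column as "-"; second argument: the builders configuration text.
//     auto machines = nix::Machine::parseConfig(
//         {"x86_64-linux"},
//         "nix@scratchy.labs.cs.uu.nl i686-linux /home/nix/.ssh/id_scratchy_auto 8 3 kvm");
//     for (auto & m : machines)
//         std::cerr << m.storeUri.render() << " jobs=" << m.maxJobs << "\n";
//
// Keeping the parser free of global state is what lets each TEST below construct
// its own input instead of relying on the shared testing::Environment fixture
// that this hunk deletes.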
-using nix::getMachines; -using nix::Machine; -using nix::Machines; -using nix::pathExists; -using nix::Settings; -using nix::settings; - -class Environment : public ::testing::Environment { - public: - void SetUp() override { settings.thisSystem = "TEST_ARCH-TEST_OS"; } -}; - -testing::Environment* const foo_env = - testing::AddGlobalTestEnvironment(new Environment); +using namespace nix; TEST(machines, getMachinesWithEmptyBuilders) { - settings.builders = ""; - Machines actual = getMachines(); + auto actual = Machine::parseConfig({}, ""); ASSERT_THAT(actual, SizeIs(0)); } TEST(machines, getMachinesUriOnly) { - settings.builders = "nix@scratchy.labs.cs.uu.nl"; - Machines actual = getMachines(); + auto actual = Machine::parseConfig({"TEST_ARCH-TEST_OS"}, "nix@scratchy.labs.cs.uu.nl"); ASSERT_THAT(actual, SizeIs(1)); - EXPECT_THAT(actual[0], Field(&Machine::storeUri, Eq("ssh://nix@scratchy.labs.cs.uu.nl"))); + EXPECT_THAT(actual[0], Field(&Machine::storeUri, Eq(StoreReference::parse("ssh://nix@scratchy.labs.cs.uu.nl")))); EXPECT_THAT(actual[0], Field(&Machine::systemTypes, ElementsAre("TEST_ARCH-TEST_OS"))); EXPECT_THAT(actual[0], Field(&Machine::sshKey, SizeIs(0))); EXPECT_THAT(actual[0], Field(&Machine::maxJobs, Eq(1))); @@ -51,10 +34,9 @@ TEST(machines, getMachinesUriOnly) { } TEST(machines, getMachinesDefaults) { - settings.builders = "nix@scratchy.labs.cs.uu.nl - - - - - - -"; - Machines actual = getMachines(); + auto actual = Machine::parseConfig({"TEST_ARCH-TEST_OS"}, "nix@scratchy.labs.cs.uu.nl - - - - - - -"); ASSERT_THAT(actual, SizeIs(1)); - EXPECT_THAT(actual[0], Field(&Machine::storeUri, Eq("ssh://nix@scratchy.labs.cs.uu.nl"))); + EXPECT_THAT(actual[0], Field(&Machine::storeUri, Eq(StoreReference::parse("ssh://nix@scratchy.labs.cs.uu.nl")))); EXPECT_THAT(actual[0], Field(&Machine::systemTypes, ElementsAre("TEST_ARCH-TEST_OS"))); EXPECT_THAT(actual[0], Field(&Machine::sshKey, SizeIs(0))); EXPECT_THAT(actual[0], Field(&Machine::maxJobs, Eq(1))); @@ -64,29 +46,38 @@ TEST(machines, getMachinesDefaults) { EXPECT_THAT(actual[0], Field(&Machine::sshPublicHostKey, SizeIs(0))); } +MATCHER_P(AuthorityMatches, authority, "") { + *result_listener + << "where the authority of " + << arg.render() + << " is " + << authority; + auto * generic = std::get_if(&arg.variant); + if (!generic) return false; + return generic->authority == authority; +} + TEST(machines, getMachinesWithNewLineSeparator) { - settings.builders = "nix@scratchy.labs.cs.uu.nl\nnix@itchy.labs.cs.uu.nl"; - Machines actual = getMachines(); + auto actual = Machine::parseConfig({}, "nix@scratchy.labs.cs.uu.nl\nnix@itchy.labs.cs.uu.nl"); ASSERT_THAT(actual, SizeIs(2)); - EXPECT_THAT(actual, Contains(Field(&Machine::storeUri, EndsWith("nix@scratchy.labs.cs.uu.nl")))); - EXPECT_THAT(actual, Contains(Field(&Machine::storeUri, EndsWith("nix@itchy.labs.cs.uu.nl")))); + EXPECT_THAT(actual, Contains(Field(&Machine::storeUri, AuthorityMatches("nix@scratchy.labs.cs.uu.nl")))); + EXPECT_THAT(actual, Contains(Field(&Machine::storeUri, AuthorityMatches("nix@itchy.labs.cs.uu.nl")))); } TEST(machines, getMachinesWithSemicolonSeparator) { - settings.builders = "nix@scratchy.labs.cs.uu.nl ; nix@itchy.labs.cs.uu.nl"; - Machines actual = getMachines(); + auto actual = Machine::parseConfig({}, "nix@scratchy.labs.cs.uu.nl ; nix@itchy.labs.cs.uu.nl"); EXPECT_THAT(actual, SizeIs(2)); - EXPECT_THAT(actual, Contains(Field(&Machine::storeUri, EndsWith("nix@scratchy.labs.cs.uu.nl")))); - EXPECT_THAT(actual, Contains(Field(&Machine::storeUri, 
EndsWith("nix@itchy.labs.cs.uu.nl")))); + EXPECT_THAT(actual, Contains(Field(&Machine::storeUri, AuthorityMatches("nix@scratchy.labs.cs.uu.nl")))); + EXPECT_THAT(actual, Contains(Field(&Machine::storeUri, AuthorityMatches("nix@itchy.labs.cs.uu.nl")))); } TEST(machines, getMachinesWithCorrectCompleteSingleBuilder) { - settings.builders = "nix@scratchy.labs.cs.uu.nl i686-linux " - "/home/nix/.ssh/id_scratchy_auto 8 3 kvm " - "benchmark SSH+HOST+PUBLIC+KEY+BASE64+ENCODED=="; - Machines actual = getMachines(); + auto actual = Machine::parseConfig({}, + "nix@scratchy.labs.cs.uu.nl i686-linux " + "/home/nix/.ssh/id_scratchy_auto 8 3 kvm " + "benchmark SSH+HOST+PUBLIC+KEY+BASE64+ENCODED=="); ASSERT_THAT(actual, SizeIs(1)); - EXPECT_THAT(actual[0], Field(&Machine::storeUri, EndsWith("nix@scratchy.labs.cs.uu.nl"))); + EXPECT_THAT(actual[0], Field(&Machine::storeUri, AuthorityMatches("nix@scratchy.labs.cs.uu.nl"))); EXPECT_THAT(actual[0], Field(&Machine::systemTypes, ElementsAre("i686-linux"))); EXPECT_THAT(actual[0], Field(&Machine::sshKey, Eq("/home/nix/.ssh/id_scratchy_auto"))); EXPECT_THAT(actual[0], Field(&Machine::maxJobs, Eq(8))); @@ -98,13 +89,12 @@ TEST(machines, getMachinesWithCorrectCompleteSingleBuilder) { TEST(machines, getMachinesWithCorrectCompleteSingleBuilderWithTabColumnDelimiter) { - settings.builders = + auto actual = Machine::parseConfig({}, "nix@scratchy.labs.cs.uu.nl\ti686-linux\t/home/nix/.ssh/" "id_scratchy_auto\t8\t3\tkvm\tbenchmark\tSSH+HOST+PUBLIC+" - "KEY+BASE64+ENCODED=="; - Machines actual = getMachines(); + "KEY+BASE64+ENCODED=="); ASSERT_THAT(actual, SizeIs(1)); - EXPECT_THAT(actual[0], Field(&Machine::storeUri, EndsWith("nix@scratchy.labs.cs.uu.nl"))); + EXPECT_THAT(actual[0], Field(&Machine::storeUri, AuthorityMatches("nix@scratchy.labs.cs.uu.nl"))); EXPECT_THAT(actual[0], Field(&Machine::systemTypes, ElementsAre("i686-linux"))); EXPECT_THAT(actual[0], Field(&Machine::sshKey, Eq("/home/nix/.ssh/id_scratchy_auto"))); EXPECT_THAT(actual[0], Field(&Machine::maxJobs, Eq(8))); @@ -115,58 +105,61 @@ TEST(machines, } TEST(machines, getMachinesWithMultiOptions) { - settings.builders = "nix@scratchy.labs.cs.uu.nl Arch1,Arch2 - - - " - "SupportedFeature1,SupportedFeature2 " - "MandatoryFeature1,MandatoryFeature2"; - Machines actual = getMachines(); + auto actual = Machine::parseConfig({}, + "nix@scratchy.labs.cs.uu.nl Arch1,Arch2 - - - " + "SupportedFeature1,SupportedFeature2 " + "MandatoryFeature1,MandatoryFeature2"); ASSERT_THAT(actual, SizeIs(1)); - EXPECT_THAT(actual[0], Field(&Machine::storeUri, EndsWith("nix@scratchy.labs.cs.uu.nl"))); + EXPECT_THAT(actual[0], Field(&Machine::storeUri, AuthorityMatches("nix@scratchy.labs.cs.uu.nl"))); EXPECT_THAT(actual[0], Field(&Machine::systemTypes, ElementsAre("Arch1", "Arch2"))); EXPECT_THAT(actual[0], Field(&Machine::supportedFeatures, ElementsAre("SupportedFeature1", "SupportedFeature2"))); EXPECT_THAT(actual[0], Field(&Machine::mandatoryFeatures, ElementsAre("MandatoryFeature1", "MandatoryFeature2"))); } TEST(machines, getMachinesWithIncorrectFormat) { - settings.builders = "nix@scratchy.labs.cs.uu.nl - - eight"; - EXPECT_THROW(getMachines(), FormatError); - settings.builders = "nix@scratchy.labs.cs.uu.nl - - -1"; - EXPECT_THROW(getMachines(), FormatError); - settings.builders = "nix@scratchy.labs.cs.uu.nl - - 8 three"; - EXPECT_THROW(getMachines(), FormatError); - settings.builders = "nix@scratchy.labs.cs.uu.nl - - 8 -3"; - EXPECT_THROW(getMachines(), UsageError); - settings.builders = "nix@scratchy.labs.cs.uu.nl - - 8 3 - - 
BAD_BASE64"; - EXPECT_THROW(getMachines(), FormatError); + EXPECT_THROW( + Machine::parseConfig({}, "nix@scratchy.labs.cs.uu.nl - - eight"), + FormatError); + EXPECT_THROW( + Machine::parseConfig({}, "nix@scratchy.labs.cs.uu.nl - - -1"), + FormatError); + EXPECT_THROW( + Machine::parseConfig({}, "nix@scratchy.labs.cs.uu.nl - - 8 three"), + FormatError); + EXPECT_THROW( + Machine::parseConfig({}, "nix@scratchy.labs.cs.uu.nl - - 8 -3"), + UsageError); + EXPECT_THROW( + Machine::parseConfig({}, "nix@scratchy.labs.cs.uu.nl - - 8 3 - - BAD_BASE64"), + FormatError); } TEST(machines, getMachinesWithCorrectFileReference) { - auto path = absPath("tests/unit/libstore/test-data/machines.valid"); + auto path = absPath(getUnitTestData() + "/machines/valid"); ASSERT_TRUE(pathExists(path)); - settings.builders = std::string("@") + path; - Machines actual = getMachines(); + auto actual = Machine::parseConfig({}, "@" + path); ASSERT_THAT(actual, SizeIs(3)); - EXPECT_THAT(actual, Contains(Field(&Machine::storeUri, EndsWith("nix@scratchy.labs.cs.uu.nl")))); - EXPECT_THAT(actual, Contains(Field(&Machine::storeUri, EndsWith("nix@itchy.labs.cs.uu.nl")))); - EXPECT_THAT(actual, Contains(Field(&Machine::storeUri, EndsWith("nix@poochie.labs.cs.uu.nl")))); + EXPECT_THAT(actual, Contains(Field(&Machine::storeUri, AuthorityMatches("nix@scratchy.labs.cs.uu.nl")))); + EXPECT_THAT(actual, Contains(Field(&Machine::storeUri, AuthorityMatches("nix@itchy.labs.cs.uu.nl")))); + EXPECT_THAT(actual, Contains(Field(&Machine::storeUri, AuthorityMatches("nix@poochie.labs.cs.uu.nl")))); } TEST(machines, getMachinesWithCorrectFileReferenceToEmptyFile) { auto path = "/dev/null"; ASSERT_TRUE(pathExists(path)); - settings.builders = std::string("@") + path; - Machines actual = getMachines(); + auto actual = Machine::parseConfig({}, std::string{"@"} + path); ASSERT_THAT(actual, SizeIs(0)); } TEST(machines, getMachinesWithIncorrectFileReference) { - settings.builders = std::string("@") + absPath("/not/a/file"); - Machines actual = getMachines(); + auto actual = Machine::parseConfig({}, "@" + absPath("/not/a/file")); ASSERT_THAT(actual, SizeIs(0)); } TEST(machines, getMachinesWithCorrectFileReferenceToIncorrectFile) { - settings.builders = std::string("@") + absPath("tests/unit/libstore/test-data/machines.bad_format"); - EXPECT_THROW(getMachines(), FormatError); + EXPECT_THROW( + Machine::parseConfig({}, "@" + absPath(getUnitTestData() + "/machines/bad_format")), + FormatError); } diff --git a/tests/unit/libstore/path-info.cc b/tests/unit/libstore/path-info.cc index 80d6fcfed..06c662b74 100644 --- a/tests/unit/libstore/path-info.cc +++ b/tests/unit/libstore/path-info.cc @@ -19,7 +19,15 @@ class PathInfoTest : public CharacterizationTest, public LibStoreTest } }; -static UnkeyedValidPathInfo makePathInfo(const Store & store, bool includeImpureInfo) { +static UnkeyedValidPathInfo makeEmpty() +{ + return { + Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + }; +} + +static UnkeyedValidPathInfo makeFull(const Store & store, bool includeImpureInfo) +{ UnkeyedValidPathInfo info = ValidPathInfo { store, "foo", @@ -50,22 +58,21 @@ static UnkeyedValidPathInfo makePathInfo(const Store & store, bool includeImpure return info; } -#define JSON_TEST(STEM, PURE) \ +#define JSON_TEST(STEM, OBJ, PURE) \ TEST_F(PathInfoTest, PathInfo_ ## STEM ## _from_json) { \ readTest(#STEM, [&](const auto & encoded_) { \ auto encoded = json::parse(encoded_); \ UnkeyedValidPathInfo got = UnkeyedValidPathInfo::fromJSON( \ *store, \ encoded); \ - 
auto expected = makePathInfo(*store, PURE); \ + auto expected = OBJ; \ ASSERT_EQ(got, expected); \ }); \ } \ \ TEST_F(PathInfoTest, PathInfo_ ## STEM ## _to_json) { \ writeTest(#STEM, [&]() -> json { \ - return makePathInfo(*store, PURE) \ - .toJSON(*store, PURE, HashFormat::SRI); \ + return OBJ.toJSON(*store, PURE, HashFormat::SRI); \ }, [](const auto & file) { \ return json::parse(readFile(file)); \ }, [](const auto & file, const auto & got) { \ @@ -73,7 +80,10 @@ static UnkeyedValidPathInfo makePathInfo(const Store & store, bool includeImpure }); \ } -JSON_TEST(pure, false) -JSON_TEST(impure, true) +JSON_TEST(empty_pure, makeEmpty(), false) +JSON_TEST(empty_impure, makeEmpty(), true) + +JSON_TEST(pure, makeFull(*store, false), false) +JSON_TEST(impure, makeFull(*store, true), true) } diff --git a/tests/unit/libstore/serve-protocol.cc b/tests/unit/libstore/serve-protocol.cc index b2fd0fb82..ebf0c52b0 100644 --- a/tests/unit/libstore/serve-protocol.cc +++ b/tests/unit/libstore/serve-protocol.cc @@ -6,6 +6,7 @@ #include "serve-protocol.hh" #include "serve-protocol-impl.hh" +#include "serve-protocol-connection.hh" #include "build-result.hh" #include "file-descriptor.hh" #include "tests/protocol.hh" @@ -505,7 +506,8 @@ TEST_F(ServeProtoTest, handshake_client_corrupted_throws) } else { auto ver = ServeProto::BasicClientConnection::handshake( nullSink, in, defaultVersion, "blah"); - EXPECT_NE(ver, defaultVersion); + // `std::min` of this and the other version saves us + EXPECT_EQ(ver, defaultVersion); } } }); diff --git a/tests/unit/libstore/store-reference.cc b/tests/unit/libstore/store-reference.cc new file mode 100644 index 000000000..16e033ec4 --- /dev/null +++ b/tests/unit/libstore/store-reference.cc @@ -0,0 +1,123 @@ +#include +#include + +#include "file-system.hh" +#include "store-reference.hh" + +#include "tests/characterization.hh" +#include "tests/libstore.hh" + +namespace nix { + +using nlohmann::json; + +class StoreReferenceTest : public CharacterizationTest, public LibStoreTest +{ + Path unitTestData = getUnitTestData() + "/store-reference"; + + Path goldenMaster(PathView testStem) const override + { + return unitTestData + "/" + testStem + ".txt"; + } +}; + +#define URI_TEST_READ(STEM, OBJ) \ + TEST_F(StoreReferenceTest, PathInfo_##STEM##_from_uri) \ + { \ + readTest(#STEM, ([&](const auto & encoded) { \ + StoreReference expected = OBJ; \ + auto got = StoreReference::parse(encoded); \ + ASSERT_EQ(got, expected); \ + })); \ + } + +#define URI_TEST_WRITE(STEM, OBJ) \ + TEST_F(StoreReferenceTest, PathInfo_##STEM##_to_uri) \ + { \ + writeTest( \ + #STEM, \ + [&]() -> StoreReference { return OBJ; }, \ + [](const auto & file) { return StoreReference::parse(readFile(file)); }, \ + [](const auto & file, const auto & got) { return writeFile(file, got.render()); }); \ + } + +#define URI_TEST(STEM, OBJ) \ + URI_TEST_READ(STEM, OBJ) \ + URI_TEST_WRITE(STEM, OBJ) + +URI_TEST( + auto, + (StoreReference{ + .variant = StoreReference::Auto{}, + .params = {}, + })) + +URI_TEST( + auto_param, + (StoreReference{ + .variant = StoreReference::Auto{}, + .params = + { + {"root", "/foo/bar/baz"}, + }, + })) + +static StoreReference localExample_1{ + .variant = + StoreReference::Specified{ + .scheme = "local", + }, + .params = + { + {"root", "/foo/bar/baz"}, + }, +}; + +static StoreReference localExample_2{ + .variant = + StoreReference::Specified{ + .scheme = "local", + .authority = "/foo/bar/baz", + }, + .params = + { + {"trusted", "true"}, + }, +}; + +URI_TEST(local_1, localExample_1) + 
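// Editorial note: a round-trip sketch for these characterization pairs, using only
// the StoreReference API already exercised in this file (parse(), render(),
// operator==, the Auto/Specified variants and the params map); the assertions are
// illustrative, not additional tests:
//
//     StoreReference expected{
//         .variant = StoreReference::Specified{ .scheme = "local" },
//         .params = { {"root", "/foo/bar/baz"} },
//     };
//     assert(StoreReference::parse("local?root=/foo/bar/baz") == expected); // shorthand accepted
//     assert(expected.render() == "local://?root=/foo/bar/baz");            // canonical spelling
//
// This is also why the *_shorthand golden files below only get URI_TEST_READ:
// parsing accepts the short form, but render() emits the canonical scheme://
// spelling, so a write test against a shorthand file could never match.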
+URI_TEST(local_2, localExample_2) + +URI_TEST_READ(local_shorthand_1, localExample_1) + +URI_TEST_READ(local_shorthand_2, localExample_2) + +static StoreReference unixExample{ + .variant = + StoreReference::Specified{ + .scheme = "unix", + }, + .params = + { + {"max-connections", "7"}, + {"trusted", "true"}, + }, +}; + +URI_TEST(unix, unixExample) + +URI_TEST_READ(unix_shorthand, unixExample) + +URI_TEST( + ssh, + (StoreReference{ + .variant = + StoreReference::Specified{ + .scheme = "ssh", + .authority = "localhost", + }, + .params = {}, + })) + +} diff --git a/tests/unit/libstore/worker-protocol.cc b/tests/unit/libstore/worker-protocol.cc index 2b2e559a9..5907ea5a4 100644 --- a/tests/unit/libstore/worker-protocol.cc +++ b/tests/unit/libstore/worker-protocol.cc @@ -4,6 +4,7 @@ #include #include "worker-protocol.hh" +#include "worker-protocol-connection.hh" #include "worker-protocol-impl.hh" #include "derived-path.hh" #include "build-result.hh" @@ -18,9 +19,9 @@ struct WorkerProtoTest : VersionedProtoTest { /** * For serializers that don't care about the minimum version, we - * used the oldest one: 1.0. + * used the oldest one: 1.10. */ - WorkerProto::Version defaultVersion = 1 << 8 | 0; + WorkerProto::Version defaultVersion = 1 << 8 | 10; }; @@ -529,6 +530,17 @@ VERSIONED_CHARACTERIZATION_TEST( }), })) +VERSIONED_CHARACTERIZATION_TEST( + WorkerProtoTest, + buildMode, + "build-mode", + defaultVersion, + (std::tuple { + bmNormal, + bmRepair, + bmCheck, + })) + VERSIONED_CHARACTERIZATION_TEST( WorkerProtoTest, optionalTrustedFlag, @@ -591,4 +603,152 @@ VERSIONED_CHARACTERIZATION_TEST( }, })) +VERSIONED_CHARACTERIZATION_TEST( + WorkerProtoTest, + clientHandshakeInfo_1_30, + "client-handshake-info_1_30", + 1 << 8 | 30, + (std::tuple { + {}, + })) + +VERSIONED_CHARACTERIZATION_TEST( + WorkerProtoTest, + clientHandshakeInfo_1_33, + "client-handshake-info_1_33", + 1 << 8 | 33, + (std::tuple { + { + .daemonNixVersion = std::optional { "foo" }, + }, + { + .daemonNixVersion = std::optional { "bar" }, + }, + })) + +VERSIONED_CHARACTERIZATION_TEST( + WorkerProtoTest, + clientHandshakeInfo_1_35, + "client-handshake-info_1_35", + 1 << 8 | 35, + (std::tuple { + { + .daemonNixVersion = std::optional { "foo" }, + .remoteTrustsUs = std::optional { NotTrusted }, + }, + { + .daemonNixVersion = std::optional { "bar" }, + .remoteTrustsUs = std::optional { Trusted }, + }, + })) + +TEST_F(WorkerProtoTest, handshake_log) +{ + CharacterizationTest::writeTest("handshake-to-client", [&]() -> std::string { + StringSink toClientLog; + + Pipe toClient, toServer; + toClient.create(); + toServer.create(); + + WorkerProto::Version clientResult; + + auto thread = std::thread([&]() { + FdSink out { toServer.writeSide.get() }; + FdSource in0 { toClient.readSide.get() }; + TeeSource in { in0, toClientLog }; + clientResult = WorkerProto::BasicClientConnection::handshake( + out, in, defaultVersion); + }); + + { + FdSink out { toClient.writeSide.get() }; + FdSource in { toServer.readSide.get() }; + WorkerProto::BasicServerConnection::handshake( + out, in, defaultVersion); + }; + + thread.join(); + + return std::move(toClientLog.s); + }); +} + +/// Has to be a `BufferedSink` for handshake. 
+struct NullBufferedSink : BufferedSink { + void writeUnbuffered(std::string_view data) override { } +}; + +TEST_F(WorkerProtoTest, handshake_client_replay) +{ + CharacterizationTest::readTest("handshake-to-client", [&](std::string toClientLog) { + NullBufferedSink nullSink; + + StringSource in { toClientLog }; + auto clientResult = WorkerProto::BasicClientConnection::handshake( + nullSink, in, defaultVersion); + + EXPECT_EQ(clientResult, defaultVersion); + }); +} + +TEST_F(WorkerProtoTest, handshake_client_truncated_replay_throws) +{ + CharacterizationTest::readTest("handshake-to-client", [&](std::string toClientLog) { + for (size_t len = 0; len < toClientLog.size(); ++len) { + NullBufferedSink nullSink; + StringSource in { + // truncate + toClientLog.substr(0, len) + }; + if (len < 8) { + EXPECT_THROW( + WorkerProto::BasicClientConnection::handshake( + nullSink, in, defaultVersion), + EndOfFile); + } else { + // Not sure why cannot keep on checking for `EndOfFile`. + EXPECT_THROW( + WorkerProto::BasicClientConnection::handshake( + nullSink, in, defaultVersion), + Error); + } + } + }); +} + +TEST_F(WorkerProtoTest, handshake_client_corrupted_throws) +{ + CharacterizationTest::readTest("handshake-to-client", [&](const std::string toClientLog) { + for (size_t idx = 0; idx < toClientLog.size(); ++idx) { + // corrupt a copy + std::string toClientLogCorrupt = toClientLog; + toClientLogCorrupt[idx] *= 4; + ++toClientLogCorrupt[idx]; + + NullBufferedSink nullSink; + StringSource in { toClientLogCorrupt }; + + if (idx < 4 || idx == 9) { + // magic bytes don't match + EXPECT_THROW( + WorkerProto::BasicClientConnection::handshake( + nullSink, in, defaultVersion), + Error); + } else if (idx < 8 || idx >= 12) { + // Number out of bounds + EXPECT_THROW( + WorkerProto::BasicClientConnection::handshake( + nullSink, in, defaultVersion), + SerialisationError); + } else { + auto ver = WorkerProto::BasicClientConnection::handshake( + nullSink, in, defaultVersion); + // `std::min` of this and the other version saves us + EXPECT_EQ(ver, defaultVersion); + } + } + }); +} + } diff --git a/tests/unit/libutil-support/tests/characterization.hh b/tests/unit/libutil-support/tests/characterization.hh index 9d6c850f0..c2f686dbf 100644 --- a/tests/unit/libutil-support/tests/characterization.hh +++ b/tests/unit/libutil-support/tests/characterization.hh @@ -12,7 +12,7 @@ namespace nix { * The path to the unit test data directory. See the contributing guide * in the manual for further details. */ -static Path getUnitTestData() { +static inline Path getUnitTestData() { return getEnv("_NIX_TEST_UNIT_DATA").value(); } @@ -21,7 +21,7 @@ static Path getUnitTestData() { * against them. See the contributing guide in the manual for further * details. 
*/ -static bool testAccept() { +static inline bool testAccept() { return getEnv("_NIX_TEST_ACCEPT") == "1"; } diff --git a/tests/unit/libutil/json-utils.cc b/tests/unit/libutil/json-utils.cc index c9370a74b..704a4acb0 100644 --- a/tests/unit/libutil/json-utils.cc +++ b/tests/unit/libutil/json-utils.cc @@ -175,13 +175,16 @@ TEST(optionalValueAt, empty) { TEST(getNullable, null) { auto json = R"(null)"_json; - ASSERT_EQ(getNullable(json), std::nullopt); + ASSERT_EQ(getNullable(json), nullptr); } TEST(getNullable, empty) { auto json = R"({})"_json; - ASSERT_EQ(getNullable(json), std::optional { R"({})"_json }); + auto * p = getNullable(json); + + ASSERT_NE(p, nullptr); + ASSERT_EQ(*p, R"({})"_json); } } /* namespace nix */ diff --git a/tests/unit/libutil/local.mk b/tests/unit/libutil/local.mk index 39b4c0782..b9bddc24d 100644 --- a/tests/unit/libutil/local.mk +++ b/tests/unit/libutil/local.mk @@ -27,6 +27,11 @@ libutil-tests_LIBS = libutil-test-support libutil libutilc libutil-tests_LDFLAGS := -lrapidcheck $(GTEST_LIBS) +ifdef HOST_WINDOWS + # Increase the default reserved stack size to 65 MB so Nix doesn't run out of space + libutil-tests_LDFLAGS += -Wl,--stack,$(shell echo $$((65 * 1024 * 1024))) +endif + check: $(d)/data/git/check-data.sh.test $(eval $(call run-test,$(d)/data/git/check-data.sh))
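
The json-utils change above switches getNullable from returning an std::optional value to returning a pointer into the parsed document, so a JSON null now maps to nullptr and callers no longer copy the selected value. A minimal caller sketch, assuming only the behaviour shown in the updated tests (pointer result, nullptr for JSON null); the printDeriver helper and the exact field access are illustrative, not part of this diff:

#include <iostream>
#include <string>
#include <nlohmann/json.hpp>
#include "json-utils.hh" // provides getNullable in this tree

using namespace nix;

// Hypothetical caller: read the nullable "deriver" field of a path-info JSON
// object, where JSON null means "no deriver" (compare empty_impure.json above).
void printDeriver(const nlohmann::json & pathInfo)
{
    if (auto * deriver = getNullable(pathInfo.at("deriver")))
        std::cout << deriver->get<std::string>() << "\n"; // non-null: a store path string
    else
        std::cout << "(no deriver)\n";                    // JSON null came back as nullptr
}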