diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index db69e51db..4488c7b7d 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -23,6 +23,11 @@ Maintainers: tick if completed or explain if not relevant - unit tests - `src/*/tests` - integration tests - `tests/nixos/*` - [ ] documentation in the manual + - [ ] documentation in the internal API docs - [ ] code and comments are self-explanatory - [ ] commit message explains why the change was made - [ ] new feature or incompatible change: updated release notes + +# Priorities + +Add :+1: to [pull requests you find important](https://github.com/NixOS/nix/pulls?q=is%3Aopen+sort%3Areactions-%2B1-desc). diff --git a/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md b/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md deleted file mode 100644 index 5311be01f..000000000 --- a/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md +++ /dev/null @@ -1,11 +0,0 @@ -**Release Notes** -Please include relevant [release notes](https://github.com/NixOS/nix/blob/master/doc/manual/src/release-notes/rl-next.md) as needed. - - -**Testing** - -If this issue is a regression or something that should block release, please consider including a test either in the [testsuite](https://github.com/NixOS/nix/tree/master/tests) or as a [hydraJob]( https://github.com/NixOS/nix/blob/master/flake.nix#L396) so that it can be part of the [automatic checks](https://hydra.nixos.org/jobset/nix/master). - -**Priorities** - -Add :+1: to [pull requests you find important](https://github.com/NixOS/nix/pulls?q=is%3Aopen+sort%3Areactions-%2B1-desc). diff --git a/.github/labeler.yml b/.github/labeler.yml new file mode 100644 index 000000000..fce0d3aeb --- /dev/null +++ b/.github/labeler.yml @@ -0,0 +1,23 @@ +"documentation": + - doc/manual/* + - src/nix/**/*.md + +"store": + - src/libstore/store-api.* + - src/libstore/*-store.* + +"fetching": + - src/libfetchers/**/* + +"repl": + - src/libcmd/repl.* + - src/nix/repl.* + +"new-cli": + - src/nix/**/* + +"tests": + # Unit tests + - src/*/tests/**/* + # Functional and integration tests + - tests/**/* diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 558cfa804..b04723b95 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -21,7 +21,7 @@ jobs: fetch-depth: 0 - name: Create backport PRs # should be kept in sync with `version` - uses: zeebe-io/backport-action@v1.1.0 + uses: zeebe-io/backport-action@v1.2.0 with: # Config README: https://github.com/zeebe-io/backport-action#backport-action github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 325579a5b..c06c77043 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,7 +19,7 @@ jobs: - uses: actions/checkout@v3 with: fetch-depth: 0 - - uses: cachix/install-nix-action@v19 + - uses: cachix/install-nix-action@v20 - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV - uses: cachix/cachix-action@v12 if: needs.check_secrets.outputs.cachix == 'true' @@ -58,7 +58,9 @@ jobs: with: fetch-depth: 0 - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV - - uses: cachix/install-nix-action@v19 + - uses: cachix/install-nix-action@v20 + with: + install_url: https://releases.nixos.org/nix/nix-2.13.3/install - uses: cachix/cachix-action@v12 with: name: '${{ env.CACHIX_NAME }}' @@ -77,7 +79,7 @@ 
jobs: steps: - uses: actions/checkout@v3 - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV - - uses: cachix/install-nix-action@v19 + - uses: cachix/install-nix-action@v20 with: install_url: '${{needs.installer.outputs.installerURL}}' install_options: "--tarball-url-prefix https://${{ env.CACHIX_NAME }}.cachix.org/serve" @@ -89,6 +91,8 @@ jobs: - run: exec sh -c "nix-instantiate -E 'builtins.currentTime' --eval" - run: exec zsh -c "nix-instantiate -E 'builtins.currentTime' --eval" - run: exec fish -c "nix-instantiate -E 'builtins.currentTime' --eval" + - run: exec bash -c "nix-channel --add https://releases.nixos.org/nixos/unstable/nixos-23.05pre466020.60c1d71f2ba nixpkgs" + - run: exec bash -c "nix-channel --update && nix-env -iA nixpkgs.hello && hello" docker_push_image: needs: [check_secrets, tests] @@ -102,7 +106,9 @@ jobs: - uses: actions/checkout@v3 with: fetch-depth: 0 - - uses: cachix/install-nix-action@v19 + - uses: cachix/install-nix-action@v20 + with: + install_url: https://releases.nixos.org/nix/nix-2.13.3/install - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV - run: echo NIX_VERSION="$(nix --experimental-features 'nix-command flakes' eval .\#default.version | tr -d \")" >> $GITHUB_ENV - uses: cachix/cachix-action@v12 diff --git a/.github/workflows/labels.yml b/.github/workflows/labels.yml new file mode 100644 index 000000000..5f949ddc5 --- /dev/null +++ b/.github/workflows/labels.yml @@ -0,0 +1,24 @@ +name: "Label PR" + +on: + pull_request_target: + types: [edited, opened, synchronize, reopened] + +# WARNING: +# When extending this action, be aware that $GITHUB_TOKEN allows some write +# access to the GitHub API. This means that it should not evaluate user input in +# a way that allows code injection. + +permissions: + contents: read + pull-requests: write + +jobs: + labels: + runs-on: ubuntu-latest + if: github.repository_owner == 'NixOS' + steps: + - uses: actions/labeler@v4 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + sync-labels: true diff --git a/.gitignore b/.gitignore index e326966d6..8ceff4ef2 100644 --- a/.gitignore +++ b/.gitignore @@ -19,9 +19,12 @@ perl/Makefile.config /doc/manual/nix.json /doc/manual/conf-file.json /doc/manual/builtins.json +/doc/manual/xp-features.json /doc/manual/src/SUMMARY.md /doc/manual/src/command-ref/new-cli /doc/manual/src/command-ref/conf-file.md +/doc/manual/src/command-ref/experimental-features-shortlist.md +/doc/manual/src/contributing/experimental-feature-descriptions.md /doc/manual/src/language/builtins.md # /scripts/ diff --git a/.version b/.version index 575a07b9f..752490696 100644 --- a/.version +++ b/.version @@ -1 +1 @@ -2.14.0 \ No newline at end of file +2.16.0 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..1b0ecaf36 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,61 @@ +# Contributing to Nix + +Welcome and thank you for your interest in contributing to Nix! +We appreciate your support. + +Reading and following these guidelines will help us make the contribution process easy and effective for everyone involved. + + +## Report a bug + +1. Check on the [GitHub issue tracker](https://github.com/NixOS/nix/issues) if your bug was already reported. + +2. If you were not able to find the bug or feature [open a new issue](https://github.com/NixOS/nix/issues/new/choose) + +3. The issue templates will guide you in specifying your issue. 
+ The more complete the information you provide, the more likely it can be found by others and the more useful it is in the future. + Make sure reported bugs can be reproduced easily. + +4. Once submitted, do not expect issues to be picked up or solved right away. + The only way to ensure this is to [work on the issue yourself](#making-changes-to-nix). + +## Report a security vulnerability + +Check out the [security policy](https://github.com/NixOS/nix/security/policy). + +## Making changes to Nix + +1. Check for [pull requests](https://github.com/NixOS/nix/pulls) that might already cover the contribution you are about to make. + There are many open pull requests that might already do what you intend to work on. + You can use [labels](https://github.com/NixOS/nix/labels) to filter for relevant topics. + +2. Search for related issues that cover what you're going to work on. It could help to mention there that you will work on the issue. + +3. Check the [Nix reference manual](https://nixos.org/manual/nix/unstable/contributing/hacking.html) for information on building Nix and running its tests. + + For contributions to the command line interface, please check the [CLI guidelines](https://nixos.org/manual/nix/unstable/contributing/cli-guideline.html). + +4. Make your changes! + +5. [Create a pull request](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request) for your changes. + * [Mark the pull request as draft](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/changing-the-stage-of-a-pull-request) if you're not done with the changes. + * Make sure to have [a clean history of commits on your branch by using rebase](https://www.digitalocean.com/community/tutorials/how-to-rebase-and-update-a-pull-request). + * Link related issues in your pull request to inform interested parties and future contributors about your change. + If your pull request closes one or multiple issues, note that in the description using `Closes: #<number>`, as it will then happen automatically when your change is merged. + +6. Do not expect your pull request to be reviewed immediately. + Nix maintainers follow a [structured process for reviews and design decisions](https://github.com/NixOS/nix/tree/master/maintainers#project-board-protocol), which may or may not prioritise your work. + +7. If you need additional feedback or help getting your pull request into shape, ask other contributors using [@mentions](https://docs.github.com/en/get-started/writing-on-github/getting-started-with-writing-and-formatting-on-github/basic-writing-and-formatting-syntax#mentioning-people-and-teams). + +## Making changes to the Nix manual + +The Nix reference manual is hosted on https://nixos.org/manual/nix. +The underlying source files are located in [`doc/manual/src`](./doc/manual/src). +For small changes you can [use GitHub to edit these files](https://docs.github.com/en/repositories/working-with-files/managing-files/editing-files). +For larger changes see the [Nix reference manual](https://nixos.org/manual/nix/unstable/contributing/hacking.html). + +## Getting help + +Whenever you're stuck or do not know how to proceed, you can always ask for help. +The appropriate channels to do so can be found on the [NixOS Community](https://nixos.org/community/) page. 
diff --git a/Makefile b/Makefile index 8675c9925..d6b49473a 100644 --- a/Makefile +++ b/Makefile @@ -2,13 +2,10 @@ makefiles = \ mk/precompiled-headers.mk \ local.mk \ src/libutil/local.mk \ - src/libutil/tests/local.mk \ src/libstore/local.mk \ - src/libstore/tests/local.mk \ src/libfetchers/local.mk \ src/libmain/local.mk \ src/libexpr/local.mk \ - src/libexpr/tests/local.mk \ src/libcmd/local.mk \ src/nix/local.mk \ src/resolve-system-dependencies/local.mk \ @@ -20,11 +17,22 @@ makefiles = \ misc/launchd/local.mk \ misc/upstart/local.mk \ doc/manual/local.mk \ - tests/local.mk \ - tests/plugins/local.mk + doc/internal-api/local.mk -include Makefile.config +ifeq ($(tests), yes) +makefiles += \ + src/libutil/tests/local.mk \ + src/libstore/tests/local.mk \ + src/libexpr/tests/local.mk \ + tests/local.mk \ + tests/plugins/local.mk +else +makefiles += \ + mk/disable-tests.mk +endif + OPTIMIZE = 1 ifeq ($(OPTIMIZE), 1) diff --git a/Makefile.config.in b/Makefile.config.in index 1c5405c6d..707cfe0e3 100644 --- a/Makefile.config.in +++ b/Makefile.config.in @@ -22,6 +22,7 @@ LOWDOWN_LIBS = @LOWDOWN_LIBS@ OPENSSL_LIBS = @OPENSSL_LIBS@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ +RAPIDCHECK_HEADERS = @RAPIDCHECK_HEADERS@ SHELL = @bash@ SODIUM_LIBS = @SODIUM_LIBS@ SQLITE3_LIBS = @SQLITE3_LIBS@ @@ -45,3 +46,5 @@ sandbox_shell = @sandbox_shell@ storedir = @storedir@ sysconfdir = @sysconfdir@ system = @system@ +tests = @tests@ +internal_api_docs = @internal_api_docs@ diff --git a/boehmgc-coroutine-sp-fallback.diff b/boehmgc-coroutine-sp-fallback.diff index 2826486fb..5066d8278 100644 --- a/boehmgc-coroutine-sp-fallback.diff +++ b/boehmgc-coroutine-sp-fallback.diff @@ -1,8 +1,8 @@ diff --git a/darwin_stop_world.c b/darwin_stop_world.c -index 3dbaa3fb..36a1d1f7 100644 +index 0468aaec..b348d869 100644 --- a/darwin_stop_world.c +++ b/darwin_stop_world.c -@@ -352,6 +352,7 @@ GC_INNER void GC_push_all_stacks(void) +@@ -356,6 +356,7 @@ GC_INNER void GC_push_all_stacks(void) int nthreads = 0; word total_size = 0; mach_msg_type_number_t listcount = (mach_msg_type_number_t)THREAD_TABLE_SZ; @@ -10,7 +10,7 @@ index 3dbaa3fb..36a1d1f7 100644 if (!EXPECT(GC_thr_initialized, TRUE)) GC_thr_init(); -@@ -407,6 +408,19 @@ GC_INNER void GC_push_all_stacks(void) +@@ -411,6 +412,19 @@ GC_INNER void GC_push_all_stacks(void) GC_push_all_stack_sections(lo, hi, p->traced_stack_sect); } if (altstack_lo) { @@ -30,6 +30,22 @@ index 3dbaa3fb..36a1d1f7 100644 total_size += altstack_hi - altstack_lo; GC_push_all_stack(altstack_lo, altstack_hi); } +diff --git a/include/gc.h b/include/gc.h +index edab6c22..f2c61282 100644 +--- a/include/gc.h ++++ b/include/gc.h +@@ -2172,6 +2172,11 @@ GC_API void GC_CALL GC_win32_free_heap(void); + (*GC_amiga_allocwrapper_do)(a,GC_malloc_atomic_ignore_off_page) + #endif /* _AMIGA && !GC_AMIGA_MAKINGLIB */ + ++#if !__APPLE__ ++/* Patch doesn't work on apple */ ++#define NIX_BOEHM_PATCH_VERSION 1 ++#endif ++ + #ifdef __cplusplus + } /* extern "C" */ + #endif diff --git a/pthread_stop_world.c b/pthread_stop_world.c index b5d71e62..aed7b0bf 100644 --- a/pthread_stop_world.c diff --git a/configure.ac b/configure.ac index 09b3651b9..bb3f92e4d 100644 --- a/configure.ac +++ b/configure.ac @@ -145,6 +145,18 @@ if test "x$GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC" = xyes; then LDFLAGS="-latomic $LDFLAGS" fi +# Building without tests is useful for bootstrapping with a smaller footprint +# or running the tests in a separate derivation. Otherwise, we do compile and +# run them. 
+AC_ARG_ENABLE(tests, AS_HELP_STRING([--disable-tests],[Do not build the tests]), + tests=$enableval, tests=yes) +AC_SUBST(tests) + +# Building without API docs is the default as Nix' C++ interfaces are internal and unstable. +AC_ARG_ENABLE(internal_api_docs, AS_HELP_STRING([--enable-internal-api-docs],[Build API docs for Nix's internal unstable C++ interfaces]), + internal_api_docs=$enableval, internal_api_docs=no) +AC_SUBST(internal_api_docs) + # LTO is currently broken with clang for unknown reasons; ld segfaults in the llvm plugin AC_ARG_ENABLE(lto, AS_HELP_STRING([--enable-lto],[Enable LTO (only supported with GCC) [default=no]]), lto=$enableval, lto=no) @@ -172,7 +184,7 @@ fi # Look for OpenSSL, a required dependency. FIXME: this is only (maybe) # used by S3BinaryCacheStore. -PKG_CHECK_MODULES([OPENSSL], [libcrypto], [CXXFLAGS="$OPENSSL_CFLAGS $CXXFLAGS"]) +PKG_CHECK_MODULES([OPENSSL], [libcrypto >= 1.1.1], [CXXFLAGS="$OPENSSL_CFLAGS $CXXFLAGS"]) # Look for libarchive. @@ -270,18 +282,34 @@ if test "$gc" = yes; then fi +if test "$tests" = yes; then + # Look for gtest. PKG_CHECK_MODULES([GTEST], [gtest_main]) # Look for rapidcheck. +AC_ARG_VAR([RAPIDCHECK_HEADERS], [include path of gtest headers shipped by RAPIDCHECK]) # No pkg-config yet, https://github.com/emil-e/rapidcheck/issues/302 AC_LANG_PUSH(C++) +AC_SUBST(RAPIDCHECK_HEADERS) +[CXXFLAGS="-I $RAPIDCHECK_HEADERS $CXXFLAGS"] +[LIBS="-lrapidcheck -lgtest $LIBS"] AC_CHECK_HEADERS([rapidcheck/gtest.h], [], [], [#include <gtest/gtest.h>]) -dnl No good for C++ libs with mangled symbols -dnl AC_CHECK_LIB([rapidcheck], []) +dnl AC_CHECK_LIB doesn't work for C++ libs with mangled symbols +AC_LINK_IFELSE([ + AC_LANG_PROGRAM([[ + #include <gtest/gtest.h> + #include <rapidcheck/gtest.h> + ]], [[ + return RUN_ALL_TESTS(); + ]]) + ], + [], + [AC_MSG_ERROR([librapidcheck is not found.])]) AC_LANG_POP(C++) +fi # Look for nlohmann/json. PKG_CHECK_MODULES([NLOHMANN_JSON], [nlohmann_json >= 3.9]) diff --git a/default.nix b/default.nix index 00ec5b617..2cccff28d 100644 --- a/default.nix +++ b/default.nix @@ -1,3 +1,10 @@ -(import (fetchTarball "https://github.com/edolstra/flake-compat/archive/master.tar.gz") { - src = ./.; -}).defaultNix +(import + ( + let lock = builtins.fromJSON (builtins.readFile ./flake.lock); in + fetchTarball { + url = "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz"; + sha256 = lock.nodes.flake-compat.locked.narHash; + } + ) + { src = ./.; } +).defaultNix diff --git a/doc/internal-api/.gitignore b/doc/internal-api/.gitignore new file mode 100644 index 000000000..dab28b6b0 --- /dev/null +++ b/doc/internal-api/.gitignore @@ -0,0 +1,3 @@ +/doxygen.cfg +/html +/latex diff --git a/doc/internal-api/doxygen.cfg.in b/doc/internal-api/doxygen.cfg.in new file mode 100644 index 000000000..8f526536d --- /dev/null +++ b/doc/internal-api/doxygen.cfg.in @@ -0,0 +1,63 @@ +# Doxyfile 1.9.5 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. This name is used in the +# title of most generated pages and in a few other places. +# The default value is: My Project. + +PROJECT_NAME = "Nix" + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. This +# could be handy for archiving the generated documentation or if some version +# control system is used. 
+ +PROJECT_NUMBER = @PACKAGE_VERSION@ + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer a +# quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = "Nix, the purely functional package manager; unstable internal interfaces" + +# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output. +# The default value is: YES. + +GENERATE_LATEX = NO + +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. Separate the files or directories with +# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING +# Note: If this tag is empty the current directory is searched. + +# FIXME Make this list more maintainable somehow. We could maybe generate this +# in the Makefile, but we would need to change how `.in` files are preprocessed +# so they can expand variables despite configure variables. + +INPUT = \ + src/libcmd \ + src/libexpr \ + src/libexpr/flake \ + src/libexpr/tests \ + src/libexpr/tests/value \ + src/libexpr/value \ + src/libfetchers \ + src/libmain \ + src/libstore \ + src/libstore/build \ + src/libstore/builtins \ + src/libstore/tests \ + src/libutil \ + src/libutil/tests \ + src/nix \ + src/nix-env \ + src/nix-store + +# The INCLUDE_PATH tag can be used to specify one or more directories that +# contain include files that are not input files but should be processed by the +# preprocessor. Note that the INCLUDE_PATH is not recursive, so the setting of +# RECURSIVE has no effect here. +# This tag requires that the tag SEARCH_INCLUDES is set to YES. + +INCLUDE_PATH = @RAPIDCHECK_HEADERS@ diff --git a/doc/internal-api/local.mk b/doc/internal-api/local.mk new file mode 100644 index 000000000..890f341b7 --- /dev/null +++ b/doc/internal-api/local.mk @@ -0,0 +1,19 @@ +.PHONY: internal-api-html + +ifeq ($(internal_api_docs), yes) + +$(docdir)/internal-api/html/index.html $(docdir)/internal-api/latex: $(d)/doxygen.cfg + mkdir -p $(docdir)/internal-api + { cat $< ; echo "OUTPUT_DIRECTORY=$(docdir)/internal-api" ; } | doxygen - + +# Generate the HTML API docs for Nix's unstable internal interfaces. +internal-api-html: $(docdir)/internal-api/html/index.html + +else + +# Make a nicer error message +internal-api-html: + @echo "Internal API docs are disabled. Configure with '--enable-internal-api-docs', or avoid calling 'make internal-api-html'." + @exit 1 + +endif diff --git a/doc/manual/generate-builtins.nix b/doc/manual/generate-builtins.nix index 115bb3f94..71f96153f 100644 --- a/doc/manual/generate-builtins.nix +++ b/doc/manual/generate-builtins.nix @@ -1,8 +1,12 @@ -builtinsDump: +let + inherit (builtins) concatStringsSep attrNames; +in + +builtinsInfo: let showBuiltin = name: let - inherit (builtinsDump.${name}) doc args; + inherit (builtinsInfo.${name}) doc args; in ''
@@ -14,7 +18,7 @@ let ''; - listArgs = args: builtins.concatStringsSep " " (map (s: "${s}") args); + listArgs = args: concatStringsSep " " (map (s: "${s}") args); in -with builtins; concatStringsSep "\n" (map showBuiltin (attrNames builtinsDump)) +concatStringsSep "\n" (map showBuiltin (attrNames builtinsInfo)) diff --git a/doc/manual/generate-manpage.nix b/doc/manual/generate-manpage.nix index 8c7c4d358..fb34898f3 100644 --- a/doc/manual/generate-manpage.nix +++ b/doc/manual/generate-manpage.nix @@ -1,15 +1,24 @@ -{ toplevel }: +let + inherit (builtins) + attrNames attrValues fromJSON listToAttrs mapAttrs + concatStringsSep concatMap length lessThan replaceStrings sort; + inherit (import ./utils.nix) concatStrings optionalString filterAttrs trim squash unique showSettings; +in -with builtins; -with import ./utils.nix; +commandDump: let + commandInfo = fromJSON commandDump; + showCommand = { command, details, filename, toplevel }: let + result = '' > **Warning** \ - > This program is **experimental** and its interface is subject to change. + > This program is + > [**experimental**](@docroot@/contributing/experimental-features.md#xp-feature-nix-command) + > and its interface is subject to change. # Name @@ -25,59 +34,75 @@ let ${maybeOptions} ''; + showSynopsis = command: args: let - showArgument = arg: "*${arg.label}*" + (if arg ? arity then "" else "..."); + showArgument = arg: "*${arg.label}*" + optionalString (! arg ? arity) "..."; arguments = concatStringsSep " " (map showArgument args); in '' `${command}` [*option*...] ${arguments} ''; - maybeSubcommands = if details ? commands && details.commands != {} - then '' + + maybeSubcommands = optionalString (details ? commands && details.commands != {}) + '' where *subcommand* is one of the following: ${subcommands} - '' - else ""; + ''; + subcommands = if length categories > 1 then listCategories else listSubcommands details.commands; + categories = sort (x: y: x.id < y.id) (unique (map (cmd: cmd.category) (attrValues details.commands))); + listCategories = concatStrings (map showCategory categories); + showCategory = cat: '' **${toString cat.description}:** ${listSubcommands (filterAttrs (n: v: v.category == cat) details.commands)} ''; + listSubcommands = cmds: concatStrings (attrValues (mapAttrs showSubcommand cmds)); + showSubcommand = name: subcmd: '' * [`${command} ${name}`](./${appendName filename name}.md) - ${subcmd.description} ''; - maybeDocumentation = if details ? doc then details.doc else ""; - maybeOptions = if details.flags == {} then "" else '' + + maybeDocumentation = optionalString + (details ? doc) + (replaceStrings ["@stores@"] [storeDocs] details.doc); + + maybeOptions = optionalString (details.flags != {}) '' # Options ${showOptions details.flags toplevel.flags} ''; + showOptions = options: commonOptions: let allOptions = options // commonOptions; showCategory = cat: '' - ${if cat != "" then "**${cat}:**" else ""} + ${optionalString (cat != "") "**${cat}:**"} ${listOptions (filterAttrs (n: v: v.category == cat) allOptions)} ''; listOptions = opts: concatStringsSep "\n" (attrValues (mapAttrs showOption opts)); showOption = name: option: let - shortName = if option ? shortName then "/ `-${option.shortName}`" else ""; - labels = if option ? labels then (concatStringsSep " " (map (s: "*${s}*") option.labels)) else ""; + shortName = optionalString + (option ? shortName) + ("/ `-${option.shortName}`"); + labels = optionalString + (option ? 
labels) + (concatStringsSep " " (map (s: "*${s}*") option.labels)); in trim '' - `--${name}` ${shortName} ${labels} ${option.description} ''; - categories = sort builtins.lessThan (unique (map (cmd: cmd.category) (attrValues allOptions))); + categories = sort lessThan (unique (map (cmd: cmd.category) (attrValues allOptions))); in concatStrings (map showCategory categories); in squash result; @@ -98,13 +123,11 @@ let }; in [ cmd ] ++ concatMap subcommand (attrNames details.commands or {}); - parsedToplevel = builtins.fromJSON toplevel; - manpages = processCommand { command = "nix"; - details = parsedToplevel; + details = commandInfo.args; filename = "nix"; - toplevel = parsedToplevel; + toplevel = commandInfo.args; }; tableOfContents = let @@ -112,4 +135,18 @@ let " - [${page.command}](command-ref/new-cli/${page.name})"; in concatStringsSep "\n" (map showEntry manpages) + "\n"; + storeDocs = + let + showStore = name: { settings, doc }: + '' + ## ${name} + + ${doc} + + **Settings**: + + ${showSettings { useAnchors = false; } settings} + ''; + in concatStrings (attrValues (mapAttrs showStore commandInfo.stores)); + in (listToAttrs manpages) // { "SUMMARY.md" = tableOfContents; } diff --git a/doc/manual/generate-options.nix b/doc/manual/generate-options.nix deleted file mode 100644 index a4ec36477..000000000 --- a/doc/manual/generate-options.nix +++ /dev/null @@ -1,41 +0,0 @@ -let - inherit (builtins) attrNames concatStringsSep isAttrs isBool; - inherit (import ./utils.nix) concatStrings squash splitLines; -in - -optionsInfo: -let - showOption = name: - let - inherit (optionsInfo.${name}) description documentDefault defaultValue aliases; - result = squash '' - - [`${name}`](#conf-${name}) - - ${indent " " body} - ''; - # separate body to cleanly handle indentation - body = '' - ${description} - - **Default:** ${showDefault documentDefault defaultValue} - - ${showAliases aliases} - ''; - showDefault = documentDefault: defaultValue: - if documentDefault then - # a StringMap value type is specified as a string, but - # this shows the value type. The empty stringmap is `null` in - # JSON, but that converts to `{ }` here. 
- if defaultValue == "" || defaultValue == [] || isAttrs defaultValue - then "*empty*" - else if isBool defaultValue then - if defaultValue then "`true`" else "`false`" - else "`${toString defaultValue}`" - else "*machine-specific*"; - showAliases = aliases: - if aliases == [] then "" else - "**Deprecated alias:** ${(concatStringsSep ", " (map (s: "`${s}`") aliases))}"; - indent = prefix: s: - concatStringsSep "\n" (map (x: if x == "" then x else "${prefix}${x}") (splitLines s)); - in result; -in concatStrings (map showOption (attrNames optionsInfo)) diff --git a/doc/manual/generate-xp-features-shortlist.nix b/doc/manual/generate-xp-features-shortlist.nix new file mode 100644 index 000000000..30e211c96 --- /dev/null +++ b/doc/manual/generate-xp-features-shortlist.nix @@ -0,0 +1,9 @@ +with builtins; +with import ./utils.nix; + +let + showExperimentalFeature = name: doc: + '' + - [`${name}`](@docroot@/contributing/experimental-features.md#xp-feature-${name}) + ''; +in xps: indent " " (concatStrings (attrValues (mapAttrs showExperimentalFeature xps))) diff --git a/doc/manual/generate-xp-features.nix b/doc/manual/generate-xp-features.nix new file mode 100644 index 000000000..adb94355c --- /dev/null +++ b/doc/manual/generate-xp-features.nix @@ -0,0 +1,11 @@ +with builtins; +with import ./utils.nix; + +let + showExperimentalFeature = name: doc: + squash '' + ## [`${name}`]{#xp-feature-${name}} + + ${doc} + ''; +in xps: (concatStringsSep "\n" (attrValues (mapAttrs showExperimentalFeature xps))) diff --git a/doc/manual/local.mk b/doc/manual/local.mk index f43510b6d..63e7e61e4 100644 --- a/doc/manual/local.mk +++ b/doc/manual/local.mk @@ -1,17 +1,24 @@ ifeq ($(doc_generate),yes) MANUAL_SRCS := \ - $(call rwildcard, $(d)/src, *.md) \ - $(call rwildcard, $(d)/src, */*.md) + $(call rwildcard, $(d)/src, *.md) \ + $(call rwildcard, $(d)/src, */*.md) -# Generate man pages. man-pages := $(foreach n, \ - nix-env.1 nix-build.1 nix-shell.1 nix-store.1 nix-instantiate.1 \ - nix-collect-garbage.1 \ - nix-prefetch-url.1 nix-channel.1 \ - nix-hash.1 nix-copy-closure.1 \ - nix.conf.5 nix-daemon.8, \ - $(d)/$(n)) + nix-env.1 nix-store.1 \ + nix-build.1 nix-shell.1 nix-instantiate.1 \ + nix-collect-garbage.1 \ + nix-prefetch-url.1 nix-channel.1 \ + nix-hash.1 nix-copy-closure.1 \ + nix.conf.5 nix-daemon.8 \ +, $(d)/$(n)) + +# man pages for subcommands +# convert from `$(d)/src/command-ref/nix-{1}/{2}.md` to `$(d)/nix-{1}-{2}.1` +# FIXME: unify with how nix3-cli man pages are generated +man-pages += $(foreach subcommand, \ + $(filter-out %opt-common.md %env-common.md, $(wildcard $(d)/src/command-ref/nix-*/*.md)), \ + $(d)/$(subst /,-,$(subst $(d)/src/command-ref/,,$(subst .md,.1,$(subcommand))))) clean-files += $(d)/*.1 $(d)/*.5 $(d)/*.8 @@ -26,9 +33,42 @@ dummy-env = env -i \ nix-eval = $(dummy-env) $(bindir)/nix eval --experimental-features nix-command -I nix/corepkgs=corepkgs --store dummy:// --impure --raw +# re-implement mdBook's include directive to make it usable for terminal output and for proper @docroot@ substitution +define process-includes + while read -r line; do \ + set -euo pipefail; \ + filename="$$(dirname $(1))/$$(sed 's/{{#include \(.*\)}}/\1/'<<< $$line)"; \ + test -f "$$filename" || ( echo "#include-d file '$$filename' does not exist." 
>&2; exit 1; ); \ + matchline="$$(sed 's|/|\\/|g' <<< $$line)"; \ + sed -i "/$$matchline/r $$filename" $(2); \ + sed -i "s/$$matchline//" $(2); \ + done < <(grep '{{#include' $(1)) +endef + +$(d)/nix-env-%.1: $(d)/src/command-ref/nix-env/%.md + @printf "Title: %s\n\n" "$(subst nix-env-,nix-env --,$$(basename "$@" .1))" > $^.tmp + $(render-subcommand) + +$(d)/nix-store-%.1: $(d)/src/command-ref/nix-store/%.md + @printf -- 'Title: %s\n\n' "$(subst nix-store-,nix-store --,$$(basename "$@" .1))" > $^.tmp + $(render-subcommand) + +# FIXME: there surely is some more deduplication to be achieved here with even darker Make magic +define render-subcommand + @cat $^ >> $^.tmp + @$(call process-includes,$^,$^.tmp) + $(trace-gen) lowdown -sT man --nroff-nolinks -M section=1 $^.tmp -o $@ + @# fix up `lowdown`'s automatic escaping of `--` + @# https://github.com/kristapsdz/lowdown/blob/edca6ce6d5336efb147321a43c47a698de41bb7c/entity.c#L202 + @sed -i 's/\e\[u2013\]/--/' $@ + @rm $^.tmp +endef + + $(d)/%.1: $(d)/src/command-ref/%.md @printf "Title: %s\n\n" "$$(basename $@ .1)" > $^.tmp @cat $^ >> $^.tmp + @$(call process-includes,$^,$^.tmp) $(trace-gen) lowdown -sT man --nroff-nolinks -M section=1 $^.tmp -o $@ @rm $^.tmp @@ -41,40 +81,49 @@ $(d)/%.8: $(d)/src/command-ref/%.md $(d)/nix.conf.5: $(d)/src/command-ref/conf-file.md @printf "Title: %s\n\n" "$$(basename $@ .5)" > $^.tmp @cat $^ >> $^.tmp + @$(call process-includes,$^,$^.tmp) $(trace-gen) lowdown -sT man --nroff-nolinks -M section=5 $^.tmp -o $@ @rm $^.tmp -$(d)/src/SUMMARY.md: $(d)/src/SUMMARY.md.in $(d)/src/command-ref/new-cli - $(trace-gen) cat doc/manual/src/SUMMARY.md.in | while IFS= read line; do if [[ $$line = @manpages@ ]]; then cat doc/manual/src/command-ref/new-cli/SUMMARY.md; else echo "$$line"; fi; done > $@.tmp +$(d)/src/SUMMARY.md: $(d)/src/SUMMARY.md.in $(d)/src/command-ref/new-cli $(d)/src/contributing/experimental-feature-descriptions.md + @cp $< $@ + @$(call process-includes,$@,$@) + +$(d)/src/command-ref/new-cli: $(d)/nix.json $(d)/utils.nix $(d)/generate-manpage.nix $(bindir)/nix + @rm -rf $@ $@.tmp + $(trace-gen) $(nix-eval) --write-to $@.tmp --expr 'import doc/manual/generate-manpage.nix (builtins.readFile $<)' @mv $@.tmp $@ -$(d)/src/command-ref/new-cli: $(d)/nix.json $(d)/generate-manpage.nix $(bindir)/nix - @rm -rf $@ - $(trace-gen) $(nix-eval) --write-to $@.tmp --expr 'import doc/manual/generate-manpage.nix { toplevel = builtins.readFile $<; }' - @# @docroot@: https://nixos.org/manual/nix/unstable/contributing/hacking.html#docroot-variable - $(trace-gen) sed -i $@.tmp/*.md -e 's^@docroot@^../..^g' - @mv $@.tmp $@ - -$(d)/src/command-ref/conf-file.md: $(d)/conf-file.json $(d)/generate-options.nix $(d)/src/command-ref/conf-file-prefix.md $(bindir)/nix +$(d)/src/command-ref/conf-file.md: $(d)/conf-file.json $(d)/utils.nix $(d)/src/command-ref/conf-file-prefix.md $(d)/src/command-ref/experimental-features-shortlist.md $(bindir)/nix @cat doc/manual/src/command-ref/conf-file-prefix.md > $@.tmp - @# @docroot@: https://nixos.org/manual/nix/unstable/contributing/hacking.html#docroot-variable - $(trace-gen) $(nix-eval) --expr 'import doc/manual/generate-options.nix (builtins.fromJSON (builtins.readFile $<))' \ - | sed -e 's^@docroot@^..^g'>> $@.tmp + $(trace-gen) $(nix-eval) --expr '(import doc/manual/utils.nix).showSettings { useAnchors = true; } (builtins.fromJSON (builtins.readFile $<))' >> $@.tmp; @mv $@.tmp $@ $(d)/nix.json: $(bindir)/nix - $(trace-gen) $(dummy-env) $(bindir)/nix __dump-args > $@.tmp + $(trace-gen) 
$(dummy-env) $(bindir)/nix __dump-cli > $@.tmp @mv $@.tmp $@ $(d)/conf-file.json: $(bindir)/nix $(trace-gen) $(dummy-env) $(bindir)/nix show-config --json --experimental-features nix-command > $@.tmp @mv $@.tmp $@ +$(d)/src/contributing/experimental-feature-descriptions.md: $(d)/xp-features.json $(d)/utils.nix $(d)/generate-xp-features.nix $(bindir)/nix + @rm -rf $@ $@.tmp + $(trace-gen) $(nix-eval) --write-to $@.tmp --expr 'import doc/manual/generate-xp-features.nix (builtins.fromJSON (builtins.readFile $<))' + @mv $@.tmp $@ + +$(d)/src/command-ref/experimental-features-shortlist.md: $(d)/xp-features.json $(d)/utils.nix $(d)/generate-xp-features-shortlist.nix $(bindir)/nix + @rm -rf $@ $@.tmp + $(trace-gen) $(nix-eval) --write-to $@.tmp --expr 'import doc/manual/generate-xp-features-shortlist.nix (builtins.fromJSON (builtins.readFile $<))' + @mv $@.tmp $@ + +$(d)/xp-features.json: $(bindir)/nix + $(trace-gen) $(dummy-env) NIX_PATH=nix/corepkgs=corepkgs $(bindir)/nix __dump-xp-features > $@.tmp + @mv $@.tmp $@ + $(d)/src/language/builtins.md: $(d)/builtins.json $(d)/generate-builtins.nix $(d)/src/language/builtins-prefix.md $(bindir)/nix @cat doc/manual/src/language/builtins-prefix.md > $@.tmp - @# @docroot@: https://nixos.org/manual/nix/unstable/contributing/hacking.html#docroot-variable - $(trace-gen) $(nix-eval) --expr 'import doc/manual/generate-builtins.nix (builtins.fromJSON (builtins.readFile $<))' \ - | sed -e 's^@docroot@^..^g' >> $@.tmp + $(trace-gen) $(nix-eval) --expr 'import doc/manual/generate-builtins.nix (builtins.fromJSON (builtins.readFile $<))' >> $@.tmp; @cat doc/manual/src/language/builtins-suffix.md >> $@.tmp @mv $@.tmp $@ @@ -83,7 +132,8 @@ $(d)/builtins.json: $(bindir)/nix @mv $@.tmp $@ # Generate the HTML manual. -html: $(docdir)/manual/index.html +.PHONY: manual-html +manual-html: $(docdir)/manual/index.html install: $(docdir)/manual/index.html # Generate 'nix' manpages. @@ -91,6 +141,8 @@ install: $(mandir)/man1/nix3-manpages man: doc/manual/generated/man1/nix3-manpages all: doc/manual/generated/man1/nix3-manpages +# FIXME: unify with how the other man pages are generated. +# this one works differently and does not use any of the amenities provided by `/mk/lib.mk`. 
$(mandir)/man1/nix3-manpages: doc/manual/generated/man1/nix3-manpages @mkdir -p $(DESTDIR)$$(dirname $@) $(trace-install) install -m 0644 $$(dirname $<)/* $(DESTDIR)$$(dirname $@) @@ -98,21 +150,31 @@ $(mandir)/man1/nix3-manpages: doc/manual/generated/man1/nix3-manpages doc/manual/generated/man1/nix3-manpages: $(d)/src/command-ref/new-cli @mkdir -p $(DESTDIR)$$(dirname $@) $(trace-gen) for i in doc/manual/src/command-ref/new-cli/*.md; do \ - name=$$(basename $$i .md); \ - tmpFile=$$(mktemp); \ - if [[ $$name = SUMMARY ]]; then continue; fi; \ - printf "Title: %s\n\n" "$$name" > $$tmpFile; \ - cat $$i >> $$tmpFile; \ - lowdown -sT man --nroff-nolinks -M section=1 $$tmpFile -o $(DESTDIR)$$(dirname $@)/$$name.1; \ - rm $$tmpFile; \ + name=$$(basename $$i .md); \ + tmpFile=$$(mktemp); \ + if [[ $$name = SUMMARY ]]; then continue; fi; \ + printf "Title: %s\n\n" "$$name" > $$tmpFile; \ + cat $$i >> $$tmpFile; \ + lowdown -sT man --nroff-nolinks -M section=1 $$tmpFile -o $(DESTDIR)$$(dirname $@)/$$name.1; \ + rm $$tmpFile; \ done @touch $@ -$(docdir)/manual/index.html: $(MANUAL_SRCS) $(d)/book.toml $(d)/anchors.jq $(d)/custom.css $(d)/src/SUMMARY.md $(d)/src/command-ref/new-cli $(d)/src/command-ref/conf-file.md $(d)/src/language/builtins.md +$(docdir)/manual/index.html: $(MANUAL_SRCS) $(d)/book.toml $(d)/anchors.jq $(d)/custom.css $(d)/src/SUMMARY.md $(d)/src/command-ref/new-cli $(d)/src/contributing/experimental-feature-descriptions.md $(d)/src/command-ref/conf-file.md $(d)/src/language/builtins.md $(trace-gen) \ - set -euo pipefail; \ - RUST_LOG=warn mdbook build doc/manual -d $(DESTDIR)$(docdir)/manual.tmp 2>&1 \ - | { grep -Fv "because fragment resolution isn't implemented" || :; } + tmp="$$(mktemp -d)"; \ + cp -r doc/manual "$$tmp"; \ + find "$$tmp" -name '*.md' | while read -r file; do \ + $(call process-includes,$$file,$$file); \ + done; \ + find "$$tmp" -name '*.md' | while read -r file; do \ + docroot="$$(realpath --relative-to="$$(dirname "$$file")" $$tmp/manual/src)"; \ + sed -i "s,@docroot@,$$docroot,g" "$$file"; \ + done; \ + set -euo pipefail; \ + RUST_LOG=warn mdbook build "$$tmp/manual" -d $(DESTDIR)$(docdir)/manual.tmp 2>&1 \ + | { grep -Fv "because fragment resolution isn't implemented" || :; }; \ + rm -rf "$$tmp/manual" @rm -rf $(DESTDIR)$(docdir)/manual @mv $(DESTDIR)$(docdir)/manual.tmp/html $(DESTDIR)$(docdir)/manual @rm -rf $(DESTDIR)$(docdir)/manual.tmp diff --git a/doc/manual/redirects.js b/doc/manual/redirects.js index 69f75d3a0..5cd6fdea2 100644 --- a/doc/manual/redirects.js +++ b/doc/manual/redirects.js @@ -338,6 +338,9 @@ const redirects = { "strings": "#string", "lists": "#list", "attribute-sets": "#attribute-set" + }, + "installation/installing-binary.html": { + "uninstalling": "uninstall.html" } }; diff --git a/doc/manual/src/SUMMARY.md.in b/doc/manual/src/SUMMARY.md.in index b1c551969..1d5613354 100644 --- a/doc/manual/src/SUMMARY.md.in +++ b/doc/manual/src/SUMMARY.md.in @@ -15,6 +15,7 @@ - [Multi-User Mode](installation/multi-user.md) - [Environment Variables](installation/env-variables.md) - [Upgrading Nix](installation/upgrading.md) + - [Uninstalling Nix](installation/uninstall.md) - [Package Management](package-management/package-management.md) - [Basic Package Management](package-management/basic-package-mgmt.md) - [Profiles](package-management/profiles.md) @@ -44,10 +45,41 @@ - [Common Options](command-ref/opt-common.md) - [Common Environment Variables](command-ref/env-common.md) - [Main Commands](command-ref/main-commands.md) - - 
[nix-env](command-ref/nix-env.md) - [nix-build](command-ref/nix-build.md) - [nix-shell](command-ref/nix-shell.md) - [nix-store](command-ref/nix-store.md) + - [nix-store --add-fixed](command-ref/nix-store/add-fixed.md) + - [nix-store --add](command-ref/nix-store/add.md) + - [nix-store --delete](command-ref/nix-store/delete.md) + - [nix-store --dump-db](command-ref/nix-store/dump-db.md) + - [nix-store --dump](command-ref/nix-store/dump.md) + - [nix-store --export](command-ref/nix-store/export.md) + - [nix-store --gc](command-ref/nix-store/gc.md) + - [nix-store --generate-binary-cache-key](command-ref/nix-store/generate-binary-cache-key.md) + - [nix-store --import](command-ref/nix-store/import.md) + - [nix-store --load-db](command-ref/nix-store/load-db.md) + - [nix-store --optimise](command-ref/nix-store/optimise.md) + - [nix-store --print-env](command-ref/nix-store/print-env.md) + - [nix-store --query](command-ref/nix-store/query.md) + - [nix-store --read-log](command-ref/nix-store/read-log.md) + - [nix-store --realise](command-ref/nix-store/realise.md) + - [nix-store --repair-path](command-ref/nix-store/repair-path.md) + - [nix-store --restore](command-ref/nix-store/restore.md) + - [nix-store --serve](command-ref/nix-store/serve.md) + - [nix-store --verify-path](command-ref/nix-store/verify-path.md) + - [nix-store --verify](command-ref/nix-store/verify.md) + - [nix-env](command-ref/nix-env.md) + - [nix-env --delete-generations](command-ref/nix-env/delete-generations.md) + - [nix-env --install](command-ref/nix-env/install.md) + - [nix-env --list-generations](command-ref/nix-env/list-generations.md) + - [nix-env --query](command-ref/nix-env/query.md) + - [nix-env --rollback](command-ref/nix-env/rollback.md) + - [nix-env --set-flag](command-ref/nix-env/set-flag.md) + - [nix-env --set](command-ref/nix-env/set.md) + - [nix-env --switch-generation](command-ref/nix-env/switch-generation.md) + - [nix-env --switch-profile](command-ref/nix-env/switch-profile.md) + - [nix-env --uninstall](command-ref/nix-env/uninstall.md) + - [nix-env --upgrade](command-ref/nix-env/upgrade.md) - [Utilities](command-ref/utilities.md) - [nix-channel](command-ref/nix-channel.md) - [nix-collect-garbage](command-ref/nix-collect-garbage.md) @@ -57,16 +89,19 @@ - [nix-instantiate](command-ref/nix-instantiate.md) - [nix-prefetch-url](command-ref/nix-prefetch-url.md) - [Experimental Commands](command-ref/experimental-commands.md) -@manpages@ +{{#include ./command-ref/new-cli/SUMMARY.md}} - [Files](command-ref/files.md) - [nix.conf](command-ref/conf-file.md) - [Architecture](architecture/architecture.md) - [Glossary](glossary.md) - [Contributing](contributing/contributing.md) - [Hacking](contributing/hacking.md) + - [Experimental Features](contributing/experimental-features.md) - [CLI guideline](contributing/cli-guideline.md) - [Release Notes](release-notes/release-notes.md) - [Release X.Y (202?-??-??)](release-notes/rl-next.md) + - [Release 2.15 (2023-04-11)](release-notes/rl-2.15.md) + - [Release 2.14 (2023-02-28)](release-notes/rl-2.14.md) - [Release 2.13 (2023-01-17)](release-notes/rl-2.13.md) - [Release 2.12 (2022-12-06)](release-notes/rl-2.12.md) - [Release 2.11 (2022-08-25)](release-notes/rl-2.11.md) diff --git a/doc/manual/src/command-ref/env-common.md b/doc/manual/src/command-ref/env-common.md index c5d38db47..bf00be84f 100644 --- a/doc/manual/src/command-ref/env-common.md +++ b/doc/manual/src/command-ref/env-common.md @@ -2,18 +2,29 @@ Most Nix commands interpret the following environment variables: - - 
[`IN_NIX_SHELL`]{#env-IN_NIX_SHELL}\ + - [`IN_NIX_SHELL`](#env-IN_NIX_SHELL)\ Indicator that tells if the current environment was set up by `nix-shell`. It can have the values `pure` or `impure`. - - [`NIX_PATH`]{#env-NIX_PATH}\ + - [`NIX_PATH`](#env-NIX_PATH)\ A colon-separated list of directories used to look up the location of Nix - expressions using [paths](../language/values.md#type-path) + expressions using [paths](@docroot@/language/values.md#type-path) enclosed in angle brackets (i.e., `<nixpkgs>`), e.g. `/home/eelco/Dev:/etc/nixos`. It can be extended using the - [`-I` option](./opt-common.md#opt-I). + [`-I` option](@docroot@/command-ref/opt-common.md#opt-I). - - [`NIX_IGNORE_SYMLINK_STORE`]{#env-NIX_IGNORE_SYMLINK_STORE}\ + If `NIX_PATH` is not set at all, Nix will fall back to the following list in [impure](@docroot@/command-ref/conf-file.md#conf-pure-eval) and [unrestricted](@docroot@/command-ref/conf-file.md#conf-restrict-eval) evaluation mode: + + 1. `$HOME/.nix-defexpr/channels` + 2. `nixpkgs=/nix/var/nix/profiles/per-user/root/channels/nixpkgs` + 3. `/nix/var/nix/profiles/per-user/root/channels` + + If `NIX_PATH` is set to an empty string, resolving search paths will always fail. + For example, attempting to use `<nixpkgs>` will produce: + + error: file 'nixpkgs' was not found in the Nix search path + + - [`NIX_IGNORE_SYMLINK_STORE`](#env-NIX_IGNORE_SYMLINK_STORE)\ Normally, the Nix store directory (typically `/nix/store`) is not allowed to contain any symlink components. This is to prevent “impure” builds. Builders sometimes “canonicalise” paths by @@ -35,58 +46,58 @@ Most Nix commands interpret the following environment variables: Consult the mount 8 manual page for details. - - [`NIX_STORE_DIR`]{#env-NIX_STORE_DIR}\ + - [`NIX_STORE_DIR`](#env-NIX_STORE_DIR)\ Overrides the location of the Nix store (default `prefix/store`). - - [`NIX_DATA_DIR`]{#env-NIX_DATA_DIR}\ + - [`NIX_DATA_DIR`](#env-NIX_DATA_DIR)\ Overrides the location of the Nix static data directory (default `prefix/share`). - - [`NIX_LOG_DIR`]{#env-NIX_LOG_DIR}\ + - [`NIX_LOG_DIR`](#env-NIX_LOG_DIR)\ Overrides the location of the Nix log directory (default `prefix/var/log/nix`). - - [`NIX_STATE_DIR`]{#env-NIX_STATE_DIR}\ + - [`NIX_STATE_DIR`](#env-NIX_STATE_DIR)\ Overrides the location of the Nix state directory (default `prefix/var/nix`). - - [`NIX_CONF_DIR`]{#env-NIX_CONF_DIR}\ + - [`NIX_CONF_DIR`](#env-NIX_CONF_DIR)\ Overrides the location of the system Nix configuration directory (default `prefix/etc/nix`). - - [`NIX_CONFIG`]{#env-NIX_CONFIG}\ + - [`NIX_CONFIG`](#env-NIX_CONFIG)\ Applies settings from Nix configuration from the environment. The content is treated as if it was read from a Nix configuration file. Settings are separated by the newline character. - - [`NIX_USER_CONF_FILES`]{#env-NIX_USER_CONF_FILES}\ + - [`NIX_USER_CONF_FILES`](#env-NIX_USER_CONF_FILES)\ Overrides the location of the user Nix configuration files to load from (defaults to the XDG spec locations). The variable is treated as a list separated by the `:` token. - - [`TMPDIR`]{#env-TMPDIR}\ + - [`TMPDIR`](#env-TMPDIR)\ Use the specified directory to store temporary files. In particular, this includes temporary build directories; these can take up substantial amounts of disk space. The default is `/tmp`. - - [`NIX_REMOTE`]{#env-NIX_REMOTE}\ + - [`NIX_REMOTE`](#env-NIX_REMOTE)\ This variable should be set to `daemon` if you want to use the Nix daemon to execute Nix operations. 
This is necessary in [multi-user - Nix installations](../installation/multi-user.md). If the Nix + Nix installations](@docroot@/installation/multi-user.md). If the Nix daemon's Unix socket is at some non-standard path, this variable should be set to `unix://path/to/socket`. Otherwise, it should be left unset. - - [`NIX_SHOW_STATS`]{#env-NIX_SHOW_STATS}\ + - [`NIX_SHOW_STATS`](#env-NIX_SHOW_STATS)\ If set to `1`, Nix will print some evaluation statistics, such as the number of values allocated. - - [`NIX_COUNT_CALLS`]{#env-NIX_COUNT_CALLS}\ + - [`NIX_COUNT_CALLS`](#env-NIX_COUNT_CALLS)\ If set to `1`, Nix will print how often functions were called during Nix expression evaluation. This is useful for profiling your Nix expressions. - - [`GC_INITIAL_HEAP_SIZE`]{#env-GC_INITIAL_HEAP_SIZE}\ + - [`GC_INITIAL_HEAP_SIZE`](#env-GC_INITIAL_HEAP_SIZE)\ If Nix has been configured to use the Boehm garbage collector, this variable sets the initial size of the heap in bytes. It defaults to 384 MiB. Setting it to a low value reduces memory consumption, but @@ -103,4 +114,4 @@ New Nix commands conform to the [XDG Base Directory Specification], and use the Classic Nix commands can also be made to follow this standard using the [`use-xdg-base-directories`] configuration option. [XDG Base Directory Specification]: https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html -[`use-xdg-base-directories`]: ../command-ref/conf-file.md#conf-use-xdg-base-directories \ No newline at end of file +[`use-xdg-base-directories`]: @docroot@/command-ref/conf-file.md#conf-use-xdg-base-directories \ No newline at end of file diff --git a/doc/manual/src/command-ref/experimental-commands.md b/doc/manual/src/command-ref/experimental-commands.md index cfa6f8b73..286ddc6d6 100644 --- a/doc/manual/src/command-ref/experimental-commands.md +++ b/doc/manual/src/command-ref/experimental-commands.md @@ -1,6 +1,6 @@ # Experimental Commands -This section lists experimental commands. +This section lists [experimental commands](@docroot@/contributing/experimental-features.md#xp-feature-nix-command). > **Warning** > diff --git a/doc/manual/src/command-ref/nix-build.md b/doc/manual/src/command-ref/nix-build.md index 937b046b8..44de4cf53 100644 --- a/doc/manual/src/command-ref/nix-build.md +++ b/doc/manual/src/command-ref/nix-build.md @@ -38,7 +38,7 @@ directory containing at least a file named `default.nix`. `nix-build` is essentially a wrapper around [`nix-instantiate`](nix-instantiate.md) (to translate a high-level Nix expression to a low-level [store derivation]) and [`nix-store ---realise`](nix-store.md#operation---realise) (to build the store +--realise`](@docroot@/command-ref/nix-store/realise.md) (to build the store derivation). [store derivation]: ../glossary.md#gloss-store-derivation @@ -51,9 +51,8 @@ derivation). # Options -All options not listed here are passed to `nix-store ---realise`, except for `--arg` and `--attr` / `-A` which are passed to -`nix-instantiate`. +All options not listed here are passed to `nix-store --realise`, +except for `--arg` and `--attr` / `-A` which are passed to `nix-instantiate`. - [`--no-out-link`](#opt-no-out-link) @@ -70,7 +69,9 @@ All options not listed here are passed to `nix-store Change the name of the symlink to the output path created from `result` to *outlink*. 
-The following common options are supported: +{{#include ./opt-common.md}} + +{{#include ./env-common.md}} # Examples diff --git a/doc/manual/src/command-ref/nix-channel.md b/doc/manual/src/command-ref/nix-channel.md index 24353525f..2027cc98d 100644 --- a/doc/manual/src/command-ref/nix-channel.md +++ b/doc/manual/src/command-ref/nix-channel.md @@ -45,6 +45,10 @@ Note that `--add` does not automatically perform an update. The list of subscribed channels is stored in `~/.nix-channels`. +{{#include ./opt-common.md}} + +{{#include ./env-common.md}} + # Examples To subscribe to the Nixpkgs channel and install the GNU Hello package: @@ -70,7 +74,7 @@ $ nix-instantiate --eval -E '(import {}).lib.version' # Files - - `/nix/var/nix/profiles/per-user/username/channels`\ + - `${XDG_STATE_HOME-$HOME/.local/state}/nix/profiles/channels`\ `nix-channel` uses a `nix-env` profile to keep track of previous versions of the subscribed channels. Every time you run `nix-channel --update`, a new channel generation (that is, a symlink to the @@ -79,7 +83,7 @@ $ nix-instantiate --eval -E '(import {}).lib.version' - `~/.nix-defexpr/channels`\ This is a symlink to - `/nix/var/nix/profiles/per-user/username/channels`. It ensures that + `${XDG_STATE_HOME-$HOME/.local/state}/nix/profiles/channels`. It ensures that `nix-env` can find your channels. In a multi-user installation, you may also have `~/.nix-defexpr/channels_root`, which links to the channels of the root user. diff --git a/doc/manual/src/command-ref/nix-collect-garbage.md b/doc/manual/src/command-ref/nix-collect-garbage.md index 296165993..51db5fc67 100644 --- a/doc/manual/src/command-ref/nix-collect-garbage.md +++ b/doc/manual/src/command-ref/nix-collect-garbage.md @@ -9,7 +9,7 @@ # Description The command `nix-collect-garbage` is mostly an alias of [`nix-store ---gc`](nix-store.md#operation---gc), that is, it deletes all +--gc`](@docroot@/command-ref/nix-store/gc.md), that is, it deletes all unreachable paths in the Nix store to clean up your system. However, it provides two additional options: `-d` (`--delete-old`), which deletes all old generations of all profiles in `/nix/var/nix/profiles` @@ -20,6 +20,10 @@ and `--delete-older-than` *period*, where period is a value such as of days in all profiles in `/nix/var/nix/profiles` (except for the generations that were active at that point in time). +{{#include ./opt-common.md}} + +{{#include ./env-common.md}} + # Example To delete from the Nix store everything that is not used by the current diff --git a/doc/manual/src/command-ref/nix-copy-closure.md b/doc/manual/src/command-ref/nix-copy-closure.md index cd8e351bb..0801e8246 100644 --- a/doc/manual/src/command-ref/nix-copy-closure.md +++ b/doc/manual/src/command-ref/nix-copy-closure.md @@ -63,12 +63,16 @@ authentication, you can avoid typing the passphrase with `ssh-agent`. - `-v`\ Show verbose output. +{{#include ./opt-common.md}} + # Environment variables - `NIX_SSHOPTS`\ Additional options to be passed to `ssh` on the command line. 
+{{#include ./env-common.md}} + # Examples Copy Firefox with all its dependencies to a remote machine: diff --git a/doc/manual/src/command-ref/nix-env.md b/doc/manual/src/command-ref/nix-env.md index f4fa5b50c..42b5bca77 100644 --- a/doc/manual/src/command-ref/nix-env.md +++ b/doc/manual/src/command-ref/nix-env.md @@ -4,15 +4,14 @@ # Synopsis -`nix-env` +`nix-env` *operation* [*options*] [*arguments…*] [`--option` *name* *value*] [`--arg` *name* *value*] [`--argstr` *name* *value*] [{`--file` | `-f`} *path*] - [{`--profile` | `-p`} *path(] + [{`--profile` | `-p`} *path*] [`--system-filter` *system*] [`--dry-run`] - *operation* [*options…*] [*arguments…*] # Description @@ -24,7 +23,29 @@ environments: different users can have different environments, and individual users can switch between different environments. `nix-env` takes exactly one *operation* flag which indicates the -subcommand to be performed. These are documented below. +subcommand to be performed. The following operations are available: + +- [`--install`](./nix-env/install.md) +- [`--upgrade`](./nix-env/upgrade.md) +- [`--uninstall`](./nix-env/uninstall.md) +- [`--set`](./nix-env/set.md) +- [`--set-flag`](./nix-env/set-flag.md) +- [`--query`](./nix-env/query.md) +- [`--switch-profile`](./nix-env/switch-profile.md) +- [`--list-generations`](./nix-env/list-generations.md) +- [`--delete-generations`](./nix-env/delete-generations.md) +- [`--switch-generation`](./nix-env/switch-generation.md) +- [`--rollback`](./nix-env/rollback.md) + +These pages can be viewed offline: + +- `man nix-env-`. + + Example: `man nix-env-install` + +- `nix-env --help --` + + Example: `nix-env --help --install` # Selectors @@ -60,46 +81,6 @@ match. Here are some examples: Matches any package name containing the strings `firefox` or `chromium`. -# Common options - -This section lists the options that are common to all operations. These -options are allowed for every subcommand, though they may not always -have an effect. - - - `--file` / `-f` *path*\ - Specifies the Nix expression (designated below as the *active Nix - expression*) used by the `--install`, `--upgrade`, and `--query - --available` operations to obtain derivations. The default is - `~/.nix-defexpr`. - - If the argument starts with `http://` or `https://`, it is - interpreted as the URL of a tarball that will be downloaded and - unpacked to a temporary location. The tarball must include a single - top-level directory containing at least a file named `default.nix`. - - - `--profile` / `-p` *path*\ - Specifies the profile to be used by those operations that operate on - a profile (designated below as the *active profile*). A profile is a - sequence of user environments called *generations*, one of which is - the *current generation*. - - - `--dry-run`\ - For the `--install`, `--upgrade`, `--uninstall`, - `--switch-generation`, `--delete-generations` and `--rollback` - operations, this flag will cause `nix-env` to print what *would* be - done if this flag had not been specified, without actually doing it. - - `--dry-run` also prints out which paths will be - [substituted](../glossary.md) (i.e., downloaded) and which paths - will be built from source (because no substitute is available). - - - `--system-filter` *system*\ - By default, operations such as `--query - --available` show derivations matching any platform. This option - allows you to use derivations for the specified platform *system*. - - - # Files - `~/.nix-defexpr`\ @@ -145,750 +126,3 @@ have an effect. 
symlink points to `prefix/var/nix/profiles/default`. The `PATH` environment variable should include `~/.nix-profile/bin` for the user environment to be visible to the user. - -# Operation `--install` - -## Synopsis - -`nix-env` {`--install` | `-i`} *args…* - [{`--prebuilt-only` | `-b`}] - [{`--attr` | `-A`}] - [`--from-expression`] [`-E`] - [`--from-profile` *path*] - [`--preserve-installed` | `-P`] - [`--remove-all` | `-r`] - -## Description - -The install operation creates a new user environment, based on the -current generation of the active profile, to which a set of store paths -described by *args* is added. The arguments *args* map to store paths in -a number of possible ways: - - - By default, *args* is a set of derivation names denoting derivations - in the active Nix expression. These are realised, and the resulting - output paths are installed. Currently installed derivations with a - name equal to the name of a derivation being added are removed - unless the option `--preserve-installed` is specified. - - If there are multiple derivations matching a name in *args* that - have the same name (e.g., `gcc-3.3.6` and `gcc-4.1.1`), then the - derivation with the highest *priority* is used. A derivation can - define a priority by declaring the `meta.priority` attribute. This - attribute should be a number, with a higher value denoting a lower - priority. The default priority is `0`. - - If there are multiple matching derivations with the same priority, - then the derivation with the highest version will be installed. - - You can force the installation of multiple derivations with the same - name by being specific about the versions. For instance, `nix-env -i - gcc-3.3.6 gcc-4.1.1` will install both version of GCC (and will - probably cause a user environment conflict\!). - - - If `--attr` (`-A`) is specified, the arguments are *attribute - paths* that select attributes from the top-level Nix - expression. This is faster than using derivation names and - unambiguous. To find out the attribute paths of available - packages, use `nix-env -qaP`. - - - If `--from-profile` *path* is given, *args* is a set of names - denoting installed store paths in the profile *path*. This is an - easy way to copy user environment elements from one profile to - another. - - - If `--from-expression` is given, *args* are Nix - [functions](../language/constructs.md#functions) - that are called with the active Nix expression as their single - argument. The derivations returned by those function calls are - installed. This allows derivations to be specified in an - unambiguous way, which is necessary if there are multiple - derivations with the same name. - - - If *args* are [store derivation]s, then these are - [realised](nix-store.md#operation---realise), and the resulting output paths - are installed. - - [store derivation]: ../glossary.md#gloss-store-derivation - - - If *args* are store paths that are not store derivations, then these - are [realised](nix-store.md#operation---realise) and installed. - - - By default all outputs are installed for each derivation. That can - be reduced by setting `meta.outputsToInstall`. - -## Flags - - - `--prebuilt-only` / `-b`\ - Use only derivations for which a substitute is registered, i.e., - there is a pre-built binary available that can be downloaded in lieu - of building the derivation. Thus, no packages will be built from - source. - - - `--preserve-installed`; `-P`\ - Do not remove derivations with a name matching one of the - derivations being installed. 
Usually, trying to have two versions of - the same package installed in the same generation of a profile will - lead to an error in building the generation, due to file name - clashes between the two versions. However, this is not the case for - all packages. - - - `--remove-all`; `-r`\ - Remove all previously installed packages first. This is equivalent - to running `nix-env -e '.*'` first, except that everything happens - in a single transaction. - -## Examples - -To install a package using a specific attribute path from the active Nix expression: - -```console -$ nix-env -iA gcc40mips -installing `gcc-4.0.2' -$ nix-env -iA xorg.xorgserver -installing `xorg-server-1.2.0' -``` - -To install a specific version of `gcc` using the derivation name: - -```console -$ nix-env --install gcc-3.3.2 -installing `gcc-3.3.2' -uninstalling `gcc-3.1' -``` - -Using attribute path for selecting a package is preferred, -as it is much faster and there will not be multiple matches. - -Note the previously installed version is removed, since -`--preserve-installed` was not specified. - -To install an arbitrary version: - -```console -$ nix-env --install gcc -installing `gcc-3.3.2' -``` - -To install all derivations in the Nix expression `foo.nix`: - -```console -$ nix-env -f ~/foo.nix -i '.*' -``` - -To copy the store path with symbolic name `gcc` from another profile: - -```console -$ nix-env -i --from-profile /nix/var/nix/profiles/foo gcc -``` - -To install a specific [store derivation] (typically created by -`nix-instantiate`): - -```console -$ nix-env -i /nix/store/fibjb1bfbpm5mrsxc4mh2d8n37sxh91i-gcc-3.4.3.drv -``` - -To install a specific output path: - -```console -$ nix-env -i /nix/store/y3cgx0xj1p4iv9x0pnnmdhr8iyg741vk-gcc-3.4.3 -``` - -To install from a Nix expression specified on the command-line: - -```console -$ nix-env -f ./foo.nix -i -E \ - 'f: (f {system = "i686-linux";}).subversionWithJava' -``` - -I.e., this evaluates to `(f: (f {system = -"i686-linux";}).subversionWithJava) (import ./foo.nix)`, thus selecting -the `subversionWithJava` attribute from the set returned by calling the -function defined in `./foo.nix`. - -A dry-run tells you which paths will be downloaded or built from source: - -```console -$ nix-env -f '' -iA hello --dry-run -(dry run; not doing anything) -installing ‘hello-2.10’ -this path will be fetched (0.04 MiB download, 0.19 MiB unpacked): - /nix/store/wkhdf9jinag5750mqlax6z2zbwhqb76n-hello-2.10 - ... -``` - -To install Firefox from the latest revision in the Nixpkgs/NixOS 14.12 -channel: - -```console -$ nix-env -f https://github.com/NixOS/nixpkgs/archive/nixos-14.12.tar.gz -iA firefox -``` - -# Operation `--upgrade` - -## Synopsis - -`nix-env` {`--upgrade` | `-u`} *args* - [`--lt` | `--leq` | `--eq` | `--always`] - [{`--prebuilt-only` | `-b`}] - [{`--attr` | `-A`}] - [`--from-expression`] [`-E`] - [`--from-profile` *path*] - [`--preserve-installed` | `-P`] - -## Description - -The upgrade operation creates a new user environment, based on the -current generation of the active profile, in which all store paths are -replaced for which there are newer versions in the set of paths -described by *args*. Paths for which there are no newer versions are -left untouched; this is not an error. It is also not an error if an -element of *args* matches no installed derivations. - -For a description of how *args* is mapped to a set of store paths, see -[`--install`](#operation---install). 
If *args* describes multiple -store paths with the same symbolic name, only the one with the highest -version is installed. - -## Flags - - - `--lt`\ - Only upgrade a derivation to newer versions. This is the default. - - - `--leq`\ - In addition to upgrading to newer versions, also “upgrade” to - derivations that have the same version. Version are not a unique - identification of a derivation, so there may be many derivations - that have the same version. This flag may be useful to force - “synchronisation” between the installed and available derivations. - - - `--eq`\ - *Only* “upgrade” to derivations that have the same version. This may - not seem very useful, but it actually is, e.g., when there is a new - release of Nixpkgs and you want to replace installed applications - with the same versions built against newer dependencies (to reduce - the number of dependencies floating around on your system). - - - `--always`\ - In addition to upgrading to newer versions, also “upgrade” to - derivations that have the same or a lower version. I.e., derivations - may actually be downgraded depending on what is available in the - active Nix expression. - -For the other flags, see `--install`. - -## Examples - -```console -$ nix-env --upgrade -A nixpkgs.gcc -upgrading `gcc-3.3.1' to `gcc-3.4' -``` - -When there are no updates available, nothing will happen: - -```console -$ nix-env --upgrade -A nixpkgs.pan -``` - -Using `-A` is preferred when possible, as it is faster and unambiguous but -it is also possible to upgrade to a specific version by matching the derivation name: - -```console -$ nix-env -u gcc-3.3.2 --always -upgrading `gcc-3.4' to `gcc-3.3.2' -``` - -To try to upgrade everything -(matching packages based on the part of the derivation name without version): - -```console -$ nix-env -u -upgrading `hello-2.1.2' to `hello-2.1.3' -upgrading `mozilla-1.2' to `mozilla-1.4' -``` - -## Versions - -The upgrade operation determines whether a derivation `y` is an upgrade -of a derivation `x` by looking at their respective `name` attributes. -The names (e.g., `gcc-3.3.1` are split into two parts: the package name -(`gcc`), and the version (`3.3.1`). The version part starts after the -first dash not followed by a letter. `y` is considered an upgrade of `x` -if their package names match, and the version of `y` is higher than that -of `x`. - -The versions are compared by splitting them into contiguous components -of numbers and letters. E.g., `3.3.1pre5` is split into `[3, 3, 1, -"pre", 5]`. These lists are then compared lexicographically (from left -to right). Corresponding components `a` and `b` are compared as follows. -If they are both numbers, integer comparison is used. If `a` is an empty -string and `b` is a number, `a` is considered less than `b`. The special -string component `pre` (for *pre-release*) is considered to be less than -other components. String components are considered less than number -components. Otherwise, they are compared lexicographically (i.e., using -case-sensitive string comparison). 
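For reference, the same ordering is exposed in the Nix language as `builtins.compareVersions`, which makes it easy to spot-check individual comparisons from the command line (`-1` means the first version sorts before the second, `1` that it sorts after):

```console
$ nix-instantiate --eval --expr 'builtins.compareVersions "2.3pre1" "2.3"'
-1
$ nix-instantiate --eval --expr 'builtins.compareVersions "2.3.1" "2.3"'
1
```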
- -This is illustrated by the following examples: - - 1.0 < 2.3 - 2.1 < 2.3 - 2.3 = 2.3 - 2.5 > 2.3 - 3.1 > 2.3 - 2.3.1 > 2.3 - 2.3.1 > 2.3a - 2.3pre1 < 2.3 - 2.3pre3 < 2.3pre12 - 2.3a < 2.3c - 2.3pre1 < 2.3c - 2.3pre1 < 2.3q - -# Operation `--uninstall` - -## Synopsis - -`nix-env` {`--uninstall` | `-e`} *drvnames…* - -## Description - -The uninstall operation creates a new user environment, based on the -current generation of the active profile, from which the store paths -designated by the symbolic names *drvnames* are removed. - -## Examples - -```console -$ nix-env --uninstall gcc -$ nix-env -e '.*' (remove everything) -``` - -# Operation `--set` - -## Synopsis - -`nix-env` `--set` *drvname* - -## Description - -The `--set` operation modifies the current generation of a profile so -that it contains exactly the specified derivation, and nothing else. - -## Examples - -The following updates a profile such that its current generation will -contain just Firefox: - -```console -$ nix-env -p /nix/var/nix/profiles/browser --set firefox -``` - -# Operation `--set-flag` - -## Synopsis - -`nix-env` `--set-flag` *name* *value* *drvnames* - -## Description - -The `--set-flag` operation allows meta attributes of installed packages -to be modified. There are several attributes that can be usefully -modified, because they affect the behaviour of `nix-env` or the user -environment build script: - - - `priority` can be changed to resolve filename clashes. The user - environment build script uses the `meta.priority` attribute of - derivations to resolve filename collisions between packages. Lower - priority values denote a higher priority. For instance, the GCC - wrapper package and the Binutils package in Nixpkgs both have a file - `bin/ld`, so previously if you tried to install both you would get a - collision. Now, on the other hand, the GCC wrapper declares a higher - priority than Binutils, so the former’s `bin/ld` is symlinked in the - user environment. - - - `keep` can be set to `true` to prevent the package from being - upgraded or replaced. This is useful if you want to hang on to an - older version of a package. - - - `active` can be set to `false` to “disable” the package. That is, no - symlinks will be generated to the files of the package, but it - remains part of the profile (so it won’t be garbage-collected). It - can be set back to `true` to re-enable the package. - -## Examples - -To prevent the currently installed Firefox from being upgraded: - -```console -$ nix-env --set-flag keep true firefox -``` - -After this, `nix-env -u` will ignore Firefox. - -To disable the currently installed Firefox, then install a new Firefox -while the old remains part of the profile: - -```console -$ nix-env -q -firefox-2.0.0.9 (the current one) - -$ nix-env --preserve-installed -i firefox-2.0.0.11 -installing `firefox-2.0.0.11' -building path(s) `/nix/store/myy0y59q3ig70dgq37jqwg1j0rsapzsl-user-environment' -collision between `/nix/store/...-firefox-2.0.0.11/bin/firefox' - and `/nix/store/...-firefox-2.0.0.9/bin/firefox'. 
-(i.e., can’t have two active at the same time) - -$ nix-env --set-flag active false firefox -setting flag on `firefox-2.0.0.9' - -$ nix-env --preserve-installed -i firefox-2.0.0.11 -installing `firefox-2.0.0.11' - -$ nix-env -q -firefox-2.0.0.11 (the enabled one) -firefox-2.0.0.9 (the disabled one) -``` - -To make files from `binutils` take precedence over files from `gcc`: - -```console -$ nix-env --set-flag priority 5 binutils -$ nix-env --set-flag priority 10 gcc -``` - -# Operation `--query` - -## Synopsis - -`nix-env` {`--query` | `-q`} *names…* - [`--installed` | `--available` | `-a`] - [{`--status` | `-s`}] - [{`--attr-path` | `-P`}] - [`--no-name`] - [{`--compare-versions` | `-c`}] - [`--system`] - [`--drv-path`] - [`--out-path`] - [`--description`] - [`--meta`] - [`--xml`] - [`--json`] - [{`--prebuilt-only` | `-b`}] - [{`--attr` | `-A`} *attribute-path*] - -## Description - -The query operation displays information about either the store paths -that are installed in the current generation of the active profile -(`--installed`), or the derivations that are available for installation -in the active Nix expression (`--available`). It only prints information -about derivations whose symbolic name matches one of *names*. - -The derivations are sorted by their `name` attributes. - -## Source selection - -The following flags specify the set of things on which the query -operates. - - - `--installed`\ - The query operates on the store paths that are installed in the - current generation of the active profile. This is the default. - - - `--available`; `-a`\ - The query operates on the derivations that are available in the - active Nix expression. - -## Queries - -The following flags specify what information to display about the -selected derivations. Multiple flags may be specified, in which case the -information is shown in the order given here. Note that the name of the -derivation is shown unless `--no-name` is specified. - - - `--xml`\ - Print the result in an XML representation suitable for automatic - processing by other tools. The root element is called `items`, which - contains a `item` element for each available or installed - derivation. The fields discussed below are all stored in attributes - of the `item` elements. - - - `--json`\ - Print the result in a JSON representation suitable for automatic - processing by other tools. - - - `--prebuilt-only` / `-b`\ - Show only derivations for which a substitute is registered, i.e., - there is a pre-built binary available that can be downloaded in lieu - of building the derivation. Thus, this shows all packages that - probably can be installed quickly. - - - `--status`; `-s`\ - Print the *status* of the derivation. The status consists of three - characters. The first is `I` or `-`, indicating whether the - derivation is currently installed in the current generation of the - active profile. This is by definition the case for `--installed`, - but not for `--available`. The second is `P` or `-`, indicating - whether the derivation is present on the system. This indicates - whether installation of an available derivation will require the - derivation to be built. The third is `S` or `-`, indicating whether - a substitute is available for the derivation. - - - `--attr-path`; `-P`\ - Print the *attribute path* of the derivation, which can be used to - unambiguously select it using the `--attr` option available in - commands that install derivations like `nix-env --install`. 
This - option only works together with `--available` - - - `--no-name`\ - Suppress printing of the `name` attribute of each derivation. - - - `--compare-versions` / `-c`\ - Compare installed versions to available versions, or vice versa (if - `--available` is given). This is useful for quickly seeing whether - upgrades for installed packages are available in a Nix expression. A - column is added with the following meaning: - - - `<` *version*\ - A newer version of the package is available or installed. - - - `=` *version*\ - At most the same version of the package is available or - installed. - - - `>` *version*\ - Only older versions of the package are available or installed. - - - `- ?`\ - No version of the package is available or installed. - - - `--system`\ - Print the `system` attribute of the derivation. - - - `--drv-path`\ - Print the path of the [store derivation]. - - - `--out-path`\ - Print the output path of the derivation. - - - `--description`\ - Print a short (one-line) description of the derivation, if - available. The description is taken from the `meta.description` - attribute of the derivation. - - - `--meta`\ - Print all of the meta-attributes of the derivation. This option is - only available with `--xml` or `--json`. - -## Examples - -To show installed packages: - -```console -$ nix-env -q -bison-1.875c -docbook-xml-4.2 -firefox-1.0.4 -MPlayer-1.0pre7 -ORBit2-2.8.3 -… -``` - -To show available packages: - -```console -$ nix-env -qa -firefox-1.0.7 -GConf-2.4.0.1 -MPlayer-1.0pre7 -ORBit2-2.8.3 -… -``` - -To show the status of available packages: - -```console -$ nix-env -qas --P- firefox-1.0.7 (not installed but present) ---S GConf-2.4.0.1 (not present, but there is a substitute for fast installation) ---S MPlayer-1.0pre3 (i.e., this is not the installed MPlayer, even though the version is the same!) -IP- ORBit2-2.8.3 (installed and by definition present) -… -``` - -To show available packages in the Nix expression `foo.nix`: - -```console -$ nix-env -f ./foo.nix -qa -foo-1.2.3 -``` - -To compare installed versions to what’s available: - -```console -$ nix-env -qc -... -acrobat-reader-7.0 - ? (package is not available at all) -autoconf-2.59 = 2.59 (same version) -firefox-1.0.4 < 1.0.7 (a more recent version is available) -... -``` - -To show all packages with “`zip`” in the name: - -```console -$ nix-env -qa '.*zip.*' -bzip2-1.0.6 -gzip-1.6 -zip-3.0 -… -``` - -To show all packages with “`firefox`” or “`chromium`” in the name: - -```console -$ nix-env -qa '.*(firefox|chromium).*' -chromium-37.0.2062.94 -chromium-beta-38.0.2125.24 -firefox-32.0.3 -firefox-with-plugins-13.0.1 -… -``` - -To show all packages in the latest revision of the Nixpkgs repository: - -```console -$ nix-env -f https://github.com/NixOS/nixpkgs/archive/master.tar.gz -qa -``` - -# Operation `--switch-profile` - -## Synopsis - -`nix-env` {`--switch-profile` | `-S`} *path* - -## Description - -This operation makes *path* the current profile for the user. That is, -the symlink `~/.nix-profile` is made to point to *path*. - -## Examples - -```console -$ nix-env -S ~/my-profile -``` - -# Operation `--list-generations` - -## Synopsis - -`nix-env` `--list-generations` - -## Description - -This operation print a list of all the currently existing generations -for the active profile. These may be switched to using the -`--switch-generation` operation. It also prints the creation date of the -generation, and indicates the current generation. 
- -## Examples - -```console -$ nix-env --list-generations - 95 2004-02-06 11:48:24 - 96 2004-02-06 11:49:01 - 97 2004-02-06 16:22:45 - 98 2004-02-06 16:24:33 (current) -``` - -# Operation `--delete-generations` - -## Synopsis - -`nix-env` `--delete-generations` *generations* - -## Description - -This operation deletes the specified generations of the current profile. -The generations can be a list of generation numbers, the special value -`old` to delete all non-current generations, a value such as `30d` to -delete all generations older than the specified number of days (except -for the generation that was active at that point in time), or a value -such as `+5` to keep the last `5` generations ignoring any newer than -current, e.g., if `30` is the current generation `+5` will delete -generation `25` and all older generations. Periodically deleting old -generations is important to make garbage collection effective. - -## Examples - -```console -$ nix-env --delete-generations 3 4 8 -``` - -```console -$ nix-env --delete-generations +5 -``` - -```console -$ nix-env --delete-generations 30d -``` - -```console -$ nix-env -p other_profile --delete-generations old -``` - -# Operation `--switch-generation` - -## Synopsis - -`nix-env` {`--switch-generation` | `-G`} *generation* - -## Description - -This operation makes generation number *generation* the current -generation of the active profile. That is, if the `profile` is the path -to the active profile, then the symlink `profile` is made to point to -`profile-generation-link`, which is in turn a symlink to the actual user -environment in the Nix store. - -Switching will fail if the specified generation does not exist. - -## Examples - -```console -$ nix-env -G 42 -switching from generation 50 to 42 -``` - -# Operation `--rollback` - -## Synopsis - -`nix-env` `--rollback` - -## Description - -This operation switches to the “previous” generation of the active -profile, that is, the highest numbered generation lower than the current -generation, if it exists. It is just a convenience wrapper around -`--list-generations` and `--switch-generation`. - -## Examples - -```console -$ nix-env --rollback -switching from generation 92 to 91 -``` - -```console -$ nix-env --rollback -error: no generation older than the current (91) exists -``` - -# Environment variables - - - `NIX_PROFILE`\ - Location of the Nix profile. Defaults to the target of the symlink - `~/.nix-profile`, if it exists, or `/nix/var/nix/profiles/default` - otherwise. diff --git a/doc/manual/src/command-ref/nix-env/delete-generations.md b/doc/manual/src/command-ref/nix-env/delete-generations.md new file mode 100644 index 000000000..6f0af5384 --- /dev/null +++ b/doc/manual/src/command-ref/nix-env/delete-generations.md @@ -0,0 +1,46 @@ +# Name + +`nix-env --delete-generations` - delete profile generations + +# Synopsis + +`nix-env` `--delete-generations` *generations* + +# Description + +This operation deletes the specified generations of the current profile. +The generations can be a list of generation numbers, the special value +`old` to delete all non-current generations, a value such as `30d` to +delete all generations older than the specified number of days (except +for the generation that was active at that point in time), or a value +such as `+5` to keep the last `5` generations ignoring any newer than +current, e.g., if `30` is the current generation `+5` will delete +generation `25` and all older generations. 
Periodically deleting old +generations is important to make garbage collection effective. + +{{#include ./opt-common.md}} + +{{#include ../opt-common.md}} + +{{#include ./env-common.md}} + +{{#include ../env-common.md}} + +# Examples + +```console +$ nix-env --delete-generations 3 4 8 +``` + +```console +$ nix-env --delete-generations +5 +``` + +```console +$ nix-env --delete-generations 30d +``` + +```console +$ nix-env -p other_profile --delete-generations old +``` + diff --git a/doc/manual/src/command-ref/nix-env/env-common.md b/doc/manual/src/command-ref/nix-env/env-common.md new file mode 100644 index 000000000..735817959 --- /dev/null +++ b/doc/manual/src/command-ref/nix-env/env-common.md @@ -0,0 +1,6 @@ +# Environment variables + +- `NIX_PROFILE`\ + Location of the Nix profile. Defaults to the target of the symlink + `~/.nix-profile`, if it exists, or `/nix/var/nix/profiles/default` + otherwise. diff --git a/doc/manual/src/command-ref/nix-env/install.md b/doc/manual/src/command-ref/nix-env/install.md new file mode 100644 index 000000000..d754accfe --- /dev/null +++ b/doc/manual/src/command-ref/nix-env/install.md @@ -0,0 +1,187 @@ +# Name + +`nix-env --install` - add packages to user environment + +# Synopsis + +`nix-env` {`--install` | `-i`} *args…* + [{`--prebuilt-only` | `-b`}] + [{`--attr` | `-A`}] + [`--from-expression`] [`-E`] + [`--from-profile` *path*] + [`--preserve-installed` | `-P`] + [`--remove-all` | `-r`] + +# Description + +The install operation creates a new user environment, based on the +current generation of the active profile, to which a set of store paths +described by *args* is added. The arguments *args* map to store paths in +a number of possible ways: + + - By default, *args* is a set of derivation names denoting derivations + in the active Nix expression. These are realised, and the resulting + output paths are installed. Currently installed derivations with a + name equal to the name of a derivation being added are removed + unless the option `--preserve-installed` is specified. + + If there are multiple derivations matching a name in *args* that + have the same name (e.g., `gcc-3.3.6` and `gcc-4.1.1`), then the + derivation with the highest *priority* is used. A derivation can + define a priority by declaring the `meta.priority` attribute. This + attribute should be a number, with a higher value denoting a lower + priority. The default priority is `0`. + + If there are multiple matching derivations with the same priority, + then the derivation with the highest version will be installed. + + You can force the installation of multiple derivations with the same + name by being specific about the versions. For instance, `nix-env -i + gcc-3.3.6 gcc-4.1.1` will install both version of GCC (and will + probably cause a user environment conflict\!). + + - If `--attr` (`-A`) is specified, the arguments are *attribute + paths* that select attributes from the top-level Nix + expression. This is faster than using derivation names and + unambiguous. To find out the attribute paths of available + packages, use `nix-env -qaP`. + + - If `--from-profile` *path* is given, *args* is a set of names + denoting installed store paths in the profile *path*. This is an + easy way to copy user environment elements from one profile to + another. + + - If `--from-expression` is given, *args* are Nix + [functions](@docroot@/language/constructs.md#functions) + that are called with the active Nix expression as their single + argument. 
The derivations returned by those function calls are + installed. This allows derivations to be specified in an + unambiguous way, which is necessary if there are multiple + derivations with the same name. + + - If *args* are [store derivations](@docroot@/glossary.md#gloss-store-derivation), then these are + [realised](@docroot@/command-ref/nix-store/realise.md), and the resulting output paths + are installed. + + - If *args* are store paths that are not store derivations, then these + are [realised](@docroot@/command-ref/nix-store/realise.md) and installed. + + - By default all outputs are installed for each derivation. That can + be reduced by setting `meta.outputsToInstall`. + +# Flags + + - `--prebuilt-only` / `-b`\ + Use only derivations for which a substitute is registered, i.e., + there is a pre-built binary available that can be downloaded in lieu + of building the derivation. Thus, no packages will be built from + source. + + - `--preserve-installed` / `-P`\ + Do not remove derivations with a name matching one of the + derivations being installed. Usually, trying to have two versions of + the same package installed in the same generation of a profile will + lead to an error in building the generation, due to file name + clashes between the two versions. However, this is not the case for + all packages. + + - `--remove-all` / `-r`\ + Remove all previously installed packages first. This is equivalent + to running `nix-env -e '.*'` first, except that everything happens + in a single transaction. + +{{#include ./opt-common.md}} + +{{#include ../opt-common.md}} + +{{#include ./env-common.md}} + +{{#include ../env-common.md}} + +# Examples + +To install a package using a specific attribute path from the active Nix expression: + +```console +$ nix-env -iA gcc40mips +installing `gcc-4.0.2' +$ nix-env -iA xorg.xorgserver +installing `xorg-server-1.2.0' +``` + +To install a specific version of `gcc` using the derivation name: + +```console +$ nix-env --install gcc-3.3.2 +installing `gcc-3.3.2' +uninstalling `gcc-3.1' +``` + +Using attribute path for selecting a package is preferred, +as it is much faster and there will not be multiple matches. + +Note the previously installed version is removed, since +`--preserve-installed` was not specified. + +To install an arbitrary version: + +```console +$ nix-env --install gcc +installing `gcc-3.3.2' +``` + +To install all derivations in the Nix expression `foo.nix`: + +```console +$ nix-env -f ~/foo.nix -i '.*' +``` + +To copy the store path with symbolic name `gcc` from another profile: + +```console +$ nix-env -i --from-profile /nix/var/nix/profiles/foo gcc +``` + +To install a specific [store derivation] (typically created by +`nix-instantiate`): + +```console +$ nix-env -i /nix/store/fibjb1bfbpm5mrsxc4mh2d8n37sxh91i-gcc-3.4.3.drv +``` + +To install a specific output path: + +```console +$ nix-env -i /nix/store/y3cgx0xj1p4iv9x0pnnmdhr8iyg741vk-gcc-3.4.3 +``` + +To install from a Nix expression specified on the command-line: + +```console +$ nix-env -f ./foo.nix -i -E \ + 'f: (f {system = "i686-linux";}).subversionWithJava' +``` + +I.e., this evaluates to `(f: (f {system = +"i686-linux";}).subversionWithJava) (import ./foo.nix)`, thus selecting +the `subversionWithJava` attribute from the set returned by calling the +function defined in `./foo.nix`. 
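The equivalence can be checked by passing that expression to `nix-instantiate` directly (an illustrative sketch; `./foo.nix` is the hypothetical file from the example above, and this only produces a store derivation, it does not install anything):

```console
$ nix-instantiate --expr \
    '(f: (f { system = "i686-linux"; }).subversionWithJava) (import ./foo.nix)'
```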
+ +A dry-run tells you which paths will be downloaded or built from source: + +```console +$ nix-env -f '' -iA hello --dry-run +(dry run; not doing anything) +installing ‘hello-2.10’ +this path will be fetched (0.04 MiB download, 0.19 MiB unpacked): + /nix/store/wkhdf9jinag5750mqlax6z2zbwhqb76n-hello-2.10 + ... +``` + +To install Firefox from the latest revision in the Nixpkgs/NixOS 14.12 +channel: + +```console +$ nix-env -f https://github.com/NixOS/nixpkgs/archive/nixos-14.12.tar.gz -iA firefox +``` + diff --git a/doc/manual/src/command-ref/nix-env/list-generations.md b/doc/manual/src/command-ref/nix-env/list-generations.md new file mode 100644 index 000000000..a4881ece8 --- /dev/null +++ b/doc/manual/src/command-ref/nix-env/list-generations.md @@ -0,0 +1,33 @@ +# Name + +`nix-env --list-generations` - list profile generations + +# Synopsis + +`nix-env` `--list-generations` + +# Description + +This operation print a list of all the currently existing generations +for the active profile. These may be switched to using the +`--switch-generation` operation. It also prints the creation date of the +generation, and indicates the current generation. + +{{#include ./opt-common.md}} + +{{#include ../opt-common.md}} + +{{#include ./env-common.md}} + +{{#include ../env-common.md}} + +# Examples + +```console +$ nix-env --list-generations + 95 2004-02-06 11:48:24 + 96 2004-02-06 11:49:01 + 97 2004-02-06 16:22:45 + 98 2004-02-06 16:24:33 (current) +``` + diff --git a/doc/manual/src/command-ref/nix-env/opt-common.md b/doc/manual/src/command-ref/nix-env/opt-common.md new file mode 100644 index 000000000..636281b6d --- /dev/null +++ b/doc/manual/src/command-ref/nix-env/opt-common.md @@ -0,0 +1,35 @@ +# Options + +The following options are allowed for all `nix-env` operations, but may not always have an effect. + + - `--file` / `-f` *path*\ + Specifies the Nix expression (designated below as the *active Nix + expression*) used by the `--install`, `--upgrade`, and `--query + --available` operations to obtain derivations. The default is + `~/.nix-defexpr`. + + If the argument starts with `http://` or `https://`, it is + interpreted as the URL of a tarball that will be downloaded and + unpacked to a temporary location. The tarball must include a single + top-level directory containing at least a file named `default.nix`. + + - `--profile` / `-p` *path*\ + Specifies the profile to be used by those operations that operate on + a profile (designated below as the *active profile*). A profile is a + sequence of user environments called *generations*, one of which is + the *current generation*. + + - `--dry-run`\ + For the `--install`, `--upgrade`, `--uninstall`, + `--switch-generation`, `--delete-generations` and `--rollback` + operations, this flag will cause `nix-env` to print what *would* be + done if this flag had not been specified, without actually doing it. + + `--dry-run` also prints out which paths will be + [substituted](@docroot@/glossary.md) (i.e., downloaded) and which paths + will be built from source (because no substitute is available). + + - `--system-filter` *system*\ + By default, operations such as `--query + --available` show derivations matching any platform. This option + allows you to use derivations for the specified platform *system*. 
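These options can be combined freely with any of the operations. For example, a hypothetical invocation that takes the Nix expression from a tarball URL, targets a non-default profile (the profile path is illustrative), and only reports what would be done:

```console
$ nix-env -f https://github.com/NixOS/nixpkgs/archive/master.tar.gz \
    -p /nix/var/nix/profiles/per-user/alice/tools \
    --dry-run -iA hello
```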
diff --git a/doc/manual/src/command-ref/nix-env/query.md b/doc/manual/src/command-ref/nix-env/query.md new file mode 100644 index 000000000..18f0ee210 --- /dev/null +++ b/doc/manual/src/command-ref/nix-env/query.md @@ -0,0 +1,215 @@ +# Name + +`nix-env --query` - display information about packages + +# Synopsis + +`nix-env` {`--query` | `-q`} *names…* + [`--installed` | `--available` | `-a`] + [{`--status` | `-s`}] + [{`--attr-path` | `-P`}] + [`--no-name`] + [{`--compare-versions` | `-c`}] + [`--system`] + [`--drv-path`] + [`--out-path`] + [`--description`] + [`--meta`] + [`--xml`] + [`--json`] + [{`--prebuilt-only` | `-b`}] + [{`--attr` | `-A`} *attribute-path*] + +# Description + +The query operation displays information about either the store paths +that are installed in the current generation of the active profile +(`--installed`), or the derivations that are available for installation +in the active Nix expression (`--available`). It only prints information +about derivations whose symbolic name matches one of *names*. + +The derivations are sorted by their `name` attributes. + +# Source selection + +The following flags specify the set of things on which the query +operates. + + - `--installed`\ + The query operates on the store paths that are installed in the + current generation of the active profile. This is the default. + + - `--available`; `-a`\ + The query operates on the derivations that are available in the + active Nix expression. + +# Queries + +The following flags specify what information to display about the +selected derivations. Multiple flags may be specified, in which case the +information is shown in the order given here. Note that the name of the +derivation is shown unless `--no-name` is specified. + + - `--xml`\ + Print the result in an XML representation suitable for automatic + processing by other tools. The root element is called `items`, which + contains a `item` element for each available or installed + derivation. The fields discussed below are all stored in attributes + of the `item` elements. + + - `--json`\ + Print the result in a JSON representation suitable for automatic + processing by other tools. + + - `--prebuilt-only` / `-b`\ + Show only derivations for which a substitute is registered, i.e., + there is a pre-built binary available that can be downloaded in lieu + of building the derivation. Thus, this shows all packages that + probably can be installed quickly. + + - `--status`; `-s`\ + Print the *status* of the derivation. The status consists of three + characters. The first is `I` or `-`, indicating whether the + derivation is currently installed in the current generation of the + active profile. This is by definition the case for `--installed`, + but not for `--available`. The second is `P` or `-`, indicating + whether the derivation is present on the system. This indicates + whether installation of an available derivation will require the + derivation to be built. The third is `S` or `-`, indicating whether + a substitute is available for the derivation. + + - `--attr-path`; `-P`\ + Print the *attribute path* of the derivation, which can be used to + unambiguously select it using the `--attr` option available in + commands that install derivations like `nix-env --install`. This + option only works together with `--available` + + - `--no-name`\ + Suppress printing of the `name` attribute of each derivation. + + - `--compare-versions` / `-c`\ + Compare installed versions to available versions, or vice versa (if + `--available` is given). 
This is useful for quickly seeing whether + upgrades for installed packages are available in a Nix expression. A + column is added with the following meaning: + + - `<` *version*\ + A newer version of the package is available or installed. + + - `=` *version*\ + At most the same version of the package is available or + installed. + + - `>` *version*\ + Only older versions of the package are available or installed. + + - `- ?`\ + No version of the package is available or installed. + + - `--system`\ + Print the `system` attribute of the derivation. + + - `--drv-path`\ + Print the path of the [store derivation](@docroot@/glossary.md#gloss-store-derivation). + + - `--out-path`\ + Print the output path of the derivation. + + - `--description`\ + Print a short (one-line) description of the derivation, if + available. The description is taken from the `meta.description` + attribute of the derivation. + + - `--meta`\ + Print all of the meta-attributes of the derivation. This option is + only available with `--xml` or `--json`. + +{{#include ./opt-common.md}} + +{{#include ../opt-common.md}} + +{{#include ./env-common.md}} + +{{#include ../env-common.md}} + +# Examples + +To show installed packages: + +```console +$ nix-env -q +bison-1.875c +docbook-xml-4.2 +firefox-1.0.4 +MPlayer-1.0pre7 +ORBit2-2.8.3 +… +``` + +To show available packages: + +```console +$ nix-env -qa +firefox-1.0.7 +GConf-2.4.0.1 +MPlayer-1.0pre7 +ORBit2-2.8.3 +… +``` + +To show the status of available packages: + +```console +$ nix-env -qas +-P- firefox-1.0.7 (not installed but present) +--S GConf-2.4.0.1 (not present, but there is a substitute for fast installation) +--S MPlayer-1.0pre3 (i.e., this is not the installed MPlayer, even though the version is the same!) +IP- ORBit2-2.8.3 (installed and by definition present) +… +``` + +To show available packages in the Nix expression `foo.nix`: + +```console +$ nix-env -f ./foo.nix -qa +foo-1.2.3 +``` + +To compare installed versions to what’s available: + +```console +$ nix-env -qc +... +acrobat-reader-7.0 - ? (package is not available at all) +autoconf-2.59 = 2.59 (same version) +firefox-1.0.4 < 1.0.7 (a more recent version is available) +... +``` + +To show all packages with “`zip`” in the name: + +```console +$ nix-env -qa '.*zip.*' +bzip2-1.0.6 +gzip-1.6 +zip-3.0 +… +``` + +To show all packages with “`firefox`” or “`chromium`” in the name: + +```console +$ nix-env -qa '.*(firefox|chromium).*' +chromium-37.0.2062.94 +chromium-beta-38.0.2125.24 +firefox-32.0.3 +firefox-with-plugins-13.0.1 +… +``` + +To show all packages in the latest revision of the Nixpkgs repository: + +```console +$ nix-env -f https://github.com/NixOS/nixpkgs/archive/master.tar.gz -qa +``` + diff --git a/doc/manual/src/command-ref/nix-env/rollback.md b/doc/manual/src/command-ref/nix-env/rollback.md new file mode 100644 index 000000000..1e3958cfc --- /dev/null +++ b/doc/manual/src/command-ref/nix-env/rollback.md @@ -0,0 +1,34 @@ +# Name + +`nix-env --rollback` - set user environment to previous generation + +# Synopsis + +`nix-env` `--rollback` + +# Description + +This operation switches to the “previous” generation of the active +profile, that is, the highest numbered generation lower than the current +generation, if it exists. It is just a convenience wrapper around +`--list-generations` and `--switch-generation`. 
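In other words, the effect of `--rollback` can be approximated by hand (the generation numbers below are illustrative):

```console
$ nix-env --list-generations
  90   2004-02-06 11:48:24
  91   2004-02-06 11:49:01
  92   2004-02-06 16:22:45   (current)

$ nix-env --switch-generation 91
switching from generation 92 to 91
```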
+ +{{#include ./opt-common.md}} + +{{#include ../opt-common.md}} + +{{#include ./env-common.md}} + +{{#include ../env-common.md}} + +# Examples + +```console +$ nix-env --rollback +switching from generation 92 to 91 +``` + +```console +$ nix-env --rollback +error: no generation older than the current (91) exists +``` diff --git a/doc/manual/src/command-ref/nix-env/set-flag.md b/doc/manual/src/command-ref/nix-env/set-flag.md new file mode 100644 index 000000000..63f0a0ff9 --- /dev/null +++ b/doc/manual/src/command-ref/nix-env/set-flag.md @@ -0,0 +1,82 @@ +# Name + +`nix-env --set-flag` - modify meta attributes of installed packages + +# Synopsis + +`nix-env` `--set-flag` *name* *value* *drvnames* + +# Description + +The `--set-flag` operation allows meta attributes of installed packages +to be modified. There are several attributes that can be usefully +modified, because they affect the behaviour of `nix-env` or the user +environment build script: + + - `priority` can be changed to resolve filename clashes. The user + environment build script uses the `meta.priority` attribute of + derivations to resolve filename collisions between packages. Lower + priority values denote a higher priority. For instance, the GCC + wrapper package and the Binutils package in Nixpkgs both have a file + `bin/ld`, so previously if you tried to install both you would get a + collision. Now, on the other hand, the GCC wrapper declares a higher + priority than Binutils, so the former’s `bin/ld` is symlinked in the + user environment. + + - `keep` can be set to `true` to prevent the package from being + upgraded or replaced. This is useful if you want to hang on to an + older version of a package. + + - `active` can be set to `false` to “disable” the package. That is, no + symlinks will be generated to the files of the package, but it + remains part of the profile (so it won’t be garbage-collected). It + can be set back to `true` to re-enable the package. + +{{#include ./opt-common.md}} + +{{#include ../opt-common.md}} + +{{#include ../env-common.md}} + +# Examples + +To prevent the currently installed Firefox from being upgraded: + +```console +$ nix-env --set-flag keep true firefox +``` + +After this, `nix-env -u` will ignore Firefox. + +To disable the currently installed Firefox, then install a new Firefox +while the old remains part of the profile: + +```console +$ nix-env -q +firefox-2.0.0.9 (the current one) + +$ nix-env --preserve-installed -i firefox-2.0.0.11 +installing `firefox-2.0.0.11' +building path(s) `/nix/store/myy0y59q3ig70dgq37jqwg1j0rsapzsl-user-environment' +collision between `/nix/store/...-firefox-2.0.0.11/bin/firefox' + and `/nix/store/...-firefox-2.0.0.9/bin/firefox'. 
+(i.e., can’t have two active at the same time) + +$ nix-env --set-flag active false firefox +setting flag on `firefox-2.0.0.9' + +$ nix-env --preserve-installed -i firefox-2.0.0.11 +installing `firefox-2.0.0.11' + +$ nix-env -q +firefox-2.0.0.11 (the enabled one) +firefox-2.0.0.9 (the disabled one) +``` + +To make files from `binutils` take precedence over files from `gcc`: + +```console +$ nix-env --set-flag priority 5 binutils +$ nix-env --set-flag priority 10 gcc +``` + diff --git a/doc/manual/src/command-ref/nix-env/set.md b/doc/manual/src/command-ref/nix-env/set.md new file mode 100644 index 000000000..c1cf75739 --- /dev/null +++ b/doc/manual/src/command-ref/nix-env/set.md @@ -0,0 +1,30 @@ +# Name + +`nix-env --set` - set profile to contain a specified derivation + +## Synopsis + +`nix-env` `--set` *drvname* + +## Description + +The `--set` operation modifies the current generation of a profile so +that it contains exactly the specified derivation, and nothing else. + +{{#include ./opt-common.md}} + +{{#include ../opt-common.md}} + +{{#include ./env-common.md}} + +{{#include ../env-common.md}} + +## Examples + +The following updates a profile such that its current generation will +contain just Firefox: + +```console +$ nix-env -p /nix/var/nix/profiles/browser --set firefox +``` + diff --git a/doc/manual/src/command-ref/nix-env/switch-generation.md b/doc/manual/src/command-ref/nix-env/switch-generation.md new file mode 100644 index 000000000..e550325c4 --- /dev/null +++ b/doc/manual/src/command-ref/nix-env/switch-generation.md @@ -0,0 +1,33 @@ +# Name + +`nix-env --switch-generation` - set user environment to given profile generation + +# Synopsis + +`nix-env` {`--switch-generation` | `-G`} *generation* + +# Description + +This operation makes generation number *generation* the current +generation of the active profile. That is, if the `profile` is the path +to the active profile, then the symlink `profile` is made to point to +`profile-generation-link`, which is in turn a symlink to the actual user +environment in the Nix store. + +Switching will fail if the specified generation does not exist. + +{{#include ./opt-common.md}} + +{{#include ../opt-common.md}} + +{{#include ./env-common.md}} + +{{#include ../env-common.md}} + +# Examples + +```console +$ nix-env -G 42 +switching from generation 50 to 42 +``` + diff --git a/doc/manual/src/command-ref/nix-env/switch-profile.md b/doc/manual/src/command-ref/nix-env/switch-profile.md new file mode 100644 index 000000000..b389e4140 --- /dev/null +++ b/doc/manual/src/command-ref/nix-env/switch-profile.md @@ -0,0 +1,26 @@ +# Name + +`nix-env --switch-profile` - set user environment to given profile + +# Synopsis + +`nix-env` {`--switch-profile` | `-S`} *path* + +# Description + +This operation makes *path* the current profile for the user. That is, +the symlink `~/.nix-profile` is made to point to *path*. 
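Since only the `~/.nix-profile` symlink changes, the effect is easy to verify (the profile path below is hypothetical):

```console
$ nix-env --switch-profile /nix/var/nix/profiles/my-profile
$ readlink ~/.nix-profile
/nix/var/nix/profiles/my-profile
```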
+ +{{#include ./opt-common.md}} + +{{#include ../opt-common.md}} + +{{#include ./env-common.md}} + +{{#include ../env-common.md}} + +# Examples + +```console +$ nix-env -S ~/my-profile +``` diff --git a/doc/manual/src/command-ref/nix-env/uninstall.md b/doc/manual/src/command-ref/nix-env/uninstall.md new file mode 100644 index 000000000..e9ec8a15e --- /dev/null +++ b/doc/manual/src/command-ref/nix-env/uninstall.md @@ -0,0 +1,28 @@ +# Name + +`nix-env --uninstall` - remove packages from user environment + +# Synopsis + +`nix-env` {`--uninstall` | `-e`} *drvnames…* + +# Description + +The uninstall operation creates a new user environment, based on the +current generation of the active profile, from which the store paths +designated by the symbolic names *drvnames* are removed. + +{{#include ./opt-common.md}} + +{{#include ../opt-common.md}} + +{{#include ./env-common.md}} + +{{#include ../env-common.md}} + +# Examples + +```console +$ nix-env --uninstall gcc +$ nix-env -e '.*' (remove everything) +``` diff --git a/doc/manual/src/command-ref/nix-env/upgrade.md b/doc/manual/src/command-ref/nix-env/upgrade.md new file mode 100644 index 000000000..f88ffcbee --- /dev/null +++ b/doc/manual/src/command-ref/nix-env/upgrade.md @@ -0,0 +1,141 @@ +# Name + +`nix-env --upgrade` - upgrade packages in user environment + +# Synopsis + +`nix-env` {`--upgrade` | `-u`} *args* + [`--lt` | `--leq` | `--eq` | `--always`] + [{`--prebuilt-only` | `-b`}] + [{`--attr` | `-A`}] + [`--from-expression`] [`-E`] + [`--from-profile` *path*] + [`--preserve-installed` | `-P`] + +# Description + +The upgrade operation creates a new user environment, based on the +current generation of the active profile, in which all store paths are +replaced for which there are newer versions in the set of paths +described by *args*. Paths for which there are no newer versions are +left untouched; this is not an error. It is also not an error if an +element of *args* matches no installed derivations. + +For a description of how *args* is mapped to a set of store paths, see +[`--install`](#operation---install). If *args* describes multiple +store paths with the same symbolic name, only the one with the highest +version is installed. + +# Flags + + - `--lt`\ + Only upgrade a derivation to newer versions. This is the default. + + - `--leq`\ + In addition to upgrading to newer versions, also “upgrade” to + derivations that have the same version. Version are not a unique + identification of a derivation, so there may be many derivations + that have the same version. This flag may be useful to force + “synchronisation” between the installed and available derivations. + + - `--eq`\ + *Only* “upgrade” to derivations that have the same version. This may + not seem very useful, but it actually is, e.g., when there is a new + release of Nixpkgs and you want to replace installed applications + with the same versions built against newer dependencies (to reduce + the number of dependencies floating around on your system). + + - `--always`\ + In addition to upgrading to newer versions, also “upgrade” to + derivations that have the same or a lower version. I.e., derivations + may actually be downgraded depending on what is available in the + active Nix expression. + + - `--prebuilt-only` / `-b`\ + Use only derivations for which a substitute is registered, i.e., + there is a pre-built binary available that can be downloaded in lieu + of building the derivation. Thus, no packages will be built from + source. 
+ + - `--preserve-installed` / `-P`\ + Do not remove derivations with a name matching one of the + derivations being installed. Usually, trying to have two versions of + the same package installed in the same generation of a profile will + lead to an error in building the generation, due to file name + clashes between the two versions. However, this is not the case for + all packages. + +{{#include ./opt-common.md}} + +{{#include ../opt-common.md}} + +{{#include ./env-common.md}} + +{{#include ../env-common.md}} + +# Examples + +```console +$ nix-env --upgrade -A nixpkgs.gcc +upgrading `gcc-3.3.1' to `gcc-3.4' +``` + +When there are no updates available, nothing will happen: + +```console +$ nix-env --upgrade -A nixpkgs.pan +``` + +Using `-A` is preferred when possible, as it is faster and unambiguous but +it is also possible to upgrade to a specific version by matching the derivation name: + +```console +$ nix-env -u gcc-3.3.2 --always +upgrading `gcc-3.4' to `gcc-3.3.2' +``` + +To try to upgrade everything +(matching packages based on the part of the derivation name without version): + +```console +$ nix-env -u +upgrading `hello-2.1.2' to `hello-2.1.3' +upgrading `mozilla-1.2' to `mozilla-1.4' +``` + +# Versions + +The upgrade operation determines whether a derivation `y` is an upgrade +of a derivation `x` by looking at their respective `name` attributes. +The names (e.g., `gcc-3.3.1` are split into two parts: the package name +(`gcc`), and the version (`3.3.1`). The version part starts after the +first dash not followed by a letter. `y` is considered an upgrade of `x` +if their package names match, and the version of `y` is higher than that +of `x`. + +The versions are compared by splitting them into contiguous components +of numbers and letters. E.g., `3.3.1pre5` is split into `[3, 3, 1, +"pre", 5]`. These lists are then compared lexicographically (from left +to right). Corresponding components `a` and `b` are compared as follows. +If they are both numbers, integer comparison is used. If `a` is an empty +string and `b` is a number, `a` is considered less than `b`. The special +string component `pre` (for *pre-release*) is considered to be less than +other components. String components are considered less than number +components. Otherwise, they are compared lexicographically (i.e., using +case-sensitive string comparison). + +This is illustrated by the following examples: + + 1.0 < 2.3 + 2.1 < 2.3 + 2.3 = 2.3 + 2.5 > 2.3 + 3.1 > 2.3 + 2.3.1 > 2.3 + 2.3.1 > 2.3a + 2.3pre1 < 2.3 + 2.3pre3 < 2.3pre12 + 2.3a < 2.3c + 2.3pre1 < 2.3c + 2.3pre1 < 2.3q + diff --git a/doc/manual/src/command-ref/nix-hash.md b/doc/manual/src/command-ref/nix-hash.md index 45f67f1c5..37c8facec 100644 --- a/doc/manual/src/command-ref/nix-hash.md +++ b/doc/manual/src/command-ref/nix-hash.md @@ -6,9 +6,7 @@ `nix-hash` [`--flat`] [`--base32`] [`--truncate`] [`--type` *hashAlgo*] *path…* -`nix-hash` `--to-base16` *hash…* - -`nix-hash` `--to-base32` *hash…* +`nix-hash` [`--to-base16`|`--to-base32`|`--to-base64`|`--to-sri`] [`--type` *hashAlgo*] *hash…* # Description @@ -23,7 +21,7 @@ The hash is computed over a *serialisation* of each path: a dump of the file system tree rooted at the path. This allows directories and symlinks to be hashed as well as regular files. The dump is in the *NAR format* produced by [`nix-store ---dump`](nix-store.md#operation---dump). Thus, `nix-hash path` +--dump`](@docroot@/command-ref/nix-store/dump.md). 
Thus, `nix-hash path` yields the same cryptographic hash as `nix-store --dump path | md5sum`. @@ -35,11 +33,23 @@ md5sum`. The result is identical to that produced by the GNU commands `md5sum` and `sha1sum`. + - `--base16`\ + Print the hash in a hexadecimal representation (default). + - `--base32`\ Print the hash in a base-32 representation rather than hexadecimal. This base-32 representation is more compact and can be used in Nix expressions (such as in calls to `fetchurl`). + - `--base64`\ + Similar to --base32, but print the hash in a base-64 representation, + which is more compact than the base-32 one. + + - `--sri`\ + Print the hash in SRI format with base-64 encoding. + The type of hash algorithm will be prepended to the hash string, + followed by a hyphen (-) and the base-64 hash body. + - `--truncate`\ Truncate hashes longer than 160 bits (such as SHA-256) to 160 bits. @@ -55,6 +65,14 @@ md5sum`. Don’t hash anything, but convert the hexadecimal hash representation *hash* to base-32. + - `--to-base64`\ + Don’t hash anything, but convert the hexadecimal hash representation + *hash* to base-64. + + - `--to-sri`\ + Don’t hash anything, but convert the hexadecimal hash representation + *hash* to SRI. + # Examples Computing the same hash as `nix-prefetch-url`: @@ -81,9 +99,18 @@ $ nix-store --dump test/ | md5sum (for comparison) $ nix-hash --type sha1 test/ e4fd8ba5f7bbeaea5ace89fe10255536cd60dab6 +$ nix-hash --type sha1 --base16 test/ +e4fd8ba5f7bbeaea5ace89fe10255536cd60dab6 + $ nix-hash --type sha1 --base32 test/ nvd61k9nalji1zl9rrdfmsmvyyjqpzg4 +$ nix-hash --type sha1 --base64 test/ +5P2Lpfe76upazon+ECVVNs1g2rY= + +$ nix-hash --type sha1 --sri test/ +sha1-5P2Lpfe76upazon+ECVVNs1g2rY= + $ nix-hash --type sha256 --flat test/ error: reading file `test/': Is a directory @@ -91,7 +118,7 @@ $ nix-hash --type sha256 --flat test/world 5891b5b522d5df086d0ff0b110fbd9d21bb4fc7163af34d08286a2e846f6be03 ``` -Converting between hexadecimal and base-32: +Converting between hexadecimal, base-32, base-64, and SRI: ```console $ nix-hash --type sha1 --to-base32 e4fd8ba5f7bbeaea5ace89fe10255536cd60dab6 @@ -99,4 +126,13 @@ nvd61k9nalji1zl9rrdfmsmvyyjqpzg4 $ nix-hash --type sha1 --to-base16 nvd61k9nalji1zl9rrdfmsmvyyjqpzg4 e4fd8ba5f7bbeaea5ace89fe10255536cd60dab6 + +$ nix-hash --type sha1 --to-base64 e4fd8ba5f7bbeaea5ace89fe10255536cd60dab6 +5P2Lpfe76upazon+ECVVNs1g2rY= + +$ nix-hash --type sha1 --to-sri nvd61k9nalji1zl9rrdfmsmvyyjqpzg4 +sha1-5P2Lpfe76upazon+ECVVNs1g2rY= + +$ nix-hash --to-base16 sha1-5P2Lpfe76upazon+ECVVNs1g2rY= +e4fd8ba5f7bbeaea5ace89fe10255536cd60dab6 ``` diff --git a/doc/manual/src/command-ref/nix-instantiate.md b/doc/manual/src/command-ref/nix-instantiate.md index 432fb2608..e55fb2afd 100644 --- a/doc/manual/src/command-ref/nix-instantiate.md +++ b/doc/manual/src/command-ref/nix-instantiate.md @@ -76,7 +76,9 @@ standard input. this option is not enabled, there may be uninstantiated store paths in the final output. - +{{#include ./opt-common.md}} + +{{#include ./env-common.md}} # Examples diff --git a/doc/manual/src/command-ref/nix-shell.md b/doc/manual/src/command-ref/nix-shell.md index 840bccd25..576e5ba0b 100644 --- a/doc/manual/src/command-ref/nix-shell.md +++ b/doc/manual/src/command-ref/nix-shell.md @@ -101,7 +101,7 @@ All options not listed here are passed to `nix-store When a `--pure` shell is started, keep the listed environment variables. 
-The following common options are supported: +{{#include ./opt-common.md}} # Environment variables @@ -110,6 +110,8 @@ The following common options are supported: `bash` found in ``, falling back to the `bash` found in `PATH` if not found. +{{#include ./env-common.md}} + # Examples To build the dependencies of the package Pan, and start an interactive @@ -118,7 +120,8 @@ shell in which to build it: ```console $ nix-shell '' -A pan [nix-shell]$ eval ${unpackPhase:-unpackPhase} -[nix-shell]$ cd pan-* +[nix-shell]$ cd $sourceRoot +[nix-shell]$ eval ${patchPhase:-patchPhase} [nix-shell]$ eval ${configurePhase:-configurePhase} [nix-shell]$ eval ${buildPhase:-buildPhase} [nix-shell]$ ./pan/gui/pan diff --git a/doc/manual/src/command-ref/nix-store.md b/doc/manual/src/command-ref/nix-store.md index 31fdd7806..c7c5fdd2f 100644 --- a/doc/manual/src/command-ref/nix-store.md +++ b/doc/manual/src/command-ref/nix-store.md @@ -13,833 +13,35 @@ The command `nix-store` performs primitive operations on the Nix store. You generally do not need to run this command manually. -`nix-store` takes exactly one *operation* flag which indicates the -subcommand to be performed. These are documented below. - -# Common options - -This section lists the options that are common to all operations. These -options are allowed for every subcommand, though they may not always -have an effect. - - - [`--add-root`](#opt-add-root) *path* - - Causes the result of a realisation (`--realise` and - `--force-realise`) to be registered as a root of the garbage - collector. *path* will be created as a symlink to the resulting - store path. In addition, a uniquely named symlink to *path* will - be created in `/nix/var/nix/gcroots/auto/`. For instance, - - ```console - $ nix-store --add-root /home/eelco/bla/result -r ... - - $ ls -l /nix/var/nix/gcroots/auto - lrwxrwxrwx 1 ... 2005-03-13 21:10 dn54lcypm8f8... -> /home/eelco/bla/result - - $ ls -l /home/eelco/bla/result - lrwxrwxrwx 1 ... 2005-03-13 21:10 /home/eelco/bla/result -> /nix/store/1r11343n6qd4...-f-spot-0.0.10 - ``` - - Thus, when `/home/eelco/bla/result` is removed, the GC root in the - `auto` directory becomes a dangling symlink and will be ignored by - the collector. - - > **Warning** - > - > Note that it is not possible to move or rename GC roots, since - > the symlink in the `auto` directory will still point to the old - > location. - - If there are multiple results, then multiple symlinks will be - created by sequentially numbering symlinks beyond the first one - (e.g., `foo`, `foo-2`, `foo-3`, and so on). - -# Operation `--realise` - -## Synopsis - -`nix-store` {`--realise` | `-r`} *paths…* [`--dry-run`] - -## Description - -The operation `--realise` essentially “builds” the specified store -paths. Realisation is a somewhat overloaded term: - - - If the store path is a *derivation*, realisation ensures that the - output paths of the derivation are [valid] (i.e., - the output path and its closure exist in the file system). This - can be done in several ways. First, it is possible that the - outputs are already valid, in which case we are done - immediately. Otherwise, there may be [substitutes] - that produce the outputs (e.g., by downloading them). Finally, the - outputs can be produced by running the build task described - by the derivation. - - - If the store path is not a derivation, realisation ensures that the - specified path is valid (i.e., it and its closure exist in the file - system). If the path is already valid, we are done immediately. 
- Otherwise, the path and any missing paths in its closure may be - produced through substitutes. If there are no (successful) - substitutes, realisation fails. - -[valid]: ../glossary.md#gloss-validity -[substitutes]: ../glossary.md#gloss-substitute - -The output path of each derivation is printed on standard output. (For -non-derivations argument, the argument itself is printed.) - -The following flags are available: - - - `--dry-run`\ - Print on standard error a description of what packages would be - built or downloaded, without actually performing the operation. - - - `--ignore-unknown`\ - If a non-derivation path does not have a substitute, then silently - ignore it. - - - `--check`\ - This option allows you to check whether a derivation is - deterministic. It rebuilds the specified derivation and checks - whether the result is bitwise-identical with the existing outputs, - printing an error if that’s not the case. The outputs of the - specified derivation must already exist. When used with `-K`, if an - output path is not identical to the corresponding output from the - previous build, the new output path is left in - `/nix/store/name.check.` - -Special exit codes: - - - `100`\ - Generic build failure, the builder process returned with a non-zero - exit code. - - - `101`\ - Build timeout, the build was aborted because it did not complete - within the specified `timeout`. - - - `102`\ - Hash mismatch, the build output was rejected because it does not - match the [`outputHash` attribute of the - derivation](../language/advanced-attributes.md). - - - `104`\ - Not deterministic, the build succeeded in check mode but the - resulting output is not binary reproducible. - -With the `--keep-going` flag it's possible for multiple failures to -occur, in this case the 1xx status codes are or combined using binary -or. - - 1100100 - ^^^^ - |||`- timeout - ||`-- output hash mismatch - |`--- build failure - `---- not deterministic - -## Examples - -This operation is typically used to build [store derivation]s produced by -[`nix-instantiate`](./nix-instantiate.md): - -[store derivation]: ../glossary.md#gloss-store-derivation - -```console -$ nix-store -r $(nix-instantiate ./test.nix) -/nix/store/31axcgrlbfsxzmfff1gyj1bf62hvkby2-aterm-2.3.1 -``` - -This is essentially what [`nix-build`](nix-build.md) does. - -To test whether a previously-built derivation is deterministic: - -```console -$ nix-build '' -A hello --check -K -``` - -Use [`--read-log`](#operation---read-log) to show the stderr and stdout of a build: - -```console -$ nix-store --read-log $(nix-instantiate ./test.nix) -``` - -# Operation `--serve` - -## Synopsis - -`nix-store` `--serve` [`--write`] - -## Description - -The operation `--serve` provides access to the Nix store over stdin and -stdout, and is intended to be used as a means of providing Nix store -access to a restricted ssh user. - -The following flags are available: - - - `--write`\ - Allow the connected client to request the realization of - derivations. In effect, this can be used to make the host act as a - remote builder. - -## Examples - -To turn a host into a build server, the `authorized_keys` file can be -used to provide build access to a given SSH public key: - -```console -$ cat <>/root/.ssh/authorized_keys -command="nice -n20 nix-store --serve --write" ssh-rsa AAAAB3NzaC1yc2EAAAA... 
-EOF -``` - -# Operation `--gc` - -## Synopsis - -`nix-store` `--gc` [`--print-roots` | `--print-live` | `--print-dead`] [`--max-freed` *bytes*] - -## Description - -Without additional flags, the operation `--gc` performs a garbage -collection on the Nix store. That is, all paths in the Nix store not -reachable via file system references from a set of “roots”, are deleted. - -The following suboperations may be specified: - - - `--print-roots`\ - This operation prints on standard output the set of roots used by - the garbage collector. - - - `--print-live`\ - This operation prints on standard output the set of “live” store - paths, which are all the store paths reachable from the roots. Live - paths should never be deleted, since that would break consistency — - it would become possible that applications are installed that - reference things that are no longer present in the store. - - - `--print-dead`\ - This operation prints out on standard output the set of “dead” store - paths, which is just the opposite of the set of live paths: any path - in the store that is not live (with respect to the roots) is dead. - -By default, all unreachable paths are deleted. The following options -control what gets deleted and in what order: - - - `--max-freed` *bytes*\ - Keep deleting paths until at least *bytes* bytes have been deleted, - then stop. The argument *bytes* can be followed by the - multiplicative suffix `K`, `M`, `G` or `T`, denoting KiB, MiB, GiB - or TiB units. - -The behaviour of the collector is also influenced by the -`keep-outputs` and `keep-derivations` settings in the Nix -configuration file. - -By default, the collector prints the total number of freed bytes when it -finishes (or when it is interrupted). With `--print-dead`, it prints the -number of bytes that would be freed. - -## Examples - -To delete all unreachable paths, just do: - -```console -$ nix-store --gc -deleting `/nix/store/kq82idx6g0nyzsp2s14gfsc38npai7lf-cairo-1.0.4.tar.gz.drv' -... -8825586 bytes freed (8.42 MiB) -``` - -To delete at least 100 MiBs of unreachable paths: - -```console -$ nix-store --gc --max-freed $((100 * 1024 * 1024)) -``` - -# Operation `--delete` - -## Synopsis - -`nix-store` `--delete` [`--ignore-liveness`] *paths…* - -## Description - -The operation `--delete` deletes the store paths *paths* from the Nix -store, but only if it is safe to do so; that is, when the path is not -reachable from a root of the garbage collector. This means that you can -only delete paths that would also be deleted by `nix-store --gc`. Thus, -`--delete` is a more targeted version of `--gc`. - -With the option `--ignore-liveness`, reachability from the roots is -ignored. However, the path still won’t be deleted if there are other -paths in the store that refer to it (i.e., depend on it). - -## Example - -```console -$ nix-store --delete /nix/store/zq0h41l75vlb4z45kzgjjmsjxvcv1qk7-mesa-6.4 -0 bytes freed (0.00 MiB) -error: cannot delete path `/nix/store/zq0h41l75vlb4z45kzgjjmsjxvcv1qk7-mesa-6.4' since it is still alive -``` - -# Operation `--query` - -## Synopsis - -`nix-store` {`--query` | `-q`} - {`--outputs` | `--requisites` | `-R` | `--references` | - `--referrers` | `--referrers-closure` | `--deriver` | `-d` | - `--graph` | `--tree` | `--binding` *name* | `-b` *name* | `--hash` | - `--size` | `--roots`} - [`--use-output`] [`-u`] [`--force-realise`] [`-f`] - *paths…* - -## Description - -The operation `--query` displays information about [store path]s. -The queries are described below. 
At most one query can be -specified. The default query is `--outputs`. - -The paths *paths* may also be symlinks from outside of the Nix store, to -the Nix store. In that case, the query is applied to the target of the -symlink. - -## Common query options - - - `--use-output`; `-u`\ - For each argument to the query that is a [store derivation], apply the - query to the output path of the derivation instead. - - - `--force-realise`; `-f`\ - Realise each argument to the query first (see [`nix-store - --realise`](#operation---realise)). - -## Queries - - - `--outputs`\ - Prints out the [output path]s of the store - derivations *paths*. These are the paths that will be produced when - the derivation is built. - - - `--requisites`; `-R`\ - Prints out the [closure] of the given *paths*. - - This query has one option: - - - `--include-outputs` - Also include the existing output paths of [store derivation]s, - and their closures. - - This query can be used to implement various kinds of deployment. A - *source deployment* is obtained by distributing the closure of a - store derivation. A *binary deployment* is obtained by distributing - the closure of an output path. A *cache deployment* (combined - source/binary deployment, including binaries of build-time-only - dependencies) is obtained by distributing the closure of a store - derivation and specifying the option `--include-outputs`. - - - `--references`\ - Prints the set of [references]s of the store paths - *paths*, that is, their immediate dependencies. (For *all* - dependencies, use `--requisites`.) - - [reference]: ../glossary.md#gloss-reference - - - `--referrers`\ - Prints the set of *referrers* of the store paths *paths*, that is, - the store paths currently existing in the Nix store that refer to - one of *paths*. Note that contrary to the references, the set of - referrers is not constant; it can change as store paths are added or - removed. - - - `--referrers-closure`\ - Prints the closure of the set of store paths *paths* under the - referrers relation; that is, all store paths that directly or - indirectly refer to one of *paths*. These are all the path currently - in the Nix store that are dependent on *paths*. - - - `--deriver`; `-d`\ - Prints the [deriver] of the store paths *paths*. If - the path has no deriver (e.g., if it is a source file), or if the - deriver is not known (e.g., in the case of a binary-only - deployment), the string `unknown-deriver` is printed. - - [deriver]: ../glossary.md#gloss-deriver - - - `--graph`\ - Prints the references graph of the store paths *paths* in the format - of the `dot` tool of AT\&T's [Graphviz - package](http://www.graphviz.org/). This can be used to visualise - dependency graphs. To obtain a build-time dependency graph, apply - this to a store derivation. To obtain a runtime dependency graph, - apply it to an output path. - - - `--tree`\ - Prints the references graph of the store paths *paths* as a nested - ASCII tree. References are ordered by descending closure size; this - tends to flatten the tree, making it more readable. The query only - recurses into a store path when it is first encountered; this - prevents a blowup of the tree representation of the graph. - - - `--graphml`\ - Prints the references graph of the store paths *paths* in the - [GraphML](http://graphml.graphdrawing.org/) file format. This can be - used to visualise dependency graphs. To obtain a build-time - dependency graph, apply this to a [store derivation]. 
To obtain a - runtime dependency graph, apply it to an output path. - - - `--binding` *name*; `-b` *name*\ - Prints the value of the attribute *name* (i.e., environment - variable) of the [store derivation]s *paths*. It is an error for a - derivation to not have the specified attribute. - - - `--hash`\ - Prints the SHA-256 hash of the contents of the store paths *paths* - (that is, the hash of the output of `nix-store --dump` on the given - paths). Since the hash is stored in the Nix database, this is a fast - operation. - - - `--size`\ - Prints the size in bytes of the contents of the store paths *paths* - — to be precise, the size of the output of `nix-store --dump` on - the given paths. Note that the actual disk space required by the - store paths may be higher, especially on filesystems with large - cluster sizes. - - - `--roots`\ - Prints the garbage collector roots that point, directly or - indirectly, at the store paths *paths*. - -## Examples - -Print the closure (runtime dependencies) of the `svn` program in the -current user environment: - -```console -$ nix-store -qR $(which svn) -/nix/store/5mbglq5ldqld8sj57273aljwkfvj22mc-subversion-1.1.4 -/nix/store/9lz9yc6zgmc0vlqmn2ipcpkjlmbi51vv-glibc-2.3.4 -... -``` - -Print the build-time dependencies of `svn`: - -```console -$ nix-store -qR $(nix-store -qd $(which svn)) -/nix/store/02iizgn86m42q905rddvg4ja975bk2i4-grep-2.5.1.tar.bz2.drv -/nix/store/07a2bzxmzwz5hp58nf03pahrv2ygwgs3-gcc-wrapper.sh -/nix/store/0ma7c9wsbaxahwwl04gbw3fcd806ski4-glibc-2.3.4.drv -... lots of other paths ... -``` - -The difference with the previous example is that we ask the closure of -the derivation (`-qd`), not the closure of the output path that contains -`svn`. - -Show the build-time dependencies as a tree: - -```console -$ nix-store -q --tree $(nix-store -qd $(which svn)) -/nix/store/7i5082kfb6yjbqdbiwdhhza0am2xvh6c-subversion-1.1.4.drv -+---/nix/store/d8afh10z72n8l1cr5w42366abiblgn54-builder.sh -+---/nix/store/fmzxmpjx2lh849ph0l36snfj9zdibw67-bash-3.0.drv -| +---/nix/store/570hmhmx3v57605cqg9yfvvyh0nnb8k8-bash -| +---/nix/store/p3srsbd8dx44v2pg6nbnszab5mcwx03v-builder.sh -... -``` - -Show all paths that depend on the same OpenSSL library as `svn`: - -```console -$ nix-store -q --referrers $(nix-store -q --binding openssl $(nix-store -qd $(which svn))) -/nix/store/23ny9l9wixx21632y2wi4p585qhva1q8-sylpheed-1.0.0 -/nix/store/5mbglq5ldqld8sj57273aljwkfvj22mc-subversion-1.1.4 -/nix/store/dpmvp969yhdqs7lm2r1a3gng7pyq6vy4-subversion-1.1.3 -/nix/store/l51240xqsgg8a7yrbqdx1rfzyv6l26fx-lynx-2.8.5 -``` - -Show all paths that directly or indirectly depend on the Glibc (C -library) used by `svn`: - -```console -$ nix-store -q --referrers-closure $(ldd $(which svn) | grep /libc.so | awk '{print $3}') -/nix/store/034a6h4vpz9kds5r6kzb9lhh81mscw43-libgnomeprintui-2.8.2 -/nix/store/15l3yi0d45prm7a82pcrknxdh6nzmxza-gawk-3.1.4 -... -``` - -Note that `ldd` is a command that prints out the dynamic libraries used -by an ELF executable. 
- -Make a picture of the runtime dependency graph of the current user -environment: - -```console -$ nix-store -q --graph ~/.nix-profile | dot -Tps > graph.ps -$ gv graph.ps -``` - -Show every garbage collector root that points to a store path that -depends on `svn`: - -```console -$ nix-store -q --roots $(which svn) -/nix/var/nix/profiles/default-81-link -/nix/var/nix/profiles/default-82-link -/nix/var/nix/profiles/per-user/eelco/profile-97-link -``` - -# Operation `--add` - -## Synopsis - -`nix-store` `--add` *paths…* - -## Description - -The operation `--add` adds the specified paths to the Nix store. It -prints the resulting paths in the Nix store on standard output. - -## Example - -```console -$ nix-store --add ./foo.c -/nix/store/m7lrha58ph6rcnv109yzx1nk1cj7k7zf-foo.c -``` - -# Operation `--add-fixed` - -## Synopsis - -`nix-store` `--add-fixed` [`--recursive`] *algorithm* *paths…* - -## Description - -The operation `--add-fixed` adds the specified paths to the Nix store. -Unlike `--add` paths are registered using the specified hashing -algorithm, resulting in the same output path as a fixed-output -derivation. This can be used for sources that are not available from a -public url or broke since the download expression was written. - -This operation has the following options: - - - `--recursive`\ - Use recursive instead of flat hashing mode, used when adding - directories to the store. - -## Example - -```console -$ nix-store --add-fixed sha256 ./hello-2.10.tar.gz -/nix/store/3x7dwzq014bblazs7kq20p9hyzz0qh8g-hello-2.10.tar.gz -``` - -# Operation `--verify` - -## Synopsis - -`nix-store` `--verify` [`--check-contents`] [`--repair`] - -## Description - -The operation `--verify` verifies the internal consistency of the Nix -database, and the consistency between the Nix database and the Nix -store. Any inconsistencies encountered are automatically repaired. -Inconsistencies are generally the result of the Nix store or database -being modified by non-Nix tools, or of bugs in Nix itself. - -This operation has the following options: - - - `--check-contents`\ - Checks that the contents of every valid store path has not been - altered by computing a SHA-256 hash of the contents and comparing it - with the hash stored in the Nix database at build time. Paths that - have been modified are printed out. For large stores, - `--check-contents` is obviously quite slow. - - - `--repair`\ - If any valid path is missing from the store, or (if - `--check-contents` is given) the contents of a valid path has been - modified, then try to repair the path by redownloading it. See - `nix-store --repair-path` for details. - -# Operation `--verify-path` - -## Synopsis - -`nix-store` `--verify-path` *paths…* - -## Description - -The operation `--verify-path` compares the contents of the given store -paths to their cryptographic hashes stored in Nix’s database. For every -changed path, it prints a warning message. The exit status is 0 if no -path has changed, and 1 otherwise. - -## Example - -To verify the integrity of the `svn` command and all its dependencies: - -```console -$ nix-store --verify-path $(nix-store -qR $(which svn)) -``` - -# Operation `--repair-path` - -## Synopsis - -`nix-store` `--repair-path` *paths…* - -## Description - -The operation `--repair-path` attempts to “repair” the specified paths -by redownloading them using the available substituters. If no -substitutes are available, then repair is not possible. 
- -> **Warning** -> -> During repair, there is a very small time window during which the old -> path (if it exists) is moved out of the way and replaced with the new -> path. If repair is interrupted in between, then the system may be left -> in a broken state (e.g., if the path contains a critical system -> component like the GNU C Library). - -## Example - -```console -$ nix-store --verify-path /nix/store/dj7a81wsm1ijwwpkks3725661h3263p5-glibc-2.13 -path `/nix/store/dj7a81wsm1ijwwpkks3725661h3263p5-glibc-2.13' was modified! - expected hash `2db57715ae90b7e31ff1f2ecb8c12ec1cc43da920efcbe3b22763f36a1861588', - got `481c5aa5483ebc97c20457bb8bca24deea56550d3985cda0027f67fe54b808e4' - -$ nix-store --repair-path /nix/store/dj7a81wsm1ijwwpkks3725661h3263p5-glibc-2.13 -fetching path `/nix/store/d7a81wsm1ijwwpkks3725661h3263p5-glibc-2.13'... -… -``` - -# Operation `--dump` - -## Synopsis - -`nix-store` `--dump` *path* - -## Description - -The operation `--dump` produces a NAR (Nix ARchive) file containing the -contents of the file system tree rooted at *path*. The archive is -written to standard output. - -A NAR archive is like a TAR or Zip archive, but it contains only the -information that Nix considers important. For instance, timestamps are -elided because all files in the Nix store have their timestamp set to 1 -anyway. Likewise, all permissions are left out except for the execute -bit, because all files in the Nix store have 444 or 555 permission. - -Also, a NAR archive is *canonical*, meaning that “equal” paths always -produce the same NAR archive. For instance, directory entries are -always sorted so that the actual on-disk order doesn’t influence the -result. This means that the cryptographic hash of a NAR dump of a -path is usable as a fingerprint of the contents of the path. Indeed, -the hashes of store paths stored in Nix’s database (see `nix-store -q ---hash`) are SHA-256 hashes of the NAR dump of each store path. - -NAR archives support filenames of unlimited length and 64-bit file -sizes. They can contain regular files, directories, and symbolic links, -but not other types of files (such as device nodes). - -A Nix archive can be unpacked using `nix-store ---restore`. - -# Operation `--restore` - -## Synopsis - -`nix-store` `--restore` *path* - -## Description - -The operation `--restore` unpacks a NAR archive to *path*, which must -not already exist. The archive is read from standard input. - -# Operation `--export` - -## Synopsis - -`nix-store` `--export` *paths…* - -## Description - -The operation `--export` writes a serialisation of the specified store -paths to standard output in a format that can be imported into another -Nix store with `nix-store --import`. This is like `nix-store ---dump`, except that the NAR archive produced by that command doesn’t -contain the necessary meta-information to allow it to be imported into -another Nix store (namely, the set of references of the path). - -This command does not produce a *closure* of the specified paths, so if -a store path references other store paths that are missing in the target -Nix store, the import will fail. 
To copy a whole closure, do something -like: - -```console -$ nix-store --export $(nix-store -qR paths) > out -``` - -To import the whole closure again, run: - -```console -$ nix-store --import < out -``` - -# Operation `--import` - -## Synopsis - -`nix-store` `--import` - -## Description - -The operation `--import` reads a serialisation of a set of store paths -produced by `nix-store --export` from standard input and adds those -store paths to the Nix store. Paths that already exist in the Nix store -are ignored. If a path refers to another path that doesn’t exist in the -Nix store, the import fails. - -# Operation `--optimise` - -## Synopsis - -`nix-store` `--optimise` - -## Description - -The operation `--optimise` reduces Nix store disk space usage by finding -identical files in the store and hard-linking them to each other. It -typically reduces the size of the store by something like 25-35%. Only -regular files and symlinks are hard-linked in this manner. Files are -considered identical when they have the same NAR archive serialisation: -that is, regular files must have the same contents and permission -(executable or non-executable), and symlinks must have the same -contents. - -After completion, or when the command is interrupted, a report on the -achieved savings is printed on standard error. - -Use `-vv` or `-vvv` to get some progress indication. - -## Example - -```console -$ nix-store --optimise -hashing files in `/nix/store/qhqx7l2f1kmwihc9bnxs7rc159hsxnf3-gcc-4.1.1' -... -541838819 bytes (516.74 MiB) freed by hard-linking 54143 files; -there are 114486 files with equal contents out of 215894 files in total -``` - -# Operation `--read-log` - -## Synopsis - -`nix-store` {`--read-log` | `-l`} *paths…* - -## Description - -The operation `--read-log` prints the build log of the specified store -paths on standard output. The build log is whatever the builder of a -derivation wrote to standard output and standard error. If a store path -is not a derivation, the deriver of the store path is used. - -Build logs are kept in `/nix/var/log/nix/drvs`. However, there is no -guarantee that a build log is available for any particular store path. -For instance, if the path was downloaded as a pre-built binary through a -substitute, then the log is unavailable. - -## Example - -```console -$ nix-store -l $(which ktorrent) -building /nix/store/dhc73pvzpnzxhdgpimsd9sw39di66ph1-ktorrent-2.2.1 -unpacking sources -unpacking source archive /nix/store/p8n1jpqs27mgkjw07pb5269717nzf5f8-ktorrent-2.2.1.tar.gz -ktorrent-2.2.1/ -ktorrent-2.2.1/NEWS -... -``` - -# Operation `--dump-db` - -## Synopsis - -`nix-store` `--dump-db` [*paths…*] - -## Description - -The operation `--dump-db` writes a dump of the Nix database to standard -output. It can be loaded into an empty Nix store using `--load-db`. This -is useful for making backups and when migrating to different database -schemas. - -By default, `--dump-db` will dump the entire Nix database. When one or -more store paths is passed, only the subset of the Nix database for -those store paths is dumped. As with `--export`, the user is responsible -for passing all the store paths for a closure. See `--export` for an -example. - -# Operation `--load-db` - -## Synopsis - -`nix-store` `--load-db` - -## Description - -The operation `--load-db` reads a dump of the Nix database created by -`--dump-db` from standard input and loads it into the Nix database. 
-
-# Operation `--print-env`
-
-## Synopsis
-
-`nix-store` `--print-env` *drvpath*
-
-## Description
-
-The operation `--print-env` prints out the environment of a derivation
-in a format that can be evaluated by a shell. The command line arguments
-of the builder are placed in the variable `_args`.
-
-## Example
-
-```console
-$ nix-store --print-env $(nix-instantiate '<nixpkgs>' -A firefox)
-…
-export src; src='/nix/store/plpj7qrwcz94z2psh6fchsi7s8yihc7k-firefox-12.0.source.tar.bz2'
-export stdenv; stdenv='/nix/store/7c8asx3yfrg5dg1gzhzyq2236zfgibnn-stdenv'
-export system; system='x86_64-linux'
-export _args; _args='-e /nix/store/9krlzvny65gdc8s7kpb6lkx8cd02c25c-default-builder.sh'
-```
-
-# Operation `--generate-binary-cache-key`
-
-## Synopsis
-
-`nix-store` `--generate-binary-cache-key` *key-name* *secret-key-file* *public-key-file*
-
-## Description
-
-This command generates an [Ed25519 key pair](http://ed25519.cr.yp.to/)
-that can be used to create a signed binary cache. It takes three
-mandatory parameters:
-
-1. A key name, such as `cache.example.org-1`, that is used to look up
-   keys on the client when it verifies signatures. It can be anything,
-   but it’s suggested to use the host name of your cache (e.g.
-   `cache.example.org`) with a suffix denoting the number of the key
-   (to be incremented every time you need to revoke a key).
-
-2. The file name where the secret key is to be stored.
-
-3. The file name where the public key is to be stored.
+`nix-store` takes exactly one *operation* flag which indicates the subcommand to be performed. The following operations are available:
+
+- [`--realise`](./nix-store/realise.md)
+- [`--serve`](./nix-store/serve.md)
+- [`--gc`](./nix-store/gc.md)
+- [`--delete`](./nix-store/delete.md)
+- [`--query`](./nix-store/query.md)
+- [`--add`](./nix-store/add.md)
+- [`--add-fixed`](./nix-store/add-fixed.md)
+- [`--verify`](./nix-store/verify.md)
+- [`--verify-path`](./nix-store/verify-path.md)
+- [`--repair-path`](./nix-store/repair-path.md)
+- [`--dump`](./nix-store/dump.md)
+- [`--restore`](./nix-store/restore.md)
+- [`--export`](./nix-store/export.md)
+- [`--import`](./nix-store/import.md)
+- [`--optimise`](./nix-store/optimise.md)
+- [`--read-log`](./nix-store/read-log.md)
+- [`--dump-db`](./nix-store/dump-db.md)
+- [`--load-db`](./nix-store/load-db.md)
+- [`--print-env`](./nix-store/print-env.md)
+- [`--generate-binary-cache-key`](./nix-store/generate-binary-cache-key.md)
+
+These pages can be viewed offline:
+
+- `man nix-store-<operation>`.
+
+  Example: `man nix-store-realise`
+
+- `nix-store --help --<operation>`
+
+  Example: `nix-store --help --realise`
diff --git a/doc/manual/src/command-ref/nix-store/add-fixed.md b/doc/manual/src/command-ref/nix-store/add-fixed.md
new file mode 100644
index 000000000..d25db091c
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-store/add-fixed.md
@@ -0,0 +1,35 @@
+# Name
+
+`nix-store --add-fixed` - add paths to store using given hashing algorithm
+
+## Synopsis
+
+`nix-store` `--add-fixed` [`--recursive`] *algorithm* *paths…*
+
+## Description
+
+The operation `--add-fixed` adds the specified paths to the Nix store.
+Unlike `--add`, paths are registered using the specified hashing
+algorithm, resulting in the same output path as a fixed-output
+derivation. This can be used for sources that are not available from a
+public URL or that have broken since the download expression was written.
+
+This operation has the following options:
+
+ - `--recursive`\
+   Use recursive instead of flat hashing mode, used when adding
+   directories to the store, as in the sketch below.
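+
+   As a sketch (the directory name is illustrative and the resulting
+   store path is elided), adding a directory in recursive mode could
+   look like:
+
+   ```console
+   $ nix-store --add-fixed --recursive sha256 ./my-sources
+   /nix/store/…-my-sources
+   ```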
+ +{{#include ./opt-common.md}} + +{{#include ../opt-common.md}} + +{{#include ../env-common.md}} + +## Example + +```console +$ nix-store --add-fixed sha256 ./hello-2.10.tar.gz +/nix/store/3x7dwzq014bblazs7kq20p9hyzz0qh8g-hello-2.10.tar.gz +``` + diff --git a/doc/manual/src/command-ref/nix-store/add.md b/doc/manual/src/command-ref/nix-store/add.md new file mode 100644 index 000000000..87d504cd3 --- /dev/null +++ b/doc/manual/src/command-ref/nix-store/add.md @@ -0,0 +1,25 @@ +# Name + +`nix-store --add` - add paths to Nix store + +# Synopsis + +`nix-store` `--add` *paths…* + +# Description + +The operation `--add` adds the specified paths to the Nix store. It +prints the resulting paths in the Nix store on standard output. + +{{#include ./opt-common.md}} + +{{#include ../opt-common.md}} + +{{#include ../env-common.md}} + +# Example + +```console +$ nix-store --add ./foo.c +/nix/store/m7lrha58ph6rcnv109yzx1nk1cj7k7zf-foo.c +``` diff --git a/doc/manual/src/command-ref/nix-store/delete.md b/doc/manual/src/command-ref/nix-store/delete.md new file mode 100644 index 000000000..550c5ea29 --- /dev/null +++ b/doc/manual/src/command-ref/nix-store/delete.md @@ -0,0 +1,33 @@ +# Name + +`nix-store --delete` - delete store paths + +# Synopsis + +`nix-store` `--delete` [`--ignore-liveness`] *paths…* + +# Description + +The operation `--delete` deletes the store paths *paths* from the Nix +store, but only if it is safe to do so; that is, when the path is not +reachable from a root of the garbage collector. This means that you can +only delete paths that would also be deleted by `nix-store --gc`. Thus, +`--delete` is a more targeted version of `--gc`. + +With the option `--ignore-liveness`, reachability from the roots is +ignored. However, the path still won’t be deleted if there are other +paths in the store that refer to it (i.e., depend on it). + +{{#include ./opt-common.md}} + +{{#include ../opt-common.md}} + +{{#include ../env-common.md}} + +# Example + +```console +$ nix-store --delete /nix/store/zq0h41l75vlb4z45kzgjjmsjxvcv1qk7-mesa-6.4 +0 bytes freed (0.00 MiB) +error: cannot delete path `/nix/store/zq0h41l75vlb4z45kzgjjmsjxvcv1qk7-mesa-6.4' since it is still alive +``` diff --git a/doc/manual/src/command-ref/nix-store/dump-db.md b/doc/manual/src/command-ref/nix-store/dump-db.md new file mode 100644 index 000000000..b2c77ced0 --- /dev/null +++ b/doc/manual/src/command-ref/nix-store/dump-db.md @@ -0,0 +1,26 @@ +# Name + +`nix-store --dump-db` - export Nix database + +# Synopsis + +`nix-store` `--dump-db` [*paths…*] + +# Description + +The operation `--dump-db` writes a dump of the Nix database to standard +output. It can be loaded into an empty Nix store using `--load-db`. This +is useful for making backups and when migrating to different database +schemas. + +By default, `--dump-db` will dump the entire Nix database. When one or +more store paths is passed, only the subset of the Nix database for +those store paths is dumped. As with `--export`, the user is responsible +for passing all the store paths for a closure. See `--export` for an +example. 
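+
+As a sketch (the dump file name is illustrative), the database can be
+backed up and later restored into the database of an empty Nix store:
+
+```console
+$ nix-store --dump-db > nix-db.dump
+
+$ nix-store --load-db < nix-db.dump
+```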
+ +{{#include ./opt-common.md}} + +{{#include ../opt-common.md}} + +{{#include ../env-common.md}} diff --git a/doc/manual/src/command-ref/nix-store/dump.md b/doc/manual/src/command-ref/nix-store/dump.md new file mode 100644 index 000000000..62656d599 --- /dev/null +++ b/doc/manual/src/command-ref/nix-store/dump.md @@ -0,0 +1,40 @@ +# Name + +`nix-store --dump` - write a single path to a Nix Archive + +## Synopsis + +`nix-store` `--dump` *path* + +## Description + +The operation `--dump` produces a NAR (Nix ARchive) file containing the +contents of the file system tree rooted at *path*. The archive is +written to standard output. + +A NAR archive is like a TAR or Zip archive, but it contains only the +information that Nix considers important. For instance, timestamps are +elided because all files in the Nix store have their timestamp set to 0 +anyway. Likewise, all permissions are left out except for the execute +bit, because all files in the Nix store have 444 or 555 permission. + +Also, a NAR archive is *canonical*, meaning that “equal” paths always +produce the same NAR archive. For instance, directory entries are +always sorted so that the actual on-disk order doesn’t influence the +result. This means that the cryptographic hash of a NAR dump of a +path is usable as a fingerprint of the contents of the path. Indeed, +the hashes of store paths stored in Nix’s database (see `nix-store -q +--hash`) are SHA-256 hashes of the NAR dump of each store path. + +NAR archives support filenames of unlimited length and 64-bit file +sizes. They can contain regular files, directories, and symbolic links, +but not other types of files (such as device nodes). + +A Nix archive can be unpacked using `nix-store +--restore`. + +{{#include ./opt-common.md}} + +{{#include ../opt-common.md}} + +{{#include ../env-common.md}} diff --git a/doc/manual/src/command-ref/nix-store/export.md b/doc/manual/src/command-ref/nix-store/export.md new file mode 100644 index 000000000..aeea38636 --- /dev/null +++ b/doc/manual/src/command-ref/nix-store/export.md @@ -0,0 +1,41 @@ +# Name + +`nix-store --export` - export store paths to a Nix Archive + +## Synopsis + +`nix-store` `--export` *paths…* + +## Description + +The operation `--export` writes a serialisation of the specified store +paths to standard output in a format that can be imported into another +Nix store with `nix-store --import`. This is like `nix-store +--dump`, except that the NAR archive produced by that command doesn’t +contain the necessary meta-information to allow it to be imported into +another Nix store (namely, the set of references of the path). + +This command does not produce a *closure* of the specified paths, so if +a store path references other store paths that are missing in the target +Nix store, the import will fail. 
+ +{{#include ./opt-common.md}} + +{{#include ../opt-common.md}} + +{{#include ../env-common.md}} + +# Examples + +To copy a whole closure, do something +like: + +```console +$ nix-store --export $(nix-store -qR paths) > out +``` + +To import the whole closure again, run: + +```console +$ nix-store --import < out +``` diff --git a/doc/manual/src/command-ref/nix-store/gc.md b/doc/manual/src/command-ref/nix-store/gc.md new file mode 100644 index 000000000..7be0d559a --- /dev/null +++ b/doc/manual/src/command-ref/nix-store/gc.md @@ -0,0 +1,72 @@ +# Name + +`nix-store --gc` - run garbage collection + +# Synopsis + +`nix-store` `--gc` [`--print-roots` | `--print-live` | `--print-dead`] [`--max-freed` *bytes*] + +# Description + +Without additional flags, the operation `--gc` performs a garbage +collection on the Nix store. That is, all paths in the Nix store not +reachable via file system references from a set of “roots”, are deleted. + +The following suboperations may be specified: + + - `--print-roots`\ + This operation prints on standard output the set of roots used by + the garbage collector. + + - `--print-live`\ + This operation prints on standard output the set of “live” store + paths, which are all the store paths reachable from the roots. Live + paths should never be deleted, since that would break consistency — + it would become possible that applications are installed that + reference things that are no longer present in the store. + + - `--print-dead`\ + This operation prints out on standard output the set of “dead” store + paths, which is just the opposite of the set of live paths: any path + in the store that is not live (with respect to the roots) is dead. + +By default, all unreachable paths are deleted. The following options +control what gets deleted and in what order: + + - `--max-freed` *bytes*\ + Keep deleting paths until at least *bytes* bytes have been deleted, + then stop. The argument *bytes* can be followed by the + multiplicative suffix `K`, `M`, `G` or `T`, denoting KiB, MiB, GiB + or TiB units. + +The behaviour of the collector is also influenced by the +`keep-outputs` and `keep-derivations` settings in the Nix +configuration file. + +By default, the collector prints the total number of freed bytes when it +finishes (or when it is interrupted). With `--print-dead`, it prints the +number of bytes that would be freed. + +{{#include ./opt-common.md}} + +{{#include ../opt-common.md}} + +{{#include ../env-common.md}} + +# Examples + +To delete all unreachable paths, just do: + +```console +$ nix-store --gc +deleting `/nix/store/kq82idx6g0nyzsp2s14gfsc38npai7lf-cairo-1.0.4.tar.gz.drv' +... +8825586 bytes freed (8.42 MiB) +``` + +To delete at least 100 MiBs of unreachable paths: + +```console +$ nix-store --gc --max-freed $((100 * 1024 * 1024)) +``` + diff --git a/doc/manual/src/command-ref/nix-store/generate-binary-cache-key.md b/doc/manual/src/command-ref/nix-store/generate-binary-cache-key.md new file mode 100644 index 000000000..8085d877b --- /dev/null +++ b/doc/manual/src/command-ref/nix-store/generate-binary-cache-key.md @@ -0,0 +1,29 @@ +# Name + +`nix-store --generate-binary-cache-key` - generate key pair to use for a binary cache + +## Synopsis + +`nix-store` `--generate-binary-cache-key` *key-name* *secret-key-file* *public-key-file* + +## Description + +This command generates an [Ed25519 key pair](http://ed25519.cr.yp.to/) +that can be used to create a signed binary cache. It takes three +mandatory parameters: + +1. 
A key name, such as `cache.example.org-1`, that is used to look up + keys on the client when it verifies signatures. It can be anything, + but it’s suggested to use the host name of your cache (e.g. + `cache.example.org`) with a suffix denoting the number of the key + (to be incremented every time you need to revoke a key). + +2. The file name where the secret key is to be stored. + +3. The file name where the public key is to be stored. + +{{#include ./opt-common.md}} + +{{#include ../opt-common.md}} + +{{#include ../env-common.md}} diff --git a/doc/manual/src/command-ref/nix-store/import.md b/doc/manual/src/command-ref/nix-store/import.md new file mode 100644 index 000000000..2711316a7 --- /dev/null +++ b/doc/manual/src/command-ref/nix-store/import.md @@ -0,0 +1,21 @@ +# Name + +`nix-store --import` - import Nix Archive into the store + +# Synopsis + +`nix-store` `--import` + +# Description + +The operation `--import` reads a serialisation of a set of store paths +produced by `nix-store --export` from standard input and adds those +store paths to the Nix store. Paths that already exist in the Nix store +are ignored. If a path refers to another path that doesn’t exist in the +Nix store, the import fails. + +{{#include ./opt-common.md}} + +{{#include ../opt-common.md}} + +{{#include ../env-common.md}} diff --git a/doc/manual/src/command-ref/nix-store/load-db.md b/doc/manual/src/command-ref/nix-store/load-db.md new file mode 100644 index 000000000..e2f438ed6 --- /dev/null +++ b/doc/manual/src/command-ref/nix-store/load-db.md @@ -0,0 +1,18 @@ +# Name + +`nix-store --load-db` - import Nix database + +# Synopsis + +`nix-store` `--load-db` + +# Description + +The operation `--load-db` reads a dump of the Nix database created by +`--dump-db` from standard input and loads it into the Nix database. + +{{#include ./opt-common.md}} + +{{#include ../opt-common.md}} + +{{#include ../env-common.md}} diff --git a/doc/manual/src/command-ref/nix-store/opt-common.md b/doc/manual/src/command-ref/nix-store/opt-common.md new file mode 100644 index 000000000..bf6566555 --- /dev/null +++ b/doc/manual/src/command-ref/nix-store/opt-common.md @@ -0,0 +1,36 @@ +# Options + +The following options are allowed for all `nix-store` operations, but may not always have an effect. + +- [`--add-root`](#opt-add-root) *path* + + Causes the result of a realisation (`--realise` and + `--force-realise`) to be registered as a root of the garbage + collector. *path* will be created as a symlink to the resulting + store path. In addition, a uniquely named symlink to *path* will + be created in `/nix/var/nix/gcroots/auto/`. For instance, + + ```console + $ nix-store --add-root /home/eelco/bla/result -r ... + + $ ls -l /nix/var/nix/gcroots/auto + lrwxrwxrwx 1 ... 2005-03-13 21:10 dn54lcypm8f8... -> /home/eelco/bla/result + + $ ls -l /home/eelco/bla/result + lrwxrwxrwx 1 ... 2005-03-13 21:10 /home/eelco/bla/result -> /nix/store/1r11343n6qd4...-f-spot-0.0.10 + ``` + + Thus, when `/home/eelco/bla/result` is removed, the GC root in the + `auto` directory becomes a dangling symlink and will be ignored by + the collector. + + > **Warning** + > + > Note that it is not possible to move or rename GC roots, since + > the symlink in the `auto` directory will still point to the old + > location. + + If there are multiple results, then multiple symlinks will be + created by sequentially numbering symlinks beyond the first one + (e.g., `foo`, `foo-2`, `foo-3`, and so on). 
+ diff --git a/doc/manual/src/command-ref/nix-store/optimise.md b/doc/manual/src/command-ref/nix-store/optimise.md new file mode 100644 index 000000000..dc392aeb8 --- /dev/null +++ b/doc/manual/src/command-ref/nix-store/optimise.md @@ -0,0 +1,40 @@ +# Name + +`nix-store --optimise` - reduce disk space usage + +## Synopsis + +`nix-store` `--optimise` + +## Description + +The operation `--optimise` reduces Nix store disk space usage by finding +identical files in the store and hard-linking them to each other. It +typically reduces the size of the store by something like 25-35%. Only +regular files and symlinks are hard-linked in this manner. Files are +considered identical when they have the same NAR archive serialisation: +that is, regular files must have the same contents and permission +(executable or non-executable), and symlinks must have the same +contents. + +After completion, or when the command is interrupted, a report on the +achieved savings is printed on standard error. + +Use `-vv` or `-vvv` to get some progress indication. + +{{#include ./opt-common.md}} + +{{#include ../opt-common.md}} + +{{#include ../env-common.md}} + +## Example + +```console +$ nix-store --optimise +hashing files in `/nix/store/qhqx7l2f1kmwihc9bnxs7rc159hsxnf3-gcc-4.1.1' +... +541838819 bytes (516.74 MiB) freed by hard-linking 54143 files; +there are 114486 files with equal contents out of 215894 files in total +``` + diff --git a/doc/manual/src/command-ref/nix-store/print-env.md b/doc/manual/src/command-ref/nix-store/print-env.md new file mode 100644 index 000000000..bd2084ef6 --- /dev/null +++ b/doc/manual/src/command-ref/nix-store/print-env.md @@ -0,0 +1,31 @@ +# Name + +`nix-store --print-env` - print the build environment of a derivation + +## Synopsis + +`nix-store` `--print-env` *drvpath* + +## Description + +The operation `--print-env` prints out the environment of a derivation +in a format that can be evaluated by a shell. The command line arguments +of the builder are placed in the variable `_args`. + +{{#include ./opt-common.md}} + +{{#include ../opt-common.md}} + +{{#include ../env-common.md}} + +## Example + +```console +$ nix-store --print-env $(nix-instantiate '' -A firefox) +… +export src; src='/nix/store/plpj7qrwcz94z2psh6fchsi7s8yihc7k-firefox-12.0.source.tar.bz2' +export stdenv; stdenv='/nix/store/7c8asx3yfrg5dg1gzhzyq2236zfgibnn-stdenv' +export system; system='x86_64-linux' +export _args; _args='-e /nix/store/9krlzvny65gdc8s7kpb6lkx8cd02c25c-default-builder.sh' +``` + diff --git a/doc/manual/src/command-ref/nix-store/query.md b/doc/manual/src/command-ref/nix-store/query.md new file mode 100644 index 000000000..9f7dbd3e8 --- /dev/null +++ b/doc/manual/src/command-ref/nix-store/query.md @@ -0,0 +1,220 @@ +# Name + +`nix-store --query` - display information about store paths + +# Synopsis + +`nix-store` {`--query` | `-q`} + {`--outputs` | `--requisites` | `-R` | `--references` | + `--referrers` | `--referrers-closure` | `--deriver` | `-d` | + `--graph` | `--tree` | `--binding` *name* | `-b` *name* | `--hash` | + `--size` | `--roots`} + [`--use-output`] [`-u`] [`--force-realise`] [`-f`] + *paths…* + +# Description + +The operation `--query` displays various bits of information about the +store paths . The queries are described below. At most one query can be +specified. The default query is `--outputs`. + +The paths *paths* may also be symlinks from outside of the Nix store, to +the Nix store. In that case, the query is applied to the target of the +symlink. 
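+
+For example, `~/.nix-profile` is such a symlink, so a query like the
+following (a sketch; the output depends on what is installed) is applied
+to the user environment that the symlink points to:
+
+```console
+$ nix-store -q --references ~/.nix-profile
+...
+```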
+ +# Common query options + + - `--use-output`; `-u`\ + For each argument to the query that is a [store derivation], apply the + query to the output path of the derivation instead. + + - `--force-realise`; `-f`\ + Realise each argument to the query first (see [`nix-store --realise`](./realise.md)). + +[store derivation]: @docroot@/glossary.md#gloss-store-derivation + +# Queries + + - `--outputs`\ + Prints out the [output paths] of the store + derivations *paths*. These are the paths that will be produced when + the derivation is built. + + [output paths]: ../../glossary.md#gloss-output-path + + - `--requisites`; `-R`\ + Prints out the [closure] of the store path *paths*. + + [closure]: ../../glossary.md#gloss-closure + + This query has one option: + + - `--include-outputs` + Also include the existing output paths of [store derivation]s, + and their closures. + + This query can be used to implement various kinds of deployment. A + *source deployment* is obtained by distributing the closure of a + store derivation. A *binary deployment* is obtained by distributing + the closure of an output path. A *cache deployment* (combined + source/binary deployment, including binaries of build-time-only + dependencies) is obtained by distributing the closure of a store + derivation and specifying the option `--include-outputs`. + + - `--references`\ + Prints the set of [references] of the store paths + *paths*, that is, their immediate dependencies. (For *all* + dependencies, use `--requisites`.) + + [references]: ../../glossary.md#gloss-reference + + - `--referrers`\ + Prints the set of *referrers* of the store paths *paths*, that is, + the store paths currently existing in the Nix store that refer to + one of *paths*. Note that contrary to the references, the set of + referrers is not constant; it can change as store paths are added or + removed. + + - `--referrers-closure`\ + Prints the closure of the set of store paths *paths* under the + referrers relation; that is, all store paths that directly or + indirectly refer to one of *paths*. These are all the path currently + in the Nix store that are dependent on *paths*. + + - `--deriver`; `-d`\ + Prints the [deriver] of the store paths *paths*. If + the path has no deriver (e.g., if it is a source file), or if the + deriver is not known (e.g., in the case of a binary-only + deployment), the string `unknown-deriver` is printed. + + [deriver]: ../../glossary.md#gloss-deriver + + - `--graph`\ + Prints the references graph of the store paths *paths* in the format + of the `dot` tool of AT\&T's [Graphviz + package](http://www.graphviz.org/). This can be used to visualise + dependency graphs. To obtain a build-time dependency graph, apply + this to a store derivation. To obtain a runtime dependency graph, + apply it to an output path. + + - `--tree`\ + Prints the references graph of the store paths *paths* as a nested + ASCII tree. References are ordered by descending closure size; this + tends to flatten the tree, making it more readable. The query only + recurses into a store path when it is first encountered; this + prevents a blowup of the tree representation of the graph. + + - `--graphml`\ + Prints the references graph of the store paths *paths* in the + [GraphML](http://graphml.graphdrawing.org/) file format. This can be + used to visualise dependency graphs. To obtain a build-time + dependency graph, apply this to a [store derivation]. To obtain a + runtime dependency graph, apply it to an output path. 
+ + - `--binding` *name*; `-b` *name*\ + Prints the value of the attribute *name* (i.e., environment + variable) of the [store derivation]s *paths*. It is an error for a + derivation to not have the specified attribute. + + - `--hash`\ + Prints the SHA-256 hash of the contents of the store paths *paths* + (that is, the hash of the output of `nix-store --dump` on the given + paths). Since the hash is stored in the Nix database, this is a fast + operation. + + - `--size`\ + Prints the size in bytes of the contents of the store paths *paths* + — to be precise, the size of the output of `nix-store --dump` on + the given paths. Note that the actual disk space required by the + store paths may be higher, especially on filesystems with large + cluster sizes. + + - `--roots`\ + Prints the garbage collector roots that point, directly or + indirectly, at the store paths *paths*. + +{{#include ./opt-common.md}} + +{{#include ../opt-common.md}} + +{{#include ../env-common.md}} + +# Examples + +Print the closure (runtime dependencies) of the `svn` program in the +current user environment: + +```console +$ nix-store -qR $(which svn) +/nix/store/5mbglq5ldqld8sj57273aljwkfvj22mc-subversion-1.1.4 +/nix/store/9lz9yc6zgmc0vlqmn2ipcpkjlmbi51vv-glibc-2.3.4 +... +``` + +Print the build-time dependencies of `svn`: + +```console +$ nix-store -qR $(nix-store -qd $(which svn)) +/nix/store/02iizgn86m42q905rddvg4ja975bk2i4-grep-2.5.1.tar.bz2.drv +/nix/store/07a2bzxmzwz5hp58nf03pahrv2ygwgs3-gcc-wrapper.sh +/nix/store/0ma7c9wsbaxahwwl04gbw3fcd806ski4-glibc-2.3.4.drv +... lots of other paths ... +``` + +The difference with the previous example is that we ask the closure of +the derivation (`-qd`), not the closure of the output path that contains +`svn`. + +Show the build-time dependencies as a tree: + +```console +$ nix-store -q --tree $(nix-store -qd $(which svn)) +/nix/store/7i5082kfb6yjbqdbiwdhhza0am2xvh6c-subversion-1.1.4.drv ++---/nix/store/d8afh10z72n8l1cr5w42366abiblgn54-builder.sh ++---/nix/store/fmzxmpjx2lh849ph0l36snfj9zdibw67-bash-3.0.drv +| +---/nix/store/570hmhmx3v57605cqg9yfvvyh0nnb8k8-bash +| +---/nix/store/p3srsbd8dx44v2pg6nbnszab5mcwx03v-builder.sh +... +``` + +Show all paths that depend on the same OpenSSL library as `svn`: + +```console +$ nix-store -q --referrers $(nix-store -q --binding openssl $(nix-store -qd $(which svn))) +/nix/store/23ny9l9wixx21632y2wi4p585qhva1q8-sylpheed-1.0.0 +/nix/store/5mbglq5ldqld8sj57273aljwkfvj22mc-subversion-1.1.4 +/nix/store/dpmvp969yhdqs7lm2r1a3gng7pyq6vy4-subversion-1.1.3 +/nix/store/l51240xqsgg8a7yrbqdx1rfzyv6l26fx-lynx-2.8.5 +``` + +Show all paths that directly or indirectly depend on the Glibc (C +library) used by `svn`: + +```console +$ nix-store -q --referrers-closure $(ldd $(which svn) | grep /libc.so | awk '{print $3}') +/nix/store/034a6h4vpz9kds5r6kzb9lhh81mscw43-libgnomeprintui-2.8.2 +/nix/store/15l3yi0d45prm7a82pcrknxdh6nzmxza-gawk-3.1.4 +... +``` + +Note that `ldd` is a command that prints out the dynamic libraries used +by an ELF executable. 
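+
+Print the NAR hash and size that Nix has recorded for the store path
+containing `svn` (a sketch; the printed values are illustrative):
+
+```console
+$ nix-store -q --hash $(which svn)
+sha256:...
+
+$ nix-store -q --size $(which svn)
+...
+```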
+ +Make a picture of the runtime dependency graph of the current user +environment: + +```console +$ nix-store -q --graph ~/.nix-profile | dot -Tps > graph.ps +$ gv graph.ps +``` + +Show every garbage collector root that points to a store path that +depends on `svn`: + +```console +$ nix-store -q --roots $(which svn) +/nix/var/nix/profiles/default-81-link +/nix/var/nix/profiles/default-82-link +/home/eelco/.local/state/nix/profiles/profile-97-link +``` + diff --git a/doc/manual/src/command-ref/nix-store/read-log.md b/doc/manual/src/command-ref/nix-store/read-log.md new file mode 100644 index 000000000..4a88e9382 --- /dev/null +++ b/doc/manual/src/command-ref/nix-store/read-log.md @@ -0,0 +1,38 @@ +# Name + +`nix-store --read-log` - print build log + +# Synopsis + +`nix-store` {`--read-log` | `-l`} *paths…* + +# Description + +The operation `--read-log` prints the build log of the specified store +paths on standard output. The build log is whatever the builder of a +derivation wrote to standard output and standard error. If a store path +is not a derivation, the deriver of the store path is used. + +Build logs are kept in `/nix/var/log/nix/drvs`. However, there is no +guarantee that a build log is available for any particular store path. +For instance, if the path was downloaded as a pre-built binary through a +substitute, then the log is unavailable. + +{{#include ./opt-common.md}} + +{{#include ../opt-common.md}} + +{{#include ../env-common.md}} + +# Example + +```console +$ nix-store -l $(which ktorrent) +building /nix/store/dhc73pvzpnzxhdgpimsd9sw39di66ph1-ktorrent-2.2.1 +unpacking sources +unpacking source archive /nix/store/p8n1jpqs27mgkjw07pb5269717nzf5f8-ktorrent-2.2.1.tar.gz +ktorrent-2.2.1/ +ktorrent-2.2.1/NEWS +... +``` + diff --git a/doc/manual/src/command-ref/nix-store/realise.md b/doc/manual/src/command-ref/nix-store/realise.md new file mode 100644 index 000000000..f61a20100 --- /dev/null +++ b/doc/manual/src/command-ref/nix-store/realise.md @@ -0,0 +1,118 @@ +# Name + +`nix-store --realise` - realise specified store paths + +# Synopsis + +`nix-store` {`--realise` | `-r`} *paths…* [`--dry-run`] + +# Description + +The operation `--realise` essentially “builds” the specified store +paths. Realisation is a somewhat overloaded term: + + - If the store path is a *derivation*, realisation ensures that the + output paths of the derivation are [valid] (i.e., + the output path and its closure exist in the file system). This + can be done in several ways. First, it is possible that the + outputs are already valid, in which case we are done + immediately. Otherwise, there may be [substitutes] + that produce the outputs (e.g., by downloading them). Finally, the + outputs can be produced by running the build task described + by the derivation. + + - If the store path is not a derivation, realisation ensures that the + specified path is valid (i.e., it and its closure exist in the file + system). If the path is already valid, we are done immediately. + Otherwise, the path and any missing paths in its closure may be + produced through substitutes. If there are no (successful) + substitutes, realisation fails. + +[valid]: @docroot@/glossary.md#gloss-validity +[substitutes]: @docroot@/glossary.md#gloss-substitute + +The output path of each derivation is printed on standard output. (For +non-derivations argument, the argument itself is printed.) 
+
+The following flags are available:
+
+ - `--dry-run`\
+   Print on standard error a description of what packages would be
+   built or downloaded, without actually performing the operation.
+
+ - `--ignore-unknown`\
+   If a non-derivation path does not have a substitute, then silently
+   ignore it.
+
+ - `--check`\
+   This option allows you to check whether a derivation is
+   deterministic. It rebuilds the specified derivation and checks
+   whether the result is bitwise-identical with the existing outputs,
+   printing an error if that’s not the case. The outputs of the
+   specified derivation must already exist. When used with `-K`, if an
+   output path is not identical to the corresponding output from the
+   previous build, the new output path is left in
+   `/nix/store/name.check.`
+
+Special exit codes:
+
+ - `100`\
+   Generic build failure, the builder process returned with a non-zero
+   exit code.
+
+ - `101`\
+   Build timeout, the build was aborted because it did not complete
+   within the specified `timeout`.
+
+ - `102`\
+   Hash mismatch, the build output was rejected because it does not
+   match the [`outputHash` attribute of the
+   derivation](@docroot@/language/advanced-attributes.md).
+
+ - `104`\
+   Not deterministic, the build succeeded in check mode but the
+   resulting output is not binary reproducible.
+
+With the `--keep-going` flag it's possible for multiple failures to
+occur. In this case the 1xx status codes are combined using binary OR:
+
+    1100100
+       ^^^^
+       |||`- timeout
+       ||`-- output hash mismatch
+       |`--- build failure
+       `---- not deterministic
+
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ../env-common.md}}
+
+# Examples
+
+This operation is typically used to build [store derivation]s produced by
+[`nix-instantiate`](@docroot@/command-ref/nix-instantiate.md):
+
+[store derivation]: @docroot@/glossary.md#gloss-store-derivation
+
+```console
+$ nix-store -r $(nix-instantiate ./test.nix)
+/nix/store/31axcgrlbfsxzmfff1gyj1bf62hvkby2-aterm-2.3.1
+```
+
+This is essentially what [`nix-build`](@docroot@/command-ref/nix-build.md) does.
+
+To test whether a previously-built derivation is deterministic:
+
+```console
+$ nix-build '<nixpkgs>' -A hello --check -K
+```
+
+Use [`nix-store --read-log`](./read-log.md) to show the stderr and stdout of a build:
+
+```console
+$ nix-store --read-log $(nix-instantiate ./test.nix)
+```
diff --git a/doc/manual/src/command-ref/nix-store/repair-path.md b/doc/manual/src/command-ref/nix-store/repair-path.md
new file mode 100644
index 000000000..9c3d9f7cd
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-store/repair-path.md
@@ -0,0 +1,35 @@
+# Name
+
+`nix-store --repair-path` - re-download path from substituter
+
+# Synopsis
+
+`nix-store` `--repair-path` *paths…*
+
+# Description
+
+The operation `--repair-path` attempts to “repair” the specified paths
+by redownloading them using the available substituters. If no
+substitutes are available, then repair is not possible.
+
+> **Warning**
+>
+> During repair, there is a very small time window during which the old
+> path (if it exists) is moved out of the way and replaced with the new
+> path. If repair is interrupted in between, then the system may be left
+> in a broken state (e.g., if the path contains a critical system
+> component like the GNU C Library).
+
+# Example
+
+```console
+$ nix-store --verify-path /nix/store/dj7a81wsm1ijwwpkks3725661h3263p5-glibc-2.13
+path `/nix/store/dj7a81wsm1ijwwpkks3725661h3263p5-glibc-2.13' was modified!
+  expected hash `2db57715ae90b7e31ff1f2ecb8c12ec1cc43da920efcbe3b22763f36a1861588',
+  got `481c5aa5483ebc97c20457bb8bca24deea56550d3985cda0027f67fe54b808e4'
+
+$ nix-store --repair-path /nix/store/dj7a81wsm1ijwwpkks3725661h3263p5-glibc-2.13
+fetching path `/nix/store/dj7a81wsm1ijwwpkks3725661h3263p5-glibc-2.13'...
+…
+```
+
diff --git a/doc/manual/src/command-ref/nix-store/restore.md b/doc/manual/src/command-ref/nix-store/restore.md
new file mode 100644
index 000000000..fcba43df4
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-store/restore.md
@@ -0,0 +1,18 @@
+# Name
+
+`nix-store --restore` - extract a Nix archive
+
+## Synopsis
+
+`nix-store` `--restore` *path*
+
+## Description
+
+The operation `--restore` unpacks a NAR archive to *path*, which must
+not already exist. The archive is read from standard input.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ../env-common.md}}
diff --git a/doc/manual/src/command-ref/nix-store/serve.md b/doc/manual/src/command-ref/nix-store/serve.md
new file mode 100644
index 000000000..0f90f65ae
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-store/serve.md
@@ -0,0 +1,38 @@
+# Name
+
+`nix-store --serve` - serve local Nix store over SSH
+
+# Synopsis
+
+`nix-store` `--serve` [`--write`]
+
+# Description
+
+The operation `--serve` provides access to the Nix store over stdin and
+stdout, and is intended to be used as a means of providing Nix store
+access to a restricted SSH user.
+
+The following flags are available:
+
+ - `--write`\
+   Allow the connected client to request the realisation of
+   derivations. In effect, this can be used to make the host act as a
+   remote builder.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ../env-common.md}}
+
+# Examples
+
+To turn a host into a build server, the `authorized_keys` file can be
+used to provide build access to a given SSH public key:
+
+```console
+$ cat <<EOF >>/root/.ssh/authorized_keys
+command="nice -n20 nix-store --serve --write" ssh-rsa AAAAB3NzaC1yc2EAAAA...
+EOF
+```
+
diff --git a/doc/manual/src/command-ref/nix-store/verify-path.md b/doc/manual/src/command-ref/nix-store/verify-path.md
new file mode 100644
index 000000000..59ffe92a3
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-store/verify-path.md
@@ -0,0 +1,29 @@
+# Name
+
+`nix-store --verify-path` - check path contents against Nix database
+
+## Synopsis
+
+`nix-store` `--verify-path` *paths…*
+
+## Description
+
+The operation `--verify-path` compares the contents of the given store
+paths to their cryptographic hashes stored in Nix’s database. For every
+changed path, it prints a warning message. The exit status is 0 if no
+path has changed, and 1 otherwise.
+
+{{#include ./opt-common.md}}
+
+{{#include ../opt-common.md}}
+
+{{#include ../env-common.md}}
+
+## Example
+
+To verify the integrity of the `svn` command and all its dependencies:
+
+```console
+$ nix-store --verify-path $(nix-store -qR $(which svn))
+```
+
diff --git a/doc/manual/src/command-ref/nix-store/verify.md b/doc/manual/src/command-ref/nix-store/verify.md
new file mode 100644
index 000000000..2695b3361
--- /dev/null
+++ b/doc/manual/src/command-ref/nix-store/verify.md
@@ -0,0 +1,36 @@
+# Name
+
+`nix-store --verify` - check Nix database for consistency
+
+# Synopsis
+
+`nix-store` `--verify` [`--check-contents`] [`--repair`]
+
+# Description
+
+The operation `--verify` verifies the internal consistency of the Nix
+database, and the consistency between the Nix database and the Nix
+store.
Any inconsistencies encountered are automatically repaired. +Inconsistencies are generally the result of the Nix store or database +being modified by non-Nix tools, or of bugs in Nix itself. + +This operation has the following options: + + - `--check-contents`\ + Checks that the contents of every valid store path has not been + altered by computing a SHA-256 hash of the contents and comparing it + with the hash stored in the Nix database at build time. Paths that + have been modified are printed out. For large stores, + `--check-contents` is obviously quite slow. + + - `--repair`\ + If any valid path is missing from the store, or (if + `--check-contents` is given) the contents of a valid path has been + modified, then try to repair the path by redownloading it. See + `nix-store --repair-path` for details. + +{{#include ./opt-common.md}} + +{{#include ../opt-common.md}} + +{{#include ../env-common.md}} diff --git a/doc/manual/src/command-ref/opt-common.md b/doc/manual/src/command-ref/opt-common.md index e612c416f..7a012250d 100644 --- a/doc/manual/src/command-ref/opt-common.md +++ b/doc/manual/src/command-ref/opt-common.md @@ -2,13 +2,13 @@ Most Nix commands accept the following command-line options: - - [`--help`]{#opt-help}\ + - [`--help`](#opt-help)\ Prints out a summary of the command syntax and exits. - - [`--version`]{#opt-version}\ + - [`--version`](#opt-version)\ Prints out the Nix version number on standard output and exits. - - [`--verbose`]{#opt-verbose} / `-v`\ + - [`--verbose`](#opt-verbose) / `-v`\ Increases the level of verbosity of diagnostic messages printed on standard error. For each Nix operation, the information printed on standard output is well-defined; any diagnostic information is @@ -37,14 +37,14 @@ Most Nix commands accept the following command-line options: - 5\ “Vomit”: print vast amounts of debug information. - - [`--quiet`]{#opt-quiet}\ + - [`--quiet`](#opt-quiet)\ Decreases the level of verbosity of diagnostic messages printed on standard error. This is the inverse option to `-v` / `--verbose`. This option may be specified repeatedly. See the previous verbosity levels list. - - [`--log-format`]{#opt-log-format} *format*\ + - [`--log-format`](#opt-log-format) *format*\ This option can be used to change the output of the log format, with *format* being one of: @@ -66,14 +66,14 @@ Most Nix commands accept the following command-line options: - bar-with-logs\ Display the raw logs, with the progress bar at the bottom. - - [`--no-build-output`]{#opt-no-build-output} / `-Q`\ + - [`--no-build-output`](#opt-no-build-output) / `-Q`\ By default, output written by builders to standard output and standard error is echoed to the Nix command's standard error. This option suppresses this behaviour. Note that the builder's standard output and error are always written to a log file in `prefix/nix/var/log/nix`. - - [`--max-jobs`]{#opt-max-jobs} / `-j` *number*\ + - [`--max-jobs`](#opt-max-jobs) / `-j` *number*\ Sets the maximum number of build jobs that Nix will perform in parallel to the specified number. Specify `auto` to use the number of CPUs in the system. The default is specified by the `max-jobs` @@ -83,7 +83,7 @@ Most Nix commands accept the following command-line options: Setting it to `0` disallows building on the local machine, which is useful when you want builds to happen only on remote builders. - - [`--cores`]{#opt-cores}\ + - [`--cores`](#opt-cores)\ Sets the value of the `NIX_BUILD_CORES` environment variable in the invocation of builders. 
Builders can use this variable at their discretion to control the maximum amount of parallelism. For @@ -94,18 +94,18 @@ Most Nix commands accept the following command-line options: means that the builder should use all available CPU cores in the system. - - [`--max-silent-time`]{#opt-max-silent-time}\ + - [`--max-silent-time`](#opt-max-silent-time)\ Sets the maximum number of seconds that a builder can go without producing any data on standard output or standard error. The default is specified by the `max-silent-time` configuration setting. `0` means no time-out. - - [`--timeout`]{#opt-timeout}\ + - [`--timeout`](#opt-timeout)\ Sets the maximum number of seconds that a builder can run. The default is specified by the `timeout` configuration setting. `0` means no timeout. - - [`--keep-going`]{#opt-keep-going} / `-k`\ + - [`--keep-going`](#opt-keep-going) / `-k`\ Keep going in case of failed builds, to the greatest extent possible. That is, if building an input of some derivation fails, Nix will still build the other inputs, but not the derivation @@ -113,13 +113,13 @@ Most Nix commands accept the following command-line options: for builds of substitutes), possibly killing builds in progress (in case of parallel or distributed builds). - - [`--keep-failed`]{#opt-keep-failed} / `-K`\ + - [`--keep-failed`](#opt-keep-failed) / `-K`\ Specifies that in case of a build failure, the temporary directory (usually in `/tmp`) in which the build takes place should not be deleted. The path of the build directory is printed as an informational message. - - [`--fallback`]{#opt-fallback}\ + - [`--fallback`](#opt-fallback)\ Whenever Nix attempts to build a derivation for which substitutes are known for each output path, but realising the output paths through the substitutes fails, fall back on building the derivation. @@ -134,18 +134,18 @@ Most Nix commands accept the following command-line options: failure in obtaining the substitutes to lead to a full build from source (with the related consumption of resources). - - [`--readonly-mode`]{#opt-readonly-mode}\ + - [`--readonly-mode`](#opt-readonly-mode)\ When this option is used, no attempt is made to open the Nix database. Most Nix operations do need database access, so those operations will fail. - - [`--arg`]{#opt-arg} *name* *value*\ + - [`--arg`](#opt-arg) *name* *value*\ This option is accepted by `nix-env`, `nix-instantiate`, `nix-shell` and `nix-build`. When evaluating Nix expressions, the expression evaluator will automatically try to call functions that it encounters. It can automatically call functions for which every argument has a [default - value](../language/constructs.md#functions) (e.g., + value](@docroot@/language/constructs.md#functions) (e.g., `{ argName ? defaultValue }: ...`). With `--arg`, you can also call functions that have arguments without a default value (or override a default value). That is, if the evaluator encounters a @@ -164,26 +164,26 @@ Most Nix commands accept the following command-line options: So if you call this Nix expression (e.g., when you do `nix-env -iA pkgname`), the function will be called automatically using the - value [`builtins.currentSystem`](../language/builtins.md) for + value [`builtins.currentSystem`](@docroot@/language/builtins.md) for the `system` argument. You can override this using `--arg`, e.g., `nix-env -iA pkgname --arg system \"i686-freebsd\"`. (Note that since the argument is a Nix string literal, you have to escape the quotes.) 
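
    To illustrate (using a trivial throwaway expression rather than anything from Nixpkgs), `--arg` supplies the value as a Nix expression:

    ```console
    $ nix-instantiate --eval --expr '{ x ? 1 }: x + 1' --arg x 41
    42
    ```
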
- - [`--argstr`]{#opt-argstr} *name* *value*\ + - [`--argstr`](#opt-argstr) *name* *value*\ This option is like `--arg`, only the value is not a Nix expression but a string. So instead of `--arg system \"i686-linux\"` (the outer quotes are to keep the shell happy) you can say `--argstr system i686-linux`. - - [`--attr`]{#opt-attr} / `-A` *attrPath*\ + - [`--attr`](#opt-attr) / `-A` *attrPath*\ Select an attribute from the top-level Nix expression being evaluated. (`nix-env`, `nix-instantiate`, `nix-build` and `nix-shell` only.) The *attribute path* *attrPath* is a sequence of attribute names separated by dots. For instance, given a top-level Nix expression *e*, the attribute path `xorg.xorgserver` would cause the expression `e.xorg.xorgserver` to be used. See - [`nix-env --install`](nix-env.md#operation---install) for some + [`nix-env --install`](@docroot@/command-ref/nix-env/install.md) for some concrete examples. In addition to attribute names, you can also specify array indices. @@ -191,7 +191,7 @@ Most Nix commands accept the following command-line options: attribute of the fourth element of the array in the `foo` attribute of the top-level expression. - - [`--expr`]{#opt-expr} / `-E`\ + - [`--expr`](#opt-expr) / `-E`\ Interpret the command line arguments as a list of Nix expressions to be parsed and evaluated, rather than as a list of file names of Nix expressions. (`nix-instantiate`, `nix-build` and `nix-shell` only.) @@ -202,17 +202,16 @@ Most Nix commands accept the following command-line options: use, give your expression to the `nix-shell -p` convenience flag instead. - - [`-I`]{#opt-I} *path*\ - Add a path to the Nix expression search path. This option may be - given multiple times. See the `NIX_PATH` environment variable for - information on the semantics of the Nix search path. Paths added - through `-I` take precedence over `NIX_PATH`. + - [`-I`](#opt-I) *path*\ + Add an entry to the [Nix expression search path](@docroot@/command-ref/conf-file.md#conf-nix-path). + This option may be given multiple times. + Paths added through `-I` take precedence over [`NIX_PATH`](@docroot@/command-ref/env-common.md#env-NIX_PATH). - - [`--option`]{#opt-option} *name* *value*\ + - [`--option`](#opt-option) *name* *value*\ Set the Nix configuration option *name* to *value*. This overrides settings in the Nix configuration file (see nix.conf5). - - [`--repair`]{#opt-repair}\ + - [`--repair`](#opt-repair)\ Fix corrupted or missing store paths by redownloading or rebuilding them. Note that this is slow because it requires computing a cryptographic hash of the contents of every path in the closure of diff --git a/doc/manual/src/contributing/cli-guideline.md b/doc/manual/src/contributing/cli-guideline.md index 01a1b1e73..e53d2d178 100644 --- a/doc/manual/src/contributing/cli-guideline.md +++ b/doc/manual/src/contributing/cli-guideline.md @@ -389,6 +389,88 @@ colors, no emojis and using ASCII instead of Unicode symbols). The same should happen when TTY is not detected on STDERR. We should not display progress / status section, but only print warnings and errors. +## Returning future proof JSON + +The schema of JSON output should allow for backwards compatible extension. This section explains how to achieve this. + +Two definitions are helpful here, because while JSON only defines one "key-value" +object type, we use it to cover two use cases: + + - **dictionary**: a map from names to value that all have the same type. In + C++ this would be a `std::map` with string keys. 
+ - **record**: a fixed set of attributes each with their own type. In C++, this + would be represented by a `struct`. + +It is best not to mix these use cases, as that may lead to incompatibilities when the schema changes. For example, adding a record field to a dictionary breaks consumers that assume all JSON object fields to have the same meaning and type. + +This leads to the following guidelines: + + - The top-level (root) value must be a record. + + Otherwise, one can not change the structure of a command's output. + + - The value of a dictionary item must be a record. + + Otherwise, the item type can not be extended. + + - List items should be records. + + Otherwise, one can not change the structure of the list items. + + If the order of the items does not matter, and each item has a unique key that is a string, consider representing the list as a dictionary instead. If the order of the items needs to be preserved, return a list of records. + + - Streaming JSON should return records. + + An example of a streaming JSON format is [JSON lines](https://jsonlines.org/), where each line represents a JSON value. These JSON values can be considered top-level values or list items, and they must be records. + +### Examples + + +This is bad, because all keys must be assumed to be store implementations: + +```json +{ + "local": { ... }, + "remote": { ... }, + "http": { ... } +} +``` + +This is good, because the it is extensible at the root, and is somewhat self-documenting: + +```json +{ + "storeTypes": { "local": { ... }, ... }, + "pluginSupport": true +} +``` + +While the dictionary of store types seems like a very complete response at first, a use case may arise that warrants returning additional information. +For example, the presence of plugin support may be crucial information for a client to proceed when their desired store type is missing. + + + +The following representation is bad because it is not extensible: + +```json +{ "outputs": [ "out" "bin" ] } +``` + +However, simply converting everything to records is not enough, because the order of outputs must be preserved: + +```json +{ "outputs": { "bin": {}, "out": {} } } +``` + +The first item is the default output. Deriving this information from the outputs ordering is not great, but this is how Nix currently happens to work. +While it is possible for a JSON parser to preserve the order of fields, we can not rely on this capability to be present in all JSON libraries. + +This representation is extensible and preserves the ordering: + +```json +{ "outputs": [ { "outputName": "out" }, { "outputName": "bin" } ] } +``` + ## Dialog with the user CLIs don't always make it clear when an action has taken place. For every diff --git a/doc/manual/src/contributing/experimental-features.md b/doc/manual/src/contributing/experimental-features.md new file mode 100644 index 000000000..ad5cffa91 --- /dev/null +++ b/doc/manual/src/contributing/experimental-features.md @@ -0,0 +1,95 @@ +This section describes the notion of *experimental features*, and how it fits into the big picture of the development of Nix. + +# What are experimental features? + +Experimental features are considered unstable, which means that they can be changed or removed at any time. +Users must explicitly enable them by toggling the associated [experimental feature flags](@docroot@/command-ref/conf-file.md#conf-experimental-features). +This allows accessing unstable functionality without unwittingly relying on it. 
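+
+As a concrete sketch (the `nix-command` and `flakes` features serve purely as examples here), an experimental feature can be enabled for a single invocation on the command line:
+
+```console
+$ nix --extra-experimental-features 'nix-command flakes' flake metadata nixpkgs
+```
+
+The same effect can be made persistent by adding `extra-experimental-features = nix-command flakes` to [`nix.conf`](@docroot@/command-ref/conf-file.md).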
+ +Experimental feature flags were first introduced in [Nix 2.4](@docroot@/release-notes/rl-2.4.md). +Before that, Nix did have experimental features, but they were not guarded by flags and were merely documented as unstable. +This was a source of confusion and controversy. + +# When should a new feature be marked experimental? + +A change in the Nix codebase should be guarded by an experimental feature flag if it is considered likely to be reverted or adapted in a backwards-incompatible manner after gathering more experience with it in practice. + +Examples: + +- Changes to the Nix language, such as new built-ins, syntactic or semantic changes, etc. +- Changes to the command-line interface + +# Lifecycle of an experimental feature + +Experimental features have to be treated on a case-by-case basis. +However, the standard workflow for an experimental feature is as follows: + +- A new feature is implemented in a *pull request* + - It is guarded by an experimental feature flag that is disabled by default +- The pull request is merged, the *experimental* feature ends up in a release + - Using the feature requires explicitly enabling it, signifying awareness of the potential risks + - Being experimental, the feature can still be changed arbitrarily +- The feature can be *removed* + - The associated experimental feature flag is also removed +- The feature can be declared *stable* + - The associated experimental feature flag is removed + - There should be enough evidence of users having tried the feature, such as feedback, fixed bugs, demonstrations of how it is put to use + - Maintainers must feel confident that: + - The feature is designed and implemented sensibly, that it is fit for purpose + - Potential interactions are well-understood + - Stabilising the feature will not incur an outsized maintenance burden in the future + +The following diagram illustrates the process: + +``` + .------. + | idea | + '------' + | + discussion, design, implementation + | + | .-------. + | | | + v v | + .--------------. review + | pull request | | + '--------------' | + | ^ | | + | | '-------' + .---' '----. + | | + merge user feedback, + | (breaking) changes + | | + '---. .----' + | | + v | + +--------------+ + .---| experimental |----. + | +--------------+ | + | | +decision to stabilise decision against + | keeping the feature + | | + v v + +--------+ +---------+ + | stable | | removed | + +--------+ +---------+ +``` + +# Relation to the RFC process + +Experimental features and [RFCs](https://github.com/NixOS/rfcs/) both allow approaching substantial changes while minimizing the risk. +However they serve different purposes: + +- An experimental feature enables developers to iterate on and deliver a new idea without committing to it or requiring a costly long-running fork. + It is primarily an issue of *implementation*, targeting Nix developers and early testers. +- The goal of an RFC is to make explicit all the implications of a change: + Explain why it is wanted, which new use-cases it enables, which interface changes it requires, etc. + It is primarily an issue of *design* and *communication*, targeting the broader community. + +This means that experimental features and RFCs are orthogonal mechanisms, and can be used independently or together as needed. 
+ +# Currently available experimental features + +{{#include ./experimental-feature-descriptions.md}} diff --git a/doc/manual/src/contributing/hacking.md b/doc/manual/src/contributing/hacking.md index 3869c37a4..ca69f076a 100644 --- a/doc/manual/src/contributing/hacking.md +++ b/doc/manual/src/contributing/hacking.md @@ -389,3 +389,35 @@ If a broken link occurs in a snippet that was inserted into multiple generated f If the `@docroot@` literal appears in an error message from the `mdbook-linkcheck` tool, the `@docroot@` replacement needs to be applied to the generated source file that mentions it. See existing `@docroot@` logic in the [Makefile]. Regular markdown files used for the manual have a base path of their own and they can use relative paths instead of `@docroot@`. + +## API documentation + +Doxygen API documentation is [available +online](https://hydra.nixos.org/job/nix/master/internal-api-docs/latest/download-by-type/doc/internal-api-docs). You +can also build and view it yourself: + +```console +# nix build .#hydraJobs.internal-api-docs +# xdg-open ./result/share/doc/nix/internal-api/html/index.html +``` + +or inside a `nix develop` shell by running: + +``` +# make internal-api-html +# xdg-open ./outputs/doc/share/doc/nix/internal-api/html/index.html +``` + +## Coverage analysis + +A coverage analysis report is [available +online](https://hydra.nixos.org/job/nix/master/coverage/latest/download-by-type/report/coverage). You +can build it yourself: + +``` +# nix build .#hydraJobs.coverage +# xdg-open ./result/coverage/index.html +``` + +Metrics about the change in line/function coverage over time are also +[available](https://hydra.nixos.org/job/nix/master/coverage#tabs-charts). diff --git a/doc/manual/src/glossary.md b/doc/manual/src/glossary.md index d0aff34e2..eeb19ad50 100644 --- a/doc/manual/src/glossary.md +++ b/doc/manual/src/glossary.md @@ -15,7 +15,7 @@ Example: `/nix/store/g946hcz4c8mdvq2g8vxx42z51qb71rvp-git-2.38.1.drv` - See [`nix show-derivation`](./command-ref/new-cli/nix3-show-derivation.md) (experimental) for displaying the contents of store derivations. + See [`nix derivation show`](./command-ref/new-cli/nix3-derivation-show.md) (experimental) for displaying the contents of store derivations. [store derivation]: #gloss-store-derivation @@ -31,7 +31,7 @@ This means either running the `builder` executable as specified in the corresponding [derivation] or fetching a pre-built [store object] from a [substituter]. - See [`nix-build`](./command-ref/nix-build.md) and [`nix-store --realise`](./command-ref/nix-store.md#operation---realise). + See [`nix-build`](./command-ref/nix-build.md) and [`nix-store --realise`](@docroot@/command-ref/nix-store/realise.md). See [`nix build`](./command-ref/new-cli/nix3-build.md) (experimental). @@ -54,7 +54,7 @@ invoked, the Nix store can be referred to as a "_local_" or a "_remote_" one: - + A *local store* exists on the filesystem of + + A [local store]{#gloss-local-store} exists on the filesystem of the machine where Nix is invoked. You can use other local stores by passing the `--store` flag to the `nix` command. Local stores can be used for building derivations. @@ -65,17 +65,17 @@ served by the `nix-serve` Perl script. [store]: #gloss-store + [local store]: #gloss-local-store - [chroot store]{#gloss-chroot-store}\ - A local store whose canonical path is anything other than `/nix/store`. + A [local store] whose canonical path is anything other than `/nix/store`. 
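+
+    For example (an illustrative invocation that assumes the experimental `nix` CLI and flakes), passing a filesystem path to `--store` creates and uses a chroot store rooted at that path:
+
+    ```console
+    $ nix build --store ~/scratch-store nixpkgs#hello
+    ```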
- [binary cache]{#gloss-binary-cache}\ A *binary cache* is a Nix store which uses a different format: its metadata and signatures are kept in `.narinfo` files rather than in a - Nix database. This different format simplifies serving store objects - over the network, but cannot host builds. Examples of binary caches - include S3 buckets and the [NixOS binary - cache](https://cache.nixos.org). + [Nix database]. This different format simplifies serving store objects + over the network, but cannot host builds. Examples of binary caches + include S3 buckets and the [NixOS binary cache](https://cache.nixos.org). - [store path]{#gloss-store-path}\ The location of a [store object] in the file system, i.e., an @@ -108,7 +108,7 @@ [fixed-output derivations](#gloss-fixed-output-derivation). - [substitute]{#gloss-substitute}\ - A substitute is a command invocation stored in the Nix database that + A substitute is a command invocation stored in the [Nix database] that describes how to build a store object, bypassing the normal build mechanism (i.e., derivations). Typically, the substitute builds the store object by downloading a pre-built version of the store object @@ -127,6 +127,14 @@ builder can rely on external inputs such as the network or the system time) but the Nix model assumes it. + - [Nix database]{#gloss-nix-database}\ + An SQlite database to track [reference]s between [store object]s. + This is an implementation detail of the [local store]. + + Default location: `/nix/var/nix/db`. + + [Nix database]: #gloss-nix-database + - [Nix expression]{#gloss-nix-expression}\ A high-level description of software packages and compositions thereof. Deploying software using Nix entails writing Nix @@ -135,14 +143,13 @@ then be built. - [reference]{#gloss-reference}\ - A store path `P` is said to have a reference to a store path `Q` if - the store object at `P` contains the path `Q` somewhere. The - *references* of a store path are the set of store paths to which it - has a reference. + A [store object] `O` is said to have a *reference* to a store object `P` if a [store path] to `P` appears in the contents of `O`. - A derivation can reference other derivations and sources (but not - output paths), whereas an output path only references other output - paths. + Store objects can refer to both other store objects and themselves. + References from a store object to itself are called *self-references*. + References other than a self-reference must not form a cycle. + + [reference]: #gloss-reference - [reachable]{#gloss-reachable}\ A store path `Q` is reachable from another store path `P` if `Q` @@ -159,8 +166,8 @@ files could be missing. The command `nix-store -qR` prints out closures of store paths. - As an example, if the store object at path `P` contains a reference - to path `Q`, then `Q` is in the closure of `P`. Further, if `Q` + As an example, if the [store object] at path `P` contains a [reference] + to a store object at path `Q`, then `Q` is in the closure of `P`. Further, if `Q` references `R` then `R` is also in the closure of `P`. [closure]: #gloss-closure @@ -176,9 +183,9 @@ - [validity]{#gloss-validity}\ A store path is valid if all [store object]s in its [closure] can be read from the [store]. - For a local store, this means: + For a [local store], this means: - The store path leads to an existing [store object] in that [store]. - - The store path is listed in the Nix database as being valid. + - The store path is listed in the [Nix database] as being valid. 
- All paths in the store path's [closure] are valid. [validity]: #gloss-validity @@ -193,6 +200,11 @@ A symlink to the current *user environment* of a user, e.g., `/nix/var/nix/profiles/default`. + - [installable]{#gloss-installable}\ + Something that can be realised in the Nix store. + + See [installables](./command-ref/new-cli/nix.md#installables) for [`nix` commands](./command-ref/new-cli/nix.md) (experimental) for details. + - [NAR]{#gloss-nar}\ A *N*ix *AR*chive. This is a serialisation of a path in the Nix store. It can contain regular files, directories and symbolic @@ -213,3 +225,9 @@ [string]: ./language/values.md#type-string [path]: ./language/values.md#type-path [attribute name]: ./language/values.md#attribute-set + + - [experimental feature]{#gloss-experimental-feature}\ + Not yet stabilized functionality guarded by named experimental feature flags. + These flags are enabled or disabled with the [`experimental-features`](./command-ref/conf-file.html#conf-experimental-features) setting. + + See the contribution guide on the [purpose and lifecycle of experimental feaures](@docroot@/contributing/experimental-features.md). diff --git a/doc/manual/src/installation/env-variables.md b/doc/manual/src/installation/env-variables.md index fb8155a80..db98f52ff 100644 --- a/doc/manual/src/installation/env-variables.md +++ b/doc/manual/src/installation/env-variables.md @@ -42,14 +42,11 @@ export NIX_SSL_CERT_FILE=/etc/ssl/my-certificate-bundle.crt > You must not add the export and then do the install, as the Nix > installer will detect the presence of Nix configuration, and abort. -## `NIX_SSL_CERT_FILE` with macOS and the Nix daemon +If you use the Nix daemon, you should also add the following to +`/etc/nix/nix.conf`: -On macOS you must specify the environment variable for the Nix daemon -service, then restart it: - -```console -$ sudo launchctl setenv NIX_SSL_CERT_FILE /etc/ssl/my-certificate-bundle.crt -$ sudo launchctl kickstart -k system/org.nixos.nix-daemon +``` +ssl-cert-file = /etc/ssl/my-certificate-bundle.crt ``` ## Proxy Environment Variables diff --git a/doc/manual/src/installation/installing-binary.md b/doc/manual/src/installation/installing-binary.md index e3fd962bd..ffabb250a 100644 --- a/doc/manual/src/installation/installing-binary.md +++ b/doc/manual/src/installation/installing-binary.md @@ -47,12 +47,6 @@ The install script will modify the first writable file from amongst `NIX_INSTALLER_NO_MODIFY_PROFILE` environment variable before executing the install script to disable this behaviour. -You can uninstall Nix simply by running: - -```console -$ rm -rf /nix -``` - # Multi User Installation The multi-user Nix installation creates system users, and a system @@ -84,154 +78,8 @@ The installer will modify `/etc/bashrc`, and `/etc/zshrc` if they exist. The installer will first back up these files with a `.backup-before-nix` extension. The installer will also create `/etc/profile.d/nix.sh`. -## Uninstalling - -### Linux - -If you are on Linux with systemd: - -1. Remove the Nix daemon service: - - ```console - sudo systemctl stop nix-daemon.service - sudo systemctl disable nix-daemon.socket nix-daemon.service - sudo systemctl daemon-reload - ``` - -1. Remove systemd service files: - - ```console - sudo rm /etc/systemd/system/nix-daemon.service /etc/systemd/system/nix-daemon.socket - ``` - -1. The installer script uses systemd-tmpfiles to create the socket directory. 
- You may also want to remove the configuration for that: - - ```console - sudo rm /etc/tmpfiles.d/nix-daemon.conf - ``` - -Remove files created by Nix: - -```console -sudo rm -rf /nix /etc/nix /etc/profile/nix.sh ~root/.nix-profile ~root/.nix-defexpr ~root/.nix-channels ~/.nix-profile ~/.nix-defexpr ~/.nix-channels -``` - -Remove build users and their group: - -```console -for i in $(seq 1 32); do - sudo userdel nixbld$i -done -sudo groupdel nixbld -``` - -There may also be references to Nix in - -- `/etc/profile` -- `/etc/bashrc` -- `/etc/zshrc` - -which you may remove. - -### macOS - -1. Edit `/etc/zshrc` and `/etc/bashrc` to remove the lines sourcing - `nix-daemon.sh`, which should look like this: - - ```bash - # Nix - if [ -e '/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh' ]; then - . '/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh' - fi - # End Nix - ``` - - If these files haven't been altered since installing Nix you can simply put - the backups back in place: - - ```console - sudo mv /etc/zshrc.backup-before-nix /etc/zshrc - sudo mv /etc/bashrc.backup-before-nix /etc/bashrc - ``` - - This will stop shells from sourcing the file and bringing everything you - installed using Nix in scope. - -2. Stop and remove the Nix daemon services: - - ```console - sudo launchctl unload /Library/LaunchDaemons/org.nixos.nix-daemon.plist - sudo rm /Library/LaunchDaemons/org.nixos.nix-daemon.plist - sudo launchctl unload /Library/LaunchDaemons/org.nixos.darwin-store.plist - sudo rm /Library/LaunchDaemons/org.nixos.darwin-store.plist - ``` - - This stops the Nix daemon and prevents it from being started next time you - boot the system. - -3. Remove the `nixbld` group and the `_nixbuildN` users: - - ```console - sudo dscl . -delete /Groups/nixbld - for u in $(sudo dscl . -list /Users | grep _nixbld); do sudo dscl . -delete /Users/$u; done - ``` - - This will remove all the build users that no longer serve a purpose. - -4. Edit fstab using `sudo vifs` to remove the line mounting the Nix Store - volume on `/nix`, which looks like - `UUID= /nix apfs rw,noauto,nobrowse,suid,owners` or - `LABEL=Nix\040Store /nix apfs rw,nobrowse`. This will prevent automatic - mounting of the Nix Store volume. - -5. Edit `/etc/synthetic.conf` to remove the `nix` line. If this is the only - line in the file you can remove it entirely, `sudo rm /etc/synthetic.conf`. - This will prevent the creation of the empty `/nix` directory to provide a - mountpoint for the Nix Store volume. - -6. Remove the files Nix added to your system: - - ```console - sudo rm -rf /etc/nix /var/root/.nix-profile /var/root/.nix-defexpr /var/root/.nix-channels ~/.nix-profile ~/.nix-defexpr ~/.nix-channels - ``` - - This gets rid of any data Nix may have created except for the store which is - removed next. - -7. Remove the Nix Store volume: - - ```console - sudo diskutil apfs deleteVolume /nix - ``` - - This will remove the Nix Store volume and everything that was added to the - store. - - If the output indicates that the command couldn't remove the volume, you should - make sure you don't have an _unmounted_ Nix Store volume. Look for a - "Nix Store" volume in the output of the following command: - - ```console - diskutil list - ``` - - If you _do_ see a "Nix Store" volume, delete it by re-running the diskutil - deleteVolume command, but replace `/nix` with the store volume's `diskXsY` - identifier. - -> **Note** -> -> After you complete the steps here, you will still have an empty `/nix` -> directory. 
This is an expected sign of a successful uninstall. The empty -> `/nix` directory will disappear the next time you reboot. -> -> You do not have to reboot to finish uninstalling Nix. The uninstall is -> complete. macOS (Catalina+) directly controls root directories and its -> read-only root will prevent you from manually deleting the empty `/nix` -> mountpoint. - # macOS Installation + []{#sect-macos-installation-change-store-prefix}[]{#sect-macos-installation-encrypted-volume}[]{#sect-macos-installation-symlink}[]{#sect-macos-installation-recommended-notes} @@ -280,19 +128,16 @@ this to run the installer, but it may help if you run into trouble: # Installing a pinned Nix version from a URL -NixOS.org hosts version-specific installation URLs for all Nix versions -since 1.11.16, at `https://releases.nixos.org/nix/nix-version/install`. +Version-specific installation URLs for all Nix versions +since 1.11.16 can be found at [releases.nixos.org](https://releases.nixos.org/?prefix=nix/). +The corresponding SHA-256 hash can be found in the directory for the given version. -These install scripts can be used the same as the main NixOS.org -installation script: +These install scripts can be used the same as usual: ```console -$ curl -L https://nixos.org/nix/install | sh +$ curl -L https://releases.nixos.org/nix/nix-/install | sh ``` -In the same directory of the install script are sha256 sums, and gpg -signature files. - # Installing from a binary tarball You can also download a binary tarball that contains Nix and all its diff --git a/doc/manual/src/installation/prerequisites-source.md b/doc/manual/src/installation/prerequisites-source.md index 6f4eb3008..5a708f11b 100644 --- a/doc/manual/src/installation/prerequisites-source.md +++ b/doc/manual/src/installation/prerequisites-source.md @@ -71,3 +71,8 @@ . This is an optional dependency and can be disabled by providing a `--disable-cpuid` to the `configure` script. + + - Unless `./configure --disable-tests` is specified, GoogleTest (GTest) and + RapidCheck are required, which are available at + and + respectively. diff --git a/doc/manual/src/installation/uninstall.md b/doc/manual/src/installation/uninstall.md new file mode 100644 index 000000000..bd85b49ee --- /dev/null +++ b/doc/manual/src/installation/uninstall.md @@ -0,0 +1,159 @@ +# Uninstalling Nix + +## Single User + +If you have a [single-user installation](./installing-binary.md#single-user-installation) of Nix, uninstall it by running: + +```console +$ rm -rf /nix +``` + +## Multi User + +Removing a [multi-user installation](./installing-binary.md#multi-user-installation) of Nix is more involved, and depends on the operating system. + +### Linux + +If you are on Linux with systemd: + +1. Remove the Nix daemon service: + + ```console + sudo systemctl stop nix-daemon.service + sudo systemctl disable nix-daemon.socket nix-daemon.service + sudo systemctl daemon-reload + ``` + +1. Remove systemd service files: + + ```console + sudo rm /etc/systemd/system/nix-daemon.service /etc/systemd/system/nix-daemon.socket + ``` + +1. The installer script uses systemd-tmpfiles to create the socket directory. 
+ You may also want to remove the configuration for that: + + ```console + sudo rm /etc/tmpfiles.d/nix-daemon.conf + ``` + +Remove files created by Nix: + +```console +sudo rm -rf /nix /etc/nix /etc/profile/nix.sh ~root/.nix-profile ~root/.nix-defexpr ~root/.nix-channels ~/.nix-profile ~/.nix-defexpr ~/.nix-channels +``` + +Remove build users and their group: + +```console +for i in $(seq 1 32); do + sudo userdel nixbld$i +done +sudo groupdel nixbld +``` + +There may also be references to Nix in + +- `/etc/profile` +- `/etc/bashrc` +- `/etc/zshrc` + +which you may remove. + +### macOS + +1. Edit `/etc/zshrc`, `/etc/bashrc`, and `/etc/bash.bashrc` to remove the lines sourcing `nix-daemon.sh`, which should look like this: + + ```bash + # Nix + if [ -e '/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh' ]; then + . '/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh' + fi + # End Nix + ``` + + If these files haven't been altered since installing Nix you can simply put + the backups back in place: + + ```console + sudo mv /etc/zshrc.backup-before-nix /etc/zshrc + sudo mv /etc/bashrc.backup-before-nix /etc/bashrc + sudo mv /etc/bash.bashrc.backup-before-nix /etc/bash.bashrc + ``` + + This will stop shells from sourcing the file and bringing everything you + installed using Nix in scope. + +2. Stop and remove the Nix daemon services: + + ```console + sudo launchctl unload /Library/LaunchDaemons/org.nixos.nix-daemon.plist + sudo rm /Library/LaunchDaemons/org.nixos.nix-daemon.plist + sudo launchctl unload /Library/LaunchDaemons/org.nixos.darwin-store.plist + sudo rm /Library/LaunchDaemons/org.nixos.darwin-store.plist + ``` + + This stops the Nix daemon and prevents it from being started next time you + boot the system. + +3. Remove the `nixbld` group and the `_nixbuildN` users: + + ```console + sudo dscl . -delete /Groups/nixbld + for u in $(sudo dscl . -list /Users | grep _nixbld); do sudo dscl . -delete /Users/$u; done + ``` + + This will remove all the build users that no longer serve a purpose. + +4. Edit fstab using `sudo vifs` to remove the line mounting the Nix Store + volume on `/nix`, which looks like + `UUID= /nix apfs rw,noauto,nobrowse,suid,owners` or + `LABEL=Nix\040Store /nix apfs rw,nobrowse`. This will prevent automatic + mounting of the Nix Store volume. + +5. Edit `/etc/synthetic.conf` to remove the `nix` line. If this is the only + line in the file you can remove it entirely, `sudo rm /etc/synthetic.conf`. + This will prevent the creation of the empty `/nix` directory to provide a + mountpoint for the Nix Store volume. + +6. Remove the files Nix added to your system: + + ```console + sudo rm -rf /etc/nix /var/root/.nix-profile /var/root/.nix-defexpr /var/root/.nix-channels ~/.nix-profile ~/.nix-defexpr ~/.nix-channels + ``` + + This gets rid of any data Nix may have created except for the store which is + removed next. + +7. Remove the Nix Store volume: + + ```console + sudo diskutil apfs deleteVolume /nix + ``` + + This will remove the Nix Store volume and everything that was added to the + store. + + If the output indicates that the command couldn't remove the volume, you should + make sure you don't have an _unmounted_ Nix Store volume. Look for a + "Nix Store" volume in the output of the following command: + + ```console + diskutil list + ``` + + If you _do_ see a "Nix Store" volume, delete it by re-running the diskutil + deleteVolume command, but replace `/nix` with the store volume's `diskXsY` + identifier. 
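+
+   For example, if `diskutil list` showed the Nix Store volume as `disk1s7` (an illustrative identifier; yours will differ), the command would be:
+
+   ```console
+   sudo diskutil apfs deleteVolume disk1s7
+   ```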
+ +> **Note** +> +> After you complete the steps here, you will still have an empty `/nix` +> directory. This is an expected sign of a successful uninstall. The empty +> `/nix` directory will disappear the next time you reboot. +> +> You do not have to reboot to finish uninstalling Nix. The uninstall is +> complete. macOS (Catalina+) directly controls root directories and its +> read-only root will prevent you from manually deleting the empty `/nix` +> mountpoint. + diff --git a/doc/manual/src/language/advanced-attributes.md b/doc/manual/src/language/advanced-attributes.md index 5a63236e5..307971434 100644 --- a/doc/manual/src/language/advanced-attributes.md +++ b/doc/manual/src/language/advanced-attributes.md @@ -198,8 +198,7 @@ Derivations can declare some infrequently used optional attributes. - `"recursive"`\ The hash is computed over the NAR archive dump of the output - (i.e., the result of [`nix-store - --dump`](../command-ref/nix-store.md#operation---dump)). In + (i.e., the result of [`nix-store --dump`](@docroot@/command-ref/nix-store/dump.md)). In this case, the output can be anything, including a directory tree. @@ -209,12 +208,26 @@ Derivations can declare some infrequently used optional attributes. about converting to and from base-32 notation.) - [`__contentAddressed`]{#adv-attr-__contentAddressed} - If this **experimental** attribute is set to true, then the derivation + > **Warning** + > This attribute is part of an [experimental feature](@docroot@/contributing/experimental-features.md). + > + > To use this attribute, you must enable the + > [`ca-derivations`](@docroot@/contributing/experimental-features.md#xp-feature-ca-derivations) experimental feature. + > For example, in [nix.conf](../command-ref/conf-file.md) you could add: + > + > ``` + > extra-experimental-features = ca-derivations + > ``` + + If this attribute is set to `true`, then the derivation outputs will be stored in a content-addressed location rather than the traditional input-addressed one. - This only has an effect if the `ca-derivations` experimental feature is enabled. - Setting this attribute also requires setting `outputHashMode` and `outputHashAlgo` like for *fixed-output derivations* (see above). + Setting this attribute also requires setting + [`outputHashMode`](#adv-attr-outputHashMode) + and + [`outputHashAlgo`](#adv-attr-outputHashAlgo) + like for *fixed-output derivations* (see above). - [`passAsFile`]{#adv-attr-passAsFile}\ A list of names of attributes that should be passed via files rather @@ -308,9 +321,11 @@ Derivations can declare some infrequently used optional attributes. - [`unsafeDiscardReferences`]{#adv-attr-unsafeDiscardReferences}\ > **Warning** - > This is an experimental feature. + > This attribute is part of an [experimental feature](@docroot@/contributing/experimental-features.md). > - > To enable it, add the following to [nix.conf](../command-ref/conf-file.md): + > To use this attribute, you must enable the + > [`discard-references`](@docroot@/contributing/experimental-features.md#xp-feature-discard-references) experimental feature. 
+ > For example, in [nix.conf](../command-ref/conf-file.md) you could add: > > ``` > extra-experimental-features = discard-references diff --git a/doc/manual/src/language/operators.md b/doc/manual/src/language/operators.md index 90b325597..a07d976ad 100644 --- a/doc/manual/src/language/operators.md +++ b/doc/manual/src/language/operators.md @@ -43,8 +43,8 @@ If the attribute doesn’t exist, return *value* if provided, otherwise abort ev An attribute path is a dot-separated list of attribute names. An attribute name can be an identifier or a string. -> *attrpath* = *name* [ `.` *name* ]... -> *name* = *identifier* | *string* +> *attrpath* = *name* [ `.` *name* ]... \ +> *name* = *identifier* | *string* \ > *identifier* ~ `[a-zA-Z_][a-zA-Z0-9_'-]*` [Attribute selection]: #attribute-selection diff --git a/doc/manual/src/language/values.md b/doc/manual/src/language/values.md index 3973518ca..c85124278 100644 --- a/doc/manual/src/language/values.md +++ b/doc/manual/src/language/values.md @@ -205,7 +205,7 @@ You can use arbitrary double-quoted strings as attribute names: ``` ```nix -let bar = "bar"; +let bar = "bar"; in { "foo ${bar}" = 123; }."foo ${bar}" ``` diff --git a/doc/manual/src/package-management/s3-substituter.md b/doc/manual/src/package-management/s3-substituter.md index 30f2b2e11..d8a1d9105 100644 --- a/doc/manual/src/package-management/s3-substituter.md +++ b/doc/manual/src/package-management/s3-substituter.md @@ -1,41 +1,11 @@ # Serving a Nix store via S3 -Nix has built-in support for storing and fetching store paths from +Nix has [built-in support](@docroot@/command-ref/new-cli/nix3-help-stores.md#s3-binary-cache-store) +for storing and fetching store paths from Amazon S3 and S3-compatible services. This uses the same *binary* cache mechanism that Nix usually uses to fetch prebuilt binaries from [cache.nixos.org](https://cache.nixos.org/). -The following options can be specified as URL parameters to the S3 URL: - - - `profile`\ - The name of the AWS configuration profile to use. By default Nix - will use the `default` profile. - - - `region`\ - The region of the S3 bucket. `us–east-1` by default. - - If your bucket is not in `us–east-1`, you should always explicitly - specify the region parameter. - - - `endpoint`\ - The URL to your S3-compatible service, for when not using Amazon S3. - Do not specify this value if you're using Amazon S3. - - > **Note** - > - > This endpoint must support HTTPS and will use path-based - > addressing instead of virtual host based addressing. - - - `scheme`\ - The scheme used for S3 requests, `https` (default) or `http`. This - option allows you to disable HTTPS for binary caches which don't - support it. - - > **Note** - > - > HTTPS should be used if the cache might contain sensitive - > information. - In this example we will use the bucket named `example-nix-cache`. ## Anonymous Reads to your S3-compatible binary cache diff --git a/doc/manual/src/quick-start.md b/doc/manual/src/quick-start.md index 651134c25..1d2688ede 100644 --- a/doc/manual/src/quick-start.md +++ b/doc/manual/src/quick-start.md @@ -19,7 +19,7 @@ to subsequent chapters. channel: ```console - $ nix-env -qaP + $ nix-env --query --available --attr-path nixpkgs.docbook_xml_dtd_43 docbook-xml-4.3 nixpkgs.docbook_xml_dtd_45 docbook-xml-4.5 nixpkgs.firefox firefox-33.0.2 @@ -31,7 +31,7 @@ to subsequent chapters. 1. 
Install some packages from the channel: ```console - $ nix-env -iA nixpkgs.hello + $ nix-env --install --attr nixpkgs.hello ``` This should download pre-built packages; it should not build them @@ -49,13 +49,13 @@ to subsequent chapters. 1. Uninstall a package: ```console - $ nix-env -e hello + $ nix-env --uninstall hello ``` 1. You can also test a package without installing it: ```console - $ nix-shell -p hello + $ nix-shell --packages hello ``` This builds or downloads GNU Hello and its dependencies, then drops @@ -76,7 +76,7 @@ to subsequent chapters. ```console $ nix-channel --update nixpkgs - $ nix-env -u '*' + $ nix-env --upgrade '*' ``` The latter command will upgrade each installed package for which @@ -95,5 +95,5 @@ to subsequent chapters. them: ```console - $ nix-collect-garbage -d + $ nix-collect-garbage --delete-old ``` diff --git a/doc/manual/src/release-notes/rl-2.14.md b/doc/manual/src/release-notes/rl-2.14.md new file mode 100644 index 000000000..705c118bb --- /dev/null +++ b/doc/manual/src/release-notes/rl-2.14.md @@ -0,0 +1,22 @@ +# Release 2.14 (2023-02-28) + +* A new function `builtins.readFileType` is available. It is similar to + `builtins.readDir` but acts on a single file or directory. + +* In flakes, the `.outPath` attribute of a flake now always refers to + the directory containing the `flake.nix`. This was not the case for + when `flake.nix` was in a subdirectory of e.g. a Git repository. + The root of the source of a flake in a subdirectory is still + available in `.sourceInfo.outPath`. + +* In derivations that use structured attributes, you can now use `unsafeDiscardReferences` + to disable scanning a given output for runtime dependencies: + ```nix + __structuredAttrs = true; + unsafeDiscardReferences.out = true; + ``` + This is useful e.g. when generating self-contained filesystem images with + their own embedded Nix store: hashes found inside such an image refer + to the embedded store and not to the host's Nix store. + + This requires the `discard-references` experimental feature. diff --git a/doc/manual/src/release-notes/rl-2.15.md b/doc/manual/src/release-notes/rl-2.15.md new file mode 100644 index 000000000..133121999 --- /dev/null +++ b/doc/manual/src/release-notes/rl-2.15.md @@ -0,0 +1,58 @@ +# Release 2.15 (2023-04-11) + +* Commands which take installables on the command line can now read them from the standard input if + passed the `--stdin` flag. This is primarily useful when you have a large amount of paths which + exceed the OS argument limit. + +* The `nix-hash` command now supports Base64 and SRI. Use the flags `--base64` + or `--sri` to specify the format of output hash as Base64 or SRI, and `--to-base64` + or `--to-sri` to convert a hash to Base64 or SRI format, respectively. + + As the choice of hash formats is no longer binary, the `--base16` flag is also added + to explicitly specify the Base16 format, which is still the default. + +* The special handling of an [installable](../command-ref/new-cli/nix.md#installables) with `.drv` suffix being interpreted as all of the given [store derivation](../glossary.md#gloss-store-derivation)'s output paths is removed, and instead taken as the literal store path that it represents. + + The new `^` syntax for store paths introduced in Nix 2.13 allows explicitly referencing output paths of a derivation. + Using this is better and more clear than relying on the now-removed `.drv` special handling. 
+
+  For example,
+  ```shell-session
+  $ nix path-info /nix/store/gzaflydcr6sb3567hap9q6srzx8ggdgg-glibc-2.33-78.drv
+  ```
+
+  now gives info about the derivation itself, while
+
+  ```shell-session
+  $ nix path-info /nix/store/gzaflydcr6sb3567hap9q6srzx8ggdgg-glibc-2.33-78.drv^*
+  ```
+  provides information about each of its outputs.
+
+* The experimental command `nix describe-stores` has been removed.
+
+* Nix stores and their settings are now documented in [`nix help-stores`](@docroot@/command-ref/new-cli/nix3-help-stores.md).
+
+* Documentation for operations of `nix-store` and `nix-env` is now available on separate pages of the manual.
+  They include all common options that can be specified and common environment variables that affect these commands.
+
+  These pages can be viewed offline with `man` using
+
+  * `man nix-store-<operation>` and `man nix-env-<operation>`
+  * `nix-store --help --<operation>` and `nix-env --help --<operation>`.
+
+* Nix when used as a client now checks whether the store (the server) trusts the client.
+  (The store always had to check whether it trusts the client, but now the client is informed of the store's decision.)
+  This is useful for scripting interactions with (non-legacy-ssh) remote Nix stores.
+
+  `nix store ping` and `nix doctor` now display this information.
+
+* The new command `nix derivation add` allows adding derivations to the store without involving the Nix language.
+  It exists to round out our collection of basic utility/plumbing commands, and allow for a low barrier-to-entry way of experimenting with alternative front-ends to the Nix Store.
+  It uses the same JSON layout as `nix derivation show`, and is its inverse.
+
+* `nix show-derivation` has been renamed to `nix derivation show`.
+  This matches `nix derivation add`, and avoids bloating the top-level namespace.
+  The old name is still kept as an alias for compatibility, however.
+
+* The `nix derivation {add,show}` JSON format now includes the derivation name as a top-level field.
+  This is useful in general, but especially necessary for the `add` direction, as otherwise we would need to pass in the name out of band for certain cases.
diff --git a/doc/manual/src/release-notes/rl-next.md b/doc/manual/src/release-notes/rl-next.md
index 7e8344e63..78ae99f4b 100644
--- a/doc/manual/src/release-notes/rl-next.md
+++ b/doc/manual/src/release-notes/rl-next.md
@@ -1,22 +1,2 @@
 # Release X.Y (202?-??-??)

-* A new function `builtins.readFileType` is available. It is similar to
-  `builtins.readDir` but acts on a single file or directory.
-
-* The `builtins.readDir` function has been optimized when encountering not-yet-known
-  file types from POSIX's `readdir`. In such cases the type of each file is/was
-  discovered by making multiple syscalls. This change makes these operations
-  lazy such that these lookups will only be performed if the attribute is used.
-  This optimization affects a minority of filesystems and operating systems.
-
-* In derivations that use structured attributes, you can now use `unsafeDiscardReferences`
-  to disable scanning a given output for runtime dependencies:
-  ```nix
-  __structuredAttrs = true;
-  unsafeDiscardReferences.out = true;
-  ```
-  This is useful e.g. when generating self-contained filesystem images with
-  their own embedded Nix store: hashes found inside such an image refer
-  to the embedded store and not to the host's Nix store.
-
-  This requires the `discard-references` experimental feature.
diff --git a/doc/manual/utils.nix b/doc/manual/utils.nix index d0643ef46..9043dd8cd 100644 --- a/doc/manual/utils.nix +++ b/doc/manual/utils.nix @@ -5,6 +5,9 @@ rec { concatStrings = concatStringsSep ""; + attrsToList = a: + map (name: { inherit name; value = a.${name}; }) (builtins.attrNames a); + replaceStringsRec = from: to: string: # recursively replace occurrences of `from` with `to` within `string` # example: @@ -38,4 +41,66 @@ rec { filterAttrs = pred: set: listToAttrs (concatMap (name: let v = set.${name}; in if pred name v then [(nameValuePair name v)] else []) (attrNames set)); + + optionalString = cond: string: if cond then string else ""; + + showSetting = { useAnchors }: name: { description, documentDefault, defaultValue, aliases, value, experimentalFeature }: + let + result = squash '' + - ${if useAnchors + then ''[`${name}`](#conf-${name})'' + else ''`${name}`''} + + ${indent " " body} + ''; + + experimentalFeatureNote = optionalString (experimentalFeature != null) '' + > **Warning** + > This setting is part of an + > [experimental feature](@docroot@/contributing/experimental-features.md). + + To change this setting, you need to make sure the corresponding experimental feature, + [`${experimentalFeature}`](@docroot@/contributing/experimental-features.md#xp-feature-${experimentalFeature}), + is enabled. + For example, include the following in [`nix.conf`](#): + + ``` + extra-experimental-features = ${experimentalFeature} + ${name} = ... + ``` + ''; + + # separate body to cleanly handle indentation + body = '' + ${description} + + ${experimentalFeatureNote} + + **Default:** ${showDefault documentDefault defaultValue} + + ${showAliases aliases} + ''; + + showDefault = documentDefault: defaultValue: + if documentDefault then + # a StringMap value type is specified as a string, but + # this shows the value type. The empty stringmap is `null` in + # JSON, but that converts to `{ }` here. + if defaultValue == "" || defaultValue == [] || isAttrs defaultValue + then "*empty*" + else if isBool defaultValue then + if defaultValue then "`true`" else "`false`" + else "`${toString defaultValue}`" + else "*machine-specific*"; + + showAliases = aliases: + optionalString (aliases != []) + "**Deprecated alias:** ${(concatStringsSep ", " (map (s: "`${s}`") aliases))}"; + + in result; + + indent = prefix: s: + concatStringsSep "\n" (map (x: if x == "" then x else "${prefix}${x}") (splitLines s)); + + showSettings = args: settingsInfo: concatStrings (attrValues (mapAttrs (showSetting args) settingsInfo)); } diff --git a/docker.nix b/docker.nix index 203a06b53..52199af66 100644 --- a/docker.nix +++ b/docker.nix @@ -8,6 +8,7 @@ , extraPkgs ? [] , maxLayers ? 100 , nixConf ? {} +, flake-registry ? 
null }: let defaultPkgs = with pkgs; [ @@ -247,7 +248,16 @@ let mkdir -p $out/bin $out/usr/bin ln -s ${pkgs.coreutils}/bin/env $out/usr/bin/env ln -s ${pkgs.bashInteractive}/bin/bash $out/bin/sh - ''; + + '' + (lib.optionalString (flake-registry != null) '' + nixCacheDir="/root/.cache/nix" + mkdir -p $out$nixCacheDir + globalFlakeRegistryPath="$nixCacheDir/flake-registry.json" + ln -s ${flake-registry}/flake-registry.json $out$globalFlakeRegistryPath + mkdir -p $out/nix/var/nix/gcroots/auto + rootName=$(${pkgs.nix}/bin/nix --extra-experimental-features nix-command hash file --type sha1 --base32 <(echo -n $globalFlakeRegistryPath)) + ln -s $globalFlakeRegistryPath $out/nix/var/nix/gcroots/auto/$rootName + ''); in pkgs.dockerTools.buildLayeredImageWithNixDb { diff --git a/flake.lock b/flake.lock index 4490b5ead..1d2aab5ed 100644 --- a/flake.lock +++ b/flake.lock @@ -1,5 +1,21 @@ { "nodes": { + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1673956053, + "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, "lowdown-src": { "flake": false, "locked": { @@ -50,6 +66,7 @@ }, "root": { "inputs": { + "flake-compat": "flake-compat", "lowdown-src": "lowdown-src", "nixpkgs": "nixpkgs", "nixpkgs-regression": "nixpkgs-regression" diff --git a/flake.nix b/flake.nix index 814bafe39..666ceb42b 100644 --- a/flake.nix +++ b/flake.nix @@ -4,8 +4,9 @@ inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-22.11-small"; inputs.nixpkgs-regression.url = "github:NixOS/nixpkgs/215d4d0fd80ca5163643b03a33fde804a29cc1e2"; inputs.lowdown-src = { url = "github:kristapsdz/lowdown"; flake = false; }; + inputs.flake-compat = { url = "github:edolstra/flake-compat"; flake = false; }; - outputs = { self, nixpkgs, nixpkgs-regression, lowdown-src }: + outputs = { self, nixpkgs, nixpkgs-regression, lowdown-src, flake-compat }: let inherit (nixpkgs) lib; @@ -89,9 +90,7 @@ }); configureFlags = - [ - "CXXFLAGS=-I${lib.getDev rapidcheck}/extras/gtest/include" - ] ++ lib.optionals stdenv.isLinux [ + lib.optionals stdenv.isLinux [ "--with-boost=${boost}/lib" "--with-sandbox-shell=${sh}/bin/busybox" ] @@ -99,6 +98,14 @@ "LDFLAGS=-fuse-ld=gold" ]; + testConfigureFlags = [ + "RAPIDCHECK_HEADERS=${lib.getDev rapidcheck}/extras/gtest/include" + ]; + + internalApiDocsConfigureFlags = [ + "--enable-internal-api-docs" + ]; + nativeBuildDeps = [ buildPackages.bison @@ -124,13 +131,20 @@ libarchive boost lowdown-nix - gtest - rapidcheck ] ++ lib.optionals stdenv.isLinux [libseccomp] ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium ++ lib.optional stdenv.hostPlatform.isx86_64 libcpuid; + checkDeps = [ + gtest + rapidcheck + ]; + + internalApiDocsDeps = [ + buildPackages.doxygen + ]; + awsDeps = lib.optional (stdenv.isLinux || stdenv.isDarwin) (aws-sdk-cpp.override { apis = ["s3" "transfer"]; @@ -200,11 +214,12 @@ VERSION_SUFFIX = versionSuffix; nativeBuildInputs = nativeBuildDeps; - buildInputs = buildDeps ++ awsDeps; + buildInputs = buildDeps ++ awsDeps ++ checkDeps; propagatedBuildInputs = propagatedDeps; enableParallelBuilding = true; + configureFlags = testConfigureFlags; # otherwise configure fails dontBuild = true; doInstallCheck = true; @@ -305,12 +320,18 @@ }; let canRunInstalled = currentStdenv.buildPlatform.canExecute currentStdenv.hostPlatform; - in currentStdenv.mkDerivation { + 
+ sourceByRegexInverted = rxs: origSrc: final.lib.cleanSourceWith { + filter = (path: type: + let relPath = final.lib.removePrefix (toString origSrc + "/") (toString path); + in ! lib.any (re: builtins.match re relPath != null) rxs); + src = origSrc; + }; + in currentStdenv.mkDerivation (finalAttrs: { name = "nix-super-${version}"; inherit version; - src = self; - + src = sourceByRegexInverted [ "tests/nixos/.*" "tests/installer/.*" ] self; VERSION_SUFFIX = versionSuffix; outputs = [ "out" "dev" "doc" ]; @@ -318,7 +339,8 @@ nativeBuildInputs = nativeBuildDeps; buildInputs = buildDeps # There have been issues building these dependencies - ++ lib.optionals (currentStdenv.hostPlatform == currentStdenv.buildPlatform) awsDeps; + ++ lib.optionals (currentStdenv.hostPlatform == currentStdenv.buildPlatform) awsDeps + ++ lib.optionals finalAttrs.doCheck checkDeps; propagatedBuildInputs = propagatedDeps; @@ -348,6 +370,8 @@ configureFlags = configureFlags ++ [ "--sysconfdir=/etc" ] ++ lib.optional stdenv.hostPlatform.isStatic "--enable-embedded-sandbox-shell" ++ + [ (lib.enableFeature finalAttrs.doCheck "tests") ] ++ + lib.optionals finalAttrs.doCheck testConfigureFlags ++ lib.optional (!canRunInstalled) "--disable-doc-gen"; enableParallelBuilding = true; @@ -361,6 +385,10 @@ postInstall = '' mkdir -p $doc/nix-support echo "doc manual $doc/share/doc/nix/manual" >> $doc/nix-support/hydra-build-products + ${lib.optionalString currentStdenv.hostPlatform.isStatic '' + mkdir -p $out/nix-support + echo "file binary-dist $out/bin/nix" >> $out/nix-support/hydra-build-products + ''} ${lib.optionalString currentStdenv.isDarwin '' install_name_tool \ -change ${boost}/lib/libboost_context.dylib \ @@ -369,8 +397,9 @@ ''} ''; - doInstallCheck = true; + doInstallCheck = finalAttrs.doCheck; installCheckFlags = "sysconfdir=$(out)/etc"; + installCheckTarget = "installcheck"; # work around buggy detection in stdenv separateDebugInfo = !currentStdenv.hostPlatform.isStatic; @@ -411,7 +440,7 @@ }); meta.platforms = lib.platforms.unix; - }; + }); lowdown-nix = with final; currentStdenv.mkDerivation rec { name = "lowdown-0.9.0"; @@ -444,8 +473,6 @@ }; in { - inherit nixpkgsFor; - # A Nixpkgs overlay that overrides the 'nix' and # 'nix.perl-bindings' packages. overlays.default = overlayFor (p: p.stdenv); @@ -462,6 +489,14 @@ buildNoGc = forAllSystems (system: self.packages.${system}.nix.overrideAttrs (a: { configureFlags = (a.configureFlags or []) ++ ["--enable-gc=no"];})); + buildNoTests = forAllSystems (system: + self.packages.${system}.nix.overrideAttrs (a: { + doCheck = + assert ! a?dontCheck; + false; + }) + ); + # Perl bindings for various platforms. perlBindings = forAllSystems (system: nixpkgsFor.${system}.native.nix.perl-bindings); @@ -496,25 +531,48 @@ src = self; - configureFlags = [ - "CXXFLAGS=-I${lib.getDev pkgs.rapidcheck}/extras/gtest/include" - ]; + configureFlags = testConfigureFlags; enableParallelBuilding = true; nativeBuildInputs = nativeBuildDeps; - buildInputs = buildDeps ++ propagatedDeps ++ awsDeps; + buildInputs = buildDeps ++ propagatedDeps ++ awsDeps ++ checkDeps; dontInstall = false; doInstallCheck = true; + installCheckTarget = "installcheck"; # work around buggy detection in stdenv lcovFilter = [ "*/boost/*" "*-tab.*" ]; - # We call `dot', and even though we just use it to - # syntax-check generated dot files, it still requires some - # fonts. So provide those. 
- FONTCONFIG_FILE = texFunctions.fontsConf; + hardeningDisable = ["fortify"]; + }; + + # API docs for Nix's unstable internal C++ interfaces. + internal-api-docs = + with nixpkgsFor.x86_64-linux.native; + with commonDeps { inherit pkgs; }; + + stdenv.mkDerivation { + pname = "nix-internal-api-docs"; + inherit version; + + src = self; + + configureFlags = testConfigureFlags ++ internalApiDocsConfigureFlags; + + nativeBuildInputs = nativeBuildDeps; + buildInputs = buildDeps ++ propagatedDeps + ++ awsDeps ++ checkDeps ++ internalApiDocsDeps; + + dontBuild = true; + + installTargets = [ "internal-api-html" ]; + + postInstall = '' + mkdir -p $out/nix-support + echo "doc internal-api-docs $out/share/doc/nix/internal-api/html" >> $out/nix-support/hydra-build-products + ''; + }; # System tests. @@ -524,6 +582,8 @@ tests.nix-copy-closure = runNixOSTestFor "x86_64-linux" ./tests/nixos/nix-copy-closure.nix; + tests.nix-copy = runNixOSTestFor "x86_64-linux" ./tests/nixos/nix-copy.nix; + tests.nssPreload = runNixOSTestFor "x86_64-linux" ./tests/nixos/nss-preload.nix; tests.githubFlakes = runNixOSTestFor "x86_64-linux" ./tests/nixos/github-flakes.nix; @@ -634,9 +694,11 @@ nativeBuildInputs = nativeBuildDeps ++ (lib.optionals stdenv.cc.isClang [ pkgs.bear pkgs.clang-tools ]); - buildInputs = buildDeps ++ propagatedDeps ++ awsDeps; + buildInputs = buildDeps ++ propagatedDeps + ++ awsDeps ++ checkDeps ++ internalApiDocsDeps; - inherit configureFlags; + configureFlags = configureFlags + ++ testConfigureFlags ++ internalApiDocsConfigureFlags; enableParallelBuilding = true; diff --git a/local.mk b/local.mk index 6a7074e8e..6951c179e 100644 --- a/local.mk +++ b/local.mk @@ -1,6 +1,8 @@ clean-files += Makefile.config -GLOBAL_CXXFLAGS += -Wno-deprecated-declarations +GLOBAL_CXXFLAGS += -Wno-deprecated-declarations -Werror=switch +# Allow switch-enum to be overridden for files that do not support it, usually because of dependency headers. +ERROR_SWITCH_ENUM = -Werror=switch-enum $(foreach i, config.h $(wildcard src/lib*/*.hh), \ $(eval $(call install-file-in, $(i), $(includedir)/nix, 0644))) diff --git a/maintainers/README.md b/maintainers/README.md index 08d197c1b..d13349438 100644 --- a/maintainers/README.md +++ b/maintainers/README.md @@ -2,7 +2,30 @@ ## Motivation -The goal of the team is to help other people to contribute to Nix. +The team's main responsibility is to set a direction for the development of Nix and ensure that the code is in good shape. + +We aim to achieve this by improving the contributor experience and attracting more maintainers – that is, by helping other people contribute to Nix and eventually take responsibility – in order to scale the development process to match users' needs. + +### Objectives + +- It is obvious what is worthwhile to work on. +- It is easy to find the right place in the code to make a change. +- It is clear what is expected of a pull request. +- It is predictable how to get a change merged and released.
+ +### Tasks + +- Establish, communicate, and maintain a technical roadmap +- Improve documentation targeted at contributors + - Record architecture and design decisions + - Elaborate contribution guides and abide to them + - Define and assert quality criteria for contributions +- Maintain the issue tracker and triage pull requests +- Help contributors succeed with pull requests that address roadmap milestones +- Manage the release lifecycle +- Regularly publish reports on work done +- Engage with third parties in the interest of the project +- Ensure the required maintainer capacity for all of the above ## Members @@ -11,6 +34,7 @@ The goal of the team is to help other people to contribute to Nix. - Valentin Gagarin (@fricklerhandwerk) - Thomas Bereknyei (@tomberek) - Robert Hensing (@roberth) +- John Ericson (@Ericson2314) ## Meeting protocol @@ -18,12 +42,12 @@ The team meets twice a week: - Discussion meeting: [Fridays 13:00-14:00 CET](https://calendar.google.com/calendar/event?eid=MHNtOGVuNWtrZXNpZHR2bW1sM3QyN2ZjaGNfMjAyMjExMjVUMTIwMDAwWiBiOW81MmZvYnFqYWs4b3E4bGZraGczdDBxZ0Bn) - 1. Triage issues and pull requests from the _No Status_ column (30 min) - 2. Discuss issues and pull requests from the _To discuss_ column (30 min) + 1. Triage issues and pull requests from the [No Status](#no-status) column (30 min) + 2. Discuss issues and pull requests from the [To discuss](#to-discuss) column (30 min) - Work meeting: [Mondays 13:00-15:00 CET](https://calendar.google.com/calendar/event?eid=NTM1MG1wNGJnOGpmOTZhYms3bTB1bnY5cWxfMjAyMjExMjFUMTIwMDAwWiBiOW81MmZvYnFqYWs4b3E4bGZraGczdDBxZ0Bn) - 1. Code review on pull requests from _In review_. + 1. Code review on pull requests from [In review](#in-review). 2. Other chores and tasks. Meeting notes are collected on a [collaborative scratchpad](https://pad.lassul.us/Cv7FpYx-Ri-4VjUykQOLAw), and published on Discourse under the [Nix category](https://discourse.nixos.org/c/dev/nix/50). @@ -32,62 +56,76 @@ Meeting notes are collected on a [collaborative scratchpad](https://pad.lassul.u The team uses a [GitHub project board](https://github.com/orgs/NixOS/projects/19/views/1) for tracking its work. -Issues on the board progress through the following states: +Items on the board progress through the following states: -- No Status +### No Status - During the discussion meeting, the team triages new items. - To be considered, issues and pull requests must have a high-level description to provide the whole team with the necessary context at a glance. +During the discussion meeting, the team triages new items. +To be considered, issues and pull requests must have a high-level description to provide the whole team with the necessary context at a glance. - On every meeting, at least one item from each of the following categories is inspected: +On every meeting, at least one item from each of the following categories is inspected: - 1. [critical](https://github.com/NixOS/nix/labels/critical) - 2. [security](https://github.com/NixOS/nix/labels/security) - 3. [regression](https://github.com/NixOS/nix/labels/regression) - 4. [bug](https://github.com/NixOS/nix/issues?q=is%3Aopen+label%3Abug+sort%3Areactions-%2B1-desc) +1. [critical](https://github.com/NixOS/nix/labels/critical) +2. [security](https://github.com/NixOS/nix/labels/security) +3. [regression](https://github.com/NixOS/nix/labels/regression) +4. [bug](https://github.com/NixOS/nix/issues?q=is%3Aopen+label%3Abug+sort%3Areactions-%2B1-desc) +5. 
[tests of existing functionality](https://github.com/NixOS/nix/issues?q=is%3Aopen+label%3Atests+-label%3Afeature+sort%3Areactions-%2B1-desc) - - [oldest pull requests](https://github.com/NixOS/nix/pulls?q=is%3Apr+is%3Aopen+sort%3Acreated-asc) - - [most popular pull requests](https://github.com/NixOS/nix/pulls?q=is%3Apr+is%3Aopen+sort%3Areactions-%2B1-desc) - - [oldest issues](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Acreated-asc) - - [most popular issues](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc) +- [oldest pull requests](https://github.com/NixOS/nix/pulls?q=is%3Apr+is%3Aopen+sort%3Acreated-asc) +- [most popular pull requests](https://github.com/NixOS/nix/pulls?q=is%3Apr+is%3Aopen+sort%3Areactions-%2B1-desc) +- [oldest issues](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Acreated-asc) +- [most popular issues](https://github.com/NixOS/nix/issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc) - Team members can also add pull requests or issues they would like the whole team to consider. +Team members can also add pull requests or issues they would like the whole team to consider. +To ensure process quality and reliability, all non-trivial pull requests must be triaged before merging. - If there is disagreement on the general idea behind an issue or pull request, it is moved to _To discuss_, otherwise to _In review_. +If there is disagreement on the general idea behind an issue or pull request, it is moved to [To discuss](#to-discuss). +Otherwise, the issue or pull request in question gets the label [`idea approved`](https://github.com/NixOS/nix/labels/idea%20approved). +For issues this means that an implementation is welcome and will be prioritised for review. +For pull requests this means that: +- Unfinished work is encouraged to be continued. +- A reviewer is assigned to take responsibility for getting the pull request merged. + The item is moved to the [Assigned](#assigned) column. +- If needed, the team can decide to do a collaborative review. + Then the item is moved to the [In review](#in-review) column, and a review session is scheduled. -What constitutes a trivial pull request is up to maintainers' judgement. - Pull requests and issues that are deemed important and controversial are discussed by the team during discussion meetings. +### To discuss - This may be where the merit of the change itself or the implementation strategy is contested by a team member. +Pull requests and issues that are deemed important and controversial are discussed by the team during discussion meetings. - As a general guideline, the order of items is determined as follows: +This may be where the merit of the change itself or the implementation strategy is contested by a team member. - - Prioritise pull requests over issues +As a general guideline, the order of items is determined as follows: - Contributors who took the time to implement concrete change proposals should not wait indefinitely. +- Prioritise pull requests over issues - - Prioritise fixing bugs over documentation, improvements or new features + Contributors who took the time to implement concrete change proposals should not wait indefinitely. - The team values stability and accessibility higher than raw functionality. +- Prioritise fixing bugs and testing over documentation, improvements or new features - - Interleave issues and PRs + The team values stability and accessibility higher than raw functionality.
- This way issues without attempts at a solution get a chance to get addressed. +- Interleave issues and PRs -- In review + This way issues without attempts at a solution get a chance to get addressed. - Pull requests in this column are reviewed together during work meetings. - This is both for spreading implementation knowledge and for establishing common values in code reviews. +### In review - When the overall direction is agreed upon, even when further changes are required, the pull request is assigned to one team member. +Pull requests in this column are reviewed together during work meetings. +This is both for spreading implementation knowledge and for establishing common values in code reviews. -- Assigned for merging +When the overall direction is agreed upon, even when further changes are required, the pull request is assigned to one team member. - One team member is assigned to each of these pull requests. - They will communicate with the authors, and make the final approval once all remaining issues are addressed. +### Assigned - If more substantive issues arise, the assignee can move the pull request back to _To discuss_ to involve the team again. +One team member is assigned to each of these pull requests. +They will communicate with the authors, and make the final approval once all remaining issues are addressed. + +If more substantive issues arise, the assignee can move the pull request back to [To discuss](#to-discuss) or [In review](#in-review) to involve the team again. + +### Flowchart The process is illustrated in the following diagram: diff --git a/maintainers/release-process.md b/maintainers/release-process.md index b52d218f5..ec9e96489 100644 --- a/maintainers/release-process.md +++ b/maintainers/release-process.md @@ -123,6 +123,7 @@ release: `/home/eelco/Dev/nixpkgs-pristine`. TODO: trigger nixos.org netlify: https://docs.netlify.com/configure-builds/build-hooks/ + * Prepare for the next point release by editing `.version` to e.g. @@ -152,7 +153,7 @@ release: from the previous milestone, and close the previous milestone. Set the date for the next milestone 6 weeks from now. -* Create a backport label +* Create a backport label. * Post an [announcement on Discourse](https://discourse.nixos.org/c/announcements/8), including the contents of `rl-$VERSION.md`. diff --git a/misc/launchd/org.nixos.nix-daemon.plist.in b/misc/launchd/org.nixos.nix-daemon.plist.in index 5fa489b20..e1470cf99 100644 --- a/misc/launchd/org.nixos.nix-daemon.plist.in +++ b/misc/launchd/org.nixos.nix-daemon.plist.in @@ -4,8 +4,6 @@ EnvironmentVariables - NIX_SSL_CERT_FILE - /nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt OBJC_DISABLE_INITIALIZE_FORK_SAFETY YES diff --git a/mk/debug-test.sh b/mk/debug-test.sh index 6299e68a0..b5b628ecd 100755 --- a/mk/debug-test.sh +++ b/mk/debug-test.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -set -eu +set -eu -o pipefail test=$1 diff --git a/mk/disable-tests.mk b/mk/disable-tests.mk new file mode 100644 index 000000000..f72f84412 --- /dev/null +++ b/mk/disable-tests.mk @@ -0,0 +1,12 @@ +# This file is only active for `./configure --disable-tests`. +# Running `make check` or `make installcheck` would indicate a mistake in the +# caller. + +installcheck: + @echo "Tests are disabled. Configure without '--disable-tests', or avoid calling 'make installcheck'." + @exit 1 + +# This currently has little effect. +check: + @echo "Tests are disabled. Configure without '--disable-tests', or avoid calling 'make check'." 
+ @exit 1 diff --git a/mk/libraries.mk b/mk/libraries.mk index 02e4d47f9..1bc73d7f7 100644 --- a/mk/libraries.mk +++ b/mk/libraries.mk @@ -126,7 +126,7 @@ define build-library $(1)_PATH := $$(_d)/$$($(1)_NAME).a $$($(1)_PATH): $$($(1)_OBJS) | $$(_d)/ - +$$(trace-ld) $(LD) -Ur -o $$(_d)/$$($(1)_NAME).o $$^ + $$(trace-ld) $(LD) $$(ifndef $(HOST_DARWIN),-U) -r -o $$(_d)/$$($(1)_NAME).o $$^ $$(trace-ar) $(AR) crs $$@ $$(_d)/$$($(1)_NAME).o $(1)_LDFLAGS_USE += $$($(1)_PATH) $$($(1)_LDFLAGS) $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_LDFLAGS_USE)) diff --git a/mk/patterns.mk b/mk/patterns.mk index 86a724806..c81150260 100644 --- a/mk/patterns.mk +++ b/mk/patterns.mk @@ -1,10 +1,10 @@ $(buildprefix)%.o: %.cc @mkdir -p "$(dir $@)" - $(trace-cxx) $(CXX) -o $@ -c $< $(CPPFLAGS) $(GLOBAL_CXXFLAGS_PCH) $(GLOBAL_CXXFLAGS) $(CXXFLAGS) $($@_CXXFLAGS) -MMD -MF $(call filename-to-dep, $@) -MP + $(trace-cxx) $(CXX) -o $@ -c $< $(CPPFLAGS) $(GLOBAL_CXXFLAGS_PCH) $(GLOBAL_CXXFLAGS) $(CXXFLAGS) $($@_CXXFLAGS) $(ERROR_SWITCH_ENUM) -MMD -MF $(call filename-to-dep, $@) -MP $(buildprefix)%.o: %.cpp @mkdir -p "$(dir $@)" - $(trace-cxx) $(CXX) -o $@ -c $< $(CPPFLAGS) $(GLOBAL_CXXFLAGS_PCH) $(GLOBAL_CXXFLAGS) $(CXXFLAGS) $($@_CXXFLAGS) -MMD -MF $(call filename-to-dep, $@) -MP + $(trace-cxx) $(CXX) -o $@ -c $< $(CPPFLAGS) $(GLOBAL_CXXFLAGS_PCH) $(GLOBAL_CXXFLAGS) $(CXXFLAGS) $($@_CXXFLAGS) $(ERROR_SWITCH_ENUM) -MMD -MF $(call filename-to-dep, $@) -MP $(buildprefix)%.o: %.c @mkdir -p "$(dir $@)" diff --git a/mk/run-test.sh b/mk/run-test.sh index 219c8577f..1a1d65930 100755 --- a/mk/run-test.sh +++ b/mk/run-test.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -set -u +set -eu -o pipefail red="" green="" @@ -22,24 +22,11 @@ fi run_test () { (init_test 2>/dev/null > /dev/null) - log="$(run_test_proper 2>&1)" - status=$? + log="$(run_test_proper 2>&1)" && status=0 || status=$? } run_test -# Hack: Retry the test if it fails with “unexpected EOF reading a line” as these -# appear randomly without anyone knowing why. -# See https://github.com/NixOS/nix/issues/3605 for more info -if [[ $status -ne 0 && $status -ne 99 && \ - "$(uname)" == "Darwin" && \ - "$log" =~ "unexpected EOF reading a line" \ -]]; then - echo "$post_run_msg [${yellow}FAIL$normal] (possibly flaky, so will be retried)" - echo "$log" | sed 's/^/ /' - run_test -fi - if [ $status -eq 0 ]; then echo "$post_run_msg [${green}PASS$normal]" elif [ $status -eq 99 ]; then diff --git a/perl/lib/Nix/Store.xs b/perl/lib/Nix/Store.xs index de91dc28d..41ecbbeb4 100644 --- a/perl/lib/Nix/Store.xs +++ b/perl/lib/Nix/Store.xs @@ -27,8 +27,6 @@ static ref store() if (!_store) { try { initLibStore(); - loadConfFile(); - settings.lockCPU = false; _store = openStore(); } catch (Error & e) { croak("%s", e.what()); @@ -295,7 +293,13 @@ SV * makeFixedOutputPath(int recursive, char * algo, char * hash, char * name) try { auto h = Hash::parseAny(hash, parseHashType(algo)); auto method = recursive ? 
FileIngestionMethod::Recursive : FileIngestionMethod::Flat; - auto path = store()->makeFixedOutputPath(method, h, name); + auto path = store()->makeFixedOutputPath(name, FixedOutputInfo { + .hash = { + .method = method, + .hash = h, + }, + .references = {}, + }); XPUSHs(sv_2mortal(newSVpv(store()->printStorePath(path).c_str(), 0))); } catch (Error & e) { croak("%s", e.what()); diff --git a/scripts/nix-profile-daemon.sh.in b/scripts/nix-profile-daemon.sh.in index 235536c65..8cfd3149e 100644 --- a/scripts/nix-profile-daemon.sh.in +++ b/scripts/nix-profile-daemon.sh.in @@ -3,12 +3,12 @@ if [ -n "${__ETC_PROFILE_NIX_SOURCED:-}" ]; then return; fi __ETC_PROFILE_NIX_SOURCED=1 NIX_LINK=$HOME/.nix-profile -if [ -n "$XDG_STATE_HOME" ]; then +if [ -n "${XDG_STATE_HOME-}" ]; then NIX_LINK_NEW="$XDG_STATE_HOME/nix/profile" else NIX_LINK_NEW=$HOME/.local/state/nix/profile fi -if ! [ -e "$NIX_LINK" ]; then +if [ -e "$NIX_LINK_NEW" ]; then NIX_LINK="$NIX_LINK_NEW" else if [ -t 2 ] && [ -e "$NIX_LINK_NEW" ]; then diff --git a/scripts/nix-profile.sh.in b/scripts/nix-profile.sh.in index 264d9a8e2..c4d60cf37 100644 --- a/scripts/nix-profile.sh.in +++ b/scripts/nix-profile.sh.in @@ -3,12 +3,12 @@ if [ -n "$HOME" ] && [ -n "$USER" ]; then # Set up the per-user profile. NIX_LINK="$HOME/.nix-profile" - if [ -n "$XDG_STATE_HOME" ]; then + if [ -n "${XDG_STATE_HOME-}" ]; then NIX_LINK_NEW="$XDG_STATE_HOME/nix/profile" else NIX_LINK_NEW="$HOME/.local/state/nix/profile" fi - if ! [ -e "$NIX_LINK" ]; then + if [ -e "$NIX_LINK_NEW" ]; then NIX_LINK="$NIX_LINK_NEW" else if [ -t 2 ] && [ -e "$NIX_LINK_NEW" ]; then diff --git a/src/build-remote/build-remote.cc b/src/build-remote/build-remote.cc index 174435e7c..ce9c7f45a 100644 --- a/src/build-remote/build-remote.cc +++ b/src/build-remote/build-remote.cc @@ -219,7 +219,7 @@ static int main_build_remote(int argc, char * * argv) % concatStringsSep(", ", m.supportedFeatures) % concatStringsSep(", ", m.mandatoryFeatures); - printMsg(couldBuildLocally ? lvlChatty : lvlWarn, error); + printMsg(couldBuildLocally ? 
lvlChatty : lvlWarn, error.str()); std::cerr << "# decline\n"; } @@ -305,14 +305,15 @@ connected: std::set missingRealisations; StorePathSet missingPaths; - if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations) && !drv.type().hasKnownOutputPaths()) { + if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations) && !drv.type().hasKnownOutputPaths()) { for (auto & outputName : wantedOutputs) { auto thisOutputHash = outputHashes.at(outputName); auto thisOutputId = DrvOutput{ thisOutputHash, outputName }; if (!store->queryRealisation(thisOutputId)) { debug("missing output %s", outputName); - assert(result.builtOutputs.count(thisOutputId)); - auto newRealisation = result.builtOutputs.at(thisOutputId); + auto i = result.builtOutputs.find(outputName); + assert(i != result.builtOutputs.end()); + auto & newRealisation = i->second; missingRealisations.insert(newRealisation); missingPaths.insert(newRealisation.outPath); } @@ -337,7 +338,7 @@ connected: for (auto & realisation : missingRealisations) { // Should hold, because if the feature isn't enabled the set // of missing realisations should be empty - settings.requireExperimentalFeature(Xp::CaDerivations); + experimentalFeatureSettings.require(Xp::CaDerivations); store->registerDrvOutput(realisation); } diff --git a/src/libcmd/activatables.cc b/src/libcmd/activatables.cc index 25bcf23b9..4cf95e99b 100644 --- a/src/libcmd/activatables.cc +++ b/src/libcmd/activatables.cc @@ -8,12 +8,12 @@ ActivatableCommand::ActivatableCommand(std::string activationPackageAttrPath) : activationPackageAttrPath(activationPackageAttrPath) { } -void ActivatableCommand::prepare() { - InstallableCommand::prepare(); - auto installableFlake = std::dynamic_pointer_cast(installable); +void ActivatableCommand::run(ref store, ref installable) { + auto _installable = installable; + auto installableFlake = installable.dynamic_pointer_cast();; if (installableFlake) { auto fragment = *installableFlake->attrPaths.begin(); - installable = std::make_shared( + auto _installable = std::make_shared( this, getEvalState(), std::move(installableFlake->flakeRef), @@ -24,11 +24,11 @@ void ActivatableCommand::prepare() { installableFlake->lockFlags ); } + runActivatable(store, _installable); } -StorePath ActivatableCommand::buildActivatable(nix::ref store, bool dryRun) { - auto installableFlake = std::dynamic_pointer_cast(installable); - std::vector> installableContext; +StorePath ActivatableCommand::buildActivatable(nix::ref store, ref installable, bool dryRun) { + std::vector> installableContext; installableContext.emplace_back(installable); auto buildables = Installable::build( getEvalStore(), store, diff --git a/src/libcmd/activatables.hh b/src/libcmd/activatables.hh index c2b79d9e0..8ec6304e2 100644 --- a/src/libcmd/activatables.hh +++ b/src/libcmd/activatables.hh @@ -33,12 +33,11 @@ ActivatableBuildCommand::ActivatableBuildCommand() }; template -void ActivatableBuildCommand::run(nix::ref store) +void ActivatableBuildCommand::runActivatable(ref store, ref installable) { - auto installableFlake = std::dynamic_pointer_cast(ActivatableCommand::installable); if (dryRun) { std::vector> installableContext; - installableContext.emplace_back(ActivatableCommand::installable); + installableContext.emplace_back(installable); std::vector pathsToBuild; for (auto & i : installableContext) { @@ -50,7 +49,7 @@ void ActivatableBuildCommand::run(nix::ref store) return; } - auto out = ActivatableCommand::buildActivatable(store); + auto out = ActivatableCommand::buildActivatable(store, installable); 
if (outLink != "") { if (auto store2 = store.dynamic_pointer_cast()) { diff --git a/src/libcmd/command-installable-value.cc b/src/libcmd/command-installable-value.cc new file mode 100644 index 000000000..7e0c15eb8 --- /dev/null +++ b/src/libcmd/command-installable-value.cc @@ -0,0 +1,11 @@ +#include "command-installable-value.hh" + +namespace nix { + +void InstallableValueCommand::run(ref store, ref installable) +{ + auto installableValue = InstallableValue::require(installable); + run(store, installableValue); +} + +} diff --git a/src/libcmd/command-installable-value.hh b/src/libcmd/command-installable-value.hh new file mode 100644 index 000000000..7880d4119 --- /dev/null +++ b/src/libcmd/command-installable-value.hh @@ -0,0 +1,23 @@ +#pragma once +///@file + +#include "installable-value.hh" +#include "command.hh" + +namespace nix { + +/** + * An InstallableCommand where the single positional argument must be an + * InstallableValue in particular. + */ +struct InstallableValueCommand : InstallableCommand +{ + /** + * Entry point to this command + */ + virtual void run(ref store, ref installable) = 0; + + void run(ref store, ref installable) override; +}; + +} diff --git a/src/libcmd/command.cc b/src/libcmd/command.cc index ab51c229d..6c4648b34 100644 --- a/src/libcmd/command.cc +++ b/src/libcmd/command.cc @@ -121,6 +121,8 @@ ref EvalCommand::getEvalState() #endif ; + evalState->repair = repair; + if (startReplOnEvalErrors) { evalState->debugRepl = &AbstractNixRepl::runSimple; }; @@ -165,7 +167,7 @@ BuiltPathsCommand::BuiltPathsCommand(bool recursive) }); } -void BuiltPathsCommand::run(ref store) +void BuiltPathsCommand::run(ref store, Installables && installables) { BuiltPaths paths; if (all) { @@ -211,7 +213,7 @@ void StorePathsCommand::run(ref store, BuiltPaths && paths) run(store, std::move(sorted)); } -void StorePathCommand::run(ref store, std::vector && storePaths) +void StorePathCommand::run(ref store, StorePaths && storePaths) { if (storePaths.size() != 1) throw UsageError("this command requires exactly one store path"); @@ -246,7 +248,7 @@ void MixProfile::updateProfile(const BuiltPaths & buildables) { if (!profile) return; - std::vector result; + StorePaths result; for (auto & buildable : buildables) { std::visit(overloaded { diff --git a/src/libcmd/command.hh b/src/libcmd/command.hh index aa047ac21..2a145e038 100644 --- a/src/libcmd/command.hh +++ b/src/libcmd/command.hh @@ -1,7 +1,7 @@ #pragma once +///@file -#include "common-args.hh" -#include "installables.hh" +#include "installable-value.hh" #include "args.hh" #include "common-eval-args.hh" #include "outputs-spec.hh" @@ -20,17 +20,21 @@ class EvalState; struct Pos; class Store; +static constexpr Command::Category catHelp = -1; static constexpr Command::Category catSecondary = 100; static constexpr Command::Category catUtility = 101; static constexpr Command::Category catNixInstallation = 102; -static constexpr auto installablesCategory = "Options that change the interpretation of installables"; +static constexpr auto installablesCategory = "Options that change the interpretation of [installables](@docroot@/command-ref/new-cli/nix.md#installables)"; struct NixMultiCommand : virtual MultiCommand, virtual Command { nlohmann::json toJSON() override; }; +// For the overloaded run methods +#pragma GCC diagnostic ignored "-Woverloaded-virtual" + /* A command that requires a Nix store. 
*/ struct StoreCommand : virtual Command { @@ -104,19 +108,19 @@ struct SourceExprCommand : virtual Args, MixFlakeOptions SourceExprCommand(); - std::vector> parseInstallables( - ref store, std::vector ss, - bool applyOverrides = true, bool nestedIsExprOk = true); - - std::shared_ptr modifyInstallable( + ref modifyInstallable( ref store, ref state, - std::shared_ptr installable, + ref installable, std::string_view installableName, std::string_view prefix, ExtendedOutputsSpec extendedOutputsSpec); Bindings * getOverrideArgs(EvalState & state, ref store); - std::shared_ptr parseInstallable( + Installables parseInstallables( + ref store, std::vector ss, + bool applyOverrides = true, bool nestedIsExprOk = true); + + ref parseInstallable( ref store, const std::string & installable, bool applyOverrides = true, bool nestedIsExprOk = true); @@ -132,34 +136,43 @@ struct MixReadOnlyOption : virtual Args MixReadOnlyOption(); }; -/* A command that operates on a list of "installables", which can be - store paths, attribute paths, Nix expressions, etc. */ -struct InstallablesCommand : virtual Args, SourceExprCommand +/* Like InstallablesCommand but the installables are not loaded */ +struct RawInstallablesCommand : virtual Args, SourceExprCommand { - std::vector> installables; + RawInstallablesCommand(); - InstallablesCommand(); + virtual void run(ref store, std::vector && rawInstallables) = 0; - void prepare() override; - Installables load(); + void run(ref store) override; - virtual bool useDefaultInstallables() { return true; } + // FIXME make const after CmdRepl's override is fixed up + virtual void applyDefaultInstallables(std::vector & rawInstallables); + + bool readFromStdIn = false; std::vector getFlakesForCompletion() override; -protected: +private: - std::vector _installables; + std::vector rawInstallables; +}; +/* A command that operates on a list of "installables", which can be + store paths, attribute paths, Nix expressions, etc. */ +struct InstallablesCommand : RawInstallablesCommand +{ + virtual void run(ref store, Installables && installables) = 0; + + void run(ref store, std::vector && rawInstallables) override; }; /* A command that operates on exactly one "installable" */ struct InstallableCommand : virtual Args, SourceExprCommand { - std::shared_ptr installable; - InstallableCommand(); - void prepare() override; + virtual void run(ref store, ref installable) = 0; + + void run(ref store) override; std::vector getFlakesForCompletion() override { @@ -194,22 +207,18 @@ public: BuiltPathsCommand(bool recursive = false); - using StoreCommand::run; - virtual void run(ref store, BuiltPaths && paths) = 0; - void run(ref store) override; + void run(ref store, Installables && installables) override; - bool useDefaultInstallables() override { return !all; } + void applyDefaultInstallables(std::vector & rawInstallables) override; }; struct StorePathsCommand : public BuiltPathsCommand { StorePathsCommand(bool recursive = false); - using BuiltPathsCommand::run; - - virtual void run(ref store, std::vector && storePaths) = 0; + virtual void run(ref store, StorePaths && storePaths) = 0; void run(ref store, BuiltPaths && paths) override; }; @@ -217,11 +226,9 @@ struct StorePathsCommand : public BuiltPathsCommand /* A command that operates on exactly one store path. 
*/ struct StorePathCommand : public StorePathsCommand { - using StorePathsCommand::run; - virtual void run(ref store, const StorePath & storePath) = 0; - void run(ref store, std::vector && storePaths) override; + void run(ref store, StorePaths && storePaths) override; }; /* A helper class for registering commands globally. */ @@ -289,8 +296,12 @@ struct ActivatableCommand : public InstallableCommand { ActivatableCommand(std::string activationPackageAttrPath); std::string activationPackageAttrPath; - void prepare() override; - StorePath buildActivatable(nix::ref store, bool dryRun = false); + StorePath buildActivatable(ref store, ref installable, bool dryRun = false); + + // user entrypoint + virtual void runActivatable(ref store, ref installable) = 0; + + void run(ref store, ref installable) override; }; template @@ -299,7 +310,7 @@ struct ActivatableBuildCommand : public ActivatableCommand, public MixDryRun ActivatableBuildCommand(); Path outLink = "result"; bool printOutputPaths = false; - void run(nix::ref) override; + void runActivatable(nix::ref, ref installable) override; }; /* A command that manages the activation of exactly one "activatable" */ diff --git a/src/libcmd/common-eval-args.cc b/src/libcmd/common-eval-args.cc index 908127b4d..ff3abd534 100644 --- a/src/libcmd/common-eval-args.cc +++ b/src/libcmd/common-eval-args.cc @@ -136,7 +136,11 @@ MixEvalArgs::MixEvalArgs() addFlag({ .longName = "eval-store", - .description = "The Nix store to use for evaluations.", + .description = + R"( + The [URL of the Nix store](@docroot@/command-ref/new-cli/nix3-help-stores.md#store-url-format) + to use for evaluation, i.e. to store derivations (`.drv` files) and inputs referenced by them. + )", .category = category, .labels = {"store-url"}, .handler = {&evalStoreUrl}, @@ -149,7 +153,7 @@ Bindings * MixEvalArgs::getAutoArgs(EvalState & state) for (auto & i : autoArgs) { auto v = state.allocValue(); if (i.second[0] == 'E') - state.mkThunk_(*v, state.parseExprFromString(i.second.substr(1), absPath("."))); + state.mkThunk_(*v, state.parseExprFromString(i.second.substr(1), state.rootPath(CanonPath::fromCwd()))); else v->mkString(((std::string_view) i.second).substr(1)); res.insert(state.symbols.create(i.first), v); @@ -157,19 +161,19 @@ Bindings * MixEvalArgs::getAutoArgs(EvalState & state) return res.finish(); } -Path lookupFileArg(EvalState & state, std::string_view s) +SourcePath lookupFileArg(EvalState & state, std::string_view s) { if (EvalSettings::isPseudoUrl(s)) { auto storePath = fetchers::downloadTarball( state.store, EvalSettings::resolvePseudoUrl(s), "source", false).first.storePath; - return state.store->toRealPath(storePath); + return state.rootPath(CanonPath(state.store->toRealPath(storePath))); } else if (hasPrefix(s, "flake:")) { - settings.requireExperimentalFeature(Xp::Flakes); + experimentalFeatureSettings.require(Xp::Flakes); auto flakeRef = parseFlakeRef(std::string(s.substr(6)), {}, true, false); auto storePath = flakeRef.resolve(state.store).fetchTree(state.store).first.storePath; - return state.store->toRealPath(storePath); + return state.rootPath(CanonPath(state.store->toRealPath(storePath))); } else if (s.size() > 2 && s.at(0) == '<' && s.at(s.size() - 1) == '>') { @@ -178,7 +182,7 @@ Path lookupFileArg(EvalState & state, std::string_view s) } else - return absPath(std::string(s)); + return state.rootPath(CanonPath::fromCwd(s)); } } diff --git a/src/libcmd/common-eval-args.hh b/src/libcmd/common-eval-args.hh index 1ec800613..b65cb5b20 100644 --- 
a/src/libcmd/common-eval-args.hh +++ b/src/libcmd/common-eval-args.hh @@ -1,14 +1,17 @@ #pragma once +///@file #include "args.hh" +#include "common-args.hh" namespace nix { class Store; class EvalState; class Bindings; +struct SourcePath; -struct MixEvalArgs : virtual Args +struct MixEvalArgs : virtual Args, virtual MixRepair { static constexpr auto category = "Common evaluation options"; @@ -24,6 +27,6 @@ private: std::map autoArgs; }; -Path lookupFileArg(EvalState & state, std::string_view s); +SourcePath lookupFileArg(EvalState & state, std::string_view s); } diff --git a/src/libcmd/editor-for.cc b/src/libcmd/editor-for.cc index f674f32bd..a17c6f12a 100644 --- a/src/libcmd/editor-for.cc +++ b/src/libcmd/editor-for.cc @@ -3,8 +3,11 @@ namespace nix { -Strings editorFor(const Path & file, uint32_t line) +Strings editorFor(const SourcePath & file, uint32_t line) { + auto path = file.getPhysicalPath(); + if (!path) + throw Error("cannot open '%s' in an editor because it has no physical path", file); auto editor = getEnv("EDITOR").value_or("cat"); auto args = tokenizeString(editor); if (line > 0 && ( @@ -13,7 +16,7 @@ Strings editorFor(const Path & file, uint32_t line) editor.find("vim") != std::string::npos || editor.find("kak") != std::string::npos)) args.push_back(fmt("+%d", line)); - args.push_back(file); + args.push_back(path->abs()); return args; } diff --git a/src/libcmd/editor-for.hh b/src/libcmd/editor-for.hh index 8fbd08792..fbf4307c9 100644 --- a/src/libcmd/editor-for.hh +++ b/src/libcmd/editor-for.hh @@ -1,11 +1,15 @@ #pragma once +///@file #include "types.hh" +#include "input-accessor.hh" namespace nix { -/* Helper function to generate args that invoke $EDITOR on - filename:lineno. */ -Strings editorFor(const Path & file, uint32_t line); +/** + * Helper function to generate args that invoke $EDITOR on + * filename:lineno. + */ +Strings editorFor(const SourcePath & file, uint32_t line); } diff --git a/src/libcmd/installable-attr-path.cc b/src/libcmd/installable-attr-path.cc index d9377f0d6..cf513126d 100644 --- a/src/libcmd/installable-attr-path.cc +++ b/src/libcmd/installable-attr-path.cc @@ -87,6 +87,10 @@ DerivedPathsWithInfo InstallableAttrPath::toDerivedPaths() .drvPath = drvPath, .outputs = outputs, }, + .info = make_ref(ExtraPathInfoValue::Value { + /* FIXME: reconsider backwards compatibility above + so we can fill in this info. */ + }), }); return res; diff --git a/src/libcmd/installable-attr-path.hh b/src/libcmd/installable-attr-path.hh index c53b90e59..df373db2b 100644 --- a/src/libcmd/installable-attr-path.hh +++ b/src/libcmd/installable-attr-path.hh @@ -1,3 +1,6 @@ +#pragma once +///@file + #include "globals.hh" #include "installable-value.hh" #include "outputs-spec.hh" diff --git a/src/libcmd/installable-derived-path.cc b/src/libcmd/installable-derived-path.cc index a9921b901..6ecf54b7c 100644 --- a/src/libcmd/installable-derived-path.cc +++ b/src/libcmd/installable-derived-path.cc @@ -10,7 +10,10 @@ std::string InstallableDerivedPath::what() const DerivedPathsWithInfo InstallableDerivedPath::toDerivedPaths() { - return {{.path = derivedPath, .info = {} }}; + return {{ + .path = derivedPath, + .info = make_ref(), + }}; } std::optional InstallableDerivedPath::getStorePath() @@ -31,27 +34,24 @@ InstallableDerivedPath InstallableDerivedPath::parse( ExtendedOutputsSpec extendedOutputsSpec) { auto derivedPath = std::visit(overloaded { - // If the user did not use ^, we treat the output more liberally. 
+ // If the user did not use ^, we treat the output more + // liberally: we accept a symlink chain or an actual + // store path. [&](const ExtendedOutputsSpec::Default &) -> DerivedPath { - // First, we accept a symlink chain or an actual store path. auto storePath = store->followLinksToStorePath(prefix); - // Second, we see if the store path ends in `.drv` to decide what sort - // of derived path they want. - // - // This handling predates the `^` syntax. The `^*` in - // `/nix/store/hash-foo.drv^*` unambiguously means "do the - // `DerivedPath::Built` case", so plain `/nix/store/hash-foo.drv` could - // also unambiguously mean "do the DerivedPath::Opaque` case". - // - // Issue #7261 tracks reconsidering this `.drv` dispatching. - return storePath.isDerivation() - ? (DerivedPath) DerivedPath::Built { - .drvPath = std::move(storePath), - .outputs = OutputsSpec::All {}, - } - : (DerivedPath) DerivedPath::Opaque { - .path = std::move(storePath), + // Remove this prior to stabilizing the new CLI. + if (storePath.isDerivation()) { + auto oldDerivedPath = DerivedPath::Built { + .drvPath = storePath, + .outputs = OutputsSpec::All { }, }; + warn( + "The interpretation of store paths arguments ending in `.drv` recently changed. If this command is now failing try again with '%s'", + oldDerivedPath.to_string(*store)); + }; + return DerivedPath::Opaque { + .path = std::move(storePath), + }; }, // If the user did use ^, we just do exactly what is written. [&](const ExtendedOutputsSpec::Explicit & outputSpec) -> DerivedPath { diff --git a/src/libcmd/installable-derived-path.hh b/src/libcmd/installable-derived-path.hh index 042878b91..e0b4f18b3 100644 --- a/src/libcmd/installable-derived-path.hh +++ b/src/libcmd/installable-derived-path.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "installables.hh" diff --git a/src/libcmd/installable-flake.cc b/src/libcmd/installable-flake.cc index 60a97deaf..f0d322e6d 100644 --- a/src/libcmd/installable-flake.cc +++ b/src/libcmd/installable-flake.cc @@ -96,24 +96,25 @@ DerivedPathsWithInfo InstallableFlake::toDerivedPaths() auto v = attr->forceValue(); if (v.type() == nPath) { - PathSet context; - auto storePath = state->copyPathToStore(context, Path(v.path)); + auto storePath = v.path().fetchToStore(state->store); return {{ .path = DerivedPath::Opaque { .path = std::move(storePath), - } + }, + .info = make_ref(), }}; } else if (v.type() == nString) { - PathSet context; + NixStringContext context; auto s = state->forceString(v, context, noPos, fmt("while evaluating the flake output attribute '%s'", attrPath)); auto storePath = state->store->maybeParseStorePath(s); - if (storePath && context.count(std::string(s))) { + if (storePath && context.count(NixStringContextElem::Opaque { .path = *storePath })) { return {{ .path = DerivedPath::Opaque { .path = std::move(*storePath), - } + }, + .info = make_ref(), }}; } else throw Error("flake output attribute '%s' evaluates to the string '%s' which is not a store path", attrPath, s); @@ -160,13 +161,16 @@ DerivedPathsWithInfo InstallableFlake::toDerivedPaths() }, }, extendedOutputsSpec.raw()), }, - .info = { - .priority = priority, - .originalRef = flakeRef, - .resolvedRef = getLockedFlake()->flake.lockedRef, - .attrPath = attrPath, - .extendedOutputsSpec = extendedOutputsSpec, - } + .info = make_ref( + ExtraPathInfoValue::Value { + .priority = priority, + .attrPath = attrPath, + .extendedOutputsSpec = extendedOutputsSpec, + }, + ExtraPathInfoFlake::Flake { + .originalRef = flakeRef, + .resolvedRef = 
getLockedFlake()->flake.lockedRef, + }), }}; } @@ -178,8 +182,7 @@ std::pair InstallableFlake::toValue(EvalState & state) std::vector> InstallableFlake::getCursors(EvalState & state) { - auto evalCache = openEvalCache(state, - std::make_shared(lockFlake(state, flakeRef, lockFlags))); + auto evalCache = openEvalCache(state, getLockedFlake()); auto root = evalCache->getRoot(); @@ -213,6 +216,7 @@ std::shared_ptr InstallableFlake::getLockedFlake() const { if (!_lockedFlake) { flake::LockFlags lockFlagsApplyConfig = lockFlags; + // FIXME why this side effect? lockFlagsApplyConfig.applyNixConfig = true; _lockedFlake = std::make_shared(lockFlake(*state, flakeRef, lockFlagsApplyConfig)); } @@ -230,7 +234,7 @@ FlakeRef InstallableFlake::nixpkgsFlakeRef() const } } - return Installable::nixpkgsFlakeRef(); + return InstallableValue::nixpkgsFlakeRef(); } } diff --git a/src/libcmd/installable-flake.hh b/src/libcmd/installable-flake.hh index c75765086..afe64d977 100644 --- a/src/libcmd/installable-flake.hh +++ b/src/libcmd/installable-flake.hh @@ -1,9 +1,34 @@ #pragma once +///@file #include "installable-value.hh" namespace nix { +/** + * Extra info about a \ref DerivedPath "derived path" that ultimately + * come from a Flake. + * + * Invariant: every ExtraPathInfo gotten from an InstallableFlake should + * be possible to downcast to an ExtraPathInfoFlake. + */ +struct ExtraPathInfoFlake : ExtraPathInfoValue +{ + /** + * Extra struct to get around C++ designated initializer limitations + */ + struct Flake { + FlakeRef originalRef; + FlakeRef resolvedRef; + }; + + Flake flake; + + ExtraPathInfoFlake(Value && v, Flake && f) + : ExtraPathInfoValue(std::move(v)), flake(f) + { } +}; + struct InstallableFlake : InstallableValue { FlakeRef flakeRef; @@ -33,8 +58,10 @@ struct InstallableFlake : InstallableValue std::pair toValue(EvalState & state) override; - /* Get a cursor to every attrpath in getActualAttrPaths() - that exists. However if none exists, throw an exception. */ + /** + * Get a cursor to every attrpath in getActualAttrPaths() that + * exists. However if none exists, throw an exception. 
+ */ std::vector> getCursors(EvalState & state) override; diff --git a/src/libcmd/installable-value.cc b/src/libcmd/installable-value.cc new file mode 100644 index 000000000..3a7ede4e2 --- /dev/null +++ b/src/libcmd/installable-value.cc @@ -0,0 +1,44 @@ +#include "installable-value.hh" +#include "eval-cache.hh" + +namespace nix { + +std::vector> +InstallableValue::getCursors(EvalState & state) +{ + auto evalCache = + std::make_shared(std::nullopt, state, + [&]() { return toValue(state).first; }); + return {evalCache->getRoot()}; +} + +ref +InstallableValue::getCursor(EvalState & state) +{ + /* Although getCursors should return at least one element, in case it doesn't, + bound check to avoid an undefined behavior for vector[0] */ + return getCursors(state).at(0); +} + +static UsageError nonValueInstallable(Installable & installable) +{ + return UsageError("installable '%s' does not correspond to a Nix language value", installable.what()); +} + +InstallableValue & InstallableValue::require(Installable & installable) +{ + auto * castedInstallable = dynamic_cast(&installable); + if (!castedInstallable) + throw nonValueInstallable(installable); + return *castedInstallable; +} + +ref InstallableValue::require(ref installable) +{ + auto castedInstallable = installable.dynamic_pointer_cast(); + if (!castedInstallable) + throw nonValueInstallable(*installable); + return ref { castedInstallable }; +} + +} diff --git a/src/libcmd/installable-value.hh b/src/libcmd/installable-value.hh index c6cdc4797..bfb3bfeed 100644 --- a/src/libcmd/installable-value.hh +++ b/src/libcmd/installable-value.hh @@ -1,14 +1,108 @@ #pragma once +///@file #include "installables.hh" +#include "flake/flake.hh" namespace nix { +struct DrvInfo; +struct SourceExprCommand; + +namespace eval_cache { class EvalCache; class AttrCursor; } + +struct App +{ + std::vector context; + Path program; + // FIXME: add args, sandbox settings, metadata, ... +}; + +struct UnresolvedApp +{ + App unresolved; + App resolve(ref evalStore, ref store); +}; + +/** + * Extra info about a \ref DerivedPath "derived path" that ultimately + * comes from a Nix language value. + * + * Invariant: every ExtraPathInfo gotten from an InstallableValue should + * be possible to downcast to an ExtraPathInfoValue. + */ +struct ExtraPathInfoValue : ExtraPathInfo +{ + /** + * Extra struct to get around C++ designated initializer limitations + */ + struct Value { + /** + * An optional priority for use with "build envs". See Package + */ + std::optional priority; + + /** + * The attribute path associated with this value. The idea is + * that an installable referring to a value typically refers to + * a larger value, from which we project a smaller value out + * with this. + */ + std::string attrPath; + + /** + * \todo merge with DerivedPath's 'outputs' field? + */ + ExtendedOutputsSpec extendedOutputsSpec; + }; + + Value value; + + ExtraPathInfoValue(Value && v) + : value(v) + { } + + virtual ~ExtraPathInfoValue() = default; +}; + +/** + * An Installable which corresponds to a Nix language value, in addition to + * a collection of \ref DerivedPath "derived paths". + */ struct InstallableValue : Installable { ref state; InstallableValue(ref state) : state(state) {} + + virtual ~InstallableValue() { } + + virtual std::pair toValue(EvalState & state) = 0; + + /** + * Get a cursor to each value this Installable could refer to. + * However, if none exists, throw an exception instead of returning an + * empty vector.
+ */ + virtual std::vector> + getCursors(EvalState & state); + + /** + * Get the first and most preferred cursor this Installable could + * refer to, or throw an exception if none exists. + */ + virtual ref + getCursor(EvalState & state); + + UnresolvedApp toApp(EvalState & state); + + virtual FlakeRef nixpkgsFlakeRef() const + { + return FlakeRef::fromAttrs({{"type","indirect"}, {"id", "nixpkgs"}}); + } + + static InstallableValue & require(Installable & installable); + static ref require(ref installable); }; } diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc index 7e99f7138..a1bef2593 100644 --- a/src/libcmd/installables.cc +++ b/src/libcmd/installables.cc @@ -117,6 +117,28 @@ MixFlakeOptions::MixFlakeOptions() }} }); + addFlag({ + .longName = "reference-lock-file", + .description = "Read the given lock file instead of `flake.lock` within the top-level flake.", + .category = category, + .labels = {"flake-lock-path"}, + .handler = {[&](std::string lockFilePath) { + lockFlags.referenceLockFilePath = lockFilePath; + }}, + .completer = completePath + }); + + addFlag({ + .longName = "output-lock-file", + .description = "Write the given lock file instead of `flake.lock` within the top-level flake.", + .category = category, + .labels = {"flake-lock-path"}, + .handler = {[&](std::string lockFilePath) { + lockFlags.outputLockFilePath = lockFilePath; + }}, + .completer = completePath + }); + addFlag({ .longName = "inputs-from", .description = "Use the inputs of the specified flake as registry entries.", @@ -168,7 +190,7 @@ SourceExprCommand::SourceExprCommand() .longName = "file", .shortName = 'f', .description = - "Interpret installables as attribute paths relative to the Nix expression stored in *file*. " + "Interpret [*installables*](@docroot@/command-ref/new-cli/nix.md#installables) as attribute paths relative to the Nix expression stored in *file*. " "If *file* is the character -, then a Nix expression will be read from standard input. 
" "Implies `--impure`.", .category = installablesCategory, @@ -179,7 +201,7 @@ SourceExprCommand::SourceExprCommand() addFlag({ .longName = "expr", - .description = "Interpret installables as attribute paths relative to the Nix expression *expr*.", + .description = "Interpret [*installables*](@docroot@/command-ref/new-cli/nix.md#installables) as attribute paths relative to the Nix expression *expr*.", .category = installablesCategory, .labels = {"expr"}, .handler = {&expr} @@ -408,7 +430,7 @@ void completeFlakeRefWithFragment( void completeFlakeRef(ref store, std::string_view prefix) { - if (!settings.isExperimentalFeatureEnabled(Xp::Flakes)) + if (!experimentalFeatureSettings.isEnabled(Xp::Flakes)) return; if (prefix == "") @@ -440,23 +462,6 @@ DerivedPathWithInfo Installable::toDerivedPath() return std::move(buildables[0]); } -std::vector> -Installable::getCursors(EvalState & state) -{ - auto evalCache = - std::make_shared(std::nullopt, state, - [&]() { return toValue(state).first; }); - return {evalCache->getRoot()}; -} - -ref -Installable::getCursor(EvalState & state) -{ - /* Although getCursors should return at least one element, in case it doesn't, - bound check to avoid an undefined behavior for vector[0] */ - return getCursors(state).at(0); -} - static StorePath getDeriver( ref store, const Installable & i, @@ -505,17 +510,19 @@ Bindings * SourceExprCommand::getOverrideArgs(EvalState & state, ref stor Value * v = state.allocValue(); if (i.second[0] == 'E') { // is a raw expression, parse - state.mkThunk_(*v, state.parseExprFromString(std::string(i.second, 1), absPath("."))); + state.mkThunk_(*v, state.parseExprFromString(std::string(i.second, 1), state.rootPath(CanonPath::fromCwd()))); } else if (i.second[0] == 'F') { // is a flakeref - auto [vNew, pos] = parseInstallable(store, std::string(i.second, 1), false, false)->toValue(state); + auto iV = InstallableValue::require(parseInstallable(store, std::string(i.second, 1), false, false)); + auto [vNew, pos] = iV->toValue(state); v = vNew; } else if (i.second[0] == 'X') { // is not a flakeref, use attrPath from file // error because --override covers expressions already if (!file) throw Error("no file to reference override from"); - auto [vNew, pos] = parseInstallable(store, std::string(i.second, 1), false, true)->toValue(state); + auto iV = InstallableValue::require(parseInstallable(store, std::string(i.second, 1), false, true)); + auto [vNew, pos] = iV->toValue(state); v = vNew; } else { throw Error("[BUG] unknown argtype %s",i.second[0]); @@ -526,11 +533,11 @@ Bindings * SourceExprCommand::getOverrideArgs(EvalState & state, ref stor return res; } -std::vector> SourceExprCommand::parseInstallables( +Installables SourceExprCommand::parseInstallables( ref store, std::vector ss, bool applyOverrides, bool nestedIsExprOk) { - std::vector> result; + Installables result; auto doModifyInstallable = applyOverrides && ( applyToInstallable || installableOverrideAttrs || installableWithPackages || overrideArgs.size() > 0 ); @@ -552,13 +559,13 @@ std::vector> SourceExprCommand::parseInstallables( else if (file) state->evalFile(lookupFileArg(*state, *file), *vFile); else { - auto e = state->parseExprFromString(*expr, absPath(".")); + auto e = state->parseExprFromString(*expr, state->rootPath(CanonPath::fromCwd())); state->eval(e, *vFile); } for (auto & s : ss) { auto [prefix, extendedOutputsSpec] = ExtendedOutputsSpec::parse(s); - auto installableAttr = std::make_shared(InstallableAttrPath::parse( + auto installableAttr = 
make_ref(InstallableAttrPath::parse( state, *this, vFile, prefix, extendedOutputsSpec )); if (doModifyInstallable) { @@ -586,7 +593,7 @@ std::vector> SourceExprCommand::parseInstallables( if (prefix.find('/') != std::string::npos) { try { - result.push_back(std::make_shared( + result.push_back(make_ref( InstallableDerivedPath::parse(store, prefix, extendedOutputsSpec))); continue; } catch (BadStorePath &) { @@ -607,7 +614,7 @@ std::vector> SourceExprCommand::parseInstallables( auto state = getEvalState(); - auto installableFlake = std::make_shared( + auto installableFlake = make_ref( this, state, std::move(flakeRef), @@ -640,7 +647,7 @@ std::vector> SourceExprCommand::parseInstallables( return result; } -std::shared_ptr SourceExprCommand::parseInstallable( +ref SourceExprCommand::parseInstallable( ref store, const std::string & installable, bool applyOverrides, bool nestedIsExprOk) { @@ -653,7 +660,7 @@ std::vector Installable::build( ref evalStore, ref store, Realise mode, - const std::vector> & installables, + const Installables & installables, BuildMode bMode) { std::vector res; @@ -662,11 +669,11 @@ std::vector Installable::build( return res; } -std::vector, BuiltPathWithResult>> Installable::build2( +std::vector, BuiltPathWithResult>> Installable::build2( ref evalStore, ref store, Realise mode, - const std::vector> & installables, + const Installables & installables, BuildMode bMode) { if (mode == Realise::Nothing) @@ -674,8 +681,8 @@ std::vector, BuiltPathWithResult>> Instal struct Aux { - ExtraPathInfo info; - std::shared_ptr installable; + ref info; + ref installable; }; std::vector pathsToBuild; @@ -688,7 +695,7 @@ std::vector, BuiltPathWithResult>> Instal } } - std::vector, BuiltPathWithResult>> res; + std::vector, BuiltPathWithResult>> res; switch (mode) { @@ -728,8 +735,8 @@ std::vector, BuiltPathWithResult>> Instal std::visit(overloaded { [&](const DerivedPath::Built & bfd) { std::map outputs; - for (auto & path : buildResult.builtOutputs) - outputs.emplace(path.first.outputName, path.second.outPath); + for (auto & [outputName, realisation] : buildResult.builtOutputs) + outputs.emplace(outputName, realisation.outPath); res.push_back({aux.installable, { .path = BuiltPath::Built { bfd.drvPath, outputs }, .info = aux.info, @@ -760,7 +767,7 @@ BuiltPaths Installable::toBuiltPaths( ref store, Realise mode, OperateOn operateOn, - const std::vector> & installables) + const Installables & installables) { if (operateOn == OperateOn::Output) { BuiltPaths res; @@ -782,7 +789,7 @@ StorePathSet Installable::toStorePaths( ref evalStore, ref store, Realise mode, OperateOn operateOn, - const std::vector> & installables) + const Installables & installables) { StorePathSet outPaths; for (auto & path : toBuiltPaths(evalStore, store, mode, operateOn, installables)) { @@ -796,7 +803,7 @@ StorePath Installable::toStorePath( ref evalStore, ref store, Realise mode, OperateOn operateOn, - std::shared_ptr installable) + ref installable) { auto paths = toStorePaths(evalStore, store, mode, operateOn, {installable}); @@ -808,7 +815,7 @@ StorePath Installable::toStorePath( StorePathSet Installable::toDerivations( ref store, - const std::vector> & installables, + const Installables & installables, bool useDeriver) { StorePathSet drvPaths; @@ -817,9 +824,12 @@ StorePathSet Installable::toDerivations( for (const auto & b : i->toDerivedPaths()) std::visit(overloaded { [&](const DerivedPath::Opaque & bo) { - if (!useDeriver) - throw Error("argument '%s' did not evaluate to a derivation", i->what()); - 
drvPaths.insert(getDeriver(store, *i, bo.path)); + drvPaths.insert( + bo.path.isDerivation() + ? bo.path + : useDeriver + ? getDeriver(store, *i, bo.path) + : throw Error("argument '%s' did not evaluate to a derivation", i->what())); }, [&](const DerivedPath::Built & bfd) { drvPaths.insert(bfd.drvPath); @@ -829,36 +839,55 @@ StorePathSet Installable::toDerivations( return drvPaths; } -InstallablesCommand::InstallablesCommand() +RawInstallablesCommand::RawInstallablesCommand() { + addFlag({ + .longName = "stdin", + .description = "Read installables from the standard input.", + .handler = {&readFromStdIn, true} + }); + expectArgs({ .label = "installables", - .handler = {&_installables}, + .handler = {&rawInstallables}, .completer = {[&](size_t, std::string_view prefix) { completeInstallable(prefix); }} }); } -void InstallablesCommand::prepare() +void RawInstallablesCommand::applyDefaultInstallables(std::vector & rawInstallables) { - installables = load(); -} - -Installables InstallablesCommand::load() -{ - if (_installables.empty() && useDefaultInstallables()) + if (rawInstallables.empty()) { // FIXME: commands like "nix profile install" should not have a // default, probably. - _installables.push_back("."); - return parseInstallables(getStore(), _installables); + rawInstallables.push_back("."); + } } -std::vector InstallablesCommand::getFlakesForCompletion() +void RawInstallablesCommand::run(ref store) { - if (_installables.empty() && useDefaultInstallables()) - return {"."}; - return _installables; + if (readFromStdIn && !isatty(STDIN_FILENO)) { + std::string word; + while (std::cin >> word) { + rawInstallables.emplace_back(std::move(word)); + } + } + + applyDefaultInstallables(rawInstallables); + run(store, std::move(rawInstallables)); +} + +std::vector RawInstallablesCommand::getFlakesForCompletion() +{ + applyDefaultInstallables(rawInstallables); + return rawInstallables; +} + +void InstallablesCommand::run(ref store, std::vector && rawInstallables) +{ + auto installables = parseInstallables(store, rawInstallables); + run(store, std::move(installables)); } InstallableCommand::InstallableCommand() @@ -874,9 +903,16 @@ InstallableCommand::InstallableCommand() }); } -void InstallableCommand::prepare() +void InstallableCommand::run(ref store) { - installable = parseInstallable(getStore(), _installable); + auto installable = parseInstallable(store, _installable); + run(store, std::move(installable)); +} + +void BuiltPathsCommand::applyDefaultInstallables(std::vector & rawInstallables) +{ + if (rawInstallables.empty() && !all) + rawInstallables.push_back("."); } } diff --git a/src/libcmd/installables.hh b/src/libcmd/installables.hh index be77fdc81..42d6c7c7c 100644 --- a/src/libcmd/installables.hh +++ b/src/libcmd/installables.hh @@ -1,12 +1,11 @@ #pragma once +///@file #include "util.hh" #include "path.hh" #include "outputs-spec.hh" #include "derived-path.hh" -#include "eval.hh" #include "store-api.hh" -#include "flake/flake.hh" #include "build-result.hh" #include @@ -14,122 +13,156 @@ namespace nix { struct DrvInfo; -struct SourceExprCommand; - -namespace eval_cache { class EvalCache; class AttrCursor; } - -struct App -{ - std::vector context; - Path program; - // FIXME: add args, sandbox settings, metadata, ... -}; - -struct UnresolvedApp -{ - App unresolved; - App resolve(ref evalStore, ref store); -}; enum class Realise { - /* Build the derivation. Postcondition: the - derivation outputs exist. */ + /** + * Build the derivation. + * + * Postcondition: the derivation outputs exist. 
+ */ Outputs, - /* Don't build the derivation. Postcondition: the store derivation - exists. */ + /** + * Don't build the derivation. + * + * Postcondition: the store derivation exists. + */ Derivation, - /* Evaluate in dry-run mode. Postcondition: nothing. */ - // FIXME: currently unused, but could be revived if we can - // evaluate derivations in-memory. + /** + * Evaluate in dry-run mode. + * + * Postcondition: nothing. + * + * \todo currently unused, but could be revived if we can evaluate + * derivations in-memory. + */ Nothing }; -/* How to handle derivations in commands that operate on store paths. */ +/** + * How to handle derivations in commands that operate on store paths. + */ enum class OperateOn { - /* Operate on the output path. */ + /** + * Operate on the output path. + */ Output, - /* Operate on the .drv path. */ + /** + * Operate on the .drv path. + */ Derivation }; +/** + * Extra info about a DerivedPath + * + * Yes, this is empty, but that is intended. It will be sub-classed by + * the subclasses of Installable to allow those to provide more info. + * Certain commands will make use of this info. + */ struct ExtraPathInfo { - std::optional priority; - std::optional originalRef; - std::optional resolvedRef; - std::optional attrPath; - // FIXME: merge with DerivedPath's 'outputs' field? - std::optional extendedOutputsSpec; + virtual ~ExtraPathInfo() = default; }; -/* A derived path with any additional info that commands might - need from the derivation. */ +/** + * A DerivedPath with \ref ExtraPathInfo "any additional info" that + * commands might need from the derivation. + */ struct DerivedPathWithInfo { DerivedPath path; - ExtraPathInfo info; + ref info; }; +/** + * Like DerivedPathWithInfo but extending BuiltPath with \ref + * ExtraPathInfo "extra info" and also possibly the \ref BuildResult + * "result of building". + */ struct BuiltPathWithResult { BuiltPath path; - ExtraPathInfo info; + ref info; std::optional result; }; +/** + * Shorthand, for less typing and helping us keep the choice of + * collection in sync. + */ typedef std::vector DerivedPathsWithInfo; +struct Installable; + +/** + * Shorthand, for less typing and helping us keep the choice of + * collection in sync. + */ +typedef std::vector> Installables; + +/** + * Installables are the main positional arguments for the Nix + * command line. + * + * This base class is very flexible, and just assumes that the + * Installable refers to a collection of \ref DerivedPath "derived paths" with + * \ref ExtraPathInfo "extra info". + */ struct Installable { virtual ~Installable() { } + /** + * What Installable is this? + * + * Prints back valid CLI syntax that would result in this same + * installable. It doesn't need to be exactly what the user wrote, + * just something that means the same thing. + */ virtual std::string what() const = 0; + /** + * Get the collection of \ref DerivedPathWithInfo "derived paths + * with info" that this \ref Installable installable denotes. + * + * This is the main method of this class. + */ virtual DerivedPathsWithInfo toDerivedPaths() = 0; + /** + * A convenience wrapper of the above for when we expect an + * installable to produce a single \ref DerivedPath "derived path" + * only. + * + * If no or multiple \ref DerivedPath "derived paths" are produced, + * an error is raised.
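A minimal usage sketch of the `Installables` alias and `ref<Installable>` elements declared above; the helper name `listInstallables` is hypothetical, and only the `what()` method declared in this struct (plus this header being included) is assumed:

    #include <iostream>

    // Print the CLI syntax that would reproduce each installable in a collection.
    // ref<T> behaves like a non-null smart pointer, so -> works as usual.
    void listInstallables(const nix::Installables & installables)
    {
        for (auto & installable : installables)
            std::cout << installable->what() << "\n";
    }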
+ */ DerivedPathWithInfo toDerivedPath(); - UnresolvedApp toApp(EvalState & state); - - virtual std::pair toValue(EvalState & state) - { - throw Error("argument '%s' cannot be evaluated", what()); - } - - /* Return a value only if this installable is a store path or a - symlink to it. */ + /** + * Return a value only if this installable is a store path or a + * symlink to it. + * + * \todo should we move this to InstallableDerivedPath? It is only + * supposed to work there anyways. Can always downcast. + */ virtual std::optional getStorePath() { return {}; } - /* Get a cursor to each value this Installable could refer to. However - if none exists, throw exception instead of returning empty vector. */ - virtual std::vector> - getCursors(EvalState & state); - - /* Get the first and most preferred cursor this Installable could refer - to, or throw an exception if none exists. */ - virtual ref - getCursor(EvalState & state); - - virtual FlakeRef nixpkgsFlakeRef() const - { - return FlakeRef::fromAttrs({{"type","indirect"}, {"id", "nixpkgs"}}); - } - static std::vector build( ref evalStore, ref store, Realise mode, - const std::vector> & installables, + const Installables & installables, BuildMode bMode = bmNormal); - static std::vector, BuiltPathWithResult>> build2( + static std::vector, BuiltPathWithResult>> build2( ref evalStore, ref store, Realise mode, - const std::vector> & installables, + const Installables & installables, BuildMode bMode = bmNormal); static std::set toStorePaths( @@ -137,18 +170,18 @@ struct Installable ref store, Realise mode, OperateOn operateOn, - const std::vector> & installables); + const Installables & installables); static StorePath toStorePath( ref evalStore, ref store, Realise mode, OperateOn operateOn, - std::shared_ptr installable); + ref installable); static std::set toDerivations( ref store, - const std::vector> & installables, + const Installables & installables, bool useDeriver = false); static BuiltPaths toBuiltPaths( @@ -156,9 +189,7 @@ struct Installable ref store, Realise mode, OperateOn operateOn, - const std::vector> & installables); + const Installables & installables); }; -typedef std::vector> Installables; - } diff --git a/src/libcmd/legacy.hh b/src/libcmd/legacy.hh index f503b0da3..357500a4d 100644 --- a/src/libcmd/legacy.hh +++ b/src/libcmd/legacy.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include #include diff --git a/src/libcmd/markdown.hh b/src/libcmd/markdown.hh index 78320fcf5..a04d32a4f 100644 --- a/src/libcmd/markdown.hh +++ b/src/libcmd/markdown.hh @@ -1,3 +1,6 @@ +#pragma once +///@file + #include "types.hh" namespace nix { diff --git a/src/libcmd/modify-installables.cc b/src/libcmd/modify-installables.cc index 187460ee8..a2e6a2537 100644 --- a/src/libcmd/modify-installables.cc +++ b/src/libcmd/modify-installables.cc @@ -28,9 +28,9 @@ namespace nix { -std::shared_ptr SourceExprCommand::modifyInstallable ( +ref SourceExprCommand::modifyInstallable ( ref store, ref state, - std::shared_ptr installable, + ref installable, std::string_view installableName, std::string_view prefix, ExtendedOutputsSpec extendedOutputsSpec ) @@ -41,7 +41,7 @@ std::shared_ptr SourceExprCommand::modifyInstallable ( auto overrideSet = getOverrideArgs(*state, store); if (applyToInstallable) { - state->eval(state->parseExprFromString(*applyToInstallable, absPath(".")), *vApply); + state->eval(state->parseExprFromString(*applyToInstallable, state->rootPath(CanonPath::fromCwd())), *vApply); state->callFunction(*vApply, *v, *vRes, noPos); } else if 
(overrideSet->size() > 0) { Value * overrideValues = state->allocValue(); @@ -56,7 +56,7 @@ std::shared_ptr SourceExprCommand::modifyInstallable ( auto vOverrideFunctor = vOverrideFunctorAttr->value; state->callFunction(*vOverrideFunctor, *overrideValues, *vRes, noPos); } else if (installableOverrideAttrs) { - state->eval(state->parseExprFromString(fmt("old: with old; %s",*installableOverrideAttrs), absPath(".")), *vApply); + state->eval(state->parseExprFromString(fmt("old: with old; %s",*installableOverrideAttrs), state->rootPath(CanonPath::fromCwd())), *vApply); auto vOverrideFunctorAttr = v->attrs->get(state->symbols.create("overrideAttrs")); if (!vOverrideFunctorAttr) { throw Error("%s is not overrideAttrs-capable", installableName); @@ -64,7 +64,7 @@ std::shared_ptr SourceExprCommand::modifyInstallable ( auto vOverrideFunctor = vOverrideFunctorAttr->value; state->callFunction(*vOverrideFunctor, *vApply, *vRes, noPos); } else if (installableWithPackages) { - state->eval(state->parseExprFromString(fmt("ps: with ps; %s",*installableWithPackages), absPath(".")), *vApply); + state->eval(state->parseExprFromString(fmt("ps: with ps; %s",*installableWithPackages), state->rootPath(CanonPath::fromCwd())), *vApply); auto vOverrideFunctorAttr = v->attrs->get(state->symbols.create("withPackages")); if (!vOverrideFunctorAttr) { throw Error("%s cannot be extended with additional packages", installableName); @@ -73,7 +73,7 @@ std::shared_ptr SourceExprCommand::modifyInstallable ( state->callFunction(*vOverrideFunctor, *vApply, *vRes, noPos); } return - std::make_shared(InstallableAttrPath::parse( + make_ref(InstallableAttrPath::parse( state, *this, vRes, prefix, extendedOutputsSpec )); } diff --git a/src/libcmd/repl.cc b/src/libcmd/repl.cc index c674d2308..d9c00ff91 100644 --- a/src/libcmd/repl.cc +++ b/src/libcmd/repl.cc @@ -40,6 +40,7 @@ extern "C" { #include "markdown.hh" #include "local-fs-store.hh" #include "progress-bar.hh" +#include "print.hh" #if HAVE_BOEHMGC #define GC_INCLUDE_NEW @@ -54,8 +55,6 @@ struct NixRepl , gc #endif { - std::string curDir; - size_t debugTraceIndex; Strings loadedFiles; @@ -113,7 +112,6 @@ NixRepl::NixRepl(const Strings & searchPath, nix::ref store, refstaticBaseEnv.get())) , historyFile(getDataDir() + "/nix/repl-history") { - curDir = absPath("."); } @@ -252,7 +250,9 @@ void NixRepl::mainLoop() el_hist_size = 1000; #endif read_history(historyFile.c_str()); + auto oldRepl = curRepl; curRepl = this; + Finally restoreRepl([&] { curRepl = oldRepl; }); #ifndef READLINE rl_set_complete_func(completionCallback); rl_set_list_possib_func(listPossibleCallback); @@ -423,6 +423,7 @@ StringSet NixRepl::completePrefix(const std::string & prefix) } +// FIXME: DRY and match or use the parser static bool isVarName(std::string_view s) { if (s.size() == 0) return false; @@ -590,14 +591,14 @@ bool NixRepl::processLine(std::string line) Value v; evalString(arg, v); - const auto [path, line] = [&] () -> std::pair { + const auto [path, line] = [&] () -> std::pair { if (v.type() == nPath || v.type() == nString) { - PathSet context; + NixStringContext context; auto path = state->coerceToPath(noPos, v, context, "while evaluating the filename to edit"); return {path, 0}; } else if (v.isLambda()) { auto pos = state->positions[v.lambda.fun->pos]; - if (auto path = std::get_if(&pos.origin)) + if (auto path = std::get_if(&pos.origin)) return {*path, pos.line}; else throw Error("'%s' cannot be shown in an editor", pos); @@ -872,8 +873,7 @@ void NixRepl::addVarToScope(const Symbol name, Value & v) 
Expr * NixRepl::parseString(std::string s) { - Expr * e = state->parseExprFromString(std::move(s), curDir, staticEnv); - return e; + return state->parseExprFromString(std::move(s), state->rootPath(CanonPath::fromCwd()), staticEnv); } @@ -892,17 +892,6 @@ std::ostream & NixRepl::printValue(std::ostream & str, Value & v, unsigned int m } -std::ostream & printStringValue(std::ostream & str, const char * string) { - str << "\""; - for (const char * i = string; *i; i++) - if (*i == '\"' || *i == '\\') str << "\\" << *i; - else if (*i == '\n') str << "\\n"; - else if (*i == '\r') str << "\\r"; - else if (*i == '\t') str << "\\t"; - else str << *i; - str << "\""; - return str; -} // FIXME: lot of cut&paste from Nix's eval.cc. @@ -920,17 +909,19 @@ std::ostream & NixRepl::printValue(std::ostream & str, Value & v, unsigned int m break; case nBool: - str << ANSI_CYAN << (v.boolean ? "true" : "false") << ANSI_NORMAL; + str << ANSI_CYAN; + printLiteralBool(str, v.boolean); + str << ANSI_NORMAL; break; case nString: str << ANSI_WARNING; - printStringValue(str, v.string.s); + printLiteralString(str, v.string.s); str << ANSI_NORMAL; break; case nPath: - str << ANSI_GREEN << v.path << ANSI_NORMAL; // !!! escaping? + str << ANSI_GREEN << v.path().to_string() << ANSI_NORMAL; // !!! escaping? break; case nNull: @@ -945,7 +936,7 @@ std::ostream & NixRepl::printValue(std::ostream & str, Value & v, unsigned int m if (isDrv) { str << "«derivation "; Bindings::iterator i = v.attrs->find(state->sDrvPath); - PathSet context; + NixStringContext context; if (i != v.attrs->end()) str << state->store->printStorePath(state->coerceToStorePath(i->pos, *i->value, context, "while evaluating the drvPath of a derivation")); else @@ -962,10 +953,7 @@ std::ostream & NixRepl::printValue(std::ostream & str, Value & v, unsigned int m sorted.emplace(state->symbols[i.name], i.value); for (auto & i : sorted) { - if (isVarName(i.first)) - str << i.first; - else - printStringValue(str, i.first.c_str()); + printAttributeName(str, i.first); str << " = "; if (seen.count(i.second)) str << "«repeated»"; @@ -1024,6 +1012,8 @@ std::ostream & NixRepl::printValue(std::ostream & str, Value & v, unsigned int m str << v.fpoint; break; + case nThunk: + case nExternal: default: str << ANSI_RED "«unknown»" ANSI_NORMAL; break; diff --git a/src/libcmd/repl.hh b/src/libcmd/repl.hh index dfccc93e7..731c8e6db 100644 --- a/src/libcmd/repl.hh +++ b/src/libcmd/repl.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "eval.hh" diff --git a/src/libexpr/attr-path.cc b/src/libexpr/attr-path.cc index 7c0705091..ab654c1b0 100644 --- a/src/libexpr/attr-path.cc +++ b/src/libexpr/attr-path.cc @@ -106,7 +106,7 @@ std::pair findAlongAttrPath(EvalState & state, const std::strin } -std::pair findPackageFilename(EvalState & state, Value & v, std::string what) +std::pair findPackageFilename(EvalState & state, Value & v, std::string what) { Value * v2; try { @@ -118,21 +118,25 @@ std::pair findPackageFilename(EvalState & state, Value & // FIXME: is it possible to extract the Pos object instead of doing this // toString + parsing? 
- auto pos = state.forceString(*v2, noPos, "while evaluating the 'meta.position' attribute of a derivation"); + NixStringContext context; + auto path = state.coerceToPath(noPos, *v2, context, "while evaluating the 'meta.position' attribute of a derivation"); - auto colon = pos.rfind(':'); - if (colon == std::string::npos) - throw ParseError("cannot parse meta.position attribute '%s'", pos); + auto fn = path.path.abs(); + + auto fail = [fn]() { + throw ParseError("cannot parse 'meta.position' attribute '%s'", fn); + }; - std::string filename(pos, 0, colon); - unsigned int lineno; try { - lineno = std::stoi(std::string(pos, colon + 1, std::string::npos)); + auto colon = fn.rfind(':'); + if (colon == std::string::npos) fail(); + std::string filename(fn, 0, colon); + auto lineno = std::stoi(std::string(fn, colon + 1, std::string::npos)); + return {CanonPath(fn.substr(0, colon)), lineno}; } catch (std::invalid_argument & e) { - throw ParseError("cannot parse line number '%s'", pos); + fail(); + abort(); } - - return { std::move(filename), lineno }; } diff --git a/src/libexpr/attr-path.hh b/src/libexpr/attr-path.hh index 117e0051b..eb00ffb93 100644 --- a/src/libexpr/attr-path.hh +++ b/src/libexpr/attr-path.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "eval.hh" @@ -16,8 +17,10 @@ std::pair findAlongAttrPath( Bindings & autoArgs, Value & vIn); -/* Heuristic to find the filename and lineno or a nix value. */ -std::pair findPackageFilename(EvalState & state, Value & v, std::string what); +/** + * Heuristic to find the filename and lineno or a nix value. + */ +std::pair findPackageFilename(EvalState & state, Value & v, std::string what); std::vector parseAttrPath(EvalState & state, std::string_view s); diff --git a/src/libexpr/attr-set.hh b/src/libexpr/attr-set.hh index dcc73b506..31215f880 100644 --- a/src/libexpr/attr-set.hh +++ b/src/libexpr/attr-set.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "nixexpr.hh" #include "symbol-table.hh" @@ -12,7 +13,9 @@ namespace nix { class EvalState; struct Value; -/* Map one attribute name to its value. */ +/** + * Map one attribute name to its value. + */ struct Attr { /* the placement of `name` and `pos` in this struct is important. @@ -36,10 +39,12 @@ static_assert(sizeof(Attr) == 2 * sizeof(uint32_t) + sizeof(Value *), "avoid introducing any padding into Attr if at all possible, and do not " "introduce new fields that need not be present for almost every instance."); -/* Bindings contains all the attributes of an attribute set. It is defined - by its size and its capacity, the capacity being the number of Attr - elements allocated after this structure, while the size corresponds to - the number of elements already inserted in this structure. */ +/** + * Bindings contains all the attributes of an attribute set. It is defined + * by its size and its capacity, the capacity being the number of Attr + * elements allocated after this structure, while the size corresponds to + * the number of elements already inserted in this structure. + */ class Bindings { public: @@ -94,7 +99,9 @@ public: size_t capacity() { return capacity_; } - /* Returns the attributes in lexicographically sorted order. */ + /** + * Returns the attributes in lexicographically sorted order. + */ std::vector lexicographicOrder(const SymbolTable & symbols) const { std::vector res; @@ -111,9 +118,11 @@ public: friend class EvalState; }; -/* A wrapper around Bindings that ensures that its always in sorted - order at the end. 
The only way to consume a BindingsBuilder is to - call finish(), which sorts the bindings. */ +/** + * A wrapper around Bindings that ensures that its always in sorted + * order at the end. The only way to consume a BindingsBuilder is to + * call finish(), which sorts the bindings. + */ class BindingsBuilder { Bindings * bindings; diff --git a/src/libexpr/eval-cache.cc b/src/libexpr/eval-cache.cc index 1219b2471..9e734e654 100644 --- a/src/libexpr/eval-cache.cc +++ b/src/libexpr/eval-cache.cc @@ -47,7 +47,7 @@ struct AttrDb { auto state(_state->lock()); - Path cacheDir = getCacheDir() + "/nix/eval-cache-v4"; + Path cacheDir = getCacheDir() + "/nix/eval-cache-v5"; createDirs(cacheDir); Path dbPath = cacheDir + "/" + fingerprint.to_string(Base16, false) + ".sqlite"; @@ -300,7 +300,7 @@ struct AttrDb NixStringContext context; if (!queryAttribute.isNull(3)) for (auto & s : tokenizeString>(queryAttribute.getStr(3), ";")) - context.push_back(NixStringContextElem::parse(cfg, s)); + context.insert(NixStringContextElem::parse(s)); return {{rowId, string_t{queryAttribute.getStr(2), context}}}; } case AttrType::Bool: @@ -442,8 +442,10 @@ Value & AttrCursor::forceValue() if (v.type() == nString) cachedValue = {root->db->setString(getKey(), v.string.s, v.string.context), string_t{v.string.s, {}}}; - else if (v.type() == nPath) - cachedValue = {root->db->setString(getKey(), v.path), string_t{v.path, {}}}; + else if (v.type() == nPath) { + auto path = v.path().path; + cachedValue = {root->db->setString(getKey(), path.abs()), string_t{path.abs(), {}}}; + } else if (v.type() == nBool) cachedValue = {root->db->setBool(getKey(), v.boolean), v.boolean}; else if (v.type() == nInt) @@ -580,7 +582,7 @@ std::string AttrCursor::getString() if (v.type() != nString && v.type() != nPath) root->state.error("'%s' is not a string but %s", getAttrPathStr()).debugThrow(); - return v.type() == nString ? v.string.s : v.path; + return v.type() == nString ? v.string.s : v.path().to_string(); } string_t AttrCursor::getStringWithContext() @@ -619,10 +621,13 @@ string_t AttrCursor::getStringWithContext() auto & v = forceValue(); - if (v.type() == nString) - return {v.string.s, v.getContext(*root->state.store)}; + if (v.type() == nString) { + NixStringContext context; + copyContext(v, context); + return {v.string.s, std::move(context)}; + } else if (v.type() == nPath) - return {v.path, {}}; + return {v.path().to_string(), {}}; else root->state.error("'%s' is not a string but %s", getAttrPathStr()).debugThrow(); } diff --git a/src/libexpr/eval-cache.hh b/src/libexpr/eval-cache.hh index c93e55b93..46c4999c8 100644 --- a/src/libexpr/eval-cache.hh +++ b/src/libexpr/eval-cache.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "sync.hh" #include "hash.hh" @@ -109,8 +110,10 @@ public: ref getAttr(std::string_view name); - /* Get an attribute along a chain of attrsets. Note that this does - not auto-call functors or functions. */ + /** + * Get an attribute along a chain of attrsets. Note that this does + * not auto-call functors or functions. + */ OrSuggestions> findAlongAttrPath(const std::vector & attrPath, bool force = false); std::string getString(); @@ -129,7 +132,9 @@ public: Value & forceValue(); - /* Force creation of the .drv file in the Nix store. */ + /** + * Force creation of the .drv file in the Nix store. 
+ */ StorePath forceDerivation(); }; diff --git a/src/libexpr/eval-inline.hh b/src/libexpr/eval-inline.hh index f0da688db..a988fa40c 100644 --- a/src/libexpr/eval-inline.hh +++ b/src/libexpr/eval-inline.hh @@ -1,10 +1,13 @@ #pragma once +///@file #include "eval.hh" namespace nix { -/* Note: Various places expect the allocated memory to be zeroed. */ +/** + * Note: Various places expect the allocated memory to be zeroed. + */ [[gnu::always_inline]] inline void * allocBytes(size_t n) { diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index 3e37c7f60..e2b455b91 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -8,6 +8,8 @@ #include "eval-inline.hh" #include "filetransfer.hh" #include "function-trace.hh" +#include "profiles.hh" +#include "print.hh" #include #include @@ -103,21 +105,13 @@ void Value::print(const SymbolTable & symbols, std::ostream & str, str << integer; break; case tBool: - str << (boolean ? "true" : "false"); + printLiteralBool(str, boolean); break; case tString: - str << "\""; - for (const char * i = string.s; *i; i++) - if (*i == '\"' || *i == '\\') str << "\\" << *i; - else if (*i == '\n') str << "\\n"; - else if (*i == '\r') str << "\\r"; - else if (*i == '\t') str << "\\t"; - else if (*i == '$' && *(i+1) == '{') str << "\\" << *i; - else str << *i; - str << "\""; + printLiteralString(str, string.s); break; case tPath: - str << path; // !!! escaping? + str << path().to_string(); // !!! escaping? break; case tNull: str << "null"; @@ -172,7 +166,17 @@ void Value::print(const SymbolTable & symbols, std::ostream & str, case tFloat: str << fpoint; break; + case tBlackhole: + // Although we know for sure that it's going to be an infinite recursion + // when this value is accessed _in the current context_, it's likely + // that the user will misinterpret a simpler «infinite recursion» output + // as a definitive statement about the value, while in fact it may be + // a valid value after `builtins.trace` and perhaps some other steps + // have completed. + str << "«potential infinite recursion»"; + break; default: + printError("Nix evaluator internal error: Value::print(): invalid value type %1%", internalType); abort(); } } @@ -228,6 +232,9 @@ std::string_view showType(ValueType type) std::string showType(const Value & v) { + // Allow selecting a subset of enum values + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wswitch-enum" switch (v.internalType) { case tString: return v.string.context ? "a string with context" : "a string"; case tPrimOp: @@ -241,16 +248,21 @@ std::string showType(const Value & v) default: return std::string(showType(v.type())); } + #pragma GCC diagnostic pop } PosIdx Value::determinePos(const PosIdx pos) const { + // Allow selecting a subset of enum values + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wswitch-enum" switch (internalType) { case tAttrs: return attrs->pos; case tLambda: return lambda.fun->pos; case tApp: return app.left->determinePos(pos); default: return pos; } + #pragma GCC diagnostic pop } bool Value::isTrivial() const @@ -325,6 +337,22 @@ static Symbol getName(const AttrName & name, EvalState & state, Env & env) } } +#if HAVE_BOEHMGC +/* Disable GC while this object lives. Used by CoroutineContext. + * + * Boehm keeps a count of GC_disable() and GC_enable() calls, + * and only enables GC when the count matches. 
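A rough usage sketch of the RAII guard defined just below; the enclosing scope and the `runCoroutineWork()` call are illustrative assumptions, not call sites from this change:

    {
        BoehmDisableGC noGC;   // constructor calls GC_disable()
        runCoroutineWork();    // hypothetical work that must not race with a collection
    }                          // destructor calls GC_enable() when the scope is left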
+ */ +class BoehmDisableGC { +public: + BoehmDisableGC() { + GC_disable(); + }; + ~BoehmDisableGC() { + GC_enable(); + }; +}; +#endif static bool gcInitialised = false; @@ -349,6 +377,15 @@ void initGC() StackAllocator::defaultAllocator = &boehmGCStackAllocator; + +#if NIX_BOEHM_PATCH_VERSION != 1 + printTalkative("Unpatched BoehmGC, disabling GC inside coroutines"); + /* Used to disable GC when entering coroutines on macOS */ + create_coro_gc_hook = []() -> std::shared_ptr { + return std::make_shared(); + }; +#endif + /* Set the initial heap size to something fairly big (25% of physical RAM, up to a maximum of 384 MiB) so that in most cases we don't need to garbage collect at all. (Collection has a @@ -368,7 +405,7 @@ void initGC() size = (pageSize * pages) / 4; // 25% of RAM if (size > maxSize) size = maxSize; #endif - debug(format("setting initial heap size to %1% bytes") % size); + debug("setting initial heap size to %1% bytes", size); GC_expand_hp(size); } @@ -498,6 +535,7 @@ EvalState::EvalState( , sOutputSpecified(symbols.create("outputSpecified")) , repair(NoRepair) , emptyBindings(0) + , derivationInternal(rootPath(CanonPath("/builtin/derivation.nix"))) , store(store) , buildStore(buildStore ? buildStore : store) , debugRepl(nullptr) @@ -519,7 +557,6 @@ EvalState::EvalState( static_assert(sizeof(Env) <= 16, "environment must be <= 16 bytes"); /* Initialise the Nix expression search path. */ - evalSettings.nixPath.setDefault(evalSettings.getDefaultNixPath()); if (!evalSettings.pureEval) { for (auto & i : _searchPath) addToSearchPath(i); for (auto & i : evalSettings.nixPath.get()) addToSearchPath(i); @@ -573,15 +610,14 @@ void EvalState::allowAndSetStorePathString(const StorePath & storePath, Value & { allowPath(storePath); - auto path = store->printStorePath(storePath); - v.mkString(path, PathSet({path})); + mkStorePathString(storePath, v); } -Path EvalState::checkSourcePath(const Path & path_) +SourcePath EvalState::checkSourcePath(const SourcePath & path_) { if (!allowedPaths) return path_; - auto i = resolvedPaths.find(path_); + auto i = resolvedPaths.find(path_.path.abs()); if (i != resolvedPaths.end()) return i->second; @@ -591,9 +627,9 @@ Path EvalState::checkSourcePath(const Path & path_) * attacker can't append ../../... to a path that would be in allowedPaths * and thus leak symlink targets. */ - Path abspath = canonPath(path_); + Path abspath = canonPath(path_.path.abs()); - if (hasPrefix(abspath, corepkgsPrefix)) return abspath; + if (hasPrefix(abspath, corepkgsPrefix)) return CanonPath(abspath); for (auto & i : *allowedPaths) { if (isDirOrInDir(abspath, i)) { @@ -610,12 +646,12 @@ Path EvalState::checkSourcePath(const Path & path_) } /* Resolve symlinks. */ - debug(format("checking access to '%s'") % abspath); - Path path = canonPath(abspath, true); + debug("checking access to '%s'", abspath); + SourcePath path = CanonPath(canonPath(abspath, true)); for (auto & i : *allowedPaths) { - if (isDirOrInDir(path, i)) { - resolvedPaths[path_] = path; + if (isDirOrInDir(path.path.abs(), i)) { + resolvedPaths.insert_or_assign(path_.path.abs(), path); return path; } } @@ -643,12 +679,12 @@ void EvalState::checkURI(const std::string & uri) /* If the URI is a path, then check it against allowedPaths as well. 
*/ if (hasPrefix(uri, "/")) { - checkSourcePath(uri); + checkSourcePath(CanonPath(uri)); return; } if (hasPrefix(uri, "file://")) { - checkSourcePath(std::string(uri, 7)); + checkSourcePath(CanonPath(std::string(uri, 7))); return; } @@ -656,7 +692,7 @@ void EvalState::checkURI(const std::string & uri) } -Path EvalState::toRealPath(const Path & path, const PathSet & context) +Path EvalState::toRealPath(const Path & path, const NixStringContext & context) { // FIXME: check whether 'path' is in 'context'. return @@ -908,34 +944,34 @@ void Value::mkString(std::string_view s) } -static void copyContextToValue(Value & v, const PathSet & context) +static void copyContextToValue(Value & v, const NixStringContext & context) { if (!context.empty()) { size_t n = 0; v.string.context = (const char * *) allocBytes((context.size() + 1) * sizeof(char *)); for (auto & i : context) - v.string.context[n++] = dupString(i.c_str()); + v.string.context[n++] = dupString(i.to_string().c_str()); v.string.context[n] = 0; } } -void Value::mkString(std::string_view s, const PathSet & context) +void Value::mkString(std::string_view s, const NixStringContext & context) { mkString(s); copyContextToValue(*this, context); } -void Value::mkStringMove(const char * s, const PathSet & context) +void Value::mkStringMove(const char * s, const NixStringContext & context) { mkString(s); copyContextToValue(*this, context); } -void Value::mkPath(std::string_view s) +void Value::mkPath(const SourcePath & path) { - mkPath(makeImmutableString(s)); + mkPath(makeImmutableString(path.path.abs())); } @@ -991,9 +1027,9 @@ void EvalState::mkThunk_(Value & v, Expr * expr) void EvalState::mkPos(Value & v, PosIdx p) { auto pos = positions[p]; - if (auto path = std::get_if(&pos.origin)) { + if (auto path = std::get_if(&pos.origin)) { auto attrs = buildBindings(3); - attrs.alloc(sFile).mkString(*path); + attrs.alloc(sFile).mkString(path->path.abs()); attrs.alloc(sLine).mkInt(pos.line); attrs.alloc(sColumn).mkInt(pos.column); v.mkAttrs(attrs); @@ -1002,6 +1038,16 @@ void EvalState::mkPos(Value & v, PosIdx p) } +void EvalState::mkStorePathString(const StorePath & p, Value & v) +{ + v.mkString( + store->printStorePath(p), + NixStringContext { + NixStringContextElem::Opaque { .path = p }, + }); +} + + /* Create a thunk for the delayed computation of the given expression in the given environment. But if the expression is a variable, then look it up right away. This significantly reduces the number @@ -1049,7 +1095,7 @@ Value * ExprPath::maybeThunk(EvalState & state, Env & env) } -void EvalState::evalFile(const Path & path_, Value & v, bool mustBeTrivial) +void EvalState::evalFile(const SourcePath & path_, Value & v, bool mustBeTrivial) { auto path = checkSourcePath(path_); @@ -1059,7 +1105,7 @@ void EvalState::evalFile(const Path & path_, Value & v, bool mustBeTrivial) return; } - Path resolvedPath = resolveExprPath(path); + auto resolvedPath = resolveExprPath(path); if ((i = fileEvalCache.find(resolvedPath)) != fileEvalCache.end()) { v = i->second; return; @@ -1087,8 +1133,8 @@ void EvalState::resetFileCache() void EvalState::cacheFile( - const Path & path, - const Path & resolvedPath, + const SourcePath & path, + const SourcePath & resolvedPath, Expr * e, Value & v, bool mustBeTrivial) @@ -1102,7 +1148,7 @@ void EvalState::cacheFile( *e, this->baseEnv, e->getPos() ? 
static_cast>(positions[e->getPos()]) : nullptr, - "while evaluating the file '%1%':", resolvedPath) + "while evaluating the file '%1%':", resolvedPath.to_string()) : nullptr; // Enforce that 'flake.nix' is a direct attrset, not a @@ -1112,7 +1158,7 @@ void EvalState::cacheFile( error("file '%s' must be an attribute set", path).debugThrow(); eval(e, v); } catch (Error & e) { - addErrorTrace(e, "while evaluating the file '%1%':", resolvedPath); + addErrorTrace(e, "while evaluating the file '%1%':", resolvedPath.to_string()); throw; } @@ -1373,8 +1419,8 @@ void ExprSelect::eval(EvalState & state, Env & env, Value & v) } catch (Error & e) { if (pos2) { auto pos2r = state.positions[pos2]; - auto origin = std::get_if(&pos2r.origin); - if (!(origin && *origin == state.derivationNixPath)) + auto origin = std::get_if(&pos2r.origin); + if (!(origin && *origin == state.derivationInternal)) state.addErrorTrace(e, pos2, "while evaluating the attribute '%1%'", showAttrPath(state, env, attrPath)); } @@ -1864,7 +1910,7 @@ void EvalState::concatLists(Value & v, size_t nrLists, Value * * lists, const Po void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v) { - PathSet context; + NixStringContext context; std::vector s; size_t sSize = 0; NixInt n = 0; @@ -1947,7 +1993,7 @@ void ExprConcatStrings::eval(EvalState & state, Env & env, Value & v) else if (firstType == nPath) { if (!context.empty()) state.error("a string that refers to a store path cannot be appended to a path").atPos(pos).withFrame(env, *this).debugThrow(); - v.mkPath(canonPath(str())); + v.mkPath(CanonPath(canonPath(str()))); } else v.mkStringMove(c_str(), context); } @@ -2073,26 +2119,15 @@ std::string_view EvalState::forceString(Value & v, const PosIdx pos, std::string } -void copyContext(const Value & v, PathSet & context) +void copyContext(const Value & v, NixStringContext & context) { if (v.string.context) for (const char * * p = v.string.context; *p; ++p) - context.insert(*p); + context.insert(NixStringContextElem::parse(*p)); } -NixStringContext Value::getContext(const Store & store) -{ - NixStringContext res; - assert(internalType == tString); - if (string.context) - for (const char * * p = string.context; *p; ++p) - res.push_back(NixStringContextElem::parse(store, *p)); - return res; -} - - -std::string_view EvalState::forceString(Value & v, PathSet & context, const PosIdx pos, std::string_view errorCtx) +std::string_view EvalState::forceString(Value & v, NixStringContext & context, const PosIdx pos, std::string_view errorCtx) { auto s = forceString(v, pos, errorCtx); copyContext(v, context); @@ -2122,7 +2157,7 @@ bool EvalState::isDerivation(Value & v) std::optional EvalState::tryAttrsToString(const PosIdx pos, Value & v, - PathSet & context, bool coerceMore, bool copyToStore) + NixStringContext & context, bool coerceMore, bool copyToStore) { auto i = v.attrs->find(sToString); if (i != v.attrs->end()) { @@ -2136,8 +2171,14 @@ std::optional EvalState::tryAttrsToString(const PosIdx pos, Value & return {}; } -BackedStringView EvalState::coerceToString(const PosIdx pos, Value &v, PathSet &context, - std::string_view errorCtx, bool coerceMore, bool copyToStore, bool canonicalizePath) +BackedStringView EvalState::coerceToString( + const PosIdx pos, + Value & v, + NixStringContext & context, + std::string_view errorCtx, + bool coerceMore, + bool copyToStore, + bool canonicalizePath) { forceValue(v, pos); @@ -2147,12 +2188,14 @@ BackedStringView EvalState::coerceToString(const PosIdx pos, Value &v, PathSet & } if (v.type() == 
nPath) { - BackedStringView path(PathView(v.path)); - if (canonicalizePath) - path = canonPath(*path); - if (copyToStore) - path = store->printStorePath(copyPathToStore(context, std::move(path).toOwned())); - return path; + return + !canonicalizePath && !copyToStore + ? // FIXME: hack to preserve path literals that end in a + // slash, as in /foo/${x}. + v._path + : copyToStore + ? store->printStorePath(copyPathToStore(context, v.path())) + : std::string(v.path().path.abs()); } if (v.type() == nAttrs) { @@ -2213,40 +2256,40 @@ BackedStringView EvalState::coerceToString(const PosIdx pos, Value &v, PathSet & } -StorePath EvalState::copyPathToStore(PathSet & context, const Path & path) +StorePath EvalState::copyPathToStore(NixStringContext & context, const SourcePath & path) { - if (nix::isDerivation(path)) + if (nix::isDerivation(path.path.abs())) error("file names are not allowed to end in '%1%'", drvExtension).debugThrow(); - auto dstPath = [&]() -> StorePath - { - auto i = srcToStore.find(path); - if (i != srcToStore.end()) return i->second; + auto i = srcToStore.find(path); - auto dstPath = settings.readOnlyMode - ? store->computeStorePathForPath(std::string(baseNameOf(path)), checkSourcePath(path)).first - : store->addToStore(std::string(baseNameOf(path)), checkSourcePath(path), FileIngestionMethod::Recursive, htSHA256, defaultPathFilter, repair); - allowPath(dstPath); - srcToStore.insert_or_assign(path, dstPath); - printMsg(lvlChatty, "copied source '%1%' -> '%2%'", path, store->printStorePath(dstPath)); - return dstPath; - }(); + auto dstPath = i != srcToStore.end() + ? i->second + : [&]() { + auto dstPath = path.fetchToStore(store, path.baseName(), nullptr, repair); + allowPath(dstPath); + srcToStore.insert_or_assign(path, dstPath); + printMsg(lvlChatty, "copied source '%1%' -> '%2%'", path, store->printStorePath(dstPath)); + return dstPath; + }(); - context.insert(store->printStorePath(dstPath)); + context.insert(NixStringContextElem::Opaque { + .path = dstPath + }); return dstPath; } -Path EvalState::coerceToPath(const PosIdx pos, Value & v, PathSet & context, std::string_view errorCtx) +SourcePath EvalState::coerceToPath(const PosIdx pos, Value & v, NixStringContext & context, std::string_view errorCtx) { auto path = coerceToString(pos, v, context, errorCtx, false, false, true).toOwned(); if (path == "" || path[0] != '/') error("string '%1%' doesn't represent an absolute path", path).withTrace(pos, errorCtx).debugThrow(); - return path; + return CanonPath(path); } -StorePath EvalState::coerceToStorePath(const PosIdx pos, Value & v, PathSet & context, std::string_view errorCtx) +StorePath EvalState::coerceToStorePath(const PosIdx pos, Value & v, NixStringContext & context, std::string_view errorCtx) { auto path = coerceToString(pos, v, context, errorCtx, false, false, true).toOwned(); if (auto storePath = store->maybeParseStorePath(path)) @@ -2285,7 +2328,7 @@ bool EvalState::eqValues(Value & v1, Value & v2, const PosIdx pos, std::string_v return strcmp(v1.string.s, v2.string.s) == 0; case nPath: - return strcmp(v1.path, v2.path) == 0; + return strcmp(v1._path, v2._path) == 0; case nNull: return true; @@ -2327,6 +2370,7 @@ bool EvalState::eqValues(Value & v1, Value & v2, const PosIdx pos, std::string_v case nFloat: return v1.fpoint == v2.fpoint; + case nThunk: // Must not be left by forceValue default: error("cannot compare %1% with %2%", showType(v1), showType(v2)).withTrace(pos, errorCtx).debugThrow(); } @@ -2411,8 +2455,8 @@ void EvalState::printStats() else obj["name"] = 
nullptr; if (auto pos = positions[fun->pos]) { - if (auto path = std::get_if(&pos.origin)) - obj["file"] = *path; + if (auto path = std::get_if(&pos.origin)) + obj["file"] = path->to_string(); obj["line"] = pos.line; obj["column"] = pos.column; } @@ -2426,8 +2470,8 @@ void EvalState::printStats() for (auto & i : attrSelects) { json obj = json::object(); if (auto pos = positions[i.first]) { - if (auto path = std::get_if(&pos.origin)) - obj["file"] = *path; + if (auto path = std::get_if(&pos.origin)) + obj["file"] = path->to_string(); obj["line"] = pos.line; obj["column"] = pos.column; } @@ -2452,7 +2496,7 @@ void EvalState::printStats() } -std::string ExternalValueBase::coerceToString(const Pos & pos, PathSet & context, bool copyMore, bool copyToStore) const +std::string ExternalValueBase::coerceToString(const Pos & pos, NixStringContext & context, bool copyMore, bool copyToStore) const { throw TypeError({ .msg = hintfmt("cannot coerce %1% to a string", showType()) @@ -2473,35 +2517,30 @@ std::ostream & operator << (std::ostream & str, const ExternalValueBase & v) { EvalSettings::EvalSettings() { + auto var = getEnv("NIX_PATH"); + if (var) nixPath = parseNixPath(*var); } -/* impure => NIX_PATH or a default path - * restrict-eval => NIX_PATH - * pure-eval => empty - */ Strings EvalSettings::getDefaultNixPath() { - if (pureEval) - return {}; - - auto var = getEnv("NIX_PATH"); - if (var) { - return parseNixPath(*var); - } else if (restrictEval) { - return {}; - } else { - Strings res; - auto add = [&](const Path & p, const std::optional & s = std::nullopt) { - if (pathExists(p)) - res.push_back(s ? *s + "=" + p : p); - }; + Strings res; + auto add = [&](const Path & p, const std::string & s = std::string()) { + if (pathExists(p)) { + if (s.empty()) { + res.push_back(p); + } else { + res.push_back(s + "=" + p); + } + } + }; + if (!evalSettings.restrictEval && !evalSettings.pureEval) { add(settings.useXDGBaseDirectories ? getStateDir() + "/nix/defexpr/channels" : getHome() + "/.nix-defexpr/channels"); - add(settings.nixStateDir + "/profiles/per-user/root/channels/nixpkgs", "nixpkgs"); - add(settings.nixStateDir + "/profiles/per-user/root/channels"); - - return res; + add(rootChannelsDir() + "/nixpkgs", "nixpkgs"); + add(rootChannelsDir()); } + + return res; } bool EvalSettings::isPseudoUrl(std::string_view s) diff --git a/src/libexpr/eval.hh b/src/libexpr/eval.hh index 2340ef67b..bb3ac2b22 100644 --- a/src/libexpr/eval.hh +++ b/src/libexpr/eval.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "attr-set.hh" #include "types.hh" @@ -7,6 +8,7 @@ #include "symbol-table.hh" #include "config.hh" #include "experimental-features.hh" +#include "input-accessor.hh" #include #include @@ -42,7 +44,10 @@ struct PrimOp struct Env { Env * up; - unsigned short prevWith:14; // nr of levels up to next `with' environment + /** + * Number of of levels up to next `with` environment + */ + unsigned short prevWith:14; enum { Plain = 0, HasWithExpr, HasWithAttrs } type:2; Value * values[0]; }; @@ -52,23 +57,21 @@ void printEnvBindings(const SymbolTable & st, const StaticEnv & se, const Env & std::unique_ptr mapStaticEnvBindings(const SymbolTable & st, const StaticEnv & se, const Env & env); -void copyContext(const Value & v, PathSet & context); - - -/* Cache for calls to addToStore(); maps source paths to the store - paths. 
*/ -typedef std::map SrcToStore; +void copyContext(const Value & v, NixStringContext & context); std::string printValue(const EvalState & state, const Value & v); std::ostream & operator << (std::ostream & os, const ValueType t); +// FIXME: maybe change this to an std::variant. typedef std::pair SearchPathElem; typedef std::list SearchPath; -/* Initialise the Boehm GC, if applicable. */ +/** + * Initialise the Boehm GC, if applicable. + */ void initGC(); @@ -129,8 +132,6 @@ public: SymbolTable symbols; PosTable positions; - static inline std::string derivationNixPath = "//builtin/derivation.nix"; - const Symbol sWith, sOutPath, sDrvPath, sType, sMeta, sName, sValue, sSystem, sOverrides, sOutputs, sOutputName, sIgnoreNulls, sFile, sLine, sColumn, sFunctor, sToString, @@ -141,28 +142,39 @@ public: sDescription, sSelf, sEpsilon, sStartSet, sOperator, sKey, sPath, sPrefix, sOutputSpecified; - Symbol sDerivationNix; - /* If set, force copying files to the Nix store even if they - already exist there. */ + /** + * If set, force copying files to the Nix store even if they + * already exist there. + */ RepairFlag repair; - /* The allowed filesystem paths in restricted or pure evaluation - mode. */ + /** + * The allowed filesystem paths in restricted or pure evaluation + * mode. + */ std::optional allowedPaths; Bindings emptyBindings; - /* Store used to materialise .drv files. */ + const SourcePath derivationInternal; + + /** + * Store used to materialise .drv files. + */ const ref store; - /* Store used to build stuff. */ + /** + * Store used to build stuff. + */ const ref buildStore; RootValue vCallFlake = nullptr; RootValue vImportedDrvToDerivation = nullptr; - /* Debugger */ + /** + * Debugger + */ void (* debugRepl)(ref es, const ValMap & extraEnv); bool debugStop; bool debugQuit; @@ -216,21 +228,28 @@ public: } private: - SrcToStore srcToStore; - /* A cache from path names to parse trees. */ + /* Cache for calls to addToStore(); maps source paths to the store + paths. */ + std::map srcToStore; + + /** + * A cache from path names to parse trees. + */ #if HAVE_BOEHMGC - typedef std::map, traceable_allocator>> FileParseCache; + typedef std::map, traceable_allocator>> FileParseCache; #else - typedef std::map FileParseCache; + typedef std::map FileParseCache; #endif FileParseCache fileParseCache; - /* A cache from path names to values. */ + /** + * A cache from path names to values. + */ #if HAVE_BOEHMGC - typedef std::map, traceable_allocator>> FileEvalCache; + typedef std::map, traceable_allocator>> FileEvalCache; #else - typedef std::map FileEvalCache; + typedef std::map FileEvalCache; #endif FileEvalCache fileEvalCache; @@ -238,17 +257,25 @@ private: std::map> searchPathResolved; - /* Cache used by checkSourcePath(). */ - std::unordered_map resolvedPaths; + /** + * Cache used by checkSourcePath(). + */ + std::unordered_map resolvedPaths; - /* Cache used by prim_match(). */ + /** + * Cache used by prim_match(). + */ std::shared_ptr regexCache; #if HAVE_BOEHMGC - /* Allocation cache for GC'd Value objects. */ + /** + * Allocation cache for GC'd Value objects. + */ std::shared_ptr valueAllocCache; - /* Allocation cache for size-1 Env objects. */ + /** + * Allocation cache for size-1 Env objects. + */ std::shared_ptr env1AllocCache; #endif @@ -264,87 +291,126 @@ public: SearchPath getSearchPath() { return searchPath; } - /* Allow access to a path. */ + /** + * Return a `SourcePath` that refers to `path` in the root + * filesystem. 
+ */ + SourcePath rootPath(CanonPath path); + + /** + * Allow access to a path. + */ void allowPath(const Path & path); - /* Allow access to a store path. Note that this gets remapped to - the real store path if `store` is a chroot store. */ + /** + * Allow access to a store path. Note that this gets remapped to + * the real store path if `store` is a chroot store. + */ void allowPath(const StorePath & storePath); - /* Allow access to a store path and return it as a string. */ + /** + * Allow access to a store path and return it as a string. + */ void allowAndSetStorePathString(const StorePath & storePath, Value & v); - /* Check whether access to a path is allowed and throw an error if - not. Otherwise return the canonicalised path. */ - Path checkSourcePath(const Path & path); + /** + * Check whether access to a path is allowed and throw an error if + * not. Otherwise return the canonicalised path. + */ + SourcePath checkSourcePath(const SourcePath & path); void checkURI(const std::string & uri); - /* When using a diverted store and 'path' is in the Nix store, map - 'path' to the diverted location (e.g. /nix/store/foo is mapped - to /home/alice/my-nix/nix/store/foo). However, this is only - done if the context is not empty, since otherwise we're - probably trying to read from the actual /nix/store. This is - intended to distinguish between import-from-derivation and - sources stored in the actual /nix/store. */ - Path toRealPath(const Path & path, const PathSet & context); + /** + * When using a diverted store and 'path' is in the Nix store, map + * 'path' to the diverted location (e.g. /nix/store/foo is mapped + * to /home/alice/my-nix/nix/store/foo). However, this is only + * done if the context is not empty, since otherwise we're + * probably trying to read from the actual /nix/store. This is + * intended to distinguish between import-from-derivation and + * sources stored in the actual /nix/store. + */ + Path toRealPath(const Path & path, const NixStringContext & context); - /* Parse a Nix expression from the specified file. */ - Expr * parseExprFromFile(const Path & path); - Expr * parseExprFromFile(const Path & path, std::shared_ptr & staticEnv); + /** + * Parse a Nix expression from the specified file. + */ + Expr * parseExprFromFile(const SourcePath & path); + Expr * parseExprFromFile(const SourcePath & path, std::shared_ptr & staticEnv); - /* Parse a Nix expression from the specified string. */ - Expr * parseExprFromString(std::string s, const Path & basePath, std::shared_ptr & staticEnv); - Expr * parseExprFromString(std::string s, const Path & basePath); + /** + * Parse a Nix expression from the specified string. + */ + Expr * parseExprFromString(std::string s, const SourcePath & basePath, std::shared_ptr & staticEnv); + Expr * parseExprFromString(std::string s, const SourcePath & basePath); Expr * parseStdin(); - /* Evaluate an expression read from the given file to normal - form. Optionally enforce that the top-level expression is - trivial (i.e. doesn't require arbitrary computation). */ - void evalFile(const Path & path, Value & v, bool mustBeTrivial = false); + /** + * Evaluate an expression read from the given file to normal + * form. Optionally enforce that the top-level expression is + * trivial (i.e. doesn't require arbitrary computation). + */ + void evalFile(const SourcePath & path, Value & v, bool mustBeTrivial = false); - /* Like `evalFile`, but with an already parsed expression. */ + /** + * Like `evalFile`, but with an already parsed expression. 
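A small sketch of the `SourcePath`-based parsing API, mirroring the call sites converted from `absPath(".")` elsewhere in this change; it assumes an `EvalState & state` is in scope:

    // Parse and evaluate a string relative to the current working directory.
    nix::Value v;
    auto * e = state.parseExprFromString("1 + 2", state.rootPath(nix::CanonPath::fromCwd()));
    state.eval(e, v);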
+ */ void cacheFile( - const Path & path, - const Path & resolvedPath, + const SourcePath & path, + const SourcePath & resolvedPath, Expr * e, Value & v, bool mustBeTrivial = false); void resetFileCache(); - /* Look up a file in the search path. */ - Path findFile(const std::string_view path); - Path findFile(SearchPath & searchPath, const std::string_view path, const PosIdx pos = noPos); + /** + * Look up a file in the search path. + */ + SourcePath findFile(const std::string_view path); + SourcePath findFile(SearchPath & searchPath, const std::string_view path, const PosIdx pos = noPos); - /* If the specified search path element is a URI, download it. */ + /** + * If the specified search path element is a URI, download it. + */ std::pair resolveSearchPathElem(const SearchPathElem & elem); - /* Evaluate an expression to normal form, storing the result in - value `v'. */ + /** + * Evaluate an expression to normal form + * + * @param [out] v The result is stored here. + */ void eval(Expr * e, Value & v); - /* Evaluation the expression, then verify that it has the expected - type. */ + /** + * Evaluate the expression, then verify that it has the expected + * type. + */ inline bool evalBool(Env & env, Expr * e); inline bool evalBool(Env & env, Expr * e, const PosIdx pos, std::string_view errorCtx); inline void evalAttrs(Env & env, Expr * e, Value & v, const PosIdx pos, std::string_view errorCtx); - /* If `v' is a thunk, enter it and overwrite `v' with the result - of the evaluation of the thunk. If `v' is a delayed function - application, call the function and overwrite `v' with the - result. Otherwise, this is a no-op. */ + /** + * If `v` is a thunk, enter it and overwrite `v` with the result + * of the evaluation of the thunk. If `v` is a delayed function + * application, call the function and overwrite `v` with the + * result. Otherwise, this is a no-op. + */ inline void forceValue(Value & v, const PosIdx pos); template inline void forceValue(Value & v, Callable getPos); - /* Force a value, then recursively force list elements and - attributes. */ + /** + * Force a value, then recursively force list elements and + * attributes. + */ void forceValueDeep(Value & v); - /* Force `v', and then verify that it has the expected type. */ + /** + * Force `v`, and then verify that it has the expected type.
+ */ NixInt forceInt(Value & v, const PosIdx pos, std::string_view errorCtx); NixFloat forceFloat(Value & v, const PosIdx pos, std::string_view errorCtx); bool forceBool(Value & v, const PosIdx pos, std::string_view errorCtx); @@ -355,9 +421,12 @@ public: inline void forceAttrs(Value & v, Callable getPos, std::string_view errorCtx); inline void forceList(Value & v, const PosIdx pos, std::string_view errorCtx); - void forceFunction(Value & v, const PosIdx pos, std::string_view errorCtx); // either lambda or primop + /** + * @param v either lambda or primop + */ + void forceFunction(Value & v, const PosIdx pos, std::string_view errorCtx); std::string_view forceString(Value & v, const PosIdx pos, std::string_view errorCtx); - std::string_view forceString(Value & v, PathSet & context, const PosIdx pos, std::string_view errorCtx); + std::string_view forceString(Value & v, NixStringContext & context, const PosIdx pos, std::string_view errorCtx); std::string_view forceStringNoCtx(Value & v, const PosIdx pos, std::string_view errorCtx); [[gnu::noinline]] @@ -366,39 +435,55 @@ public: void addErrorTrace(Error & e, const PosIdx pos, const char * s, const std::string & s2, bool frame = false) const; public: - /* Return true iff the value `v' denotes a derivation (i.e. a - set with attribute `type = "derivation"'). */ + /** + * @return true iff the value `v` denotes a derivation (i.e. a + * set with attribute `type = "derivation"`). + */ bool isDerivation(Value & v); std::optional tryAttrsToString(const PosIdx pos, Value & v, - PathSet & context, bool coerceMore = false, bool copyToStore = true); + NixStringContext & context, bool coerceMore = false, bool copyToStore = true); - /* String coercion. Converts strings, paths and derivations to a - string. If `coerceMore' is set, also converts nulls, integers, - booleans and lists to a string. If `copyToStore' is set, - referenced paths are copied to the Nix store as a side effect. */ - BackedStringView coerceToString(const PosIdx pos, Value & v, PathSet & context, + /** + * String coercion. + * + * Converts strings, paths and derivations to a + * string. If `coerceMore` is set, also converts nulls, integers, + * booleans and lists to a string. If `copyToStore` is set, + * referenced paths are copied to the Nix store as a side effect. + */ + BackedStringView coerceToString(const PosIdx pos, Value & v, NixStringContext & context, std::string_view errorCtx, bool coerceMore = false, bool copyToStore = true, bool canonicalizePath = true); - StorePath copyPathToStore(PathSet & context, const Path & path); + StorePath copyPathToStore(NixStringContext & context, const SourcePath & path); - /* Path coercion. Converts strings, paths and derivations to a - path. The result is guaranteed to be a canonicalised, absolute - path. Nothing is copied to the store. */ - Path coerceToPath(const PosIdx pos, Value & v, PathSet & context, std::string_view errorCtx); + /** + * Path coercion. + * + * Converts strings, paths and derivations to a + * path. The result is guaranteed to be a canonicalised, absolute + * path. Nothing is copied to the store. + */ + SourcePath coerceToPath(const PosIdx pos, Value & v, NixStringContext & context, std::string_view errorCtx); - /* Like coerceToPath, but the result must be a store path. */ - StorePath coerceToStorePath(const PosIdx pos, Value & v, PathSet & context, std::string_view errorCtx); + /** + * Like coerceToPath, but the result must be a store path. 
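A brief sketch of how string coercion now threads a `NixStringContext` instead of a `PathSet`; `state`, `v` and `pos` are assumed to be in scope, and the error-context message is illustrative:

    nix::NixStringContext context;
    // coerceToString returns a BackedStringView; toOwned() materialises a std::string.
    auto s = state.coerceToString(pos, v, context, "while evaluating an example value").toOwned();
    // Any store paths referenced by the value are now recorded in `context`.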
+ */ + StorePath coerceToStorePath(const PosIdx pos, Value & v, NixStringContext & context, std::string_view errorCtx); public: - /* The base environment, containing the builtin functions and - values. */ + /** + * The base environment, containing the builtin functions and + * values. + */ Env & baseEnv; - /* The same, but used during parsing to resolve variables. */ + /** + * The same, but used during parsing to resolve variables. + */ std::shared_ptr staticBaseEnv; // !!! should be private private: @@ -443,13 +528,15 @@ private: char * text, size_t length, Pos::Origin origin, - Path basePath, + const SourcePath & basePath, std::shared_ptr & staticEnv); public: - /* Do a deep equality test between two values. That is, list - elements and attributes are compared recursively. */ + /** + * Do a deep equality test between two values. That is, list + * elements and attributes are compared recursively. + */ bool eqValues(Value & v1, Value & v2, const PosIdx pos, std::string_view errorCtx); bool isFunctor(Value & fun); @@ -463,11 +550,15 @@ public: callFunction(fun, 1, args, vRes, pos); } - /* Automatically call a function for which each argument has a - default value or has a binding in the `args' map. */ + /** + * Automatically call a function for which each argument has a + * default value or has a binding in the `args` map. + */ void autoCallFunction(Bindings & args, Value & fun, Value & res); - /* Allocation primitives. */ + /** + * Allocation primitives. + */ inline Value * allocValue(); inline Env & allocEnv(size_t size); @@ -485,15 +576,24 @@ public: void mkThunk_(Value & v, Expr * expr); void mkPos(Value & v, PosIdx pos); + /* Create a string representing a store path. + + The string is the printed store path with a context containing a single + `Opaque` element of that store path. */ + void mkStorePathString(const StorePath & storePath, Value & v); + void concatLists(Value & v, size_t nrLists, Value * * lists, const PosIdx pos, std::string_view errorCtx); - /* Print statistics. */ + /** + * Print statistics. + */ void printStats(); - /* Realise the given context, and return a mapping from the placeholders + /** + * Realise the given context, and return a mapping from the placeholders * used to construct the associated value to their final store path */ - [[nodiscard]] StringMap realiseContext(const PathSet & context); + [[nodiscard]] StringMap realiseContext(const NixStringContext & context); private: @@ -550,12 +650,16 @@ struct DebugTraceStacker { DebugTrace trace; }; -/* Return a string representing the type of the value `v'. */ +/** + * @return A string representing the type of the value `v`. + */ std::string_view showType(ValueType type); std::string showType(const Value & v); -/* If `path' refers to a directory, then append "/default.nix". */ -Path resolveExprPath(Path path); +/** + * If `path` refers to a directory, then append "/default.nix". + */ +SourcePath resolveExprPath(const SourcePath & path); struct InvalidPathError : EvalError { @@ -570,7 +674,7 @@ struct EvalSettings : Config { EvalSettings(); - Strings getDefaultNixPath(); + static Strings getDefaultNixPath(); static bool isPseudoUrl(std::string_view s); @@ -580,15 +684,8 @@ struct EvalSettings : Config "Whether builtin functions that allow executing native code should be enabled."}; Setting nixPath{ - this, {}, "nix-path", - R"( - List of directories to be searched for `<...>` file references. 
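The new mkStorePathString helper above relies on the fact that a Nix string value is text plus a set of context elements. A simplified illustration of that pairing (assumed stand-in types; the real context elements live in libstore):

```cpp
#include <set>
#include <string>

// Stand-in for a context element referring to a plain ("opaque") store path.
struct OpaqueElem {
    std::string storePath;
    bool operator<(const OpaqueElem & other) const { return storePath < other.storePath; }
};

struct StringWithContext {
    std::string text;              // what the string prints as
    std::set<OpaqueElem> context;  // which store paths it depends on
};

// Analogous to mkStorePathString: the text is the printed store path and the
// context is a single Opaque element for that same path.
StringWithContext mkStorePathStringSketch(const std::string & printedStorePath)
{
    return {printedStorePath, {OpaqueElem{printedStorePath}}};
}
```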
- - If [pure evaluation](#conf-pure-eval) is disabled, - this is initialised using the [`NIX_PATH`](@docroot@/command-ref/env-common.md#env-NIX_PATH) - environment variable, or, if it is unset and [restricted evaluation](#conf-restrict-eval) - is disabled, a default search path including the user's and `root`'s channels. - )"}; + this, getDefaultNixPath(), "nix-path", + "List of directories to be searched for `<...>` file references."}; Setting restrictEval{ this, false, "restrict-eval", diff --git a/src/libexpr/flake/call-flake.nix b/src/libexpr/flake/call-flake.nix index 8061db3df..4beb0b0fe 100644 --- a/src/libexpr/flake/call-flake.nix +++ b/src/libexpr/flake/call-flake.nix @@ -16,7 +16,9 @@ let subdir = if key == lockFile.root then rootSubdir else node.locked.dir or ""; - flake = import (sourceInfo + (if subdir != "" then "/" else "") + subdir + "/flake.nix"); + outPath = sourceInfo + ((if subdir == "" then "" else "/") + subdir); + + flake = import (outPath + "/flake.nix"); inputs = builtins.mapAttrs (inputName: inputSpec: allNodes.${resolveInput inputSpec}) @@ -43,7 +45,21 @@ let outputs = flake.outputs (inputs // { self = result; }); - result = outputs // sourceInfo // { inherit inputs; inherit outputs; inherit sourceInfo; _type = "flake"; }; + result = + outputs + # We add the sourceInfo attribute for its metadata, as they are + # relevant metadata for the flake. However, the outPath of the + # sourceInfo does not necessarily match the outPath of the flake, + # as the flake may be in a subdirectory of a source. + # This is shadowed in the next // + // sourceInfo + // { + # This shadows the sourceInfo.outPath + inherit outPath; + + inherit inputs; inherit outputs; inherit sourceInfo; _type = "flake"; + }; + in if node.flake or true then assert builtins.isFunction flake.outputs; diff --git a/src/libexpr/flake/config.cc b/src/libexpr/flake/config.cc index 89ddbde7e..e89014862 100644 --- a/src/libexpr/flake/config.cc +++ b/src/libexpr/flake/config.cc @@ -31,7 +31,7 @@ static void writeTrustedList(const TrustedList & trustedList) void ConfigFile::apply() { - std::set whitelist{"bash-prompt", "bash-prompt-prefix", "bash-prompt-suffix", "flake-registry"}; + std::set whitelist{"bash-prompt", "bash-prompt-prefix", "bash-prompt-suffix", "flake-registry", "commit-lockfile-summary"}; for (auto & [name, value] : settings) { diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc index f659b5ca4..1dd23c5ca 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -125,6 +125,9 @@ static FlakeInput parseFlakeInput(EvalState & state, follows.insert(follows.begin(), lockRootPath.begin(), lockRootPath.end()); input.follows = follows; } else { + // Allow selecting a subset of enum values + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wswitch-enum" switch (attr.value->type()) { case nString: attrs.emplace(state.symbols[attr.name], attr.value->string.s); @@ -139,6 +142,7 @@ static FlakeInput parseFlakeInput(EvalState & state, throw TypeError("flake input attribute '%s' is %s while a string, Boolean, or integer is expected", state.symbols[attr.name], showType(*attr.value)); } + #pragma GCC diagnostic pop } } catch (Error & e) { e.addTrace( @@ -218,9 +222,9 @@ static Flake getFlake( throw Error("source tree referenced by '%s' does not contain a '%s/flake.nix' file", lockedRef, lockedRef.subdir); Value vInfo; - state.evalFile(flakeFile, vInfo, true); // FIXME: symlink attack + state.evalFile(CanonPath(flakeFile), vInfo, true); // FIXME: symlink attack - 
expectType(state, nAttrs, vInfo, state.positions.add({flakeFile}, 1, 1)); + expectType(state, nAttrs, vInfo, state.positions.add({CanonPath(flakeFile)}, 1, 1)); if (auto description = vInfo.attrs->get(state.sDescription)) { expectType(state, nString, *description->value, description->pos); @@ -261,7 +265,7 @@ static Flake getFlake( state.symbols[setting.name], std::string(state.forceStringNoCtx(*setting.value, setting.pos, ""))); else if (setting.value->type() == nPath) { - PathSet emptyContext = {}; + NixStringContext emptyContext = {}; flake.config.settings.emplace( state.symbols[setting.name], state.coerceToString(setting.pos, *setting.value, emptyContext, "", false, true, true) .toOwned()); @@ -320,7 +324,7 @@ LockedFlake lockFlake( const FlakeRef & topRef, const LockFlags & lockFlags) { - settings.requireExperimentalFeature(Xp::Flakes); + experimentalFeatureSettings.require(Xp::Flakes); FlakeCache flakeCache; @@ -334,10 +338,14 @@ LockedFlake lockFlake( } try { + if (!fetchSettings.allowDirty && lockFlags.referenceLockFilePath) { + throw Error("reference lock file was provided, but the `allow-dirty` setting is set to false"); + } // FIXME: symlink attack auto oldLockFile = LockFile::read( - flake.sourceInfo->actualPath + "/" + flake.lockedRef.subdir + "/flake.lock"); + lockFlags.referenceLockFilePath.value_or( + flake.sourceInfo->actualPath + "/" + flake.lockedRef.subdir + "/flake.lock")); debug("old lock file: %s", oldLockFile); @@ -619,13 +627,20 @@ LockedFlake lockFlake( debug("new lock file: %s", newLockFile); + auto relPath = (topRef.subdir == "" ? "" : topRef.subdir + "/") + "flake.lock"; + auto sourcePath = topRef.input.getSourcePath(); + auto outputLockFilePath = sourcePath ? std::optional{*sourcePath + "/" + relPath} : std::nullopt; + if (lockFlags.outputLockFilePath) { + outputLockFilePath = lockFlags.outputLockFilePath; + } + /* Check whether we need to / can write the new lock file. */ - if (!(newLockFile == oldLockFile)) { + if (newLockFile != oldLockFile || lockFlags.outputLockFilePath) { auto diff = LockFile::diff(oldLockFile, newLockFile); if (lockFlags.writeLockFile) { - if (auto sourcePath = topRef.input.getSourcePath()) { + if (outputLockFilePath) { if (auto unlockedInput = newLockFile.isUnlocked()) { if (fetchSettings.warnDirty) warn("will not write lock file of flake '%s' because it has an unlocked input ('%s')", topRef, *unlockedInput); @@ -633,25 +648,24 @@ LockedFlake lockFlake( if (!lockFlags.updateLockFile) throw Error("flake '%s' requires lock file changes but they're not allowed due to '--no-update-lock-file'", topRef); - auto relPath = (topRef.subdir == "" ? 
"" : topRef.subdir + "/") + "flake.lock"; - - auto path = *sourcePath + "/" + relPath; - - bool lockFileExists = pathExists(path); + bool lockFileExists = pathExists(*outputLockFilePath); if (lockFileExists) { auto s = chomp(diff); if (s.empty()) - warn("updating lock file '%s'", path); + warn("updating lock file '%s'", *outputLockFilePath); else - warn("updating lock file '%s':\n%s", path, s); + warn("updating lock file '%s':\n%s", *outputLockFilePath, s); } else - warn("creating lock file '%s'", path); + warn("creating lock file '%s'", *outputLockFilePath); - newLockFile.write(path); + newLockFile.write(*outputLockFilePath); std::optional commitMessage = std::nullopt; if (lockFlags.commitLockFile) { + if (lockFlags.outputLockFilePath) { + throw Error("--commit-lock-file and --output-lock-file are currently incompatible"); + } std::string cm; cm = fetchSettings.commitLockFileSummary.get(); @@ -731,7 +745,7 @@ void callFlake(EvalState & state, state.vCallFlake = allocRootValue(state.allocValue()); state.eval(state.parseExprFromString( #include "call-flake.nix.gen.hh" - , "/"), **state.vCallFlake); + , CanonPath::root), **state.vCallFlake); } state.callFunction(**state.vCallFlake, *vLocks, *vTmp1, noPos); diff --git a/src/libexpr/flake/flake.hh b/src/libexpr/flake/flake.hh index 10301d8aa..c1d1b71e5 100644 --- a/src/libexpr/flake/flake.hh +++ b/src/libexpr/flake/flake.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "types.hh" #include "flakeref.hh" @@ -17,7 +18,8 @@ struct FlakeInput; typedef std::map FlakeInputs; -/* FlakeInput is the 'Flake'-level parsed form of the "input" entries +/** + * FlakeInput is the 'Flake'-level parsed form of the "input" entries * in the flake file. * * A FlakeInput is normally constructed by the 'parseFlakeInput' @@ -41,7 +43,12 @@ typedef std::map FlakeInputs; struct FlakeInput { std::optional ref; - bool isFlake = true; // true = process flake to get outputs, false = (fetched) static source path + /** + * true = process flake to get outputs + * + * false = (fetched) static source path + */ + bool isFlake = true; std::optional follows; FlakeInputs overrides; }; @@ -55,23 +62,42 @@ struct ConfigFile void apply(); }; -/* The contents of a flake.nix file. */ +/** + * The contents of a flake.nix file. + */ struct Flake { - FlakeRef originalRef; // the original flake specification (by the user) - FlakeRef resolvedRef; // registry references and caching resolved to the specific underlying flake - FlakeRef lockedRef; // the specific local store result of invoking the fetcher - bool forceDirty = false; // pretend that 'lockedRef' is dirty + /** + * The original flake specification (by the user) + */ + FlakeRef originalRef; + /** + * registry references and caching resolved to the specific underlying flake + */ + FlakeRef resolvedRef; + /** + * the specific local store result of invoking the fetcher + */ + FlakeRef lockedRef; + /** + * pretend that 'lockedRef' is dirty + */ + bool forceDirty = false; std::optional description; std::shared_ptr sourceInfo; FlakeInputs inputs; - ConfigFile config; // 'nixConfig' attribute + /** + * 'nixConfig' attribute + */ + ConfigFile config; ~Flake(); }; Flake getFlake(EvalState & state, const FlakeRef & flakeRef, bool allowLookup); -/* Fingerprint of a locked flake; used as a cache key. */ +/** + * Fingerprint of a locked flake; used as a cache key. 
+ */ typedef Hash Fingerprint; struct LockedFlake @@ -84,44 +110,72 @@ struct LockFlags { - /* Whether to ignore the existing lock file, creating a new one - from scratch. */ + /** + * Whether to ignore the existing lock file, creating a new one + * from scratch. + */ bool recreateLockFile = false; - /* Whether to update the lock file at all. If set to false, if any - change to the lock file is needed (e.g. when an input has been - added to flake.nix), you get a fatal error. */ + /** + * Whether to update the lock file at all. If set to false, if any + * change to the lock file is needed (e.g. when an input has been + * added to flake.nix), you get a fatal error. + */ bool updateLockFile = true; - /* Whether to write the lock file to disk. If set to true, if the - any changes to the lock file are needed and the flake is not - writable (i.e. is not a local Git working tree or similar), you - get a fatal error. If set to false, Nix will use the modified - lock file in memory only, without writing it to disk. */ + /** + * Whether to write the lock file to disk. If set to true, if + * any changes to the lock file are needed and the flake is not + * writable (i.e. is not a local Git working tree or similar), you + * get a fatal error. If set to false, Nix will use the modified + * lock file in memory only, without writing it to disk. + */ bool writeLockFile = true; - /* Whether to use the registries to lookup indirect flake - references like 'nixpkgs'. */ + /** + * Whether to use the registries to look up indirect flake + * references like 'nixpkgs'. + */ std::optional useRegistries = std::nullopt; - /* Whether to apply flake's nixConfig attribute to the configuration */ + /** + * Whether to apply flake's nixConfig attribute to the configuration + */ bool applyNixConfig = false; - /* Whether unlocked flake references (i.e. those without a Git - revision or similar) without a corresponding lock are - allowed. Unlocked flake references with a lock are always - allowed. */ + /** + * Whether unlocked flake references (i.e. those without a Git + * revision or similar) without a corresponding lock are + * allowed. Unlocked flake references with a lock are always + * allowed. + */ bool allowUnlocked = true; - /* Whether to commit changes to flake.lock. */ + /** + * Whether to commit changes to flake.lock. + */ bool commitLockFile = false; - /* Flake inputs to be overridden. */ + /** + * The path to a lock file to read instead of the `flake.lock` file in the top-level flake + */ + std::optional referenceLockFilePath; + + /** + * The path to a lock file to write to instead of the `flake.lock` file in the top-level flake + */ + std::optional outputLockFilePath; + + /** + * Flake inputs to be overridden. + */ std::map inputOverrides; - /* Flake inputs to be updated. This means that any existing lock - for those inputs will be ignored. */ + /** + * Flake inputs to be updated. This means that any existing lock + * for those inputs will be ignored. + */ std::set inputUpdates; }; diff --git a/src/libexpr/flake/flakeref.hh b/src/libexpr/flake/flakeref.hh index c4142fc20..a7c9208c0 100644 --- a/src/libexpr/flake/flakeref.hh +++ b/src/libexpr/flake/flakeref.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "types.hh" #include "hash.hh" @@ -13,7 +14,8 @@ class Store; typedef std::string FlakeId; -/* A flake reference specifies how to fetch a flake or raw source +/** + * A flake reference specifies how to fetch a flake or raw source + * (e.g. from a Git repository).
It is created from a URL-like syntax * (e.g. 'github:NixOS/patchelf'), an attrset representation (e.g. '{ * type="github"; owner = "NixOS"; repo = "patchelf"; }'), or a local @@ -32,14 +34,17 @@ typedef std::string FlakeId; * be lazy), but the fetcher can be invoked at any time via the * FlakeRef to ensure the store is populated with this input. */ - struct FlakeRef { - /* Fetcher-specific representation of the input, sufficient to - perform the fetch operation. */ + /** + * Fetcher-specific representation of the input, sufficient to + * perform the fetch operation. + */ fetchers::Input input; - /* sub-path within the fetched input that represents this input */ + /** + * sub-path within the fetched input that represents this input + */ Path subdir; bool operator==(const FlakeRef & other) const; diff --git a/src/libexpr/flake/lockfile.cc b/src/libexpr/flake/lockfile.cc index a74e68c9c..ba2fd46f0 100644 --- a/src/libexpr/flake/lockfile.cc +++ b/src/libexpr/flake/lockfile.cc @@ -234,6 +234,11 @@ bool LockFile::operator ==(const LockFile & other) const return toJSON() == other.toJSON(); } +bool LockFile::operator !=(const LockFile & other) const +{ + return !(*this == other); +} + InputPath parseInputPath(std::string_view s) { InputPath path; diff --git a/src/libexpr/flake/lockfile.hh b/src/libexpr/flake/lockfile.hh index 02e9bdfbc..ba4c0c848 100644 --- a/src/libexpr/flake/lockfile.hh +++ b/src/libexpr/flake/lockfile.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "flakeref.hh" @@ -15,9 +16,11 @@ typedef std::vector InputPath; struct LockedNode; -/* A node in the lock file. It has outgoing edges to other nodes (its - inputs). Only the root node has this type; all other nodes have - type LockedNode. */ +/** + * A node in the lock file. It has outgoing edges to other nodes (its + * inputs). Only the root node has this type; all other nodes have + * type LockedNode. + */ struct Node : std::enable_shared_from_this { typedef std::variant, InputPath> Edge; @@ -27,7 +30,9 @@ struct Node : std::enable_shared_from_this virtual ~Node() { } }; -/* A non-root node in the lock file. */ +/** + * A non-root node in the lock file. + */ struct LockedNode : Node { FlakeRef lockedRef, originalRef; @@ -62,10 +67,15 @@ struct LockFile void write(const Path & path) const; - /* Check whether this lock file has any unlocked inputs. */ + /** + * Check whether this lock file has any unlocked inputs. + */ std::optional isUnlocked() const; bool operator ==(const LockFile & other) const; + // Needed for old gcc versions that don't synthesize it (like gcc 8.2.2 + // that is still the default on aarch64-linux) + bool operator !=(const LockFile & other) const; std::shared_ptr findInput(const InputPath & path); @@ -73,7 +83,9 @@ struct LockFile static std::string diff(const LockFile & oldLocks, const LockFile & newLocks); - /* Check that every 'follows' input target exists. */ + /** + * Check that every 'follows' input target exists. 
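The explicit operator!= added to LockFile above simply negates the JSON-based operator== because, as the comment in the diff notes, the older GCC versions still in use do not synthesize it. A toy equivalent of that pattern:

```cpp
#include <string>

// Toy stand-in for LockFile: equality is defined by comparing a canonical
// serialisation, and != is spelled out explicitly rather than derived from ==.
struct LockFileSketch {
    std::string canonicalJson;

    bool operator==(const LockFileSketch & other) const
    { return canonicalJson == other.canonicalJson; }

    bool operator!=(const LockFileSketch & other) const
    { return !(*this == other); }
};
```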
+ */ void check(); }; diff --git a/src/libexpr/function-trace.hh b/src/libexpr/function-trace.hh index e9a2526bd..91439b0aa 100644 --- a/src/libexpr/function-trace.hh +++ b/src/libexpr/function-trace.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "eval.hh" diff --git a/src/libexpr/get-drvs.cc b/src/libexpr/get-drvs.cc index 1602fbffb..506a63677 100644 --- a/src/libexpr/get-drvs.cc +++ b/src/libexpr/get-drvs.cc @@ -71,7 +71,7 @@ std::optional DrvInfo::queryDrvPath() const { if (!drvPath && attrs) { Bindings::iterator i = attrs->find(state->sDrvPath); - PathSet context; + NixStringContext context; if (i == attrs->end()) drvPath = {std::nullopt}; else @@ -93,7 +93,7 @@ StorePath DrvInfo::queryOutPath() const { if (!outPath && attrs) { Bindings::iterator i = attrs->find(state->sOutPath); - PathSet context; + NixStringContext context; if (i != attrs->end()) outPath = state->coerceToStorePath(i->pos, *i->value, context, "while evaluating the output path of a derivation"); } @@ -124,7 +124,7 @@ DrvInfo::Outputs DrvInfo::queryOutputs(bool withPaths, bool onlyOutputsToInstall /* And evaluate its ‘outPath’ attribute. */ Bindings::iterator outPath = out->value->attrs->find(state->sOutPath); if (outPath == out->value->attrs->end()) continue; // FIXME: throw error? - PathSet context; + NixStringContext context; outputs.emplace(output, state->coerceToStorePath(outPath->pos, *outPath->value, context, "while evaluating an output path of a derivation")); } else outputs.emplace(output, std::nullopt); diff --git a/src/libexpr/get-drvs.hh b/src/libexpr/get-drvs.hh index bbd2d3c47..584d64ac1 100644 --- a/src/libexpr/get-drvs.hh +++ b/src/libexpr/get-drvs.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "eval.hh" #include "path.hh" @@ -25,7 +26,10 @@ private: mutable std::string outputName; Outputs outputs; - bool failed = false; // set if we get an AssertionError + /** + * Set if we get an AssertionError + */ + bool failed = false; Bindings * attrs = nullptr, * meta = nullptr; @@ -34,7 +38,10 @@ private: bool checkMeta(Value & v); public: - std::string attrPath; /* path towards the derivation */ + /** + * path towards the derivation + */ + std::string attrPath; DrvInfo(EvalState & state) : state(&state) { }; DrvInfo(EvalState & state, std::string attrPath, Bindings * attrs); @@ -46,8 +53,10 @@ public: StorePath requireDrvPath() const; StorePath queryOutPath() const; std::string queryOutputName() const; - /** Return the unordered map of output names to (optional) output paths. - * The "outputs to install" are determined by `meta.outputsToInstall`. */ + /** + * Return the unordered map of output names to (optional) output paths. + * The "outputs to install" are determined by `meta.outputsToInstall`. + */ Outputs queryOutputs(bool withPaths = true, bool onlyOutputsToInstall = false); StringSet queryMetaNames(); @@ -79,8 +88,10 @@ typedef std::list DrvInfos; #endif -/* If value `v' denotes a derivation, return a DrvInfo object - describing it. Otherwise return nothing. */ +/** + * If value `v` denotes a derivation, return a DrvInfo object + * describing it. Otherwise return nothing. 
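queryOutputs above distinguishes between all outputs of a derivation and the "outputs to install" selected by meta.outputsToInstall. A small sketch of that filtering step (simplified types; not the real DrvInfo API):

```cpp
#include <map>
#include <optional>
#include <set>
#include <string>

using OutputsSketch = std::map<std::string, std::optional<std::string>>;

// Keep only the outputs named in meta.outputsToInstall, roughly what
// queryOutputs(withPaths, onlyOutputsToInstall = true) is documented to do.
OutputsSketch filterOutputsToInstall(
    const OutputsSketch & all, const std::set<std::string> & outputsToInstall)
{
    OutputsSketch result;
    for (auto & [name, path] : all)
        if (outputsToInstall.count(name))
            result.emplace(name, path);
    return result;
}
```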
+ */ std::optional getDerivation(EvalState & state, Value & v, bool ignoreAssertionFailures); diff --git a/src/libexpr/json-to-value.hh b/src/libexpr/json-to-value.hh index 84bec4eba..3b8ec000f 100644 --- a/src/libexpr/json-to-value.hh +++ b/src/libexpr/json-to-value.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "eval.hh" diff --git a/src/libexpr/local.mk b/src/libexpr/local.mk index 2171e769b..d243b9cec 100644 --- a/src/libexpr/local.mk +++ b/src/libexpr/local.mk @@ -46,3 +46,5 @@ $(foreach i, $(wildcard src/libexpr/flake/*.hh), \ $(d)/primops.cc: $(d)/imported-drv-to-derivation.nix.gen.hh $(d)/primops/derivation.nix.gen.hh $(d)/fetchurl.nix.gen.hh $(d)/flake/flake.cc: $(d)/flake/call-flake.nix.gen.hh + +src/libexpr/primops/fromTOML.o: ERROR_SWITCH_ENUM = diff --git a/src/libexpr/nixexpr.cc b/src/libexpr/nixexpr.cc index eb6f062b4..4566a1388 100644 --- a/src/libexpr/nixexpr.cc +++ b/src/libexpr/nixexpr.cc @@ -3,6 +3,7 @@ #include "eval.hh" #include "symbol-table.hh" #include "util.hh" +#include "print.hh" #include @@ -31,9 +32,9 @@ struct PosAdapter : AbstractPos // Get rid of the null terminators added by the parser. return std::string(s.source->c_str()); }, - [](const Path & path) -> std::optional { + [](const SourcePath & path) -> std::optional { try { - return readFile(path); + return path.readFile(); } catch (Error &) { return std::nullopt; } @@ -47,7 +48,7 @@ struct PosAdapter : AbstractPos [&](const Pos::none_tag &) { out << "«none»"; }, [&](const Pos::Stdin &) { out << "«stdin»"; }, [&](const Pos::String & s) { out << "«string»"; }, - [&](const Path & path) { out << path; } + [&](const SourcePath & path) { out << path; } }, origin); } }; @@ -60,45 +61,12 @@ Pos::operator std::shared_ptr() const return pos; } -/* Displaying abstract syntax trees. 
*/ - -static void showString(std::ostream & str, std::string_view s) -{ - str << '"'; - for (auto c : s) - if (c == '"' || c == '\\' || c == '$') str << "\\" << c; - else if (c == '\n') str << "\\n"; - else if (c == '\r') str << "\\r"; - else if (c == '\t') str << "\\t"; - else str << c; - str << '"'; -} - +// FIXME: remove, because *symbols* are abstract and do not have a single +// textual representation; see printIdentifier() std::ostream & operator <<(std::ostream & str, const SymbolStr & symbol) { std::string_view s = symbol; - - if (s.empty()) - str << "\"\""; - else if (s == "if") // FIXME: handle other keywords - str << '"' << s << '"'; - else { - char c = s[0]; - if (!((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_')) { - showString(str, s); - return str; - } - for (auto c : s) - if (!((c >= 'a' && c <= 'z') || - (c >= 'A' && c <= 'Z') || - (c >= '0' && c <= '9') || - c == '_' || c == '\'' || c == '-')) { - showString(str, s); - return str; - } - str << s; - } - return str; + return printIdentifier(str, s); } void Expr::show(const SymbolTable & symbols, std::ostream & str) const @@ -118,7 +86,7 @@ void ExprFloat::show(const SymbolTable & symbols, std::ostream & str) const void ExprString::show(const SymbolTable & symbols, std::ostream & str) const { - showString(str, s); + printLiteralString(str, s); } void ExprPath::show(const SymbolTable & symbols, std::ostream & str) const diff --git a/src/libexpr/nixexpr.hh b/src/libexpr/nixexpr.hh index 4a81eaa47..5ca3d1fa6 100644 --- a/src/libexpr/nixexpr.hh +++ b/src/libexpr/nixexpr.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include #include @@ -21,7 +22,9 @@ MakeError(UndefinedVarError, Error); MakeError(MissingArgumentError, EvalError); MakeError(RestrictedPathError, Error); -/* Position objects. */ +/** + * Position objects. + */ struct Pos { uint32_t line; @@ -31,7 +34,7 @@ struct Pos struct Stdin { ref source; }; struct String { ref source; }; - typedef std::variant Origin; + typedef std::variant Origin; Origin origin; @@ -132,7 +135,9 @@ class EvalState; struct StaticEnv; -/* An attribute path is a sequence of attribute names. */ +/** + * An attribute path is a sequence of attribute names. + */ struct AttrName { Symbol symbol; @@ -212,11 +217,11 @@ struct ExprVar : Expr or function argument) or from a "with". */ bool fromWith; - /* In the former case, the value is obtained by going `level' + /* In the former case, the value is obtained by going `level` levels up from the current environment and getting the - `displ'th value in that environment. In the latter case, the - value is obtained by getting the attribute named `name' from - the set stored in the environment that is `level' levels up + `displ`th value in that environment. 
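The removed showString/operator<< logic now lives behind printLiteralString and printIdentifier in print.cc. For reference, a standalone approximation of the literal-string escaping that the removed code performed:

```cpp
#include <iostream>
#include <ostream>
#include <string_view>

// Approximation of the escaping in the showString code removed above:
// quote the string and escape the characters that are special inside a
// double-quoted Nix string ('"', '\', '$', and the usual control characters).
std::ostream & printLiteralStringSketch(std::ostream & str, std::string_view s)
{
    str << '"';
    for (char c : s) {
        if (c == '"' || c == '\\' || c == '$') str << '\\' << c;
        else if (c == '\n') str << "\\n";
        else if (c == '\r') str << "\\r";
        else if (c == '\t') str << "\\t";
        else str << c;
    }
    return str << '"';
}

int main()
{
    printLiteralStringSketch(std::cout, "say \"hi\"\n") << "\n";
    // prints: "say \"hi\"\n"
}
```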
In the latter case, the + value is obtained by getting the attribute named `name` from + the set stored in the environment that is `level` levels up from the current one.*/ Level level; Displacement displ; diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y index 7bc6866c1..f2e825497 100644 --- a/src/libexpr/parser.y +++ b/src/libexpr/parser.y @@ -31,7 +31,7 @@ namespace nix { EvalState & state; SymbolTable & symbols; Expr * result; - Path basePath; + SourcePath basePath; PosTable::Origin origin; std::optional error; }; @@ -471,7 +471,7 @@ expr_simple new ExprString(std::move(path))}); } | URI { - static bool noURLLiterals = settings.isExperimentalFeatureEnabled(Xp::NoUrlLiterals); + static bool noURLLiterals = experimentalFeatureSettings.isEnabled(Xp::NoUrlLiterals); if (noURLLiterals) throw ParseError({ .msg = hintfmt("URL literals are disabled"), @@ -511,7 +511,7 @@ string_parts_interpolated path_start : PATH { - Path path(absPath({$1.p, $1.l}, data->basePath)); + Path path(absPath({$1.p, $1.l}, data->basePath.path.abs())); /* add back in the trailing '/' to the first segment */ if ($1.p[$1.l-1] == '/' && $1.l > 1) path += "/"; @@ -653,7 +653,7 @@ Expr * EvalState::parse( char * text, size_t length, Pos::Origin origin, - Path basePath, + const SourcePath & basePath, std::shared_ptr & staticEnv) { yyscan_t scanner; @@ -677,48 +677,36 @@ Expr * EvalState::parse( } -Path resolveExprPath(Path path) +SourcePath resolveExprPath(const SourcePath & path) { - assert(path[0] == '/'); - - unsigned int followCount = 0, maxFollow = 1024; - /* If `path' is a symlink, follow it. This is so that relative path references work. */ - struct stat st; - while (true) { - // Basic cycle/depth limit to avoid infinite loops. - if (++followCount >= maxFollow) - throw Error("too many symbolic links encountered while traversing the path '%s'", path); - st = lstat(path); - if (!S_ISLNK(st.st_mode)) break; - path = absPath(readLink(path), dirOf(path)); - } + auto path2 = path.resolveSymlinks(); /* If `path' refers to a directory, append `/default.nix'. 
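The hand-rolled symlink loop deleted from resolveExprPath above (bounded by maxFollow to catch cycles) is now provided by SourcePath::resolveSymlinks. A self-contained approximation of the old behaviour using std::filesystem:

```cpp
#include <filesystem>
#include <stdexcept>

namespace fs = std::filesystem;

// Follow symlinks one step at a time, with a depth limit to avoid cycles,
// roughly as the removed lstat/readLink/absPath loop did.
fs::path followSymlinksSketch(fs::path p, unsigned maxFollow = 1024)
{
    for (unsigned n = 0; fs::is_symlink(p); ++n) {
        if (n >= maxFollow)
            throw std::runtime_error(
                "too many symbolic links encountered while traversing '" + p.string() + "'");
        auto target = fs::read_symlink(p);
        // A relative target is interpreted relative to the link's directory.
        p = target.is_absolute() ? target : p.parent_path() / target;
    }
    return p;
}
```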
*/ - if (S_ISDIR(st.st_mode)) - path = canonPath(path + "/default.nix"); + if (path2.lstat().type == InputAccessor::tDirectory) + return path2 + "default.nix"; - return path; + return path2; } -Expr * EvalState::parseExprFromFile(const Path & path) +Expr * EvalState::parseExprFromFile(const SourcePath & path) { return parseExprFromFile(path, staticBaseEnv); } -Expr * EvalState::parseExprFromFile(const Path & path, std::shared_ptr & staticEnv) +Expr * EvalState::parseExprFromFile(const SourcePath & path, std::shared_ptr & staticEnv) { - auto buffer = readFile(path); - // readFile should have left some extra space for terminators + auto buffer = path.readFile(); + // readFile hopefully have left some extra space for terminators buffer.append("\0\0", 2); - return parse(buffer.data(), buffer.size(), path, dirOf(path), staticEnv); + return parse(buffer.data(), buffer.size(), Pos::Origin(path), path.parent(), staticEnv); } -Expr * EvalState::parseExprFromString(std::string s_, const Path & basePath, std::shared_ptr & staticEnv) +Expr * EvalState::parseExprFromString(std::string s_, const SourcePath & basePath, std::shared_ptr & staticEnv) { auto s = make_ref(std::move(s_)); s->append("\0\0", 2); @@ -726,7 +714,7 @@ Expr * EvalState::parseExprFromString(std::string s_, const Path & basePath, std } -Expr * EvalState::parseExprFromString(std::string s, const Path & basePath) +Expr * EvalState::parseExprFromString(std::string s, const SourcePath & basePath) { return parseExprFromString(std::move(s), basePath, staticBaseEnv); } @@ -734,12 +722,12 @@ Expr * EvalState::parseExprFromString(std::string s, const Path & basePath) Expr * EvalState::parseStdin() { - //Activity act(*logger, lvlTalkative, format("parsing standard input")); + //Activity act(*logger, lvlTalkative, "parsing standard input"); auto buffer = drainFD(0); // drainFD should have left some extra space for terminators buffer.append("\0\0", 2); auto s = make_ref(std::move(buffer)); - return parse(s->data(), s->size(), Pos::Stdin{.source = s}, absPath("."), staticBaseEnv); + return parse(s->data(), s->size(), Pos::Stdin{.source = s}, rootPath(CanonPath::fromCwd()), staticBaseEnv); } @@ -759,13 +747,13 @@ void EvalState::addToSearchPath(const std::string & s) } -Path EvalState::findFile(const std::string_view path) +SourcePath EvalState::findFile(const std::string_view path) { return findFile(searchPath, path); } -Path EvalState::findFile(SearchPath & searchPath, const std::string_view path, const PosIdx pos) +SourcePath EvalState::findFile(SearchPath & searchPath, const std::string_view path, const PosIdx pos) { for (auto & i : searchPath) { std::string suffix; @@ -781,11 +769,11 @@ Path EvalState::findFile(SearchPath & searchPath, const std::string_view path, c auto r = resolveSearchPathElem(i); if (!r.first) continue; Path res = r.second + suffix; - if (pathExists(res)) return canonPath(res); + if (pathExists(res)) return CanonPath(canonPath(res)); } if (hasPrefix(path, "nix/")) - return concatStrings(corepkgsPrefix, path.substr(4)); + return CanonPath(concatStrings(corepkgsPrefix, path.substr(4))); debugThrow(ThrownError({ .msg = hintfmt(evalSettings.pureEval @@ -818,7 +806,7 @@ std::pair EvalState::resolveSearchPathElem(const SearchPathEl } else if (hasPrefix(elem.second, "flake:")) { - settings.requireExperimentalFeature(Xp::Flakes); + experimentalFeatureSettings.require(Xp::Flakes); auto flakeRef = parseFlakeRef(elem.second.substr(6), {}, true, false); debug("fetching flake search path element '%s''", elem.second); auto storePath = 
flakeRef.resolve(store).fetchTree(store).first.storePath; @@ -837,7 +825,7 @@ std::pair EvalState::resolveSearchPathElem(const SearchPathEl } } - debug(format("resolved search path element '%s' to '%s'") % elem.second % res.second); + debug("resolved search path element '%s' to '%s'", elem.second, res.second); searchPathResolved[elem.second] = res; return res; diff --git a/src/libexpr/paths.cc b/src/libexpr/paths.cc new file mode 100644 index 000000000..1d690b722 --- /dev/null +++ b/src/libexpr/paths.cc @@ -0,0 +1,10 @@ +#include "eval.hh" + +namespace nix { + +SourcePath EvalState::rootPath(CanonPath path) +{ + return std::move(path); +} + +} diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index 41cebb258..e30fa2cd5 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -38,17 +38,16 @@ namespace nix { InvalidPathError::InvalidPathError(const Path & path) : EvalError("path '%s' is not valid", path), path(path) {} -StringMap EvalState::realiseContext(const PathSet & context) +StringMap EvalState::realiseContext(const NixStringContext & context) { std::vector drvs; StringMap res; - for (auto & c_ : context) { + for (auto & c : context) { auto ensureValid = [&](const StorePath & p) { if (!store->isValidPath(p)) debugThrowLastTrace(InvalidPathError(store->printStorePath(p))); }; - auto c = NixStringContextElem::parse(*store, c_); std::visit(overloaded { [&](const NixStringContextElem::Built & b) { drvs.push_back(DerivedPath::Built { @@ -110,16 +109,16 @@ struct RealisePathFlags { bool checkForPureEval = true; }; -static Path realisePath(EvalState & state, const PosIdx pos, Value & v, const RealisePathFlags flags = {}) +static SourcePath realisePath(EvalState & state, const PosIdx pos, Value & v, const RealisePathFlags flags = {}) { - PathSet context; + NixStringContext context; auto path = state.coerceToPath(noPos, v, context, "while realising the context of a path"); try { StringMap rewrites = state.realiseContext(context); - auto realPath = state.toRealPath(rewriteStrings(path, rewrites), context); + auto realPath = state.rootPath(CanonPath(state.toRealPath(rewriteStrings(path.path.abs(), rewrites), context))); return flags.checkForPureEval ? state.checkSourcePath(realPath) @@ -158,7 +157,12 @@ static void mkOutputString( /* FIXME: we need to depend on the basic derivation, not derivation */ : downstreamPlaceholder(*state.store, drvPath, o.first), - {"!" + o.first + "!" 
+ state.store->printStorePath(drvPath)}); + NixStringContext { + NixStringContextElem::Built { + .drvPath = drvPath, + .output = o.first, + } + }); } /* Load and evaluate an expression from path specified by the @@ -166,28 +170,30 @@ static void mkOutputString( static void import(EvalState & state, const PosIdx pos, Value & vPath, Value * vScope, Value & v) { auto path = realisePath(state, pos, vPath); + auto path2 = path.path.abs(); // FIXME auto isValidDerivationInStore = [&]() -> std::optional { - if (!state.store->isStorePath(path)) + if (!state.store->isStorePath(path2)) return std::nullopt; - auto storePath = state.store->parseStorePath(path); - if (!(state.store->isValidPath(storePath) && isDerivation(path))) + auto storePath = state.store->parseStorePath(path2); + if (!(state.store->isValidPath(storePath) && isDerivation(path2))) return std::nullopt; return storePath; }; - if (auto optStorePath = isValidDerivationInStore()) { - auto storePath = *optStorePath; - Derivation drv = state.store->readDerivation(storePath); + if (auto storePath = isValidDerivationInStore()) { + Derivation drv = state.store->readDerivation(*storePath); auto attrs = state.buildBindings(3 + drv.outputs.size()); - attrs.alloc(state.sDrvPath).mkString(path, {"=" + path}); + attrs.alloc(state.sDrvPath).mkString(path2, { + NixStringContextElem::DrvDeep { .drvPath = *storePath }, + }); attrs.alloc(state.sName).mkString(drv.env["name"]); auto & outputsVal = attrs.alloc(state.sOutputs); state.mkList(outputsVal, drv.outputs.size()); for (const auto & [i, o] : enumerate(drv.outputs)) { - mkOutputString(state, attrs, storePath, drv, o); + mkOutputString(state, attrs, *storePath, drv, o); (outputsVal.listElems()[i] = state.allocValue())->mkString(o.first); } @@ -198,7 +204,7 @@ static void import(EvalState & state, const PosIdx pos, Value & vPath, Value * v state.vImportedDrvToDerivation = allocRootValue(state.allocValue()); state.eval(state.parseExprFromString( #include "imported-drv-to-derivation.nix.gen.hh" - , "/"), **state.vImportedDrvToDerivation); + , CanonPath::root), **state.vImportedDrvToDerivation); } state.forceFunction(**state.vImportedDrvToDerivation, pos, "while evaluating imported-drv-to-derivation.nix.gen.hh"); @@ -206,10 +212,10 @@ static void import(EvalState & state, const PosIdx pos, Value & vPath, Value * v state.forceAttrs(v, pos, "while calling imported-drv-to-derivation.nix.gen.hh"); } - else if (path == corepkgsPrefix + "fetchurl.nix") { + else if (path2 == corepkgsPrefix + "fetchurl.nix") { state.eval(state.parseExprFromString( #include "fetchurl.nix.gen.hh" - , "/"), v); + , CanonPath::root), v); } else { @@ -254,9 +260,16 @@ static RegisterPrimOp primop_import({ .args = {"path"}, // TODO turn "normal path values" into link below .doc = R"( - Load, parse and return the Nix expression in the file *path*. If - *path* is a directory, the file ` default.nix ` in that directory - is loaded. Evaluation aborts if the file doesn’t exist or contains + Load, parse and return the Nix expression in the file *path*. + + The value *path* can be a path, a string, or an attribute set with a + `__toString` attribute or an `outPath` attribute (as derivations or flake + inputs typically have). + + If *path* is a directory, the file `default.nix` in that directory + is loaded. + + Evaluation aborts if the file doesn’t exist or contains an incorrect Nix expression.
`import` implements Nix’s module system: you can put any Nix expression (such as a set or a function) in a separate file, and use it from Nix expressions in @@ -323,7 +336,7 @@ void prim_importNative(EvalState & state, const PosIdx pos, Value * * args, Valu std::string sym(state.forceStringNoCtx(*args[1], pos, "while evaluating the second argument passed to builtins.importNative")); - void *handle = dlopen(path.c_str(), RTLD_LAZY | RTLD_LOCAL); + void *handle = dlopen(path.path.c_str(), RTLD_LAZY | RTLD_LOCAL); if (!handle) state.debugThrowLastTrace(EvalError("could not open '%1%': %2%", path, dlerror())); @@ -351,7 +364,7 @@ void prim_exec(EvalState & state, const PosIdx pos, Value * * args, Value & v) auto count = args[0]->listSize(); if (count == 0) state.error("at least one argument to 'exec' required").atPos(pos).debugThrow(); - PathSet context; + NixStringContext context; auto program = state.coerceToString(pos, *elems[0], context, "while evaluating the first element of the argument passed to builtins.exec", false, false).toOwned(); @@ -371,7 +384,7 @@ void prim_exec(EvalState & state, const PosIdx pos, Value * * args, Value & v) auto output = runProgram(program, true, commandArgs); Expr * parsed; try { - parsed = state.parseExprFromString(std::move(output), "/"); + parsed = state.parseExprFromString(std::move(output), state.rootPath(CanonPath::root)); } catch (Error & e) { e.addTrace(state.positions[pos], "while parsing the output from '%1%'", program); throw; @@ -570,6 +583,9 @@ struct CompareValues return v1->integer < v2->fpoint; if (v1->type() != v2->type()) state.error("cannot compare %s with %s", showType(*v1), showType(*v2)).debugThrow(); + // Allow selecting a subset of enum values + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wswitch-enum" switch (v1->type()) { case nInt: return v1->integer < v2->integer; @@ -578,7 +594,7 @@ struct CompareValues case nString: return strcmp(v1->string.s, v2->string.s) < 0; case nPath: - return strcmp(v1->path, v2->path) < 0; + return strcmp(v1->_path, v2->_path) < 0; case nList: // Lexicographic comparison for (size_t i = 0;; i++) { @@ -592,6 +608,7 @@ struct CompareValues } default: state.error("cannot compare %s with %s; values of that type are incomparable", showType(*v1), showType(*v2)).debugThrow(); + #pragma GCC diagnostic pop } } catch (Error & e) { if (!errorCtx.empty()) @@ -757,7 +774,7 @@ static RegisterPrimOp primop_abort({ )", .fun = [](EvalState & state, const PosIdx pos, Value * * args, Value & v) { - PathSet context; + NixStringContext context; auto s = state.coerceToString(pos, *args[0], context, "while evaluating the error message passed to builtins.abort").toOwned(); state.debugThrowLastTrace(Abort("evaluation aborted with the following error message: '%1%'", s)); @@ -776,7 +793,7 @@ static RegisterPrimOp primop_throw({ )", .fun = [](EvalState & state, const PosIdx pos, Value * * args, Value & v) { - PathSet context; + NixStringContext context; auto s = state.coerceToString(pos, *args[0], context, "while evaluating the error message passed to builtin.throw").toOwned(); state.debugThrowLastTrace(ThrownError(s)); @@ -789,7 +806,7 @@ static void prim_addErrorContext(EvalState & state, const PosIdx pos, Value * * state.forceValue(*args[1], pos); v = *args[1]; } catch (Error & e) { - PathSet context; + NixStringContext context; auto message = state.coerceToString(pos, *args[0], context, "while evaluating the error message passed to builtins.addErrorContext", false, false).toOwned(); @@ -1075,7 +1092,7 @@ 
drvName, Bindings * attrs, Value & v) Derivation drv; drv.name = drvName; - PathSet context; + NixStringContext context; bool contentAddressed = false; bool isImpure = false; @@ -1141,13 +1158,13 @@ drvName, Bindings * attrs, Value & v) if (i->name == state.sContentAddressed) { contentAddressed = state.forceBool(*i->value, noPos, context_below); if (contentAddressed) - settings.requireExperimentalFeature(Xp::CaDerivations); + experimentalFeatureSettings.require(Xp::CaDerivations); } else if (i->name == state.sImpure) { isImpure = state.forceBool(*i->value, noPos, context_below); if (isImpure) - settings.requireExperimentalFeature(Xp::ImpureDerivations); + experimentalFeatureSettings.require(Xp::ImpureDerivations); } /* The `args' attribute is special: it supplies the @@ -1221,8 +1238,7 @@ drvName, Bindings * attrs, Value & v) /* Everything in the context of the strings in the derivation attributes should be added as dependencies of the resulting derivation. */ - for (auto & c_ : context) { - auto c = NixStringContextElem::parse(*state.store, c_); + for (auto & c : context) { std::visit(overloaded { /* Since this allows the builder to gain access to every path in the dependency graph of the derivation (including @@ -1282,7 +1298,13 @@ drvName, Bindings * attrs, Value & v) auto h = newHashAllowEmpty(*outputHash, parseHashTypeOpt(outputHashAlgo)); auto method = ingestionMethod.value_or(FileIngestionMethod::Flat); - auto outPath = state.store->makeFixedOutputPath(method, h, drvName); + auto outPath = state.store->makeFixedOutputPath(drvName, FixedOutputInfo { + .hash = { + .method = method, + .hash = h, + }, + .references = {}, + }); drv.env["out"] = state.store->printStorePath(outPath); drv.outputs.insert_or_assign("out", DerivationOutput::CAFixed { @@ -1375,7 +1397,9 @@ drvName, Bindings * attrs, Value & v) } auto result = state.buildBindings(1 + drv.outputs.size()); - result.alloc(state.sDrvPath).mkString(drvPathS, {"=" + drvPathS}); + result.alloc(state.sDrvPath).mkString(drvPathS, { + NixStringContextElem::DrvDeep { .drvPath = drvPath }, + }); for (auto & i : drv.outputs) mkOutputString(state, result, drvPath, drv, i); @@ -1420,9 +1444,9 @@ static RegisterPrimOp primop_placeholder({ /* Convert the argument to a path. !!! obsolete? */ static void prim_toPath(EvalState & state, const PosIdx pos, Value * * args, Value & v) { - PathSet context; - Path path = state.coerceToPath(pos, *args[0], context, "while evaluating the first argument passed to builtins.toPath"); - v.mkString(canonPath(path), context); + NixStringContext context; + auto path = state.coerceToPath(pos, *args[0], context, "while evaluating the first argument passed to builtins.toPath"); + v.mkString(path.path.abs(), context); } static RegisterPrimOp primop_toPath({ @@ -1451,22 +1475,23 @@ static void prim_storePath(EvalState & state, const PosIdx pos, Value * * args, .errPos = state.positions[pos] })); - PathSet context; - Path path = state.checkSourcePath(state.coerceToPath(pos, *args[0], context, "while evaluating the first argument passed to builtins.storePath")); + NixStringContext context; + auto path = state.checkSourcePath(state.coerceToPath(pos, *args[0], context, "while evaluating the first argument passed to builtins.storePath")).path; /* Resolve symlinks in ‘path’, unless ‘path’ itself is a symlink directly in the store. The latter condition is necessary so e.g. nix-push does the right thing. 
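The derivation primop above now walks structured string-context elements rather than re-parsing the old "=<drvPath>" and "!<output>!<drvPath>" string encodings. A rough sketch of the three element kinds and how they feed a derivation's inputs (simplified types; the real NixStringContextElem and Derivation types live in libstore):

```cpp
#include <map>
#include <set>
#include <string>
#include <type_traits>
#include <variant>

// The three kinds of string-context element used in this diff.
struct Opaque  { std::string path; };                          // plain store path
struct DrvDeep { std::string drvPath; };                       // a .drv and everything it builds
struct Built   { std::string drvPath; std::string output; };   // a single output of a .drv

using ContextElemSketch = std::variant<Opaque, DrvDeep, Built>;

struct DerivationInputsSketch {
    std::set<std::string> inputSrcs;                           // plain source paths
    std::map<std::string, std::set<std::string>> inputDrvs;    // drvPath -> wanted outputs
};

// Roughly what the loop over `context` in the derivation primop does.
void addContextElem(DerivationInputsSketch & drv, const ContextElemSketch & c)
{
    std::visit([&](auto && elem) {
        using T = std::decay_t<decltype(elem)>;
        if constexpr (std::is_same_v<T, Opaque>)
            drv.inputSrcs.insert(elem.path);
        else if constexpr (std::is_same_v<T, Built>)
            drv.inputDrvs[elem.drvPath].insert(elem.output);
        else // DrvDeep: depend on every output (simplified here as a wildcard)
            drv.inputDrvs[elem.drvPath].insert("*");
    }, c);
}
```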
*/ - if (!state.store->isStorePath(path)) path = canonPath(path, true); - if (!state.store->isInStore(path)) + if (!state.store->isStorePath(path.abs())) + path = CanonPath(canonPath(path.abs(), true)); + if (!state.store->isInStore(path.abs())) state.debugThrowLastTrace(EvalError({ .msg = hintfmt("path '%1%' is not in the Nix store", path), .errPos = state.positions[pos] })); - auto path2 = state.store->toStorePath(path).first; + auto path2 = state.store->toStorePath(path.abs()).first; if (!settings.readOnlyMode) state.store->ensurePath(path2); - context.insert(state.store->printStorePath(path2)); - v.mkString(path, context); + context.insert(NixStringContextElem::Opaque { .path = path2 }); + v.mkString(path.abs(), context); } static RegisterPrimOp primop_storePath({ @@ -1497,7 +1522,7 @@ static void prim_pathExists(EvalState & state, const PosIdx pos, Value * * args, auto path = realisePath(state, pos, *args[0], { .checkForPureEval = false }); try { - v.mkBool(pathExists(state.checkSourcePath(path))); + v.mkBool(state.checkSourcePath(path).pathExists()); } catch (SysError & e) { /* Don't give away info from errors while canonicalising ‘path’ in restricted mode. */ @@ -1521,7 +1546,7 @@ static RegisterPrimOp primop_pathExists({ following the last slash. */ static void prim_baseNameOf(EvalState & state, const PosIdx pos, Value * * args, Value & v) { - PathSet context; + NixStringContext context; v.mkString(baseNameOf(*state.coerceToString(pos, *args[0], context, "while evaluating the first argument passed to builtins.baseNameOf", false, false)), context); @@ -1543,12 +1568,18 @@ static RegisterPrimOp primop_baseNameOf({ of the argument. */ static void prim_dirOf(EvalState & state, const PosIdx pos, Value * * args, Value & v) { - PathSet context; - auto path = state.coerceToString(pos, *args[0], context, - "while evaluating the first argument passed to builtins.dirOf", + state.forceValue(*args[0], pos); + if (args[0]->type() == nPath) { + auto path = args[0]->path(); + v.mkPath(path.path.isRoot() ? path : path.parent()); + } else { + NixStringContext context; + auto path = state.coerceToString(pos, *args[0], context, + "while evaluating the first argument passed to 'builtins.dirOf'", false, false); - auto dir = dirOf(*path); - if (args[0]->type() == nPath) v.mkPath(dir); else v.mkString(dir, context); + auto dir = dirOf(*path); + v.mkString(dir, context); + } } static RegisterPrimOp primop_dirOf({ @@ -1566,13 +1597,13 @@ static RegisterPrimOp primop_dirOf({ static void prim_readFile(EvalState & state, const PosIdx pos, Value * * args, Value & v) { auto path = realisePath(state, pos, *args[0]); - auto s = readFile(path); + auto s = path.readFile(); if (s.find((char) 0) != std::string::npos) state.debugThrowLastTrace(Error("the contents of the file '%1%' cannot be represented as a Nix string", path)); StorePathSet refs; - if (state.store->isInStore(path)) { + if (state.store->isInStore(path.path.abs())) { try { - refs = state.store->queryPathInfo(state.store->toStorePath(path).first)->references; + refs = state.store->queryPathInfo(state.store->toStorePath(path.path.abs()).first)->references; } catch (Error &) { // FIXME: should be InvalidPathError } // Re-scan references to filter down to just the ones that actually occur in the file. 
@@ -1580,7 +1611,12 @@ static void prim_readFile(EvalState & state, const PosIdx pos, Value * * args, V refsSink << s; refs = refsSink.getResultPaths(); } - auto context = state.store->printStorePathSet(refs); + NixStringContext context; + for (auto && p : std::move(refs)) { + context.insert(NixStringContextElem::Opaque { + .path = std::move((StorePath &&)p), + }); + } v.mkString(s, context); } @@ -1611,7 +1647,7 @@ static void prim_findFile(EvalState & state, const PosIdx pos, Value * * args, V i = getAttr(state, state.sPath, v2->attrs, "in an element of the __nixPath"); - PathSet context; + NixStringContext context; auto path = state.coerceToString(pos, *i->value, context, "while evaluating the `path` attribute of an element of the list passed to builtins.findFile", false, false).toOwned(); @@ -1653,7 +1689,7 @@ static void prim_hashFile(EvalState & state, const PosIdx pos, Value * * args, V auto path = realisePath(state, pos, *args[1]); - v.mkString(hashFile(*ht, path).to_string(Base16, false)); + v.mkString(hashString(*ht, path.readFile()).to_string(Base16, false)); } static RegisterPrimOp primop_hashFile({ @@ -1667,26 +1703,20 @@ static RegisterPrimOp primop_hashFile({ .fun = prim_hashFile, }); - -/* Stringize a directory entry enum. Used by `readFileType' and `readDir'. */ -static const char * dirEntTypeToString(unsigned char dtType) +static std::string_view fileTypeToString(InputAccessor::Type type) { - /* Enum DT_(DIR|LNK|REG|UNKNOWN) */ - switch(dtType) { - case DT_REG: return "regular"; break; - case DT_DIR: return "directory"; break; - case DT_LNK: return "symlink"; break; - default: return "unknown"; break; - } - return "unknown"; /* Unreachable */ + return + type == InputAccessor::Type::tRegular ? "regular" : + type == InputAccessor::Type::tDirectory ? "directory" : + type == InputAccessor::Type::tSymlink ? "symlink" : + "unknown"; } - static void prim_readFileType(EvalState & state, const PosIdx pos, Value * * args, Value & v) { auto path = realisePath(state, pos, *args[0]); /* Retrieve the directory entry type and stringize it. */ - v.mkString(dirEntTypeToString(getFileType(path))); + v.mkString(fileTypeToString(path.lstat().type)); } static RegisterPrimOp primop_readFileType({ @@ -1707,8 +1737,7 @@ static void prim_readDir(EvalState & state, const PosIdx pos, Value * * args, Va // Retrieve directory entries for all nodes in a directory. // This is similar to `getFileType` but is optimized to reduce system calls // on many systems. - DirEntries entries = readDirectory(path); - + auto entries = path.readDirectory(); auto attrs = state.buildBindings(entries.size()); // If we hit unknown directory entry types we may need to fallback to @@ -1717,22 +1746,21 @@ static void prim_readDir(EvalState & state, const PosIdx pos, Value * * args, Va // `builtins.readFileType` application. Value * readFileType = nullptr; - for (auto & ent : entries) { - auto & attr = attrs.alloc(ent.name); - if (ent.type == DT_UNKNOWN) { + for (auto & [name, type] : entries) { + auto & attr = attrs.alloc(name); + if (!type) { // Some filesystems or operating systems may not be able to return // detailed node info quickly in this case we produce a thunk to // query the file type lazily. auto epath = state.allocValue(); - Path path2 = path + "/" + ent.name; - epath->mkString(path2); + epath->mkPath(path + name); if (!readFileType) readFileType = &state.getBuiltin("readFileType"); attr.mkApp(readFileType, epath); } else { // This branch of the conditional is much more likely. 
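readDir above now receives entry types as optional values and only falls back to a lazy builtins.readFileType application when the filesystem did not report a type. A minimal sketch of that "known now or stat later" pattern:

```cpp
#include <functional>
#include <map>
#include <optional>
#include <string>

enum class FileTypeSketch { Regular, Directory, Symlink, Unknown };

// Either a type we already know, or a callback that determines it lazily.
using LazyType = std::function<FileTypeSketch()>;

std::map<std::string, LazyType> listDirSketch(
    const std::map<std::string, std::optional<FileTypeSketch>> & entries,
    std::function<FileTypeSketch(const std::string &)> statType)
{
    std::map<std::string, LazyType> result;
    for (auto & [name, type] : entries) {
        std::string entryName = name;
        if (type)
            result[entryName] = [t = *type] { return t; };                        // cheap: already known
        else
            result[entryName] = [entryName, statType] { return statType(entryName); };  // deferred stat
    }
    return result;
}
```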
// Here we just stringize the directory entry type. - attr.mkString(dirEntTypeToString(ent.type)); + attr.mkString(fileTypeToString(*type)); } } @@ -1770,7 +1798,7 @@ static RegisterPrimOp primop_readDir({ static void prim_toXML(EvalState & state, const PosIdx pos, Value * * args, Value & v) { std::ostringstream out; - PathSet context; + NixStringContext context; printValueAsXML(state, true, false, *args[0], out, context, pos); v.mkString(out.str(), context); } @@ -1878,7 +1906,7 @@ static RegisterPrimOp primop_toXML({ static void prim_toJSON(EvalState & state, const PosIdx pos, Value * * args, Value & v) { std::ostringstream out; - PathSet context; + NixStringContext context; printValueAsJSON(state, true, *args[0], pos, out, context); v.mkString(out.str(), context); } @@ -1928,22 +1956,23 @@ static RegisterPrimOp primop_fromJSON({ as an input by derivations. */ static void prim_toFile(EvalState & state, const PosIdx pos, Value * * args, Value & v) { - PathSet context; + NixStringContext context; std::string name(state.forceStringNoCtx(*args[0], pos, "while evaluating the first argument passed to builtins.toFile")); std::string contents(state.forceString(*args[1], context, pos, "while evaluating the second argument passed to builtins.toFile")); StorePathSet refs; - for (auto path : context) { - if (path.at(0) != '/') + for (auto c : context) { + if (auto p = std::get_if(&c)) + refs.insert(p->path); + else state.debugThrowLastTrace(EvalError({ .msg = hintfmt( "in 'toFile': the file named '%1%' must not contain a reference " "to a derivation but contains (%2%)", - name, path), + name, c.to_string()), .errPos = state.positions[pos] })); - refs.insert(state.store->parseStorePath(path)); } auto storePath = settings.readOnlyMode @@ -2038,13 +2067,13 @@ static RegisterPrimOp primop_toFile({ static void addPath( EvalState & state, const PosIdx pos, - const std::string & name, + std::string_view name, Path path, Value * filterFun, FileIngestionMethod method, const std::optional expectedHash, Value & v, - const PathSet & context) + const NixStringContext & context) { try { // FIXME: handle CA derivation outputs (where path needs to @@ -2066,7 +2095,7 @@ static void addPath( path = evalSettings.pureEval && expectedHash ? path - : state.checkSourcePath(path); + : state.checkSourcePath(CanonPath(path)).path.abs(); PathFilter filter = filterFun ? 
([&](const Path & path) { auto st = lstat(path); @@ -2092,7 +2121,13 @@ static void addPath( std::optional expectedStorePath; if (expectedHash) - expectedStorePath = state.store->makeFixedOutputPath(method, *expectedHash, name); + expectedStorePath = state.store->makeFixedOutputPath(name, FixedOutputInfo { + .hash = { + .method = method, + .hash = *expectedHash, + }, + .references = {}, + }); if (!expectedHash || !state.store->isValidPath(*expectedStorePath)) { StorePath dstPath = settings.readOnlyMode @@ -2112,10 +2147,11 @@ static void addPath( static void prim_filterSource(EvalState & state, const PosIdx pos, Value * * args, Value & v) { - PathSet context; - Path path = state.coerceToPath(pos, *args[1], context, "while evaluating the second argument (the path to filter) passed to builtins.filterSource"); + NixStringContext context; + auto path = state.coerceToPath(pos, *args[1], context, + "while evaluating the second argument (the path to filter) passed to builtins.filterSource"); state.forceFunction(*args[0], pos, "while evaluating the first argument passed to builtins.filterSource"); - addPath(state, pos, std::string(baseNameOf(path)), path, args[0], FileIngestionMethod::Recursive, std::nullopt, v, context); + addPath(state, pos, path.baseName(), path.path.abs(), args[0], FileIngestionMethod::Recursive, std::nullopt, v, context); } static RegisterPrimOp primop_filterSource({ @@ -2175,18 +2211,19 @@ static RegisterPrimOp primop_filterSource({ static void prim_path(EvalState & state, const PosIdx pos, Value * * args, Value & v) { - state.forceAttrs(*args[0], pos, "while evaluating the argument passed to builtins.path"); - Path path; + std::optional path; std::string name; Value * filterFun = nullptr; auto method = FileIngestionMethod::Recursive; std::optional expectedHash; - PathSet context; + NixStringContext context; + + state.forceAttrs(*args[0], pos, "while evaluating the argument passed to 'builtins.path'"); for (auto & attr : *args[0]->attrs) { auto n = state.symbols[attr.name]; if (n == "path") - path = state.coerceToPath(attr.pos, *attr.value, context, "while evaluating the `path` attribute passed to builtins.path"); + path.emplace(state.coerceToPath(attr.pos, *attr.value, context, "while evaluating the 'path' attribute passed to 'builtins.path'")); else if (attr.name == state.sName) name = state.forceStringNoCtx(*attr.value, attr.pos, "while evaluating the `name` attribute passed to builtins.path"); else if (n == "filter") @@ -2201,15 +2238,15 @@ static void prim_path(EvalState & state, const PosIdx pos, Value * * args, Value .errPos = state.positions[attr.pos] })); } - if (path.empty()) + if (!path) state.debugThrowLastTrace(EvalError({ .msg = hintfmt("missing required 'path' attribute in the first argument to builtins.path"), .errPos = state.positions[pos] })); if (name.empty()) - name = baseNameOf(path); + name = path->baseName(); - addPath(state, pos, name, path, filterFun, method, expectedHash, v, context); + addPath(state, pos, name, path->path.abs(), filterFun, method, expectedHash, v, context); } static RegisterPrimOp primop_path({ @@ -3515,7 +3552,7 @@ static RegisterPrimOp primop_lessThan({ `"/nix/store/whatever..."'. 
*/ static void prim_toString(EvalState & state, const PosIdx pos, Value * * args, Value & v) { - PathSet context; + NixStringContext context; auto s = state.coerceToString(pos, *args[0], context, "while evaluating the first argument passed to builtins.toString", true, false); @@ -3554,7 +3591,7 @@ static void prim_substring(EvalState & state, const PosIdx pos, Value * * args, { int start = state.forceInt(*args[0], pos, "while evaluating the first argument (the start offset) passed to builtins.substring"); int len = state.forceInt(*args[1], pos, "while evaluating the second argument (the substring length) passed to builtins.substring"); - PathSet context; + NixStringContext context; auto s = state.coerceToString(pos, *args[2], context, "while evaluating the third argument (the string) passed to builtins.substring"); if (start < 0) @@ -3588,7 +3625,7 @@ static RegisterPrimOp primop_substring({ static void prim_stringLength(EvalState & state, const PosIdx pos, Value * * args, Value & v) { - PathSet context; + NixStringContext context; auto s = state.coerceToString(pos, *args[0], context, "while evaluating the argument passed to builtins.stringLength"); v.mkInt(s->size()); } @@ -3614,7 +3651,7 @@ static void prim_hashString(EvalState & state, const PosIdx pos, Value * * args, .errPos = state.positions[pos] })); - PathSet context; // discarded + NixStringContext context; // discarded auto s = state.forceString(*args[1], context, pos, "while evaluating the second argument passed to builtins.hashString"); v.mkString(hashString(*ht, s).to_string(Base16, false)); @@ -3660,7 +3697,7 @@ void prim_match(EvalState & state, const PosIdx pos, Value * * args, Value & v) auto regex = state.regexCache->get(re); - PathSet context; + NixStringContext context; const auto str = state.forceString(*args[1], context, pos, "while evaluating the second argument passed to builtins.match"); std::cmatch match; @@ -3740,7 +3777,7 @@ void prim_split(EvalState & state, const PosIdx pos, Value * * args, Value & v) auto regex = state.regexCache->get(re); - PathSet context; + NixStringContext context; const auto str = state.forceString(*args[1], context, pos, "while evaluating the second argument passed to builtins.split"); auto begin = std::cregex_iterator(str.begin(), str.end(), regex); @@ -3837,7 +3874,7 @@ static RegisterPrimOp primop_split({ static void prim_concatStringsSep(EvalState & state, const PosIdx pos, Value * * args, Value & v) { - PathSet context; + NixStringContext context; auto sep = state.forceString(*args[0], context, pos, "while evaluating the first argument (the separator string) passed to builtins.concatStringsSep"); state.forceList(*args[1], pos, "while evaluating the second argument (the list of strings to concat) passed to builtins.concatStringsSep"); @@ -3877,15 +3914,15 @@ static void prim_replaceStrings(EvalState & state, const PosIdx pos, Value * * a for (auto elem : args[0]->listItems()) from.emplace_back(state.forceString(*elem, pos, "while evaluating one of the strings to replace passed to builtins.replaceStrings")); - std::vector> to; + std::vector> to; to.reserve(args[1]->listSize()); for (auto elem : args[1]->listItems()) { - PathSet ctx; + NixStringContext ctx; auto s = state.forceString(*elem, ctx, pos, "while evaluating one of the replacement strings passed to builtins.replaceStrings"); to.emplace_back(s, std::move(ctx)); } - PathSet context; + NixStringContext context; auto s = state.forceString(*args[2], context, pos, "while evaluating the third argument passed to 
builtins.replaceStrings"); std::string res; @@ -4117,7 +4154,7 @@ void EvalState::createBaseEnv() if (RegisterPrimOp::primOps) for (auto & primOp : *RegisterPrimOp::primOps) if (!primOp.experimentalFeature - || settings.isExperimentalFeatureEnabled(*primOp.experimentalFeature)) + || experimentalFeatureSettings.isEnabled(*primOp.experimentalFeature)) { addPrimOp({ .fun = primOp.fun, @@ -4130,7 +4167,6 @@ void EvalState::createBaseEnv() /* Add a wrapper around the derivation primop that computes the `drvPath' and `outPath' attributes lazily. */ - sDerivationNix = symbols.create(derivationNixPath); auto vDerivation = allocValue(); addConstant("derivation", vDerivation); @@ -4147,7 +4183,7 @@ void EvalState::createBaseEnv() // the parser needs two NUL bytes as terminators; one of them // is implied by being a C string. "\0"; - eval(parse(code, sizeof(code), derivationNixPath, "/", staticBaseEnv), *vDerivation); + eval(parse(code, sizeof(code), derivationInternal, {CanonPath::root}, staticBaseEnv), *vDerivation); } diff --git a/src/libexpr/primops.hh b/src/libexpr/primops.hh index 1cfb4356b..4ae73fe1f 100644 --- a/src/libexpr/primops.hh +++ b/src/libexpr/primops.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "eval.hh" @@ -22,9 +23,11 @@ struct RegisterPrimOp typedef std::vector PrimOps; static PrimOps * primOps; - /* You can register a constant by passing an arity of 0. fun - will get called during EvalState initialization, so there - may be primops not yet added and builtins is not yet sorted. */ + /** + * You can register a constant by passing an arity of 0. fun + * will get called during EvalState initialization, so there + * may be primops not yet added and builtins is not yet sorted. + */ RegisterPrimOp( std::string name, size_t arity, @@ -37,10 +40,14 @@ struct RegisterPrimOp may wish to use them in limited contexts without globally enabling them. */ -/* Load a ValueInitializer from a DSO and return whatever it initializes */ +/** + * Load a ValueInitializer from a DSO and return whatever it initializes + */ void prim_importNative(EvalState & state, const PosIdx pos, Value * * args, Value & v); -/* Execute a program and parse its output */ +/** + * Execute a program and parse its output + */ void prim_exec(EvalState & state, const PosIdx pos, Value * * args, Value & v); } diff --git a/src/libexpr/primops/context.cc b/src/libexpr/primops/context.cc index db43e5771..07bf400cf 100644 --- a/src/libexpr/primops/context.cc +++ b/src/libexpr/primops/context.cc @@ -7,7 +7,7 @@ namespace nix { static void prim_unsafeDiscardStringContext(EvalState & state, const PosIdx pos, Value * * args, Value & v) { - PathSet context; + NixStringContext context; auto s = state.coerceToString(pos, *args[0], context, "while evaluating the argument passed to builtins.unsafeDiscardStringContext"); v.mkString(*s); } @@ -17,7 +17,7 @@ static RegisterPrimOp primop_unsafeDiscardStringContext("__unsafeDiscardStringCo static void prim_hasContext(EvalState & state, const PosIdx pos, Value * * args, Value & v) { - PathSet context; + NixStringContext context; state.forceString(*args[0], context, pos, "while evaluating the argument passed to builtins.hasContext"); v.mkBool(!context.empty()); } @@ -33,17 +33,18 @@ static RegisterPrimOp primop_hasContext("__hasContext", 1, prim_hasContext); drv.inputDrvs. 
*/ static void prim_unsafeDiscardOutputDependency(EvalState & state, const PosIdx pos, Value * * args, Value & v) { - PathSet context; + NixStringContext context; auto s = state.coerceToString(pos, *args[0], context, "while evaluating the argument passed to builtins.unsafeDiscardOutputDependency"); - PathSet context2; - for (auto && p : context) { - auto c = NixStringContextElem::parse(*state.store, p); + NixStringContext context2; + for (auto && c : context) { if (auto * ptr = std::get_if(&c)) { - context2.emplace(state.store->printStorePath(ptr->drvPath)); + context2.emplace(NixStringContextElem::Opaque { + .path = ptr->drvPath + }); } else { /* Can reuse original item */ - context2.emplace(std::move(p)); + context2.emplace(std::move(c)); } } @@ -79,22 +80,21 @@ static void prim_getContext(EvalState & state, const PosIdx pos, Value * * args, bool allOutputs = false; Strings outputs; }; - PathSet context; + NixStringContext context; state.forceString(*args[0], context, pos, "while evaluating the argument passed to builtins.getContext"); auto contextInfos = std::map(); - for (const auto & p : context) { - NixStringContextElem ctx = NixStringContextElem::parse(*state.store, p); + for (auto && i : context) { std::visit(overloaded { - [&](NixStringContextElem::DrvDeep & d) { - contextInfos[d.drvPath].allOutputs = true; + [&](NixStringContextElem::DrvDeep && d) { + contextInfos[std::move(d.drvPath)].allOutputs = true; }, - [&](NixStringContextElem::Built & b) { - contextInfos[b.drvPath].outputs.emplace_back(std::move(b.output)); + [&](NixStringContextElem::Built && b) { + contextInfos[std::move(b.drvPath)].outputs.emplace_back(std::move(b.output)); }, - [&](NixStringContextElem::Opaque & o) { - contextInfos[o.path].path = true; + [&](NixStringContextElem::Opaque && o) { + contextInfos[std::move(o.path)].path = true; }, - }, ctx.raw()); + }, ((NixStringContextElem &&) i).raw()); } auto attrs = state.buildBindings(contextInfos.size()); @@ -129,7 +129,7 @@ static RegisterPrimOp primop_getContext("__getContext", 1, prim_getContext); */ static void prim_appendContext(EvalState & state, const PosIdx pos, Value * * args, Value & v) { - PathSet context; + NixStringContext context; auto orig = state.forceString(*args[0], context, noPos, "while evaluating the first argument passed to builtins.appendContext"); state.forceAttrs(*args[1], pos, "while evaluating the second argument passed to builtins.appendContext"); @@ -143,13 +143,16 @@ static void prim_appendContext(EvalState & state, const PosIdx pos, Value * * ar .msg = hintfmt("context key '%s' is not a store path", name), .errPos = state.positions[i.pos] }); + auto namePath = state.store->parseStorePath(name); if (!settings.readOnlyMode) - state.store->ensurePath(state.store->parseStorePath(name)); + state.store->ensurePath(namePath); state.forceAttrs(*i.value, i.pos, "while evaluating the value of a string context"); auto iter = i.value->attrs->find(sPath); if (iter != i.value->attrs->end()) { if (state.forceBool(*iter->value, iter->pos, "while evaluating the `path` attribute of a string context")) - context.emplace(name); + context.emplace(NixStringContextElem::Opaque { + .path = namePath, + }); } iter = i.value->attrs->find(sAllOutputs); @@ -161,7 +164,9 @@ static void prim_appendContext(EvalState & state, const PosIdx pos, Value * * ar .errPos = state.positions[i.pos] }); } - context.insert(concatStrings("=", name)); + context.emplace(NixStringContextElem::DrvDeep { + .drvPath = namePath, + }); } } @@ -176,7 +181,10 @@ static void 
prim_appendContext(EvalState & state, const PosIdx pos, Value * * ar } for (auto elem : iter->value->listItems()) { auto outputName = state.forceStringNoCtx(*elem, iter->pos, "while evaluating an output name within a string context"); - context.insert(concatStrings("!", outputName, "!", name)); + context.emplace(NixStringContextElem::Built { + .drvPath = namePath, + .output = std::string { outputName }, + }); } } } diff --git a/src/libexpr/primops/fetchClosure.cc b/src/libexpr/primops/fetchClosure.cc index 0dfa97fa3..4cf1f1e0b 100644 --- a/src/libexpr/primops/fetchClosure.cc +++ b/src/libexpr/primops/fetchClosure.cc @@ -18,7 +18,7 @@ static void prim_fetchClosure(EvalState & state, const PosIdx pos, Value * * arg const auto & attrName = state.symbols[attr.name]; if (attrName == "fromPath") { - PathSet context; + NixStringContext context; fromPath = state.coerceToStorePath(attr.pos, *attr.value, context, "while evaluating the 'fromPath' attribute passed to builtins.fetchClosure"); } @@ -27,7 +27,7 @@ static void prim_fetchClosure(EvalState & state, const PosIdx pos, Value * * arg state.forceValue(*attr.value, attr.pos); toCA = true; if (attr.value->type() != nString || attr.value->string.s != std::string("")) { - PathSet context; + NixStringContext context; toPath = state.coerceToStorePath(attr.pos, *attr.value, context, "while evaluating the 'toPath' attribute passed to builtins.fetchClosure"); } @@ -114,8 +114,7 @@ static void prim_fetchClosure(EvalState & state, const PosIdx pos, Value * * arg }); } - auto toPathS = state.store->printStorePath(*toPath); - v.mkString(toPathS, {toPathS}); + state.mkStorePathString(*toPath, v); } static RegisterPrimOp primop_fetchClosure({ diff --git a/src/libexpr/primops/fetchMercurial.cc b/src/libexpr/primops/fetchMercurial.cc index c41bd60b6..2c0d98e74 100644 --- a/src/libexpr/primops/fetchMercurial.cc +++ b/src/libexpr/primops/fetchMercurial.cc @@ -13,7 +13,7 @@ static void prim_fetchMercurial(EvalState & state, const PosIdx pos, Value * * a std::optional rev; std::optional ref; std::string_view name = "source"; - PathSet context; + NixStringContext context; state.forceValue(*args[0], pos); @@ -73,8 +73,7 @@ static void prim_fetchMercurial(EvalState & state, const PosIdx pos, Value * * a auto [tree, input2] = input.fetch(state.store); auto attrs2 = state.buildBindings(8); - auto storePath = state.store->printStorePath(tree.storePath); - attrs2.alloc(state.sOutPath).mkString(storePath, {storePath}); + state.mkStorePathString(tree.storePath, attrs2.alloc(state.sOutPath)); if (input2.getRef()) attrs2.alloc("branch").mkString(*input2.getRef()); // Backward compatibility: set 'rev' to diff --git a/src/libexpr/primops/fetchTree.cc b/src/libexpr/primops/fetchTree.cc index 2e924c302..cd7039025 100644 --- a/src/libexpr/primops/fetchTree.cc +++ b/src/libexpr/primops/fetchTree.cc @@ -4,6 +4,7 @@ #include "fetchers.hh" #include "filetransfer.hh" #include "registry.hh" +#include "url.hh" #include #include @@ -23,9 +24,8 @@ void emitTreeAttrs( auto attrs = state.buildBindings(8); - auto storePath = state.store->printStorePath(tree.storePath); - attrs.alloc(state.sOutPath).mkString(storePath, {storePath}); + state.mkStorePathString(tree.storePath, attrs.alloc(state.sOutPath)); // FIXME: support arbitrary input attributes. @@ -68,7 +68,16 @@ void emitTreeAttrs( std::string fixURI(std::string uri, EvalState & state, const std::string & defaultScheme = "file") { state.checkURI(uri); - return uri.find("://") != std::string::npos ? 
uri : defaultScheme + "://" + uri; + if (uri.find("://") == std::string::npos) { + const auto p = ParsedURL { + .scheme = defaultScheme, + .authority = "", + .path = uri + }; + return p.to_string(); + } else { + return uri; + } } std::string fixURIForGit(std::string uri, EvalState & state) @@ -97,7 +106,7 @@ static void fetchTree( const FetchTreeParams & params = FetchTreeParams{} ) { fetchers::Input input; - PathSet context; + NixStringContext context; state.forceValue(*args[0], pos); @@ -180,7 +189,7 @@ static void fetchTree( static void prim_fetchTree(EvalState & state, const PosIdx pos, Value * * args, Value & v) { - settings.requireExperimentalFeature(Xp::Flakes); + experimentalFeatureSettings.require(Xp::Flakes); fetchTree(state, pos, args, v, std::nullopt, FetchTreeParams { .allowNameArgument = false }); } @@ -233,10 +242,15 @@ static void fetch(EvalState & state, const PosIdx pos, Value * * args, Value & v // early exit if pinned and already in the store if (expectedHash && expectedHash->type == htSHA256) { - auto expectedPath = - unpack - ? state.store->makeFixedOutputPath(FileIngestionMethod::Recursive, *expectedHash, name, {}) - : state.store->makeFixedOutputPath(FileIngestionMethod::Flat, *expectedHash, name, {}); + auto expectedPath = state.store->makeFixedOutputPath( + name, + FixedOutputInfo { + .hash = { + .method = unpack ? FileIngestionMethod::Recursive : FileIngestionMethod::Flat, + .hash = *expectedHash, + }, + .references = {} + }); if (state.store->isValidPath(expectedPath)) { state.allowAndSetStorePathString(expectedPath, v); @@ -343,36 +357,44 @@ static RegisterPrimOp primop_fetchGit({ of the repo at that URL is fetched. Otherwise, it can be an attribute with the following attributes (all except `url` optional): - - url\ - The URL of the repo. + - `url` - - name\ - The name of the directory the repo should be exported to in the - store. Defaults to the basename of the URL. + The URL of the repo. - - rev\ - The git revision to fetch. Defaults to the tip of `ref`. + - `name` (default: *basename of the URL*) - - ref\ - The git ref to look for the requested revision under. This is - often a branch or tag name. Defaults to `HEAD`. + The name of the directory the repo should be exported to in the store. - By default, the `ref` value is prefixed with `refs/heads/`. As - of Nix 2.3.0 Nix will not prefix `refs/heads/` if `ref` starts - with `refs/`. + - `rev` (default: *the tip of `ref`*) - - submodules\ - A Boolean parameter that specifies whether submodules should be - checked out. Defaults to `false`. + The [Git revision] to fetch. + This is typically a commit hash. - - shallow\ - A Boolean parameter that specifies whether fetching a shallow clone - is allowed. Defaults to `false`. + [Git revision]: https://git-scm.com/docs/git-rev-parse#_specifying_revisions - - allRefs\ - Whether to fetch all refs of the repository. With this argument being - true, it's possible to load a `rev` from *any* `ref` (by default only - `rev`s from the specified `ref` are supported). + - `ref` (default: `HEAD`) + + The [Git reference] under which to look for the requested revision. + This is often a branch or tag name. + + [Git reference]: https://git-scm.com/book/en/v2/Git-Internals-Git-References + + By default, the `ref` value is prefixed with `refs/heads/`. + As of 2.3.0, Nix will not prefix `refs/heads/` if `ref` starts with `refs/`. + + - `submodules` (default: `false`) + + A Boolean parameter that specifies whether submodules should be checked out. 
+ + - `shallow` (default: `false`) + + A Boolean parameter that specifies whether fetching a shallow clone is allowed. + + - `allRefs` + + Whether to fetch all references of the repository. + With this argument being true, it's possible to load a `rev` from *any* `ref` + (by default only `rev`s from the specified `ref` are supported). Here are some examples of how to use `fetchGit`. @@ -463,10 +485,10 @@ static RegisterPrimOp primop_fetchGit({ builtins.fetchGit ./work-dir ``` - If the URL points to a local directory, and no `ref` or `rev` is - given, `fetchGit` will use the current content of the checked-out - files, even if they are not committed or added to Git's index. It will - only consider files added to the Git repository, as listed by `git ls-files`. + If the URL points to a local directory, and no `ref` or `rev` is + given, `fetchGit` will use the current content of the checked-out + files, even if they are not committed or added to Git's index. It will + only consider files added to the Git repository, as listed by `git ls-files`. )", .fun = prim_fetchGit, }); diff --git a/src/libexpr/print.cc b/src/libexpr/print.cc new file mode 100644 index 000000000..d08672cfc --- /dev/null +++ b/src/libexpr/print.cc @@ -0,0 +1,78 @@ +#include "print.hh" + +namespace nix { + +std::ostream & +printLiteralString(std::ostream & str, const std::string_view string) +{ + str << "\""; + for (auto i = string.begin(); i != string.end(); ++i) { + if (*i == '\"' || *i == '\\') str << "\\" << *i; + else if (*i == '\n') str << "\\n"; + else if (*i == '\r') str << "\\r"; + else if (*i == '\t') str << "\\t"; + else if (*i == '$' && *(i+1) == '{') str << "\\" << *i; + else str << *i; + } + str << "\""; + return str; +} + +std::ostream & +printLiteralBool(std::ostream & str, bool boolean) +{ + str << (boolean ? "true" : "false"); + return str; +} + +std::ostream & +printIdentifier(std::ostream & str, std::string_view s) { + if (s.empty()) + str << "\"\""; + else if (s == "if") // FIXME: handle other keywords + str << '"' << s << '"'; + else { + char c = s[0]; + if (!((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_')) { + printLiteralString(str, s); + return str; + } + for (auto c : s) + if (!((c >= 'a' && c <= 'z') || + (c >= 'A' && c <= 'Z') || + (c >= '0' && c <= '9') || + c == '_' || c == '\'' || c == '-')) { + printLiteralString(str, s); + return str; + } + str << s; + } + return str; +} + +// FIXME: keywords +static bool isVarName(std::string_view s) +{ + if (s.size() == 0) return false; + char c = s[0]; + if ((c >= '0' && c <= '9') || c == '-' || c == '\'') return false; + for (auto & i : s) + if (!((i >= 'a' && i <= 'z') || + (i >= 'A' && i <= 'Z') || + (i >= '0' && i <= '9') || + i == '_' || i == '-' || i == '\'')) + return false; + return true; +} + +std::ostream & +printAttributeName(std::ostream & str, std::string_view name) { + if (isVarName(name)) + str << name; + else + printLiteralString(str, name); + return str; +} + + +} diff --git a/src/libexpr/print.hh b/src/libexpr/print.hh new file mode 100644 index 000000000..f9cfc3964 --- /dev/null +++ b/src/libexpr/print.hh @@ -0,0 +1,48 @@ +#pragma once +/** + * @file + * @brief Common printing functions for the Nix language + * + * While most types come with their own methods for printing, they share some + * functions that are placed here. + */ + +#include + +namespace nix { + /** + * Print a string as a Nix string literal. + * + * Quotes and fairly minimal escaping are added. 
+ * + * @param s The logical string + */ + std::ostream & printLiteralString(std::ostream & o, std::string_view s); + inline std::ostream & printLiteralString(std::ostream & o, const char * s) { + return printLiteralString(o, std::string_view(s)); + } + inline std::ostream & printLiteralString(std::ostream & o, const std::string & s) { + return printLiteralString(o, std::string_view(s)); + } + + /** Print `true` or `false`. */ + std::ostream & printLiteralBool(std::ostream & o, bool b); + + /** + * Print a string as an attribute name in the Nix expression language syntax. + * + * Prints a quoted string if necessary. + */ + std::ostream & printAttributeName(std::ostream & o, std::string_view s); + + /** + * Print a string as an identifier in the Nix expression language syntax. + * + * FIXME: "identifier" is ambiguous. Identifiers do not have a single + * textual representation. They can be used in variable references, + * let bindings, left-hand sides or attribute names in a select + * expression, or something else entirely, like JSON. Use one of the + * `print*` functions instead. + */ + std::ostream & printIdentifier(std::ostream & o, std::string_view s); +} diff --git a/src/libexpr/symbol-table.hh b/src/libexpr/symbol-table.hh index 288c15602..967a186dd 100644 --- a/src/libexpr/symbol-table.hh +++ b/src/libexpr/symbol-table.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include #include @@ -9,15 +10,11 @@ namespace nix { -/* Symbol table used by the parser and evaluator to represent and look - up identifiers and attributes efficiently. SymbolTable::create() - converts a string into a symbol. Symbols have the property that - they can be compared efficiently (using an equality test), - because the symbol table stores only one copy of each string. */ - -/* This class mainly exists to give us an operator<< for ostreams. We could also - return plain strings from SymbolTable, but then we'd have to wrap every - instance of a symbol that is fmt()ed, which is inconvenient and error-prone. */ +/** + * This class mainly exists to give us an operator<< for ostreams. We could also + * return plain strings from SymbolTable, but then we'd have to wrap every + * instance of a symbol that is fmt()ed, which is inconvenient and error-prone. + */ class SymbolStr { friend class SymbolTable; @@ -46,6 +43,11 @@ public: friend std::ostream & operator <<(std::ostream & os, const SymbolStr & symbol); }; +/** + * Symbols have the property that they can be compared efficiently + * (using an equality test), because the symbol table stores only one + * copy of each string. + */ class Symbol { friend class SymbolTable; @@ -65,6 +67,10 @@ public: bool operator!=(const Symbol other) const { return id != other.id; } }; +/** + * Symbol table used by the parser and evaluator to represent and look + * up identifiers and attributes efficiently. + */ class SymbolTable { private: @@ -73,6 +79,9 @@ private: public: + /** + * converts a string into a symbol. 
+ */ Symbol create(std::string_view s) { // Most symbols are looked up more than once, so we trade off insertion performance diff --git a/src/libexpr/tests/json.cc b/src/libexpr/tests/json.cc index 411bc0ac3..7586bdd9b 100644 --- a/src/libexpr/tests/json.cc +++ b/src/libexpr/tests/json.cc @@ -8,7 +8,7 @@ namespace nix { protected: std::string getJSONValue(Value& value) { std::stringstream ss; - PathSet ps; + NixStringContext ps; printValueAsJSON(state, true, value, noPos, ss, ps); return ss.str(); } diff --git a/src/libexpr/tests/libexpr.hh b/src/libexpr/tests/libexpr.hh index 8534d9567..b8e65aafe 100644 --- a/src/libexpr/tests/libexpr.hh +++ b/src/libexpr/tests/libexpr.hh @@ -1,3 +1,6 @@ +#pragma once +///@file + #include #include @@ -25,7 +28,7 @@ namespace nix { } Value eval(std::string input, bool forceValue = true) { Value v; - Expr * e = state.parseExprFromString(input, ""); + Expr * e = state.parseExprFromString(input, state.rootPath(CanonPath::root)); assert(e); state.eval(e, v); if (forceValue) diff --git a/src/libexpr/tests/local.mk b/src/libexpr/tests/local.mk index 3e5504f71..331a5ead6 100644 --- a/src/libexpr/tests/local.mk +++ b/src/libexpr/tests/local.mk @@ -12,7 +12,7 @@ libexpr-tests_SOURCES := \ $(wildcard $(d)/*.cc) \ $(wildcard $(d)/value/*.cc) -libexpr-tests_CXXFLAGS += -I src/libexpr -I src/libutil -I src/libstore -I src/libexpr/tests +libexpr-tests_CXXFLAGS += -I src/libexpr -I src/libutil -I src/libstore -I src/libexpr/tests -I src/libfetchers libexpr-tests_LIBS = libstore-tests libutils-tests libexpr libutil libstore libfetchers diff --git a/src/libexpr/tests/primops.cc b/src/libexpr/tests/primops.cc index e1d3ac503..ce3b5d11f 100644 --- a/src/libexpr/tests/primops.cc +++ b/src/libexpr/tests/primops.cc @@ -15,8 +15,8 @@ namespace nix { return oss.str(); } - void log(Verbosity lvl, const FormatOrString & fs) override { - oss << fs.s << std::endl; + void log(Verbosity lvl, std::string_view s) override { + oss << s << std::endl; } void logEI(const ErrorInfo & ei) override { diff --git a/src/libexpr/tests/value/context.cc b/src/libexpr/tests/value/context.cc index 083359b7a..27d6920b0 100644 --- a/src/libexpr/tests/value/context.cc +++ b/src/libexpr/tests/value/context.cc @@ -8,69 +8,62 @@ namespace nix { -// Testing of trivial expressions -struct NixStringContextElemTest : public LibExprTest { - const Store & store() const { - return *LibExprTest::store; - } -}; - -TEST_F(NixStringContextElemTest, empty_invalid) { +TEST(NixStringContextElemTest, empty_invalid) { EXPECT_THROW( - NixStringContextElem::parse(store(), ""), + NixStringContextElem::parse(""), BadNixStringContextElem); } -TEST_F(NixStringContextElemTest, single_bang_invalid) { +TEST(NixStringContextElemTest, single_bang_invalid) { EXPECT_THROW( - NixStringContextElem::parse(store(), "!"), + NixStringContextElem::parse("!"), BadNixStringContextElem); } -TEST_F(NixStringContextElemTest, double_bang_invalid) { +TEST(NixStringContextElemTest, double_bang_invalid) { EXPECT_THROW( - NixStringContextElem::parse(store(), "!!/"), + NixStringContextElem::parse("!!/"), BadStorePath); } -TEST_F(NixStringContextElemTest, eq_slash_invalid) { +TEST(NixStringContextElemTest, eq_slash_invalid) { EXPECT_THROW( - NixStringContextElem::parse(store(), "=/"), + NixStringContextElem::parse("=/"), BadStorePath); } -TEST_F(NixStringContextElemTest, slash_invalid) { +TEST(NixStringContextElemTest, slash_invalid) { EXPECT_THROW( - NixStringContextElem::parse(store(), "/"), + NixStringContextElem::parse("/"), BadStorePath); } 
-TEST_F(NixStringContextElemTest, opaque) { - std::string_view opaque = "/nix/store/g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-x"; - auto elem = NixStringContextElem::parse(store(), opaque); +TEST(NixStringContextElemTest, opaque) { + std::string_view opaque = "g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-x"; + auto elem = NixStringContextElem::parse(opaque); auto * p = std::get_if(&elem); ASSERT_TRUE(p); - ASSERT_EQ(p->path, store().parseStorePath(opaque)); - ASSERT_EQ(elem.to_string(store()), opaque); + ASSERT_EQ(p->path, StorePath { opaque }); + ASSERT_EQ(elem.to_string(), opaque); } -TEST_F(NixStringContextElemTest, drvDeep) { - std::string_view drvDeep = "=/nix/store/g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-x.drv"; - auto elem = NixStringContextElem::parse(store(), drvDeep); +TEST(NixStringContextElemTest, drvDeep) { + std::string_view drvDeep = "=g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-x.drv"; + auto elem = NixStringContextElem::parse(drvDeep); auto * p = std::get_if(&elem); ASSERT_TRUE(p); - ASSERT_EQ(p->drvPath, store().parseStorePath(drvDeep.substr(1))); - ASSERT_EQ(elem.to_string(store()), drvDeep); + ASSERT_EQ(p->drvPath, StorePath { drvDeep.substr(1) }); + ASSERT_EQ(elem.to_string(), drvDeep); } -TEST_F(NixStringContextElemTest, built) { - std::string_view built = "!foo!/nix/store/g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-x.drv"; - auto elem = NixStringContextElem::parse(store(), built); +TEST(NixStringContextElemTest, built) { + std::string_view built = "!foo!g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-x.drv"; + auto elem = NixStringContextElem::parse(built); auto * p = std::get_if(&elem); ASSERT_TRUE(p); ASSERT_EQ(p->output, "foo"); - ASSERT_EQ(p->drvPath, store().parseStorePath(built.substr(5))); - ASSERT_EQ(elem.to_string(store()), built); + ASSERT_EQ(p->drvPath, StorePath { built.substr(5) }); + ASSERT_EQ(elem.to_string(), built); } } @@ -116,12 +109,12 @@ Gen Arbitrary::arbitrary() namespace nix { -RC_GTEST_FIXTURE_PROP( +RC_GTEST_PROP( NixStringContextElemTest, prop_round_rip, (const NixStringContextElem & o)) { - RC_ASSERT(o == NixStringContextElem::parse(store(), o.to_string(store()))); + RC_ASSERT(o == NixStringContextElem::parse(o.to_string())); } } diff --git a/src/libexpr/tests/value/context.hh b/src/libexpr/tests/value/context.hh index 54d21760e..c0bc97ba3 100644 --- a/src/libexpr/tests/value/context.hh +++ b/src/libexpr/tests/value/context.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include diff --git a/src/libexpr/value-to-json.cc b/src/libexpr/value-to-json.cc index c35c876e3..4996a5bde 100644 --- a/src/libexpr/value-to-json.cc +++ b/src/libexpr/value-to-json.cc @@ -11,7 +11,7 @@ namespace nix { using json = nlohmann::json; json printValueAsJSON(EvalState & state, bool strict, - Value & v, const PosIdx pos, PathSet & context, bool copyToStore) + Value & v, const PosIdx pos, NixStringContext & context, bool copyToStore) { checkInterrupt(); @@ -36,9 +36,10 @@ json printValueAsJSON(EvalState & state, bool strict, case nPath: if (copyToStore) - out = state.store->printStorePath(state.copyPathToStore(context, v.path)); + out = state.store->printStorePath( + state.copyPathToStore(context, v.path())); else - out = v.path; + out = v.path().path.abs(); break; case nNull: @@ -94,13 +95,13 @@ json printValueAsJSON(EvalState & state, bool strict, } void printValueAsJSON(EvalState & state, bool strict, - Value & v, const PosIdx pos, std::ostream & str, PathSet & context, bool copyToStore) + Value & v, const PosIdx pos, std::ostream & str, NixStringContext & context, bool copyToStore) { str << printValueAsJSON(state, strict, v, 
pos, context, copyToStore); } json ExternalValueBase::printValueAsJSON(EvalState & state, bool strict, - PathSet & context, bool copyToStore) const + NixStringContext & context, bool copyToStore) const { state.debugThrowLastTrace(TypeError("cannot convert %1% to JSON", showType())); } diff --git a/src/libexpr/value-to-json.hh b/src/libexpr/value-to-json.hh index 22f26b790..47ac90313 100644 --- a/src/libexpr/value-to-json.hh +++ b/src/libexpr/value-to-json.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "nixexpr.hh" #include "eval.hh" @@ -10,9 +11,9 @@ namespace nix { nlohmann::json printValueAsJSON(EvalState & state, bool strict, - Value & v, const PosIdx pos, PathSet & context, bool copyToStore = true); + Value & v, const PosIdx pos, NixStringContext & context, bool copyToStore = true); void printValueAsJSON(EvalState & state, bool strict, - Value & v, const PosIdx pos, std::ostream & str, PathSet & context, bool copyToStore = true); + Value & v, const PosIdx pos, std::ostream & str, NixStringContext & context, bool copyToStore = true); } diff --git a/src/libexpr/value-to-xml.cc b/src/libexpr/value-to-xml.cc index 3f6222768..2539ad1c1 100644 --- a/src/libexpr/value-to-xml.cc +++ b/src/libexpr/value-to-xml.cc @@ -18,21 +18,21 @@ static XMLAttrs singletonAttrs(const std::string & name, const std::string & val static void printValueAsXML(EvalState & state, bool strict, bool location, - Value & v, XMLWriter & doc, PathSet & context, PathSet & drvsSeen, + Value & v, XMLWriter & doc, NixStringContext & context, PathSet & drvsSeen, const PosIdx pos); static void posToXML(EvalState & state, XMLAttrs & xmlAttrs, const Pos & pos) { - if (auto path = std::get_if(&pos.origin)) - xmlAttrs["path"] = *path; - xmlAttrs["line"] = (format("%1%") % pos.line).str(); - xmlAttrs["column"] = (format("%1%") % pos.column).str(); + if (auto path = std::get_if(&pos.origin)) + xmlAttrs["path"] = path->path.abs(); + xmlAttrs["line"] = fmt("%1%", pos.line); + xmlAttrs["column"] = fmt("%1%", pos.column); } static void showAttrs(EvalState & state, bool strict, bool location, - Bindings & attrs, XMLWriter & doc, PathSet & context, PathSet & drvsSeen) + Bindings & attrs, XMLWriter & doc, NixStringContext & context, PathSet & drvsSeen) { StringSet names; @@ -54,7 +54,7 @@ static void showAttrs(EvalState & state, bool strict, bool location, static void printValueAsXML(EvalState & state, bool strict, bool location, - Value & v, XMLWriter & doc, PathSet & context, PathSet & drvsSeen, + Value & v, XMLWriter & doc, NixStringContext & context, PathSet & drvsSeen, const PosIdx pos) { checkInterrupt(); @@ -64,7 +64,7 @@ static void printValueAsXML(EvalState & state, bool strict, bool location, switch (v.type()) { case nInt: - doc.writeEmptyElement("int", singletonAttrs("value", (format("%1%") % v.integer).str())); + doc.writeEmptyElement("int", singletonAttrs("value", fmt("%1%", v.integer))); break; case nBool: @@ -78,7 +78,7 @@ static void printValueAsXML(EvalState & state, bool strict, bool location, break; case nPath: - doc.writeEmptyElement("path", singletonAttrs("value", v.path)); + doc.writeEmptyElement("path", singletonAttrs("value", v.path().to_string())); break; case nNull: @@ -156,7 +156,7 @@ static void printValueAsXML(EvalState & state, bool strict, bool location, break; case nFloat: - doc.writeEmptyElement("float", singletonAttrs("value", (format("%1%") % v.fpoint).str())); + doc.writeEmptyElement("float", singletonAttrs("value", fmt("%1%", v.fpoint))); break; case nThunk: @@ -166,7 +166,7 @@ static void 
printValueAsXML(EvalState & state, bool strict, bool location, void ExternalValueBase::printValueAsXML(EvalState & state, bool strict, - bool location, XMLWriter & doc, PathSet & context, PathSet & drvsSeen, + bool location, XMLWriter & doc, NixStringContext & context, PathSet & drvsSeen, const PosIdx pos) const { doc.writeEmptyElement("unevaluated"); @@ -174,7 +174,7 @@ void ExternalValueBase::printValueAsXML(EvalState & state, bool strict, void printValueAsXML(EvalState & state, bool strict, bool location, - Value & v, std::ostream & out, PathSet & context, const PosIdx pos) + Value & v, std::ostream & out, NixStringContext & context, const PosIdx pos) { XMLWriter doc(true, out); XMLOpenElement root(doc, "expr"); diff --git a/src/libexpr/value-to-xml.hh b/src/libexpr/value-to-xml.hh index 506f32b6b..6d702c0f2 100644 --- a/src/libexpr/value-to-xml.hh +++ b/src/libexpr/value-to-xml.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "nixexpr.hh" #include "eval.hh" @@ -9,6 +10,6 @@ namespace nix { void printValueAsXML(EvalState & state, bool strict, bool location, - Value & v, std::ostream & out, PathSet & context, const PosIdx pos); + Value & v, std::ostream & out, NixStringContext & context, const PosIdx pos); } diff --git a/src/libexpr/value.hh b/src/libexpr/value.hh index 508dbe218..89c0c36fd 100644 --- a/src/libexpr/value.hh +++ b/src/libexpr/value.hh @@ -1,9 +1,11 @@ #pragma once +///@file #include #include "symbol-table.hh" #include "value/context.hh" +#include "input-accessor.hh" #if HAVE_BOEHMGC #include @@ -35,9 +37,11 @@ typedef enum { tFloat } InternalType; -// This type abstracts over all actual value types in the language, -// grouping together implementation details like tList*, different function -// types, and types in non-normal form (so thunks and co.) +/** + * This type abstracts over all actual value types in the language, + * grouping together implementation details like tList*, different function + * types, and types in non-normal form (so thunks and co.) + */ typedef enum { nThunk, nInt, @@ -69,40 +73,53 @@ class XMLWriter; typedef int64_t NixInt; typedef double NixFloat; -/* External values must descend from ExternalValueBase, so that +/** + * External values must descend from ExternalValueBase, so that * type-agnostic nix functions (e.g. showType) can be implemented */ class ExternalValueBase { friend std::ostream & operator << (std::ostream & str, const ExternalValueBase & v); protected: - /* Print out the value */ + /** + * Print out the value + */ virtual std::ostream & print(std::ostream & str) const = 0; public: - /* Return a simple string describing the type */ + /** + * Return a simple string describing the type + */ virtual std::string showType() const = 0; - /* Return a string to be used in builtins.typeOf */ + /** + * Return a string to be used in builtins.typeOf + */ virtual std::string typeOf() const = 0; - /* Coerce the value to a string. Defaults to uncoercable, i.e. throws an + /** + * Coerce the value to a string. Defaults to uncoercable, i.e. throws an * error. */ - virtual std::string coerceToString(const Pos & pos, PathSet & context, bool copyMore, bool copyToStore) const; + virtual std::string coerceToString(const Pos & pos, NixStringContext & context, bool copyMore, bool copyToStore) const; - /* Compare to another value of the same type. Defaults to uncomparable, + /** + * Compare to another value of the same type. Defaults to uncomparable, * i.e. always false. 
*/ virtual bool operator ==(const ExternalValueBase & b) const; - /* Print the value as JSON. Defaults to unconvertable, i.e. throws an error */ + /** + * Print the value as JSON. Defaults to unconvertable, i.e. throws an error + */ virtual nlohmann::json printValueAsJSON(EvalState & state, bool strict, - PathSet & context, bool copyToStore = true) const; + NixStringContext & context, bool copyToStore = true) const; - /* Print the value as XML. Defaults to unevaluated */ + /** + * Print the value as XML. Defaults to unevaluated + */ virtual void printValueAsXML(EvalState & state, bool strict, bool location, - XMLWriter & doc, PathSet & context, PathSet & drvsSeen, + XMLWriter & doc, NixStringContext & context, PathSet & drvsSeen, const PosIdx pos) const; virtual ~ExternalValueBase() @@ -145,32 +162,34 @@ public: NixInt integer; bool boolean; - /* Strings in the evaluator carry a so-called `context' which - is a list of strings representing store paths. This is to - allow users to write things like + /** + * Strings in the evaluator carry a so-called `context` which + * is a list of strings representing store paths. This is to + * allow users to write things like - "--with-freetype2-library=" + freetype + "/lib" + * "--with-freetype2-library=" + freetype + "/lib" - where `freetype' is a derivation (or a source to be copied - to the store). If we just concatenated the strings without - keeping track of the referenced store paths, then if the - string is used as a derivation attribute, the derivation - will not have the correct dependencies in its inputDrvs and - inputSrcs. + * where `freetype` is a derivation (or a source to be copied + * to the store). If we just concatenated the strings without + * keeping track of the referenced store paths, then if the + * string is used as a derivation attribute, the derivation + * will not have the correct dependencies in its inputDrvs and + * inputSrcs. - The semantics of the context is as follows: when a string - with context C is used as a derivation attribute, then the - derivations in C will be added to the inputDrvs of the - derivation, and the other store paths in C will be added to - the inputSrcs of the derivations. + * The semantics of the context is as follows: when a string + * with context C is used as a derivation attribute, then the + * derivations in C will be added to the inputDrvs of the + * derivation, and the other store paths in C will be added to + * the inputSrcs of the derivations. - For canonicity, the store paths should be in sorted order. */ + * For canonicity, the store paths should be in sorted order. + */ struct { const char * s; const char * * context; // must be in sorted order } string; - const char * path; + const char * _path; Bindings * attrs; struct { size_t size; @@ -196,8 +215,10 @@ public: NixFloat fpoint; }; - // Returns the normal type of a Value. This only returns nThunk if the - // Value hasn't been forceValue'd + /** + * Returns the normal type of a Value. This only returns nThunk if + * the Value hasn't been forceValue'd + */ inline ValueType type() const { switch (internalType) { @@ -216,8 +237,10 @@ public: abort(); } - /* After overwriting an app node, be sure to clear pointers in the - Value to ensure that the target isn't kept alive unnecessarily. */ + /** + * After overwriting an app node, be sure to clear pointers in the + * Value to ensure that the target isn't kept alive unnecessarily. 
+ */ inline void clearValue() { app.left = app.right = 0; @@ -246,19 +269,24 @@ public: void mkString(std::string_view s); - void mkString(std::string_view s, const PathSet & context); + void mkString(std::string_view s, const NixStringContext & context); - void mkStringMove(const char * s, const PathSet & context); + void mkStringMove(const char * s, const NixStringContext & context); - inline void mkPath(const char * s) + inline void mkString(const Symbol & s) + { + mkString(((const std::string &) s).c_str()); + } + + void mkPath(const SourcePath & path); + + inline void mkPath(const char * path) { clearValue(); internalType = tPath; - path = s; + _path = path; } - void mkPath(std::string_view s); - inline void mkNull() { clearValue(); @@ -365,13 +393,13 @@ public: PosIdx determinePos(const PosIdx pos) const; - /* Check whether forcing this value requires a trivial amount of - computation. In particular, function applications are - non-trivial. */ + /** + * Check whether forcing this value requires a trivial amount of + * computation. In particular, function applications are + * non-trivial. + */ bool isTrivial() const; - NixStringContext getContext(const Store &); - auto listItems() { struct ListIterable @@ -399,6 +427,18 @@ public: auto begin = listElems(); return ConstListIterable { begin, begin + listSize() }; } + + SourcePath path() const + { + assert(internalType == tPath); + return SourcePath{CanonPath(_path)}; + } + + std::string_view str() const + { + assert(internalType == tString); + return std::string_view(string.s); + } }; @@ -413,7 +453,9 @@ typedef std::map ValueVectorMap; #endif -/* A value allocated in traceable memory. */ +/** + * A value allocated in traceable memory. + */ typedef std::shared_ptr RootValue; RootValue allocRootValue(Value * v); diff --git a/src/libexpr/value/context.cc b/src/libexpr/value/context.cc index 61d9c53df..f76fc76e4 100644 --- a/src/libexpr/value/context.cc +++ b/src/libexpr/value/context.cc @@ -1,11 +1,10 @@ #include "value/context.hh" -#include "store-api.hh" #include namespace nix { -NixStringContextElem NixStringContextElem::parse(const Store & store, std::string_view s0) +NixStringContextElem NixStringContextElem::parse(std::string_view s0) { std::string_view s = s0; @@ -25,41 +24,41 @@ NixStringContextElem NixStringContextElem::parse(const Store & store, std::strin "String content element beginning with '!' 
should have a second '!'"); } return NixStringContextElem::Built { - .drvPath = store.parseStorePath(s.substr(index + 1)), + .drvPath = StorePath { s.substr(index + 1) }, .output = std::string(s.substr(0, index)), }; } case '=': { return NixStringContextElem::DrvDeep { - .drvPath = store.parseStorePath(s.substr(1)), + .drvPath = StorePath { s.substr(1) }, }; } default: { return NixStringContextElem::Opaque { - .path = store.parseStorePath(s), + .path = StorePath { s }, }; } } } -std::string NixStringContextElem::to_string(const Store & store) const { +std::string NixStringContextElem::to_string() const { return std::visit(overloaded { [&](const NixStringContextElem::Built & b) { std::string res; res += '!'; res += b.output; res += '!'; - res += store.printStorePath(b.drvPath); + res += b.drvPath.to_string(); return res; }, [&](const NixStringContextElem::DrvDeep & d) { std::string res; res += '='; - res += store.printStorePath(d.drvPath); + res += d.drvPath.to_string(); return res; }, [&](const NixStringContextElem::Opaque & o) { - return store.printStorePath(o.path); + return std::string { o.path.to_string() }; }, }, raw()); } diff --git a/src/libexpr/value/context.hh b/src/libexpr/value/context.hh index 721563cba..287ae08a9 100644 --- a/src/libexpr/value/context.hh +++ b/src/libexpr/value/context.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "util.hh" #include "comparator.hh" @@ -25,36 +26,37 @@ public: } }; -class Store; - -/* Plain opaque path to some store object. - - Encoded as just the path: ‘’. -*/ +/** + * Plain opaque path to some store object. + * + * Encoded as just the path: ‘’. + */ struct NixStringContextElem_Opaque { StorePath path; GENERATE_CMP(NixStringContextElem_Opaque, me->path); }; -/* Path to a derivation and its entire build closure. - - The path doesn't just refer to derivation itself and its closure, but - also all outputs of all derivations in that closure (including the - root derivation). - - Encoded in the form ‘=’. -*/ +/** + * Path to a derivation and its entire build closure. + * + * The path doesn't just refer to derivation itself and its closure, but + * also all outputs of all derivations in that closure (including the + * root derivation). + * + * Encoded in the form ‘=’. + */ struct NixStringContextElem_DrvDeep { StorePath drvPath; GENERATE_CMP(NixStringContextElem_DrvDeep, me->drvPath); }; -/* Derivation output. - - Encoded in the form ‘!!’. -*/ +/** + * Derivation output. + * + * Encoded in the form ‘!!’. 
+ */ struct NixStringContextElem_Built { StorePath drvPath; std::string output; @@ -76,22 +78,26 @@ struct NixStringContextElem : _NixStringContextElem_Raw { using DrvDeep = NixStringContextElem_DrvDeep; using Built = NixStringContextElem_Built; - inline const Raw & raw() const { + inline const Raw & raw() const & { return static_cast(*this); } - inline Raw & raw() { + inline Raw & raw() & { return static_cast(*this); } + inline Raw && raw() && { + return static_cast(*this); + } - /* Decode a context string, one of: - - ‘’ - - ‘=’ - - ‘!!’ - */ - static NixStringContextElem parse(const Store & store, std::string_view s); - std::string to_string(const Store & store) const; + /** + * Decode a context string, one of: + * - ‘’ + * - ‘=’ + * - ‘!!’ + */ + static NixStringContextElem parse(std::string_view s); + std::string to_string() const; }; -typedef std::vector NixStringContext; +typedef std::set NixStringContext; } diff --git a/src/libfetchers/attrs.hh b/src/libfetchers/attrs.hh index e41037633..1a14bb023 100644 --- a/src/libfetchers/attrs.hh +++ b/src/libfetchers/attrs.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "types.hh" diff --git a/src/libfetchers/cache.hh b/src/libfetchers/cache.hh index 3763ee2a6..ae398d040 100644 --- a/src/libfetchers/cache.hh +++ b/src/libfetchers/cache.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "fetchers.hh" diff --git a/src/libfetchers/fetch-settings.hh b/src/libfetchers/fetch-settings.hh index f33cbdcfc..6108a179c 100644 --- a/src/libfetchers/fetch-settings.hh +++ b/src/libfetchers/fetch-settings.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "types.hh" #include "config.hh" @@ -57,7 +58,7 @@ struct FetchSettings : public Config ``` This example specifies three tokens, one each for accessing - github.com, gitlab.mycompany.com, and sourceforge.net. + github.com, gitlab.mycompany.com, and gitlab.com. The `input.foo` uses the "gitlab" fetcher, which might requires specifying the token type along with the token @@ -75,21 +76,25 @@ struct FetchSettings : public Config Path or URI of the global flake registry. When empty, disables the global flake registry. - )"}; + )", + {}, true, Xp::Flakes}; Setting useRegistries{this, true, "use-registries", - "Whether to use flake registries to resolve flake references."}; + "Whether to use flake registries to resolve flake references.", + {}, true, Xp::Flakes}; Setting acceptFlakeConfig{this, false, "accept-flake-config", - "Whether to accept nix configuration from a flake without prompting."}; + "Whether to accept nix configuration from a flake without prompting.", + {}, true, Xp::Flakes}; Setting commitLockFileSummary{ this, "", "commit-lockfile-summary", R"( The commit summary to use when committing changed flake lock files. If empty, the summary is generated based on the action performed. - )"}; + )", + {}, true, Xp::Flakes}; }; // FIXME: don't use a global variable. 
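For orientation, a minimal sketch (not part of the patch) of how the store-free `NixStringContextElem` API above can be exercised, assuming the `parse`/`to_string` signatures and variant names declared in `src/libexpr/value/context.hh` and compilation inside the Nix source tree; the store-path basenames are placeholder values:

```cpp
// Sketch only: exercises the store-free NixStringContextElem API declared in
// src/libexpr/value/context.hh. Assumes it is built within the Nix source
// tree (libexpr/libstore headers and libraries on the include/link path).
#include "value/context.hh"

#include <cassert>
#include <iostream>
#include <string_view>

using namespace nix;

int main()
{
    // Placeholder store-path basenames; the 32-character hash part is made up.
    std::string_view built = "!out!g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-x.drv";

    // '!<output>!<drvPath>' decodes to the Built variant; no Store argument is needed.
    auto elem = NixStringContextElem::parse(built);
    auto * b = std::get_if<NixStringContextElem::Built>(&elem);
    assert(b && b->output == "out");

    // to_string() reproduces the original encoding, so parse/to_string round-trips.
    assert(elem.to_string() == built);

    // A NixStringContext is now a std::set of structured elements rather than
    // a PathSet of encoded strings.
    NixStringContext ctx;
    ctx.emplace(NixStringContextElem::Opaque {
        .path = StorePath { "g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-x" },
    });
    ctx.insert(std::move(elem));

    for (const auto & c : ctx)
        std::cout << c.to_string() << "\n";
}
```

The round-trip needs no `Store` because context elements now carry only the store-path basename; the primops in `src/libexpr/primops/context.cc` above construct and inspect these variants directly instead of re-parsing printed store paths.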
diff --git a/src/libfetchers/fetchers.cc b/src/libfetchers/fetchers.cc index c767e72e5..91db3a9eb 100644 --- a/src/libfetchers/fetchers.cc +++ b/src/libfetchers/fetchers.cc @@ -210,7 +210,13 @@ StorePath Input::computeStorePath(Store & store) const auto narHash = getNarHash(); if (!narHash) throw Error("cannot compute store path for unlocked input '%s'", to_string()); - return store.makeFixedOutputPath(FileIngestionMethod::Recursive, *narHash, getName()); + return store.makeFixedOutputPath(getName(), FixedOutputInfo { + .hash = { + .method = FileIngestionMethod::Recursive, + .hash = *narHash, + }, + .references = {}, + }); } std::string Input::getType() const diff --git a/src/libfetchers/fetchers.hh b/src/libfetchers/fetchers.hh index 17da37f47..498ad7e4d 100644 --- a/src/libfetchers/fetchers.hh +++ b/src/libfetchers/fetchers.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "types.hh" #include "hash.hh" @@ -20,14 +21,14 @@ struct Tree struct InputScheme; -/* The Input object is generated by a specific fetcher, based on the +/** + * The Input object is generated by a specific fetcher, based on the * user-supplied input attribute in the flake.nix file, and contains * the information that the specific fetcher needs to perform the * actual fetch. The Input object is most commonly created via the * "fromURL()" or "fromAttrs()" static functions which are provided * the url or attrset specified in the flake file. */ - struct Input { friend struct InputScheme; @@ -37,7 +38,9 @@ struct Input bool locked = false; bool direct = true; - /* path of the parent of this input, used for relative path resolution */ + /** + * path of the parent of this input, used for relative path resolution + */ std::optional parent; public: @@ -55,22 +58,35 @@ public: Attrs toAttrs() const; - /* Check whether this is a "direct" input, that is, not - one that goes through a registry. */ + /** + * Check whether this is a "direct" input, that is, not + * one that goes through a registry. + */ bool isDirect() const { return direct; } - /* Check whether this is a "locked" input, that is, - one that contains a commit hash or content hash. */ + /** + * Check whether this is a "locked" input, that is, + * one that contains a commit hash or content hash. + */ bool isLocked() const { return locked; } + /** + * Check whether the input carries all necessary info required + * for cache insertion and substitution. + * These fields are used to uniquely identify cached trees + * within the "tarball TTL" window without necessarily + * indicating that the input's origin is unchanged. + */ bool hasAllInfo() const; bool operator ==(const Input & other) const; bool contains(const Input & other) const; - /* Fetch the input into the Nix store, returning the location in - the Nix store and the locked input. */ + /** + * Fetch the input into the Nix store, returning the location in + * the Nix store and the locked input. + */ std::pair fetch(ref store) const; Input applyOverrides( @@ -99,7 +115,8 @@ public: }; -/* The InputScheme represents a type of fetcher. Each fetcher +/** + * The InputScheme represents a type of fetcher. Each fetcher * registers with nix at startup time. 
When processing an input for a * flake, each scheme is given an opportunity to "recognize" that * input from the url or attributes in the flake file's specification diff --git a/src/libfetchers/git.cc b/src/libfetchers/git.cc index aeb2d4914..b80b48800 100644 --- a/src/libfetchers/git.cc +++ b/src/libfetchers/git.cc @@ -267,7 +267,7 @@ struct GitInputScheme : InputScheme for (auto & [name, value] : url.query) { if (name == "rev" || name == "ref") attrs.emplace(name, value); - else if (name == "shallow" || name == "submodules") + else if (name == "shallow" || name == "submodules" || name == "allRefs") attrs.emplace(name, Explicit { value == "1" }); else url2.query.emplace(name, value); diff --git a/src/libfetchers/input-accessor.cc b/src/libfetchers/input-accessor.cc new file mode 100644 index 000000000..f9909c218 --- /dev/null +++ b/src/libfetchers/input-accessor.cc @@ -0,0 +1,100 @@ +#include "input-accessor.hh" +#include "store-api.hh" + +namespace nix { + +std::ostream & operator << (std::ostream & str, const SourcePath & path) +{ + str << path.to_string(); + return str; +} + +std::string_view SourcePath::baseName() const +{ + return path.baseName().value_or("source"); +} + +SourcePath SourcePath::parent() const +{ + auto p = path.parent(); + assert(p); + return std::move(*p); +} + +InputAccessor::Stat SourcePath::lstat() const +{ + auto st = nix::lstat(path.abs()); + return InputAccessor::Stat { + .type = + S_ISREG(st.st_mode) ? InputAccessor::tRegular : + S_ISDIR(st.st_mode) ? InputAccessor::tDirectory : + S_ISLNK(st.st_mode) ? InputAccessor::tSymlink : + InputAccessor::tMisc, + .isExecutable = S_ISREG(st.st_mode) && st.st_mode & S_IXUSR + }; +} + +std::optional SourcePath::maybeLstat() const +{ + // FIXME: merge these into one operation. + if (!pathExists()) + return {}; + return lstat(); +} + +InputAccessor::DirEntries SourcePath::readDirectory() const +{ + InputAccessor::DirEntries res; + for (auto & entry : nix::readDirectory(path.abs())) { + std::optional type; + switch (entry.type) { + case DT_REG: type = InputAccessor::Type::tRegular; break; + case DT_LNK: type = InputAccessor::Type::tSymlink; break; + case DT_DIR: type = InputAccessor::Type::tDirectory; break; + } + res.emplace(entry.name, type); + } + return res; +} + +StorePath SourcePath::fetchToStore( + ref store, + std::string_view name, + PathFilter * filter, + RepairFlag repair) const +{ + return + settings.readOnlyMode + ? store->computeStorePathForPath(name, path.abs(), FileIngestionMethod::Recursive, htSHA256, filter ? *filter : defaultPathFilter).first + : store->addToStore(name, path.abs(), FileIngestionMethod::Recursive, htSHA256, filter ? 
*filter : defaultPathFilter, repair); +} + +SourcePath SourcePath::resolveSymlinks() const +{ + SourcePath res(CanonPath::root); + + int linksAllowed = 1024; + + for (auto & component : path) { + res.path.push(component); + while (true) { + if (auto st = res.maybeLstat()) { + if (!linksAllowed--) + throw Error("infinite symlink recursion in path '%s'", path); + if (st->type != InputAccessor::tSymlink) break; + auto target = res.readLink(); + if (hasPrefix(target, "/")) + res = CanonPath(target); + else { + res.path.pop(); + res.path.extend(CanonPath(target)); + } + } else + break; + } + } + + return res; +} + +} diff --git a/src/libfetchers/input-accessor.hh b/src/libfetchers/input-accessor.hh new file mode 100644 index 000000000..5a2f17f62 --- /dev/null +++ b/src/libfetchers/input-accessor.hh @@ -0,0 +1,167 @@ +#pragma once + +#include "ref.hh" +#include "types.hh" +#include "archive.hh" +#include "canon-path.hh" +#include "repair-flag.hh" + +namespace nix { + +class StorePath; +class Store; + +struct InputAccessor +{ + enum Type { + tRegular, tSymlink, tDirectory, + /** + Any other node types that may be encountered on the file system, such as device nodes, sockets, named pipe, and possibly even more exotic things. + + Responsible for `"unknown"` from `builtins.readFileType "/dev/null"`. + + Unlike `DT_UNKNOWN`, this must not be used for deferring the lookup of types. + */ + tMisc + }; + + struct Stat + { + Type type = tMisc; + //uint64_t fileSize = 0; // regular files only + bool isExecutable = false; // regular files only + }; + + typedef std::optional DirEntry; + + typedef std::map DirEntries; +}; + +/** + * An abstraction for accessing source files during + * evaluation. Currently, it's just a wrapper around `CanonPath` that + * accesses files in the regular filesystem, but in the future it will + * support fetching files in other ways. + */ +struct SourcePath +{ + CanonPath path; + + SourcePath(CanonPath path) + : path(std::move(path)) + { } + + std::string_view baseName() const; + + /** + * Construct the parent of this `SourcePath`. Aborts if `this` + * denotes the root. + */ + SourcePath parent() const; + + /** + * If this `SourcePath` denotes a regular file (not a symlink), + * return its contents; otherwise throw an error. + */ + std::string readFile() const + { return nix::readFile(path.abs()); } + + /** + * Return whether this `SourcePath` denotes a file (of any type) + * that exists + */ + bool pathExists() const + { return nix::pathExists(path.abs()); } + + /** + * Return stats about this `SourcePath`, or throw an exception if + * it doesn't exist. + */ + InputAccessor::Stat lstat() const; + + /** + * Return stats about this `SourcePath`, or std::nullopt if it + * doesn't exist. + */ + std::optional maybeLstat() const; + + /** + * If this `SourcePath` denotes a directory (not a symlink), + * return its directory entries; otherwise throw an error. + */ + InputAccessor::DirEntries readDirectory() const; + + /** + * If this `SourcePath` denotes a symlink, return its target; + * otherwise throw an error. + */ + std::string readLink() const + { return nix::readLink(path.abs()); } + + /** + * Dump this `SourcePath` to `sink` as a NAR archive. + */ + void dumpPath( + Sink & sink, + PathFilter & filter = defaultPathFilter) const + { return nix::dumpPath(path.abs(), sink, filter); } + + /** + * Copy this `SourcePath` to the Nix store. 
+ */ + StorePath fetchToStore( + ref store, + std::string_view name = "source", + PathFilter * filter = nullptr, + RepairFlag repair = NoRepair) const; + + /** + * Return the location of this path in the "real" filesystem, if + * it has a physical location. + */ + std::optional getPhysicalPath() const + { return path; } + + std::string to_string() const + { return path.abs(); } + + /** + * Append a `CanonPath` to this path. + */ + SourcePath operator + (const CanonPath & x) const + { return {path + x}; } + + /** + * Append a single component `c` to this path. `c` must not + * contain a slash. A slash is implicitly added between this path + * and `c`. + */ + SourcePath operator + (std::string_view c) const + { return {path + c}; } + + bool operator == (const SourcePath & x) const + { + return path == x.path; + } + + bool operator != (const SourcePath & x) const + { + return path != x.path; + } + + bool operator < (const SourcePath & x) const + { + return path < x.path; + } + + /** + * Resolve any symlinks in this `SourcePath` (including its + * parents). The result is a `SourcePath` in which no element is a + * symlink. + */ + SourcePath resolveSymlinks() const; +}; + +std::ostream & operator << (std::ostream & str, const SourcePath & path); + +} diff --git a/src/libfetchers/registry.hh b/src/libfetchers/registry.hh index 260a2c460..f57ab1e6b 100644 --- a/src/libfetchers/registry.hh +++ b/src/libfetchers/registry.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "types.hh" #include "fetchers.hh" diff --git a/src/libfetchers/tarball.cc b/src/libfetchers/tarball.cc index e9686262a..96fe5faca 100644 --- a/src/libfetchers/tarball.cc +++ b/src/libfetchers/tarball.cc @@ -71,15 +71,19 @@ DownloadFileResult downloadFile( dumpString(res.data, sink); auto hash = hashString(htSHA256, res.data); ValidPathInfo info { - store->makeFixedOutputPath(FileIngestionMethod::Flat, hash, name), + *store, + name, + FixedOutputInfo { + .hash = { + .method = FileIngestionMethod::Flat, + .hash = hash, + }, + .references = {}, + }, hashString(htSHA256, sink.s), }; info.narSize = sink.s.size(); - info.ca = FixedOutputHash { - .method = FileIngestionMethod::Flat, - .hash = hash, - }; - auto source = StringSource(sink.s); + auto source = StringSource { sink.s }; store->addToStore(info, source, NoRepair, NoCheckSigs); storePath = std::move(info.path); } diff --git a/src/libmain/common-args.hh b/src/libmain/common-args.hh index f180d83ce..c35406c3b 100644 --- a/src/libmain/common-args.hh +++ b/src/libmain/common-args.hh @@ -1,6 +1,8 @@ #pragma once +///@file #include "args.hh" +#include "repair-flag.hh" namespace nix { @@ -48,4 +50,21 @@ struct MixJSON : virtual Args } }; +struct MixRepair : virtual Args +{ + RepairFlag repair = NoRepair; + + MixRepair() + { + addFlag({ + .longName = "repair", + .description = + "During evaluation, rewrite missing or corrupted files in the Nix store. 
" + "During building, rebuild missing or corrupted store paths.", + .category = miscCategory, + .handler = {&repair, Repair}, + }); + } +}; + } diff --git a/src/libmain/loggers.hh b/src/libmain/loggers.hh index f3c759193..e5721420c 100644 --- a/src/libmain/loggers.hh +++ b/src/libmain/loggers.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "types.hh" diff --git a/src/libmain/progress-bar.cc b/src/libmain/progress-bar.cc index e9205a5e5..6600ec177 100644 --- a/src/libmain/progress-bar.cc +++ b/src/libmain/progress-bar.cc @@ -72,6 +72,7 @@ private: uint64_t corruptedPaths = 0, untrustedPaths = 0; bool active = true; + bool paused = false; bool haveUpdate = true; }; @@ -120,16 +121,28 @@ public: updateThread.join(); } + void pause() override { + state_.lock()->paused = true; + writeToStderr("\r\e[K"); + } + + void resume() override { + state_.lock()->paused = false; + writeToStderr("\r\e[K"); + state_.lock()->haveUpdate = true; + updateCV.notify_one(); + } + bool isVerbose() override { return printBuildLogs; } - void log(Verbosity lvl, const FormatOrString & fs) override + void log(Verbosity lvl, std::string_view s) override { if (lvl > verbosity) return; auto state(state_.lock()); - log(*state, lvl, fs.s); + log(*state, lvl, s); } void logEI(const ErrorInfo & ei) override @@ -142,7 +155,7 @@ public: log(*state, ei.level, oss.str()); } - void log(State & state, Verbosity lvl, const std::string & s) + void log(State & state, Verbosity lvl, std::string_view s) { if (state.active) { writeToStderr("\r\e[K" + filterANSIEscapes(s, !isTTY) + ANSI_NORMAL "\n"); @@ -339,7 +352,7 @@ public: auto nextWakeup = std::chrono::milliseconds::max(); state.haveUpdate = false; - if (!state.active) return nextWakeup; + if (state.paused || !state.active) return nextWakeup; std::string line; diff --git a/src/libmain/progress-bar.hh b/src/libmain/progress-bar.hh index 3a76f8448..c3c6e3833 100644 --- a/src/libmain/progress-bar.hh +++ b/src/libmain/progress-bar.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "logging.hh" diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc index f5f4008ff..eb3be7a52 100644 --- a/src/libmain/shared.cc +++ b/src/libmain/shared.cc @@ -10,7 +10,6 @@ #include #include #include -#include #include #include @@ -20,16 +19,9 @@ #ifdef __linux__ #include #endif -#ifdef __GLIBC__ -#include -#include -#include -#endif #include -#include - namespace nix { @@ -84,8 +76,18 @@ void printMissing(ref store, const StorePathSet & willBuild, downloadSizeMiB, narSizeMiB); } - for (auto & i : willSubstitute) - printMsg(lvl, " %s", store->printStorePath(i)); + std::vector willSubstituteSorted = {}; + std::for_each(willSubstitute.begin(), willSubstitute.end(), + [&](const StorePath &p) { willSubstituteSorted.push_back(&p); }); + std::sort(willSubstituteSorted.begin(), willSubstituteSorted.end(), + [](const StorePath *lhs, const StorePath *rhs) { + if (lhs->name() == rhs->name()) + return lhs->to_string() < rhs->to_string(); + else + return lhs->name() < rhs->name(); + }); + for (auto p : willSubstituteSorted) + printMsg(lvl, " %s", store->printStorePath(*p)); } if (!unknown.empty()) { @@ -105,57 +107,6 @@ std::string getArg(const std::string & opt, return *i; } - -#if OPENSSL_VERSION_NUMBER < 0x10101000L -/* OpenSSL is not thread-safe by default - it will randomly crash - unless the user supplies a mutex locking function. So let's do - that. 
*/ -static std::vector opensslLocks; - -static void opensslLockCallback(int mode, int type, const char * file, int line) -{ - if (mode & CRYPTO_LOCK) - opensslLocks[type].lock(); - else - opensslLocks[type].unlock(); -} -#endif - -static std::once_flag dns_resolve_flag; - -static void preloadNSS() { - /* builtin:fetchurl can trigger a DNS lookup, which with glibc can trigger a dynamic library load of - one of the glibc NSS libraries in a sandboxed child, which will fail unless the library's already - been loaded in the parent. So we force a lookup of an invalid domain to force the NSS machinery to - load its lookup libraries in the parent before any child gets a chance to. */ - std::call_once(dns_resolve_flag, []() { -#ifdef __GLIBC__ - /* On linux, glibc will run every lookup through the nss layer. - * That means every lookup goes, by default, through nscd, which acts as a local - * cache. - * Because we run builds in a sandbox, we also remove access to nscd otherwise - * lookups would leak into the sandbox. - * - * But now we have a new problem, we need to make sure the nss_dns backend that - * does the dns lookups when nscd is not available is loaded or available. - * - * We can't make it available without leaking nix's environment, so instead we'll - * load the backend, and configure nss so it does not try to run dns lookups - * through nscd. - * - * This is technically only used for builtins:fetch* functions so we only care - * about dns. - * - * All other platforms are unaffected. - */ - if (!dlopen(LIBNSS_DNS_SO, RTLD_NOW)) - warn("unable to load nss_dns backend"); - // FIXME: get hosts entry from nsswitch.conf. - __nss_configure_lookup("hosts", "files dns"); -#endif - }); -} - static void sigHandler(int signo) { } @@ -167,16 +118,7 @@ void initNix() std::cerr.rdbuf()->pubsetbuf(buf, sizeof(buf)); #endif -#if OPENSSL_VERSION_NUMBER < 0x10101000L - /* Initialise OpenSSL locking. */ - opensslLocks = std::vector(CRYPTO_num_locks()); - CRYPTO_set_locking_callback(opensslLockCallback); -#endif - - if (sodium_init() == -1) - throw Error("could not initialise libsodium"); - - loadConfFile(); + initLibStore(); startSignalHandlerThread(); @@ -213,7 +155,10 @@ void initNix() if (sigaction(SIGTRAP, &act, 0)) throw SysError("handling SIGTRAP"); #endif - /* Register a SIGSEGV handler to detect stack overflows. */ + /* Register a SIGSEGV handler to detect stack overflows. + Why not initLibExpr()? initGC() is essentially that, but + detectStackOverflow is not an instance of the init function concept, as + it may have to be invoked more than once per process. */ detectStackOverflow(); /* There is no privacy in the Nix system ;-) At least not for @@ -226,16 +171,6 @@ void initNix() gettimeofday(&tv, 0); srandom(tv.tv_usec); - /* On macOS, don't use the per-session TMPDIR (as set e.g. by - sshd). This breaks build users because they don't have access - to the TMPDIR, in particular in ‘nix-store --serve’. 
*/ -#if __APPLE__ - if (hasPrefix(getEnv("TMPDIR").value_or("/tmp"), "/var/folders/")) - unsetenv("TMPDIR"); -#endif - - preloadNSS(); - initLibStore(); } @@ -347,7 +282,7 @@ void parseCmdLine(const std::string & programName, const Strings & args, void printVersion(const std::string & programName) { - std::cout << format("%1% (Nix Super) %2%") % programName % nixVersion << std::endl; + std::cout << fmt("%1% (Nix Super) %2%", programName, nixVersion) << std::endl; if (verbosity > lvlInfo) { Strings cfg; #if HAVE_BOEHMGC diff --git a/src/libmain/shared.hh b/src/libmain/shared.hh index 1715374a6..7a9e83c6c 100644 --- a/src/libmain/shared.hh +++ b/src/libmain/shared.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "util.hh" #include "args.hh" @@ -24,7 +25,9 @@ public: int handleExceptions(const std::string & programName, std::function fun); -/* Don't forget to call initPlugins() after settings are initialized! */ +/** + * Don't forget to call initPlugins() after settings are initialized! + */ void initNix(); void parseCmdLine(int argc, char * * argv, @@ -35,7 +38,9 @@ void parseCmdLine(const std::string & programName, const Strings & args, void printVersion(const std::string & programName); -/* Ugh. No better place to put this. */ +/** + * Ugh. No better place to put this. + */ void printGCWarning(); class Store; @@ -74,11 +79,15 @@ struct LegacyArgs : public MixCommonArgs }; -/* Show the manual page for the specified program. */ +/** + * Show the manual page for the specified program. + */ void showManPage(const std::string & name); -/* The constructor of this class starts a pager if stdout is a - terminal and $PAGER is set. Stdout is redirected to the pager. */ +/** + * The constructor of this class starts a pager if stdout is a + * terminal and $PAGER is set. Stdout is redirected to the pager. + */ class RunPager { public: @@ -109,28 +118,34 @@ struct PrintFreed }; -/* Install a SIGSEGV handler to detect stack overflows. */ +/** + * Install a SIGSEGV handler to detect stack overflows. + */ void detectStackOverflow(); -/* Pluggable behavior to run in case of a stack overflow. - - Default value: defaultStackOverflowHandler. - - This is called by the handler installed by detectStackOverflow(). - - This gives Nix library consumers a limit opportunity to report the error - condition. The handler should exit the process. - See defaultStackOverflowHandler() for a reference implementation. - - NOTE: Use with diligence, because this runs in the signal handler, with very - limited stack space and a potentially a corrupted heap, all while the failed - thread is blocked indefinitely. All functions called must be reentrant. */ +/** + * Pluggable behavior to run in case of a stack overflow. + * + * Default value: defaultStackOverflowHandler. + * + * This is called by the handler installed by detectStackOverflow(). + * + * This gives Nix library consumers a limit opportunity to report the error + * condition. The handler should exit the process. + * See defaultStackOverflowHandler() for a reference implementation. + * + * NOTE: Use with diligence, because this runs in the signal handler, with very + * limited stack space and a potentially a corrupted heap, all while the failed + * thread is blocked indefinitely. All functions called must be reentrant. + */ extern std::function stackOverflowHandler; -/* The default, robust implementation of stackOverflowHandler. - - Prints an error message directly to stderr using a syscall instead of the - logger. Exits the process immediately after. 
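// A sketch of a custom handler satisfying the contract described above:
// only async-signal-safe calls (here write(2) and _exit(2)), and it must
// terminate the process. The message and exit code are illustrative.
#include <signal.h>
#include <unistd.h>

static void exampleStackOverflowHandler(siginfo_t * info, void * ctx)
{
    static const char msg[] = "error: stack overflow, exiting\n";
    (void) write(STDERR_FILENO, msg, sizeof(msg) - 1);
    _exit(1);
}

// A library consumer would install it once at startup, e.g.:
//     nix::stackOverflowHandler = exampleStackOverflowHandler;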
*/ +/** + * The default, robust implementation of stackOverflowHandler. + * + * Prints an error message directly to stderr using a syscall instead of the + * logger. Exits the process immediately after. + */ void defaultStackOverflowHandler(siginfo_t * info, void * ctx); } diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc index 751cf8c30..fcd763a9d 100644 --- a/src/libstore/binary-cache-store.cc +++ b/src/libstore/binary-cache-store.cc @@ -306,11 +306,22 @@ StorePath BinaryCacheStore::addToStoreFromDump(Source & dump, std::string_view n unsupported("addToStoreFromDump"); return addToStoreCommon(dump, repair, CheckSigs, [&](HashResult nar) { ValidPathInfo info { - makeFixedOutputPath(method, nar.first, name, references), + *this, + name, + FixedOutputInfo { + .hash = { + .method = method, + .hash = nar.first, + }, + .references = { + .others = references, + // caller is not capable of creating a self-reference, because this is content-addressed without modulus + .self = false, + }, + }, nar.first, }; info.narSize = nar.second; - info.references = references; return info; })->path; } @@ -414,15 +425,22 @@ StorePath BinaryCacheStore::addToStore( }); return addToStoreCommon(*source, repair, CheckSigs, [&](HashResult nar) { ValidPathInfo info { - makeFixedOutputPath(method, h, name, references), + *this, + name, + FixedOutputInfo { + .hash = { + .method = method, + .hash = h, + }, + .references = { + .others = references, + // caller is not capable of creating a self-reference, because this is content-addressed without modulus + .self = false, + }, + }, nar.first, }; info.narSize = nar.second; - info.references = references; - info.ca = FixedOutputHash { - .method = method, - .hash = h, - }; return info; })->path; } @@ -434,7 +452,7 @@ StorePath BinaryCacheStore::addTextToStore( RepairFlag repair) { auto textHash = hashString(htSHA256, s); - auto path = makeTextPath(name, textHash, references); + auto path = makeTextPath(name, TextInfo { { textHash }, references }); if (!repair && isValidPath(path)) return path; @@ -443,10 +461,16 @@ StorePath BinaryCacheStore::addTextToStore( dumpString(s, sink); StringSource source(sink.s); return addToStoreCommon(source, repair, CheckSigs, [&](HashResult nar) { - ValidPathInfo info { path, nar.first }; + ValidPathInfo info { + *this, + std::string { name }, + TextInfo { + { .hash = textHash }, + references, + }, + nar.first, + }; info.narSize = nar.second; - info.ca = TextHash { textHash }; - info.references = references; return info; })->path; } diff --git a/src/libstore/binary-cache-store.hh b/src/libstore/binary-cache-store.hh index abd92a83c..49f271d24 100644 --- a/src/libstore/binary-cache-store.hh +++ b/src/libstore/binary-cache-store.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "crypto.hh" #include "store-api.hh" @@ -16,19 +17,40 @@ struct BinaryCacheStoreConfig : virtual StoreConfig { using StoreConfig::StoreConfig; - const Setting compression{(StoreConfig*) this, "xz", "compression", "NAR compression method ('xz', 'bzip2', 'gzip', 'zstd', or 'none')"}; - const Setting writeNARListing{(StoreConfig*) this, false, "write-nar-listing", "whether to write a JSON file listing the files in each NAR"}; - const Setting writeDebugInfo{(StoreConfig*) this, false, "index-debug-info", "whether to index DWARF debug info files by build ID"}; - const Setting secretKeyFile{(StoreConfig*) this, "", "secret-key", "path to secret key used to sign the binary cache"}; - const Setting localNarCache{(StoreConfig*) this, "", 
"local-nar-cache", "path to a local cache of NARs"}; + const Setting compression{(StoreConfig*) this, "xz", "compression", + "NAR compression method (`xz`, `bzip2`, `gzip`, `zstd`, or `none`)."}; + + const Setting writeNARListing{(StoreConfig*) this, false, "write-nar-listing", + "Whether to write a JSON file that lists the files in each NAR."}; + + const Setting writeDebugInfo{(StoreConfig*) this, false, "index-debug-info", + R"( + Whether to index DWARF debug info files by build ID. This allows [`dwarffs`](https://github.com/edolstra/dwarffs) to + fetch debug info on demand + )"}; + + const Setting secretKeyFile{(StoreConfig*) this, "", "secret-key", + "Path to the secret key used to sign the binary cache."}; + + const Setting localNarCache{(StoreConfig*) this, "", "local-nar-cache", + "Path to a local cache of NARs fetched from this binary cache, used by commands such as `nix store cat`."}; + const Setting parallelCompression{(StoreConfig*) this, false, "parallel-compression", - "enable multi-threading compression for NARs, available for xz and zstd only currently"}; + "Enable multi-threaded compression of NARs. This is currently only available for `xz` and `zstd`."}; + const Setting compressionLevel{(StoreConfig*) this, -1, "compression-level", - "specify 'preset level' of compression to be used with NARs: " - "meaning and accepted range of values depends on compression method selected, " - "other than -1 which we reserve to indicate Nix defaults should be used"}; + R"( + The *preset level* to be used when compressing NARs. + The meaning and accepted values depend on the compression method selected. + `-1` specifies that the default compression level should be used. + )"}; }; + +/** + * @note subclasses must implement at least one of the two + * virtual getFile() methods. + */ class BinaryCacheStore : public virtual BinaryCacheStoreConfig, public virtual Store, public virtual LogStore @@ -58,14 +80,15 @@ public: std::string && data, const std::string & mimeType); - /* Note: subclasses must implement at least one of the two - following getFile() methods. */ - - /* Dump the contents of the specified file to a sink. */ + /** + * Dump the contents of the specified file to a sink. + */ virtual void getFile(const std::string & path, Sink & sink); - /* Fetch the specified file and call the specified callback with - the result. A subclass may implement this asynchronously. */ + /** + * Fetch the specified file and call the specified callback with + * the result. A subclass may implement this asynchronously. + */ virtual void getFile( const std::string & path, Callback> callback) noexcept; diff --git a/src/libstore/build-result.hh b/src/libstore/build-result.hh index a5749cf33..b7a56e791 100644 --- a/src/libstore/build-result.hh +++ b/src/libstore/build-result.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "realisation.hh" #include "derived-path.hh" @@ -11,9 +12,12 @@ namespace nix { struct BuildResult { - /* Note: don't remove status codes, and only add new status codes - at the end of the list, to prevent client/server - incompatibilities in the nix-store --serve protocol. */ + /** + * @note This is directly used in the nix-store --serve protocol. + * That means we need to worry about compatability across versions. + * Therefore, don't remove status codes, and only add new status + * codes at the end of the list. 
+ */ enum Status { Built = 0, Substituted, @@ -21,8 +25,10 @@ struct BuildResult PermanentFailure, InputRejected, OutputRejected, - TransientFailure, // possibly transient - CachedFailure, // no longer used + /// possibly transient + TransientFailure, + /// no longer used + CachedFailure, TimedOut, MiscFailure, DependencyFailed, @@ -32,7 +38,12 @@ struct BuildResult NoSubstituters, } status = MiscFailure; - // FIXME: include entire ErrorInfo object. + /** + * Information about the error if the build failed. + * + * @todo This should be an entire ErrorInfo object, not just a + * string, for richer information. + */ std::string errorMsg; std::string toString() const { @@ -52,33 +63,41 @@ struct BuildResult case LogLimitExceeded: return "LogLimitExceeded"; case NotDeterministic: return "NotDeterministic"; case ResolvesToAlreadyValid: return "ResolvesToAlreadyValid"; + case NoSubstituters: return "NoSubstituters"; default: return "Unknown"; }; }(); return strStatus + ((errorMsg == "") ? "" : " : " + errorMsg); } - /* How many times this build was performed. */ + /** + * How many times this build was performed. + */ unsigned int timesBuilt = 0; - /* If timesBuilt > 1, whether some builds did not produce the same - result. (Note that 'isNonDeterministic = false' does not mean - the build is deterministic, just that we don't have evidence of - non-determinism.) */ + /** + * If timesBuilt > 1, whether some builds did not produce the same + * result. (Note that 'isNonDeterministic = false' does not mean + * the build is deterministic, just that we don't have evidence of + * non-determinism.) + */ bool isNonDeterministic = false; - /* The derivation we built or the store path we substituted. */ - DerivedPath path; + /** + * For derivations, a mapping from the names of the wanted outputs + * to actual paths. + */ + SingleDrvOutputs builtOutputs; - /* For derivations, a mapping from the names of the wanted outputs - to actual paths. */ - DrvOutputs builtOutputs; - - /* The start/stop times of the build (or one of the rounds, if it - was repeated). */ + /** + * The start/stop times of the build (or one of the rounds, if it + * was repeated). + */ time_t startTime = 0, stopTime = 0; - /* User and system CPU time the build took. */ + /** + * User and system CPU time the build took. + */ std::optional cpuUser, cpuSystem; bool success() @@ -92,4 +111,15 @@ struct BuildResult } }; +/** + * A `BuildResult` together with its "primary key". + */ +struct KeyedBuildResult : BuildResult +{ + /** + * The derivation we built or the store path we substituted. + */ + DerivedPath path; +}; + } diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc index 2021d0023..a4bb94b0e 100644 --- a/src/libstore/build/derivation-goal.cc +++ b/src/libstore/build/derivation-goal.cc @@ -145,8 +145,20 @@ void DerivationGoal::work() void DerivationGoal::addWantedOutputs(const OutputsSpec & outputs) { auto newWanted = wantedOutputs.union_(outputs); - if (!newWanted.isSubsetOf(wantedOutputs)) - needRestart = true; + switch (needRestart) { + case NeedRestartForMoreOutputs::OutputsUnmodifedDontNeed: + if (!newWanted.isSubsetOf(wantedOutputs)) + needRestart = NeedRestartForMoreOutputs::OutputsAddedDoNeed; + break; + case NeedRestartForMoreOutputs::OutputsAddedDoNeed: + /* No need to check whether we added more outputs, because a + restart is already queued up. 
*/ + break; + case NeedRestartForMoreOutputs::BuildInProgressWillNotNeed: + /* We are already building all outputs, so it doesn't matter if + we now want more. */ + break; + }; wantedOutputs = newWanted; } @@ -199,10 +211,10 @@ void DerivationGoal::haveDerivation() parsedDrv = std::make_unique(drvPath, *drv); if (!drv->type().hasKnownOutputPaths()) - settings.requireExperimentalFeature(Xp::CaDerivations); + experimentalFeatureSettings.require(Xp::CaDerivations); if (!drv->type().isPure()) { - settings.requireExperimentalFeature(Xp::ImpureDerivations); + experimentalFeatureSettings.require(Xp::ImpureDerivations); for (auto & [outputName, output] : drv->outputs) { auto randomPath = StorePath::random(outputPathName(drv->name, outputName)); @@ -297,12 +309,29 @@ void DerivationGoal::outputsSubstitutionTried() In particular, it may be the case that the hole in the closure is an output of the current derivation, which causes a loop if retried. */ - if (nrIncompleteClosure > 0 && nrIncompleteClosure == nrFailed) retrySubstitution = true; + { + bool substitutionFailed = + nrIncompleteClosure > 0 && + nrIncompleteClosure == nrFailed; + switch (retrySubstitution) { + case RetrySubstitution::NoNeed: + if (substitutionFailed) + retrySubstitution = RetrySubstitution::YesNeed; + break; + case RetrySubstitution::YesNeed: + // Should not be able to reach this state from here. + assert(false); + break; + case RetrySubstitution::AlreadyRetried: + debug("substitution failed again, but we already retried once. Not retrying again."); + break; + } + } nrFailed = nrNoSubstituters = nrIncompleteClosure = 0; - if (needRestart) { - needRestart = false; + if (needRestart == NeedRestartForMoreOutputs::OutputsAddedDoNeed) { + needRestart = NeedRestartForMoreOutputs::OutputsUnmodifedDontNeed; haveDerivation(); return; } @@ -330,13 +359,17 @@ void DerivationGoal::outputsSubstitutionTried() produced using a substitute. So we have to build instead. */ void DerivationGoal::gaveUpOnSubstitution() { + /* At this point we are building all outputs, so if more are wanted there + is no need to restart. */ + needRestart = NeedRestartForMoreOutputs::BuildInProgressWillNotNeed; + /* The inputs must be built before we can build this goal. */ inputDrvOutputs.clear(); if (useDerivation) for (auto & i : dynamic_cast(drv.get())->inputDrvs) { /* Ensure that pure, non-fixed-output derivations don't depend on impure derivations. */ - if (settings.isExperimentalFeatureEnabled(Xp::ImpureDerivations) && drv->type().isPure() && !drv->type().isFixed()) { + if (experimentalFeatureSettings.isEnabled(Xp::ImpureDerivations) && drv->type().isPure() && !drv->type().isFixed()) { auto inputDrv = worker.evalStore.readDerivation(i.first); if (!inputDrv.type().isPure()) throw Error("pure derivation '%s' depends on impure derivation '%s'", @@ -451,8 +484,8 @@ void DerivationGoal::inputsRealised() return; } - if (retrySubstitution && !retriedSubstitution) { - retriedSubstitution = true; + if (retrySubstitution == RetrySubstitution::YesNeed) { + retrySubstitution = RetrySubstitution::AlreadyRetried; haveDerivation(); return; } @@ -477,7 +510,7 @@ void DerivationGoal::inputsRealised() ca.fixed /* Can optionally resolve if fixed, which is good for avoiding unnecessary rebuilds. */ - ? settings.isExperimentalFeatureEnabled(Xp::CaDerivations) + ? experimentalFeatureSettings.isEnabled(Xp::CaDerivations) /* Must resolve if floating and there are any inputs drvs. 
*/ : true); @@ -488,7 +521,7 @@ void DerivationGoal::inputsRealised() }, drvType.raw()); if (resolveDrv && !fullDrv.inputDrvs.empty()) { - settings.requireExperimentalFeature(Xp::CaDerivations); + experimentalFeatureSettings.require(Xp::CaDerivations); /* We are be able to resolve this derivation based on the now-known results of dependencies. If so, we become a @@ -570,8 +603,6 @@ void DerivationGoal::inputsRealised() build hook. */ state = &DerivationGoal::tryToBuild; worker.wakeUp(shared_from_this()); - - buildResult = BuildResult { .path = buildResult.path }; } void DerivationGoal::started() @@ -732,7 +763,7 @@ void replaceValidPath(const Path & storePath, const Path & tmpPath) tmpPath (the replacement), so we have to move it out of the way first. We'd better not be interrupted here, because if we're repairing (say) Glibc, we end up with a broken system. */ - Path oldPath = (format("%1%.old-%2%-%3%") % storePath % getpid() % random()).str(); + Path oldPath = fmt("%1%.old-%2%-%3%", storePath, getpid(), random()); if (pathExists(storePath)) movePath(storePath, oldPath); @@ -911,7 +942,11 @@ void DerivationGoal::buildDone() msg += line; msg += "\n"; } - msg += fmt("For full logs, run '" ANSI_BOLD "nix log %s" ANSI_NORMAL "'.", + auto nixLogCommand = experimentalFeatureSettings.isEnabled(Xp::NixCommand) + ? "nix log" + : "nix-store -l"; + msg += fmt("For full logs, run '" ANSI_BOLD "%s %s" ANSI_NORMAL "'.", + nixLogCommand, worker.store.printStorePath(drvPath)); } @@ -978,7 +1013,7 @@ void DerivationGoal::resolvedFinished() auto resolvedDrv = *resolvedDrvGoal->drv; auto & resolvedResult = resolvedDrvGoal->buildResult; - DrvOutputs builtOutputs; + SingleDrvOutputs builtOutputs; if (resolvedResult.success()) { auto resolvedHashes = staticOutputHashes(worker.store, resolvedDrv); @@ -1004,7 +1039,7 @@ void DerivationGoal::resolvedFinished() worker.store.printStorePath(drvPath), wantedOutput); auto realisation = [&]{ - auto take1 = get(resolvedResult.builtOutputs, DrvOutput { *resolvedHash, wantedOutput }); + auto take1 = get(resolvedResult.builtOutputs, wantedOutput); if (take1) return *take1; /* The above `get` should work. But sateful tracking of @@ -1029,7 +1064,7 @@ void DerivationGoal::resolvedFinished() worker.store.registerDrvOutput(newRealisation); } outputPaths.insert(realisation.outPath); - builtOutputs.emplace(realisation.id, realisation); + builtOutputs.emplace(wantedOutput, realisation); } runPostBuildHook( @@ -1154,7 +1189,7 @@ HookReply DerivationGoal::tryBuildHook() } -DrvOutputs DerivationGoal::registerOutputs() +SingleDrvOutputs DerivationGoal::registerOutputs() { /* When using a build hook, the build hook can register the output as valid (by doing `nix-store --import'). 
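// `checkPathValidity()` now reports its results as `SingleDrvOutputs`, i.e.
// keyed by plain output name instead of by full `DrvOutput`. A small sketch
// of the difference, assuming a `DrvOutput drvOutput` and a `StorePath
// outPath` are in scope:
SingleDrvOutputs byName;
byName.emplace("out", Realisation { drvOutput, outPath });      // new: name -> realisation
// DrvOutputs byId;
// byId.emplace(drvOutput, Realisation { drvOutput, outPath }); // old: DrvOutput -> realisation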
If so we don't have @@ -1316,7 +1351,7 @@ OutputPathMap DerivationGoal::queryDerivationOutputMap() } -std::pair DerivationGoal::checkPathValidity() +std::pair DerivationGoal::checkPathValidity() { if (!drv->type().isPure()) return { false, {} }; @@ -1329,7 +1364,7 @@ std::pair DerivationGoal::checkPathValidity() return static_cast(names); }, }, wantedOutputs.raw()); - DrvOutputs validOutputs; + SingleDrvOutputs validOutputs; for (auto & i : queryPartialDerivationOutputMap()) { auto initialOutput = get(initialOutputs, i.first); @@ -1352,7 +1387,7 @@ std::pair DerivationGoal::checkPathValidity() }; } auto drvOutput = DrvOutput{info.outputHash, i.first}; - if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) { + if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) { if (auto real = worker.store.queryRealisation(drvOutput)) { info.known = { .path = real->outPath, @@ -1372,7 +1407,7 @@ std::pair DerivationGoal::checkPathValidity() } } if (info.wanted && info.known && info.known->isValid()) - validOutputs.emplace(drvOutput, Realisation { drvOutput, info.known->path }); + validOutputs.emplace(i.first, Realisation { drvOutput, info.known->path }); } // If we requested all the outputs, we are always fine. @@ -1396,7 +1431,7 @@ std::pair DerivationGoal::checkPathValidity() } -DrvOutputs DerivationGoal::assertPathValidity() +SingleDrvOutputs DerivationGoal::assertPathValidity() { auto [allValid, validOutputs] = checkPathValidity(); if (!allValid) @@ -1407,7 +1442,7 @@ DrvOutputs DerivationGoal::assertPathValidity() void DerivationGoal::done( BuildResult::Status status, - DrvOutputs builtOutputs, + SingleDrvOutputs builtOutputs, std::optional ex) { buildResult.status = status; @@ -1448,12 +1483,28 @@ void DerivationGoal::waiteeDone(GoalPtr waitee, ExitCode result) { Goal::waiteeDone(waitee, result); - if (waitee->buildResult.success()) - if (auto bfd = std::get_if(&waitee->buildResult.path)) - for (auto & [output, realisation] : waitee->buildResult.builtOutputs) + if (!useDerivation) return; + auto & fullDrv = *dynamic_cast(drv.get()); + + auto * dg = dynamic_cast(&*waitee); + if (!dg) return; + + auto outputs = fullDrv.inputDrvs.find(dg->drvPath); + if (outputs == fullDrv.inputDrvs.end()) return; + + for (auto & outputName : outputs->second) { + auto buildResult = dg->getBuildResult(DerivedPath::Built { + .drvPath = dg->drvPath, + .outputs = OutputsSpec::Names { outputName }, + }); + if (buildResult.success()) { + auto i = buildResult.builtOutputs.find(outputName); + if (i != buildResult.builtOutputs.end()) inputDrvOutputs.insert_or_assign( - { bfd->drvPath, output.outputName }, - realisation.outPath); + { dg->drvPath, outputName }, + i->second.outPath); + } + } } } diff --git a/src/libstore/build/derivation-goal.hh b/src/libstore/build/derivation-goal.hh index 707e38b4b..7033b7a58 100644 --- a/src/libstore/build/derivation-goal.hh +++ b/src/libstore/build/derivation-goal.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "parsed-derivations.hh" #include "lock.hh" @@ -15,8 +16,10 @@ struct HookInstance; typedef enum {rpAccept, rpDecline, rpPostpone} HookReply; -/* Unless we are repairing, we don't both to test validity and just assume it, - so the choices are `Absent` or `Valid`. */ +/** + * Unless we are repairing, we don't both to test validity and just assume it, + * so the choices are `Absent` or `Valid`. 
+ */ enum struct PathStatus { Corrupt, Absent, @@ -26,11 +29,15 @@ enum struct PathStatus { struct InitialOutputStatus { StorePath path; PathStatus status; - /* Valid in the store, and additionally non-corrupt if we are repairing */ + /** + * Valid in the store, and additionally non-corrupt if we are repairing + */ bool isValid() const { return status == PathStatus::Valid; } - /* Merely present, allowed to be corrupt */ + /** + * Merely present, allowed to be corrupt + */ bool isPresent() const { return status == PathStatus::Corrupt || status == PathStatus::Valid; @@ -45,59 +52,123 @@ struct InitialOutput { struct DerivationGoal : public Goal { - /* Whether to use an on-disk .drv file. */ + /** + * Whether to use an on-disk .drv file. + */ bool useDerivation; - /* The path of the derivation. */ + /** The path of the derivation. */ StorePath drvPath; - /* The goal for the corresponding resolved derivation */ + /** + * The goal for the corresponding resolved derivation + */ std::shared_ptr resolvedDrvGoal; - /* The specific outputs that we need to build. Empty means all of - them. */ + /** + * The specific outputs that we need to build. Empty means all of + * them. + */ OutputsSpec wantedOutputs; - /* Mapping from input derivations + output names to actual store - paths. This is filled in by waiteeDone() as each dependency - finishes, before inputsRealised() is reached, */ + /** + * Mapping from input derivations + output names to actual store + * paths. This is filled in by waiteeDone() as each dependency + * finishes, before inputsRealised() is reached. + */ std::map, StorePath> inputDrvOutputs; - /* Whether additional wanted outputs have been added. */ - bool needRestart = false; + /** + * See `needRestart`; just for that field. + */ + enum struct NeedRestartForMoreOutputs { + /** + * The goal state machine is progressing based on the current value of + * `wantedOutputs. No actions are needed. + */ + OutputsUnmodifedDontNeed, + /** + * `wantedOutputs` has been extended, but the state machine is + * proceeding according to its old value, so we need to restart. + */ + OutputsAddedDoNeed, + /** + * The goal state machine has progressed to the point of doing a build, + * in which case all outputs will be produced, so extensions to + * `wantedOutputs` no longer require a restart. + */ + BuildInProgressWillNotNeed, + }; - /* Whether to retry substituting the outputs after building the - inputs. This is done in case of an incomplete closure. */ - bool retrySubstitution = false; + /** + * Whether additional wanted outputs have been added. + */ + NeedRestartForMoreOutputs needRestart = NeedRestartForMoreOutputs::OutputsUnmodifedDontNeed; - /* Whether we've retried substitution, in which case we won't try - again. */ - bool retriedSubstitution = false; + /** + * See `retrySubstitution`; just for that field. + */ + enum RetrySubstitution { + /** + * No issues have yet arose, no need to restart. + */ + NoNeed, + /** + * Something failed and there is an incomplete closure. Let's retry + * substituting. + */ + YesNeed, + /** + * We are current or have already retried substitution, and whether or + * not something goes wrong we will not retry again. + */ + AlreadyRetried, + }; - /* The derivation stored at drvPath. */ + /** + * Whether to retry substituting the outputs after building the + * inputs. This is done in case of an incomplete closure. + */ + RetrySubstitution retrySubstitution = RetrySubstitution::NoNeed; + + /** + * The derivation stored at drvPath. 
+ */ std::unique_ptr drv; std::unique_ptr parsedDrv; - /* The remainder is state held during the build. */ + /** + * The remainder is state held during the build. + */ - /* Locks on (fixed) output paths. */ + /** + * Locks on (fixed) output paths. + */ PathLocks outputLocks; - /* All input paths (that is, the union of FS closures of the - immediate input paths). */ + /** + * All input paths (that is, the union of FS closures of the + * immediate input paths). + */ StorePathSet inputPaths; std::map initialOutputs; - /* File descriptor for the log file. */ + /** + * File descriptor for the log file. + */ AutoCloseFD fdLogFile; std::shared_ptr logFileSink, logSink; - /* Number of bytes received from the builder's stdout/stderr. */ + /** + * Number of bytes received from the builder's stdout/stderr. + */ unsigned long logSize; - /* The most recent log lines. */ + /** + * The most recent log lines. + */ std::list logTail; std::string currentLogLine; @@ -105,10 +176,14 @@ struct DerivationGoal : public Goal std::string currentHookLine; - /* The build hook. */ + /** + * The build hook. + */ std::unique_ptr hook; - /* The sort of derivation we are building. */ + /** + * The sort of derivation we are building. + */ DerivationType derivationType; typedef void (DerivationGoal::*GoalState)(); @@ -120,12 +195,16 @@ struct DerivationGoal : public Goal std::unique_ptr act; - /* Activity that denotes waiting for a lock. */ + /** + * Activity that denotes waiting for a lock. + */ std::unique_ptr actLock; std::map builderActivities; - /* The remote machine on which we're building. */ + /** + * The remote machine on which we're building. + */ std::string machineName; DerivationGoal(const StorePath & drvPath, @@ -142,10 +221,14 @@ struct DerivationGoal : public Goal void work() override; - /* Add wanted outputs to an already existing derivation goal. */ + /** + * Add wanted outputs to an already existing derivation goal. + */ void addWantedOutputs(const OutputsSpec & outputs); - /* The states. */ + /** + * The states. + */ void getDerivation(); void loadDerivation(); void haveDerivation(); @@ -159,28 +242,42 @@ struct DerivationGoal : public Goal void resolvedFinished(); - /* Is the build hook willing to perform the build? */ + /** + * Is the build hook willing to perform the build? + */ HookReply tryBuildHook(); virtual int getChildStatus(); - /* Check that the derivation outputs all exist and register them - as valid. */ - virtual DrvOutputs registerOutputs(); + /** + * Check that the derivation outputs all exist and register them + * as valid. + */ + virtual SingleDrvOutputs registerOutputs(); - /* Open a log file and a pipe to it. */ + /** + * Open a log file and a pipe to it. + */ Path openLogFile(); - /* Sign the newly built realisation if the store allows it */ + /** + * Sign the newly built realisation if the store allows it + */ virtual void signRealisation(Realisation&) {} - /* Close the log file. */ + /** + * Close the log file. + */ void closeLogFile(); - /* Close the read side of the logger pipe. */ + /** + * Close the read side of the logger pipe. + */ virtual void closeReadPipes(); - /* Cleanup hooks for buildDone() */ + /** + * Cleanup hooks for buildDone() + */ virtual void cleanupHookFinally(); virtual void cleanupPreChildKill(); virtual void cleanupPostChildKill(); @@ -190,30 +287,40 @@ struct DerivationGoal : public Goal virtual bool isReadDesc(int fd); - /* Callback used by the worker to write to the log. */ + /** + * Callback used by the worker to write to the log. 
+ */ void handleChildOutput(int fd, std::string_view data) override; void handleEOF(int fd) override; void flushLine(); - /* Wrappers around the corresponding Store methods that first consult the - derivation. This is currently needed because when there is no drv file - there also is no DB entry. */ + /** + * Wrappers around the corresponding Store methods that first consult the + * derivation. This is currently needed because when there is no drv file + * there also is no DB entry. + */ std::map> queryPartialDerivationOutputMap(); OutputPathMap queryDerivationOutputMap(); - /* Update 'initialOutputs' to determine the current status of the - outputs of the derivation. Also returns a Boolean denoting - whether all outputs are valid and non-corrupt, and a - 'DrvOutputs' structure containing the valid and wanted - outputs. */ - std::pair checkPathValidity(); + /** + * Update 'initialOutputs' to determine the current status of the + * outputs of the derivation. Also returns a Boolean denoting + * whether all outputs are valid and non-corrupt, and a + * 'SingleDrvOutputs' structure containing the valid and wanted + * outputs. + */ + std::pair checkPathValidity(); - /* Aborts if any output is not valid or corrupt, and otherwise - returns a 'DrvOutputs' structure containing the wanted - outputs. */ - DrvOutputs assertPathValidity(); + /** + * Aborts if any output is not valid or corrupt, and otherwise + * returns a 'SingleDrvOutputs' structure containing the wanted + * outputs. + */ + SingleDrvOutputs assertPathValidity(); - /* Forcibly kill the child process, if any. */ + /** + * Forcibly kill the child process, if any. + */ virtual void killChild(); void repairClosure(); @@ -222,7 +329,7 @@ struct DerivationGoal : public Goal void done( BuildResult::Status status, - DrvOutputs builtOutputs = {}, + SingleDrvOutputs builtOutputs = {}, std::optional ex = {}); void waiteeDone(GoalPtr waitee, ExitCode result) override; diff --git a/src/libstore/build/drv-output-substitution-goal.cc b/src/libstore/build/drv-output-substitution-goal.cc index b7f7b5ab1..b30957c84 100644 --- a/src/libstore/build/drv-output-substitution-goal.cc +++ b/src/libstore/build/drv-output-substitution-goal.cc @@ -61,20 +61,25 @@ void DrvOutputSubstitutionGoal::tryNext() // FIXME: Make async // outputInfo = sub->queryRealisation(id); - outPipe.create(); - promise = decltype(promise)(); + + /* The callback of the curl download below can outlive `this` (if + some other error occurs), so it must not touch `this`. So put + the shared state in a separate refcounted object. */ + downloadState = std::make_shared(); + downloadState->outPipe.create(); sub->queryRealisation( - id, { [&](std::future> res) { + id, + { [downloadState(downloadState)](std::future> res) { try { - Finally updateStats([this]() { outPipe.writeSide.close(); }); - promise.set_value(res.get()); + Finally updateStats([&]() { downloadState->outPipe.writeSide.close(); }); + downloadState->promise.set_value(res.get()); } catch (...) 
{ - promise.set_exception(std::current_exception()); + downloadState->promise.set_exception(std::current_exception()); } } }); - worker.childStarted(shared_from_this(), {outPipe.readSide.get()}, true, false); + worker.childStarted(shared_from_this(), {downloadState->outPipe.readSide.get()}, true, false); state = &DrvOutputSubstitutionGoal::realisationFetched; } @@ -84,7 +89,7 @@ void DrvOutputSubstitutionGoal::realisationFetched() worker.childTerminated(this); try { - outputInfo = promise.get_future().get(); + outputInfo = downloadState->promise.get_future().get(); } catch (std::exception & e) { printError(e.what()); substituterFailed = true; @@ -155,7 +160,7 @@ void DrvOutputSubstitutionGoal::work() void DrvOutputSubstitutionGoal::handleEOF(int fd) { - if (fd == outPipe.readSide.get()) worker.wakeUp(shared_from_this()); + if (fd == downloadState->outPipe.readSide.get()) worker.wakeUp(shared_from_this()); } diff --git a/src/libstore/build/drv-output-substitution-goal.hh b/src/libstore/build/drv-output-substitution-goal.hh index 948dbda8f..697ddb283 100644 --- a/src/libstore/build/drv-output-substitution-goal.hh +++ b/src/libstore/build/drv-output-substitution-goal.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "store-api.hh" #include "goal.hh" @@ -10,31 +11,47 @@ namespace nix { class Worker; -// Substitution of a derivation output. -// This is done in three steps: -// 1. Fetch the output info from a substituter -// 2. Substitute the corresponding output path -// 3. Register the output info +/** + * Substitution of a derivation output. + * This is done in three steps: + * 1. Fetch the output info from a substituter + * 2. Substitute the corresponding output path + * 3. Register the output info + */ class DrvOutputSubstitutionGoal : public Goal { -private: - // The drv output we're trying to substitue + + /** + * The drv output we're trying to substitue + */ DrvOutput id; - // The realisation corresponding to the given output id. - // Will be filled once we can get it. + /** + * The realisation corresponding to the given output id. + * Will be filled once we can get it. + */ std::shared_ptr outputInfo; - /* The remaining substituters. */ + /** + * The remaining substituters. + */ std::list> subs; - /* The current substituter. */ + /** + * The current substituter. + */ std::shared_ptr sub; - Pipe outPipe; - std::thread thr; - std::promise> promise; + struct DownloadState + { + Pipe outPipe; + std::promise> promise; + }; - /* Whether a substituter failed. */ + std::shared_ptr downloadState; + + /** + * Whether a substituter failed. + */ bool substituterFailed = false; public: diff --git a/src/libstore/build/entry-points.cc b/src/libstore/build/entry-points.cc index 2925fe3ca..74eae0692 100644 --- a/src/libstore/build/entry-points.cc +++ b/src/libstore/build/entry-points.cc @@ -10,16 +10,8 @@ void Store::buildPaths(const std::vector & reqs, BuildMode buildMod Worker worker(*this, evalStore ? *evalStore : *this); Goals goals; - for (const auto & br : reqs) { - std::visit(overloaded { - [&](const DerivedPath::Built & bfd) { - goals.insert(worker.makeDerivationGoal(bfd.drvPath, bfd.outputs, buildMode)); - }, - [&](const DerivedPath::Opaque & bo) { - goals.insert(worker.makePathSubstitutionGoal(bo.path, buildMode == bmRepair ? 
Repair : NoRepair)); - }, - }, br.raw()); - } + for (auto & br : reqs) + goals.insert(worker.makeGoal(br, buildMode)); worker.run(goals); @@ -47,7 +39,7 @@ void Store::buildPaths(const std::vector & reqs, BuildMode buildMod } } -std::vector Store::buildPathsWithResults( +std::vector Store::buildPathsWithResults( const std::vector & reqs, BuildMode buildMode, std::shared_ptr evalStore) @@ -55,23 +47,23 @@ std::vector Store::buildPathsWithResults( Worker worker(*this, evalStore ? *evalStore : *this); Goals goals; - for (const auto & br : reqs) { - std::visit(overloaded { - [&](const DerivedPath::Built & bfd) { - goals.insert(worker.makeDerivationGoal(bfd.drvPath, bfd.outputs, buildMode)); - }, - [&](const DerivedPath::Opaque & bo) { - goals.insert(worker.makePathSubstitutionGoal(bo.path, buildMode == bmRepair ? Repair : NoRepair)); - }, - }, br.raw()); + std::vector> state; + + for (const auto & req : reqs) { + auto goal = worker.makeGoal(req, buildMode); + goals.insert(goal); + state.push_back({req, goal}); } worker.run(goals); - std::vector results; + std::vector results; - for (auto & i : goals) - results.push_back(i->buildResult); + for (auto & [req, goalPtr] : state) + results.emplace_back(KeyedBuildResult { + goalPtr->getBuildResult(req), + /* .path = */ req, + }); return results; } @@ -84,15 +76,14 @@ BuildResult Store::buildDerivation(const StorePath & drvPath, const BasicDerivat try { worker.run(Goals{goal}); - return goal->buildResult; + return goal->getBuildResult(DerivedPath::Built { + .drvPath = drvPath, + .outputs = OutputsSpec::All {}, + }); } catch (Error & e) { return BuildResult { .status = BuildResult::MiscFailure, .errorMsg = e.msg(), - .path = DerivedPath::Built { - .drvPath = drvPath, - .outputs = OutputsSpec::All { }, - }, }; }; } diff --git a/src/libstore/build/goal.cc b/src/libstore/build/goal.cc index 58e805f55..ca7097a68 100644 --- a/src/libstore/build/goal.cc +++ b/src/libstore/build/goal.cc @@ -11,6 +11,29 @@ bool CompareGoalPtrs::operator() (const GoalPtr & a, const GoalPtr & b) const { } +BuildResult Goal::getBuildResult(const DerivedPath & req) { + BuildResult res { buildResult }; + + if (auto pbp = std::get_if(&req)) { + auto & bp = *pbp; + + /* Because goals are in general shared between derived paths + that share the same derivation, we need to filter their + results to get back just the results we care about. + */ + + for (auto it = res.builtOutputs.begin(); it != res.builtOutputs.end();) { + if (bp.outputs.contains(it->first)) + ++it; + else + it = res.builtOutputs.erase(it); + } + } + + return res; +} + + void addToWeakGoals(WeakGoals & goals, GoalPtr p) { if (goals.find(p) != goals.end()) @@ -78,9 +101,9 @@ void Goal::amDone(ExitCode result, std::optional ex) } -void Goal::trace(const FormatOrString & fs) +void Goal::trace(std::string_view s) { - debug("%1%: %2%", name, fs.s); + debug("%1%: %2%", name, s); } } diff --git a/src/libstore/build/goal.hh b/src/libstore/build/goal.hh index 35121c5d9..c0e12a2ed 100644 --- a/src/libstore/build/goal.hh +++ b/src/libstore/build/goal.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "types.hh" #include "store-api.hh" @@ -6,11 +7,15 @@ namespace nix { -/* Forward definition. */ +/** + * Forward definition. + */ struct Goal; class Worker; -/* A pointer to a goal. */ +/** + * A pointer to a goal. + */ typedef std::shared_ptr GoalPtr; typedef std::weak_ptr WeakGoalPtr; @@ -18,53 +23,91 @@ struct CompareGoalPtrs { bool operator() (const GoalPtr & a, const GoalPtr & b) const; }; -/* Set of goals. 
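// A sketch of consuming the reworked result API, assuming a `ref<Store> store`
// and a `std::vector<DerivedPath> reqs` are in scope:
for (auto & res : store->buildPathsWithResults(reqs)) {
    if (!res.success())
        printError("building '%s' failed: %s", res.path.to_string(*store), res.errorMsg);
    else
        for (auto & [outputName, realisation] : res.builtOutputs)
            printInfo("output '%s' is at '%s'",
                outputName, store->printStorePath(realisation.outPath));
}
// Because each goal's buildResult may cover more outputs than one request
// asked for, getBuildResult() filters builtOutputs down to the outputs named
// in `res.path` before the result reaches the caller here.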
*/ +/** + * Set of goals. + */ typedef std::set Goals; typedef std::set> WeakGoals; -/* A map of paths to goals (and the other way around). */ +/** + * A map of paths to goals (and the other way around). + */ typedef std::map WeakGoalMap; struct Goal : public std::enable_shared_from_this { typedef enum {ecBusy, ecSuccess, ecFailed, ecNoSubstituters, ecIncompleteClosure} ExitCode; - /* Backlink to the worker. */ + /** + * Backlink to the worker. + */ Worker & worker; - /* Goals that this goal is waiting for. */ + /** + * Goals that this goal is waiting for. + */ Goals waitees; - /* Goals waiting for this one to finish. Must use weak pointers - here to prevent cycles. */ + /** + * Goals waiting for this one to finish. Must use weak pointers + * here to prevent cycles. + */ WeakGoals waiters; - /* Number of goals we are/were waiting for that have failed. */ + /** + * Number of goals we are/were waiting for that have failed. + */ size_t nrFailed = 0; - /* Number of substitution goals we are/were waiting for that - failed because there are no substituters. */ + /** + * Number of substitution goals we are/were waiting for that + * failed because there are no substituters. + */ size_t nrNoSubstituters = 0; - /* Number of substitution goals we are/were waiting for that - failed because they had unsubstitutable references. */ + /** + * Number of substitution goals we are/were waiting for that + * failed because they had unsubstitutable references. + */ size_t nrIncompleteClosure = 0; - /* Name of this goal for debugging purposes. */ + /** + * Name of this goal for debugging purposes. + */ std::string name; - /* Whether the goal is finished. */ + /** + * Whether the goal is finished. + */ ExitCode exitCode = ecBusy; - /* Build result. */ +protected: + /** + * Build result. + */ BuildResult buildResult; - /* Exception containing an error message, if any. */ +public: + + /** + * Project a `BuildResult` with just the information that pertains + * to the given request. + * + * In general, goals may be aliased between multiple requests, and + * the stored `BuildResult` has information for the union of all + * requests. We don't want to leak what the other request are for + * sake of both privacy and determinism, and this "safe accessor" + * ensures we don't. + */ + BuildResult getBuildResult(const DerivedPath &); + + /** + * Exception containing an error message, if any. + */ std::optional ex; Goal(Worker & worker, DerivedPath path) : worker(worker) - , buildResult { .path = std::move(path) } { } virtual ~Goal() @@ -88,16 +131,18 @@ struct Goal : public std::enable_shared_from_this abort(); } - void trace(const FormatOrString & fs); + void trace(std::string_view s); std::string getName() { return name; } - /* Callback in case of a timeout. It should wake up its waiters, - get rid of any running child processes that are being monitored - by the worker (important!), etc. */ + /** + * Callback in case of a timeout. It should wake up its waiters, + * get rid of any running child processes that are being monitored + * by the worker (important!), etc. + */ virtual void timedOut(Error && ex) = 0; virtual std::string key() = 0; diff --git a/src/libstore/build/hook-instance.cc b/src/libstore/build/hook-instance.cc index cb58a1f02..075ad554f 100644 --- a/src/libstore/build/hook-instance.cc +++ b/src/libstore/build/hook-instance.cc @@ -35,7 +35,10 @@ HookInstance::HookInstance() /* Fork the hook. 
*/ pid = startProcess([&]() { - commonChildInit(fromHook); + if (dup2(fromHook.writeSide.get(), STDERR_FILENO) == -1) + throw SysError("cannot pipe standard error into log file"); + + commonChildInit(); if (chdir("/") == -1) throw SysError("changing into /"); diff --git a/src/libstore/build/hook-instance.hh b/src/libstore/build/hook-instance.hh index 9e8cff128..d84f62877 100644 --- a/src/libstore/build/hook-instance.hh +++ b/src/libstore/build/hook-instance.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "logging.hh" #include "serialise.hh" @@ -7,16 +8,24 @@ namespace nix { struct HookInstance { - /* Pipes for talking to the build hook. */ + /** + * Pipes for talking to the build hook. + */ Pipe toHook; - /* Pipe for the hook's standard output/error. */ + /** + * Pipe for the hook's standard output/error. + */ Pipe fromHook; - /* Pipe for the builder's standard output/error. */ + /** + * Pipe for the builder's standard output/error. + */ Pipe builderOut; - /* The process ID of the hook. */ + /** + * The process ID of the hook. + */ Pid pid; FdSink sink; diff --git a/src/libstore/build/local-derivation-goal.cc b/src/libstore/build/local-derivation-goal.cc index a961d8eed..21cd6e7ee 100644 --- a/src/libstore/build/local-derivation-goal.cc +++ b/src/libstore/build/local-derivation-goal.cc @@ -292,7 +292,7 @@ void LocalDerivationGoal::closeReadPipes() if (hook) { DerivationGoal::closeReadPipes(); } else - builderOut.readSide = -1; + builderOut.close(); } @@ -413,7 +413,7 @@ void LocalDerivationGoal::startBuilder() ) { #if __linux__ - settings.requireExperimentalFeature(Xp::Cgroups); + experimentalFeatureSettings.require(Xp::Cgroups); auto cgroupFS = getCgroupFS(); if (!cgroupFS) @@ -650,7 +650,7 @@ void LocalDerivationGoal::startBuilder() /* Clean up the chroot directory automatically. */ autoDelChroot = std::make_shared(chrootRootDir); - printMsg(lvlChatty, format("setting up chroot environment in '%1%'") % chrootRootDir); + printMsg(lvlChatty, "setting up chroot environment in '%1%'", chrootRootDir); // FIXME: make this 0700 if (mkdir(chrootRootDir.c_str(), buildUser && buildUser->getUIDCount() != 1 ? 0755 : 0750) == -1) @@ -753,8 +753,7 @@ void LocalDerivationGoal::startBuilder() throw Error("home directory '%1%' exists; please remove it to assure purity of builds without sandboxing", homeDir); if (useChroot && settings.preBuildHook != "" && dynamic_cast(drv.get())) { - printMsg(lvlChatty, format("executing pre-build hook '%1%'") - % settings.preBuildHook); + printMsg(lvlChatty, "executing pre-build hook '%1%'", settings.preBuildHook); auto args = useChroot ? Strings({worker.store.printStorePath(drvPath), chrootRootDir}) : Strings({ worker.store.printStorePath(drvPath) }); enum BuildHookState { @@ -803,15 +802,13 @@ void LocalDerivationGoal::startBuilder() /* Create the log file. */ Path logFile = openLogFile(); - /* Create a pipe to get the output of the builder. */ - //builderOut.create(); - - builderOut.readSide = posix_openpt(O_RDWR | O_NOCTTY); - if (!builderOut.readSide) + /* Create a pseudoterminal to get the output of the builder. 
*/ + builderOut = posix_openpt(O_RDWR | O_NOCTTY); + if (!builderOut) throw SysError("opening pseudoterminal master"); // FIXME: not thread-safe, use ptsname_r - std::string slaveName(ptsname(builderOut.readSide.get())); + std::string slaveName = ptsname(builderOut.get()); if (buildUser) { if (chmod(slaveName.c_str(), 0600)) @@ -822,34 +819,34 @@ void LocalDerivationGoal::startBuilder() } #if __APPLE__ else { - if (grantpt(builderOut.readSide.get())) + if (grantpt(builderOut.get())) throw SysError("granting access to pseudoterminal slave"); } #endif - #if 0 - // Mount the pt in the sandbox so that the "tty" command works. - // FIXME: this doesn't work with the new devpts in the sandbox. - if (useChroot) - dirsInChroot[slaveName] = {slaveName, false}; - #endif - - if (unlockpt(builderOut.readSide.get())) + if (unlockpt(builderOut.get())) throw SysError("unlocking pseudoterminal"); - builderOut.writeSide = open(slaveName.c_str(), O_RDWR | O_NOCTTY); - if (!builderOut.writeSide) - throw SysError("opening pseudoterminal slave"); + /* Open the slave side of the pseudoterminal and use it as stderr. */ + auto openSlave = [&]() + { + AutoCloseFD builderOut = open(slaveName.c_str(), O_RDWR | O_NOCTTY); + if (!builderOut) + throw SysError("opening pseudoterminal slave"); - // Put the pt into raw mode to prevent \n -> \r\n translation. - struct termios term; - if (tcgetattr(builderOut.writeSide.get(), &term)) - throw SysError("getting pseudoterminal attributes"); + // Put the pt into raw mode to prevent \n -> \r\n translation. + struct termios term; + if (tcgetattr(builderOut.get(), &term)) + throw SysError("getting pseudoterminal attributes"); - cfmakeraw(&term); + cfmakeraw(&term); - if (tcsetattr(builderOut.writeSide.get(), TCSANOW, &term)) - throw SysError("putting pseudoterminal into raw mode"); + if (tcsetattr(builderOut.get(), TCSANOW, &term)) + throw SysError("putting pseudoterminal into raw mode"); + + if (dup2(builderOut.get(), STDERR_FILENO) == -1) + throw SysError("cannot pipe standard error into log file"); + }; buildResult.startTime = time(0); @@ -898,7 +895,16 @@ void LocalDerivationGoal::startBuilder() usingUserNamespace = userNamespacesSupported(); + Pipe sendPid; + sendPid.create(); + Pid helper = startProcess([&]() { + sendPid.readSide.close(); + + /* We need to open the slave early, before + CLONE_NEWUSER. Otherwise we get EPERM when running as + root. */ + openSlave(); /* Drop additional groups here because we can't do it after we've created the new user namespace. 
FIXME: @@ -920,11 +926,12 @@ void LocalDerivationGoal::startBuilder() pid_t child = startProcess([&]() { runChild(); }, options); - writeFull(builderOut.writeSide.get(), - fmt("%d %d\n", usingUserNamespace, child)); + writeFull(sendPid.writeSide.get(), fmt("%d\n", child)); _exit(0); }); + sendPid.writeSide.close(); + if (helper.wait() != 0) throw Error("unable to start build process"); @@ -936,10 +943,9 @@ void LocalDerivationGoal::startBuilder() userNamespaceSync.writeSide = -1; }); - auto ss = tokenizeString>(readLine(builderOut.readSide.get())); - assert(ss.size() == 2); - usingUserNamespace = ss[0] == "1"; - pid = string2Int(ss[1]).value(); + auto ss = tokenizeString>(readLine(sendPid.readSide.get())); + assert(ss.size() == 1); + pid = string2Int(ss[0]).value(); if (usingUserNamespace) { /* Set the UID/GID mapping of the builder's user namespace @@ -994,21 +1000,21 @@ void LocalDerivationGoal::startBuilder() #endif { pid = startProcess([&]() { + openSlave(); runChild(); }); } /* parent */ pid.setSeparatePG(true); - builderOut.writeSide = -1; - worker.childStarted(shared_from_this(), {builderOut.readSide.get()}, true, true); + worker.childStarted(shared_from_this(), {builderOut.get()}, true, true); /* Check if setting up the build environment failed. */ std::vector msgs; while (true) { std::string msg = [&]() { try { - return readLine(builderOut.readSide.get()); + return readLine(builderOut.get()); } catch (Error & e) { auto status = pid.wait(); e.addTrace({}, "while waiting for the build environment for '%s' to initialize (%s, previous messages: %s)", @@ -1020,7 +1026,7 @@ void LocalDerivationGoal::startBuilder() }(); if (msg.substr(0, 1) == "\2") break; if (msg.substr(0, 1) == "\1") { - FdSource source(builderOut.readSide.get()); + FdSource source(builderOut.get()); auto ex = readError(source); ex.addTrace({}, "while setting up the build environment"); throw ex; @@ -1104,7 +1110,7 @@ void LocalDerivationGoal::initEnv() env["NIX_STORE"] = worker.store.storeDir; /* The maximum number of cores to utilize for parallel building. 
*/ - env["NIX_BUILD_CORES"] = (format("%d") % settings.buildCores).str(); + env["NIX_BUILD_CORES"] = fmt("%d", settings.buildCores); initTmpDir(); @@ -1155,10 +1161,10 @@ void LocalDerivationGoal::writeStructuredAttrs() writeFile(tmpDir + "/.attrs.sh", rewriteStrings(jsonSh, inputRewrites)); chownToBuilder(tmpDir + "/.attrs.sh"); - env["NIX_ATTRS_SH_FILE"] = tmpDir + "/.attrs.sh"; + env["NIX_ATTRS_SH_FILE"] = tmpDirInSandbox + "/.attrs.sh"; writeFile(tmpDir + "/.attrs.json", rewriteStrings(json.dump(), inputRewrites)); chownToBuilder(tmpDir + "/.attrs.json"); - env["NIX_ATTRS_JSON_FILE"] = tmpDir + "/.attrs.json"; + env["NIX_ATTRS_JSON_FILE"] = tmpDirInSandbox + "/.attrs.json"; } } @@ -1329,7 +1335,7 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo result.rethrow(); } - std::vector buildPathsWithResults( + std::vector buildPathsWithResults( const std::vector & paths, BuildMode buildMode = bmNormal, std::shared_ptr evalStore = nullptr) override @@ -1409,12 +1415,15 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo virtual void addBuildLog(const StorePath & path, std::string_view log) override { unsupported("addBuildLog"); } + + std::optional isTrustedClient() override + { return NotTrusted; } }; void LocalDerivationGoal::startDaemon() { - settings.requireExperimentalFeature(Xp::RecursiveNix); + experimentalFeatureSettings.require(Xp::RecursiveNix); Store::Params params; params["path-info-cache-size"] = "0"; @@ -1461,7 +1470,7 @@ void LocalDerivationGoal::startDaemon() FdSink to(remote.get()); try { daemon::processConnection(store, from, to, - daemon::NotTrusted, daemon::Recursive); + NotTrusted, daemon::Recursive); debug("terminated daemon connection"); } catch (SysError &) { ignoreException(); @@ -1650,7 +1659,7 @@ void LocalDerivationGoal::runChild() try { /* child */ - commonChildInit(builderOut); + commonChildInit(); try { setupSeccomp(); @@ -2063,7 +2072,7 @@ void LocalDerivationGoal::runChild() /* The tmpDir in scope points at the temporary build directory for our derivation. Some packages try different mechanisms to find temporary directories, so we want to open up a broader place for them to dump their files, if needed. */ - Path globalTmpDir = canonPath(getEnv("TMPDIR").value_or("/tmp"), true); + Path globalTmpDir = canonPath(getEnvNonEmpty("TMPDIR").value_or("/tmp"), true); /* They don't like trailing slashes on subpath directives */ if (globalTmpDir.back() == '/') globalTmpDir.pop_back(); @@ -2165,7 +2174,7 @@ void LocalDerivationGoal::runChild() } -DrvOutputs LocalDerivationGoal::registerOutputs() +SingleDrvOutputs LocalDerivationGoal::registerOutputs() { /* When using a build hook, the build hook can register the output as valid (by doing `nix-store --import'). 
If so we don't have @@ -2274,7 +2283,7 @@ DrvOutputs LocalDerivationGoal::registerOutputs() bool discardReferences = false; if (auto structuredAttrs = parsedDrv->getStructuredAttrs()) { if (auto udr = get(*structuredAttrs, "unsafeDiscardReferences")) { - settings.requireExperimentalFeature(Xp::DiscardReferences); + experimentalFeatureSettings.require(Xp::DiscardReferences); if (auto output = get(*udr, outputName)) { if (!output->is_boolean()) throw Error("attribute 'unsafeDiscardReferences.\"%s\"' of derivation '%s' must be a Boolean", outputName, drvPath.to_string()); @@ -2386,27 +2395,26 @@ DrvOutputs LocalDerivationGoal::registerOutputs() } }; - auto rewriteRefs = [&]() -> std::pair { + auto rewriteRefs = [&]() -> StoreReferences { /* In the CA case, we need the rewritten refs to calculate the final path, therefore we look for a *non-rewritten self-reference, and use a bool rather try to solve the computationally intractable fixed point. */ - std::pair res { - false, - {}, + StoreReferences res { + .self = false, }; for (auto & r : references) { auto name = r.name(); auto origHash = std::string { r.hashPart() }; if (r == *scratchPath) { - res.first = true; + res.self = true; } else if (auto outputRewrite = get(outputRewrites, origHash)) { std::string newRef = *outputRewrite; newRef += '-'; newRef += name; - res.second.insert(StorePath { newRef }); + res.others.insert(StorePath { newRef }); } else { - res.second.insert(r); + res.others.insert(r); } } return res; @@ -2439,18 +2447,22 @@ DrvOutputs LocalDerivationGoal::registerOutputs() break; } auto got = caSink.finish().first; - auto refs = rewriteRefs(); - - auto finalPath = worker.store.makeFixedOutputPath( - outputHash.method, - got, - outputPathName(drv->name, outputName), - refs.second, - refs.first); - if (*scratchPath != finalPath) { + ValidPathInfo newInfo0 { + worker.store, + outputPathName(drv->name, outputName), + FixedOutputInfo { + .hash = { + .method = outputHash.method, + .hash = got, + }, + .references = rewriteRefs(), + }, + Hash::dummy, + }; + if (*scratchPath != newInfo0.path) { // Also rewrite the output path auto source = sinkToSource([&](Sink & nextSink) { - RewritingSink rsink2(oldHashPart, std::string(finalPath.hashPart()), nextSink); + RewritingSink rsink2(oldHashPart, std::string(newInfo0.path.hashPart()), nextSink); dumpPath(actualPath, rsink2); rsink2.flush(); }); @@ -2461,19 +2473,8 @@ DrvOutputs LocalDerivationGoal::registerOutputs() } HashResult narHashAndSize = hashPath(htSHA256, actualPath); - ValidPathInfo newInfo0 { - finalPath, - narHashAndSize.first, - }; - + newInfo0.narHash = narHashAndSize.first; newInfo0.narSize = narHashAndSize.second; - newInfo0.ca = FixedOutputHash { - .method = outputHash.method, - .hash = got, - }; - newInfo0.references = refs.second; - if (refs.first) - newInfo0.references.insert(newInfo0.path); assert(newInfo0.ca); return newInfo0; @@ -2495,8 +2496,8 @@ DrvOutputs LocalDerivationGoal::registerOutputs() ValidPathInfo newInfo0 { requiredFinalPath, narHashAndSize.first }; newInfo0.narSize = narHashAndSize.second; auto refs = rewriteRefs(); - newInfo0.references = refs.second; - if (refs.first) + newInfo0.references = std::move(refs.others); + if (refs.self) newInfo0.references.insert(newInfo0.path); return newInfo0; }, @@ -2510,7 +2511,7 @@ DrvOutputs LocalDerivationGoal::registerOutputs() /* Check wanted hash */ const Hash & wanted = dof.hash.hash; assert(newInfo0.ca); - auto got = getContentAddressHash(*newInfo0.ca); + auto got = newInfo0.ca->getHash(); if (wanted != got) { 
/* Throw an error after registering the path as valid. */ @@ -2682,7 +2683,7 @@ DrvOutputs LocalDerivationGoal::registerOutputs() means it's safe to link the derivation to the output hash. We must do that for floating CA derivations, which otherwise couldn't be cached, but it's fine to do in all cases. */ - DrvOutputs builtOutputs; + SingleDrvOutputs builtOutputs; for (auto & [outputName, newInfo] : infos) { auto oldinfo = get(initialOutputs, outputName); @@ -2694,14 +2695,14 @@ DrvOutputs LocalDerivationGoal::registerOutputs() }, .outPath = newInfo.path }; - if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations) + if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations) && drv->type().isPure()) { signRealisation(thisRealisation); worker.store.registerDrvOutput(thisRealisation); } if (wantedOutputs.contains(outputName)) - builtOutputs.emplace(thisRealisation.id, thisRealisation); + builtOutputs.emplace(outputName, thisRealisation); } return builtOutputs; @@ -2892,7 +2893,7 @@ void LocalDerivationGoal::deleteTmpDir(bool force) bool LocalDerivationGoal::isReadDesc(int fd) { return (hook && DerivationGoal::isReadDesc(fd)) || - (!hook && fd == builderOut.readSide.get()); + (!hook && fd == builderOut.get()); } diff --git a/src/libstore/build/local-derivation-goal.hh b/src/libstore/build/local-derivation-goal.hh index 34c4e9187..9acd7593d 100644 --- a/src/libstore/build/local-derivation-goal.hh +++ b/src/libstore/build/local-derivation-goal.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "derivation-goal.hh" #include "local-store.hh" @@ -9,48 +10,75 @@ struct LocalDerivationGoal : public DerivationGoal { LocalStore & getLocalStore(); - /* User selected for running the builder. */ + /** + * User selected for running the builder. + */ std::unique_ptr buildUser; - /* The process ID of the builder. */ + /** + * The process ID of the builder. + */ Pid pid; - /* The cgroup of the builder, if any. */ + /** + * The cgroup of the builder, if any. + */ std::optional cgroup; - /* The temporary directory. */ + /** + * The temporary directory. + */ Path tmpDir; - /* The path of the temporary directory in the sandbox. */ + /** + * The path of the temporary directory in the sandbox. + */ Path tmpDirInSandbox; - /* Pipe for the builder's standard output/error. */ - Pipe builderOut; + /** + * Master side of the pseudoterminal used for the builder's + * standard output/error. + */ + AutoCloseFD builderOut; - /* Pipe for synchronising updates to the builder namespaces. */ + /** + * Pipe for synchronising updates to the builder namespaces. + */ Pipe userNamespaceSync; - /* The mount namespace and user namespace of the builder, used to add additional - paths to the sandbox as a result of recursive Nix calls. */ + /** + * The mount namespace and user namespace of the builder, used to add additional + * paths to the sandbox as a result of recursive Nix calls. + */ AutoCloseFD sandboxMountNamespace; AutoCloseFD sandboxUserNamespace; - /* On Linux, whether we're doing the build in its own user - namespace. */ + /** + * On Linux, whether we're doing the build in its own user + * namespace. + */ bool usingUserNamespace = true; - /* Whether we're currently doing a chroot build. */ + /** + * Whether we're currently doing a chroot build. + */ bool useChroot = false; Path chrootRootDir; - /* RAII object to delete the chroot directory. */ + /** + * RAII object to delete the chroot directory. + */ std::shared_ptr autoDelChroot; - /* Whether to run the build in a private network namespace. 
*/ + /** + * Whether to run the build in a private network namespace. + */ bool privateNetwork = false; - /* Stuff we need to pass to initChild(). */ + /** + * Stuff we need to pass to initChild(). + */ struct ChrootPath { Path source; bool optional; @@ -69,30 +97,35 @@ struct LocalDerivationGoal : public DerivationGoal SandboxProfile additionalSandboxProfile; #endif - /* Hash rewriting. */ + /** + * Hash rewriting. + */ StringMap inputRewrites, outputRewrites; typedef map RedirectedOutputs; RedirectedOutputs redirectedOutputs; - /* The outputs paths used during the build. - - - Input-addressed derivations or fixed content-addressed outputs are - sometimes built when some of their outputs already exist, and can not - be hidden via sandboxing. We use temporary locations instead and - rewrite after the build. Otherwise the regular predetermined paths are - put here. - - - Floating content-addressed derivations do not know their final build - output paths until the outputs are hashed, so random locations are - used, and then renamed. The randomness helps guard against hidden - self-references. + /** + * The outputs paths used during the build. + * + * - Input-addressed derivations or fixed content-addressed outputs are + * sometimes built when some of their outputs already exist, and can not + * be hidden via sandboxing. We use temporary locations instead and + * rewrite after the build. Otherwise the regular predetermined paths are + * put here. + * + * - Floating content-addressed derivations do not know their final build + * output paths until the outputs are hashed, so random locations are + * used, and then renamed. The randomness helps guard against hidden + * self-references. */ OutputPathMap scratchOutputs; - /* Path registration info from the previous round, if we're - building multiple times. Since this contains the hash, it - allows us to compare whether two rounds produced the same - result. */ + /** + * Path registration info from the previous round, if we're + * building multiple times. Since this contains the hash, it + * allows us to compare whether two rounds produced the same + * result. + */ std::map prevInfos; uid_t sandboxUid() { return usingUserNamespace ? (!buildUser || buildUser->getUIDCount() == 1 ? 1000 : 0) : buildUser->getUID(); } @@ -100,25 +133,37 @@ struct LocalDerivationGoal : public DerivationGoal const static Path homeDir; - /* The recursive Nix daemon socket. */ + /** + * The recursive Nix daemon socket. + */ AutoCloseFD daemonSocket; - /* The daemon main thread. */ + /** + * The daemon main thread. + */ std::thread daemonThread; - /* The daemon worker threads. */ + /** + * The daemon worker threads. + */ std::vector daemonWorkerThreads; - /* Paths that were added via recursive Nix calls. */ + /** + * Paths that were added via recursive Nix calls. + */ StorePathSet addedPaths; - /* Realisations that were added via recursive Nix calls. */ + /** + * Realisations that were added via recursive Nix calls. + */ std::set addedDrvOutputs; - /* Recursive Nix calls are only allowed to build or realize paths - in the original input closure or added via a recursive Nix call - (so e.g. you can't do 'nix-store -r /nix/store/' where - /nix/store/ is some arbitrary path in a binary cache). */ + /** + * Recursive Nix calls are only allowed to build or realize paths + * in the original input closure or added via a recursive Nix call + * (so e.g. you can't do 'nix-store -r /nix/store/' where + * /nix/store/ is some arbitrary path in a binary cache). 
+ */ bool isAllowed(const StorePath & path) { return inputPaths.count(path) || addedPaths.count(path); @@ -136,55 +181,81 @@ struct LocalDerivationGoal : public DerivationGoal virtual ~LocalDerivationGoal() override; - /* Whether we need to perform hash rewriting if there are valid output paths. */ + /** + * Whether we need to perform hash rewriting if there are valid output paths. + */ bool needsHashRewrite(); - /* The additional states. */ + /** + * The additional states. + */ void tryLocalBuild() override; - /* Start building a derivation. */ + /** + * Start building a derivation. + */ void startBuilder(); - /* Fill in the environment for the builder. */ + /** + * Fill in the environment for the builder. + */ void initEnv(); - /* Setup tmp dir location. */ + /** + * Setup tmp dir location. + */ void initTmpDir(); - /* Write a JSON file containing the derivation attributes. */ + /** + * Write a JSON file containing the derivation attributes. + */ void writeStructuredAttrs(); void startDaemon(); void stopDaemon(); - /* Add 'path' to the set of paths that may be referenced by the - outputs, and make it appear in the sandbox. */ + /** + * Add 'path' to the set of paths that may be referenced by the + * outputs, and make it appear in the sandbox. + */ void addDependency(const StorePath & path); - /* Make a file owned by the builder. */ + /** + * Make a file owned by the builder. + */ void chownToBuilder(const Path & path); int getChildStatus() override; - /* Run the builder's process. */ + /** + * Run the builder's process. + */ void runChild(); - /* Check that the derivation outputs all exist and register them - as valid. */ - DrvOutputs registerOutputs() override; + /** + * Check that the derivation outputs all exist and register them + * as valid. + */ + SingleDrvOutputs registerOutputs() override; void signRealisation(Realisation &) override; - /* Check that an output meets the requirements specified by the - 'outputChecks' attribute (or the legacy - '{allowed,disallowed}{References,Requisites}' attributes). */ + /** + * Check that an output meets the requirements specified by the + * 'outputChecks' attribute (or the legacy + * '{allowed,disallowed}{References,Requisites}' attributes). + */ void checkOutputs(const std::map & outputs); - /* Close the read side of the logger pipe. */ + /** + * Close the read side of the logger pipe. + */ void closeReadPipes() override; - /* Cleanup hooks for buildDone() */ + /** + * Cleanup hooks for buildDone() + */ void cleanupHookFinally() override; void cleanupPreChildKill() override; void cleanupPostChildKill() override; @@ -194,24 +265,36 @@ struct LocalDerivationGoal : public DerivationGoal bool isReadDesc(int fd) override; - /* Delete the temporary directory, if we have one. */ + /** + * Delete the temporary directory, if we have one. + */ void deleteTmpDir(bool force); - /* Forcibly kill the child process, if any. */ + /** + * Forcibly kill the child process, if any. + */ void killChild() override; - /* Kill any processes running under the build user UID or in the - cgroup of the build. */ + /** + * Kill any processes running under the build user UID or in the + * cgroup of the build. + */ void killSandbox(bool getStats); - /* Create alternative path calculated from but distinct from the - input, so we can avoid overwriting outputs (or other store paths) - that already exist. */ + /** + * Create alternative path calculated from but distinct from the + * input, so we can avoid overwriting outputs (or other store paths) + * that already exist. 
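// The comments above describe the scratch-path scheme: outputs are produced at
// temporary (or, for floating CA derivations, random) locations and
// self-references are rewritten to the final path afterwards (see
// RewritingSink / rewriteRefs earlier in this diff). A minimal in-memory
// sketch of that rewrite; the real code streams it, and the equal length of
// the two hash parts is what keeps the replacement well-defined.
#include <string>

std::string rewriteHashPart(
    std::string contents,
    const std::string & oldHashPart,  // hash part of the scratch output path
    const std::string & newHashPart)  // hash part of the final output path
{
    // Store-path hash parts have identical length, so earlier replacements
    // never shift the offsets of later occurrences.
    for (auto pos = contents.find(oldHashPart);
         pos != std::string::npos;
         pos = contents.find(oldHashPart, pos + newHashPart.size()))
        contents.replace(pos, oldHashPart.size(), newHashPart);
    return contents;
}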
+ */ StorePath makeFallbackPath(const StorePath & path); - /* Make a path to another based on the output name along with the - derivation hash. */ - /* FIXME add option to randomize, so we can audit whether our - rewrites caught everything */ + + /** + * Make a path to another based on the output name along with the + * derivation hash. + * + * @todo Add option to randomize, so we can audit whether our + * rewrites caught everything + */ StorePath makeFallbackPath(std::string_view outputName); }; diff --git a/src/libstore/build/personality.hh b/src/libstore/build/personality.hh index 30e4f4062..91b730fab 100644 --- a/src/libstore/build/personality.hh +++ b/src/libstore/build/personality.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include diff --git a/src/libstore/build/substitution-goal.cc b/src/libstore/build/substitution-goal.cc index 2af105b4d..190fb455a 100644 --- a/src/libstore/build/substitution-goal.cc +++ b/src/libstore/build/substitution-goal.cc @@ -95,7 +95,9 @@ void PathSubstitutionGoal::tryNext() subs.pop_front(); if (ca) { - subPath = sub->makeFixedOutputPathFromCA(storePath.name(), *ca); + subPath = sub->makeFixedOutputPathFromCA( + std::string { storePath.name() }, + ContentAddressWithReferences::withoutRefs(*ca)); if (sub->storeDir == worker.store.storeDir) assert(subPath == storePath); } else if (sub->storeDir != worker.store.storeDir) { diff --git a/src/libstore/build/substitution-goal.hh b/src/libstore/build/substitution-goal.hh index a73f8e666..c2b7fc95a 100644 --- a/src/libstore/build/substitution-goal.hh +++ b/src/libstore/build/substitution-goal.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "lock.hh" #include "store-api.hh" @@ -10,38 +11,58 @@ class Worker; struct PathSubstitutionGoal : public Goal { - /* The store path that should be realised through a substitute. */ + /** + * The store path that should be realised through a substitute. + */ StorePath storePath; - /* The path the substituter refers to the path as. This will be - different when the stores have different names. */ + /** + * The path the substituter refers to the path as. This will be + * different when the stores have different names. + */ std::optional subPath; - /* The remaining substituters. */ + /** + * The remaining substituters. + */ std::list> subs; - /* The current substituter. */ + /** + * The current substituter. + */ std::shared_ptr sub; - /* Whether a substituter failed. */ + /** + * Whether a substituter failed. + */ bool substituterFailed = false; - /* Path info returned by the substituter's query info operation. */ + /** + * Path info returned by the substituter's query info operation. + */ std::shared_ptr info; - /* Pipe for the substituter's standard output. */ + /** + * Pipe for the substituter's standard output. + */ Pipe outPipe; - /* The substituter thread. */ + /** + * The substituter thread. + */ std::thread thr; std::promise promise; - /* Whether to try to repair a valid path. */ + /** + * Whether to try to repair a valid path. + */ RepairFlag repair; - /* Location where we're downloading the substitute. Differs from - storePath when doing a repair. */ + /** + * Location where we're downloading the substitute. Differs from + * storePath when doing a repair. 
+ */ Path destPath; std::unique_ptr> maintainExpectedSubstitutions, @@ -50,7 +71,9 @@ struct PathSubstitutionGoal : public Goal typedef void (PathSubstitutionGoal::*GoalState)(); GoalState state; - /* Content address for recomputing store path */ + /** + * Content address for recomputing store path + */ std::optional ca; void done( @@ -64,16 +87,20 @@ public: void timedOut(Error && ex) override { abort(); }; + /** + * We prepend "a$" to the key name to ensure substitution goals + * happen before derivation goals. + */ std::string key() override { - /* "a$" ensures substitution goals happen before derivation - goals. */ return "a$" + std::string(storePath.name()) + "$" + worker.store.printStorePath(storePath); } void work() override; - /* The states. */ + /** + * The states. + */ void init(); void tryNext(); void gotInfo(); @@ -81,7 +108,9 @@ public: void tryToRun(); void finished(); - /* Callback used by the worker to write to the log. */ + /** + * Callback used by the worker to write to the log. + */ void handleChildOutput(int fd, std::string_view data) override; void handleEOF(int fd) override; diff --git a/src/libstore/build/worker.cc b/src/libstore/build/worker.cc index f775f8486..6ad4a0e2b 100644 --- a/src/libstore/build/worker.cc +++ b/src/libstore/build/worker.cc @@ -92,6 +92,7 @@ std::shared_ptr Worker::makePathSubstitutionGoal(const Sto return goal; } + std::shared_ptr Worker::makeDrvOutputSubstitutionGoal(const DrvOutput& id, RepairFlag repair, std::optional ca) { std::weak_ptr & goal_weak = drvOutputSubstitutionGoals[id]; @@ -104,6 +105,20 @@ std::shared_ptr Worker::makeDrvOutputSubstitutionGoal return goal; } + +GoalPtr Worker::makeGoal(const DerivedPath & req, BuildMode buildMode) +{ + return std::visit(overloaded { + [&](const DerivedPath::Built & bfd) -> GoalPtr { + return makeDerivationGoal(bfd.drvPath, bfd.outputs, buildMode); + }, + [&](const DerivedPath::Opaque & bo) -> GoalPtr { + return makePathSubstitutionGoal(bo.path, buildMode == bmRepair ? Repair : NoRepair); + }, + }, req.raw()); +} + + template static void removeGoal(std::shared_ptr goal, std::map> & goalMap) { diff --git a/src/libstore/build/worker.hh b/src/libstore/build/worker.hh index 6d68d3cf1..bb51d641d 100644 --- a/src/libstore/build/worker.hh +++ b/src/libstore/build/worker.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "types.hh" #include "lock.hh" @@ -16,24 +17,29 @@ struct DerivationGoal; struct PathSubstitutionGoal; class DrvOutputSubstitutionGoal; -/* Workaround for not being able to declare a something like - - class PathSubstitutionGoal : public Goal; - - even when Goal is a complete type. - - This is still a static cast. The purpose of exporting it is to define it in - a place where `PathSubstitutionGoal` is concrete, and use it in a place where it - is opaque. */ +/** + * Workaround for not being able to declare a something like + * + * ```c++ + * class PathSubstitutionGoal : public Goal; + * ``` + * even when Goal is a complete type. + * + * This is still a static cast. The purpose of exporting it is to define it in + * a place where `PathSubstitutionGoal` is concrete, and use it in a place where it + * is opaque. + */ GoalPtr upcast_goal(std::shared_ptr subGoal); GoalPtr upcast_goal(std::shared_ptr subGoal); typedef std::chrono::time_point steady_time_point; -/* A mapping used to remember for each child process to what goal it - belongs, and file descriptors for receiving log data and output - path creation commands. 
*/ +/** + * A mapping used to remember for each child process to what goal it + * belongs, and file descriptors for receiving log data and output + * path creation commands. + */ struct Child { WeakGoalPtr goal; @@ -41,14 +47,19 @@ struct Child std::set fds; bool respectTimeouts; bool inBuildSlot; - steady_time_point lastOutput; /* time we last got output on stdout/stderr */ + /** + * Time we last got output on stdout/stderr + */ + steady_time_point lastOutput; steady_time_point timeStarted; }; /* Forward definition. */ struct HookInstance; -/* The worker class. */ +/** + * The worker class. + */ class Worker { private: @@ -56,38 +67,58 @@ private: /* Note: the worker should only have strong pointers to the top-level goals. */ - /* The top-level goals of the worker. */ + /** + * The top-level goals of the worker. + */ Goals topGoals; - /* Goals that are ready to do some work. */ + /** + * Goals that are ready to do some work. + */ WeakGoals awake; - /* Goals waiting for a build slot. */ + /** + * Goals waiting for a build slot. + */ WeakGoals wantingToBuild; - /* Child processes currently running. */ + /** + * Child processes currently running. + */ std::list children; - /* Number of build slots occupied. This includes local builds and - substitutions but not remote builds via the build hook. */ + /** + * Number of build slots occupied. This includes local builds and + * substitutions but not remote builds via the build hook. + */ unsigned int nrLocalBuilds; - /* Maps used to prevent multiple instantiations of a goal for the - same derivation / path. */ + /** + * Maps used to prevent multiple instantiations of a goal for the + * same derivation / path. + */ std::map> derivationGoals; std::map> substitutionGoals; std::map> drvOutputSubstitutionGoals; - /* Goals waiting for busy paths to be unlocked. */ + /** + * Goals waiting for busy paths to be unlocked. + */ WeakGoals waitingForAnyGoal; - /* Goals sleeping for a few seconds (polling a lock). */ + /** + * Goals sleeping for a few seconds (polling a lock). + */ WeakGoals waitingForAWhile; - /* Last time the goals in `waitingForAWhile' where woken up. */ + /** + * Last time the goals in `waitingForAWhile` where woken up. + */ steady_time_point lastWokenUp; - /* Cache for pathContentsGood(). */ + /** + * Cache for pathContentsGood(). + */ std::map pathContentsGoodCache; public: @@ -96,17 +127,25 @@ public: const Activity actDerivations; const Activity actSubstitutions; - /* Set if at least one derivation had a BuildError (i.e. permanent - failure). */ + /** + * Set if at least one derivation had a BuildError (i.e. permanent + * failure). + */ bool permanentFailure; - /* Set if at least one derivation had a timeout. */ + /** + * Set if at least one derivation had a timeout. + */ bool timedOut; - /* Set if at least one derivation fails with a hash mismatch. */ + /** + * Set if at least one derivation fails with a hash mismatch. + */ bool hashMismatch; - /* Set if at least one derivation is not deterministic in check mode. */ + /** + * Set if at least one derivation is not deterministic in check mode. + */ bool checkMismatch; Store & store; @@ -128,16 +167,22 @@ public: uint64_t expectedNarSize = 0; uint64_t doneNarSize = 0; - /* Whether to ask the build hook if it can build a derivation. If - it answers with "decline-permanently", we don't try again. */ + /** + * Whether to ask the build hook if it can build a derivation. If + * it answers with "decline-permanently", we don't try again. 
+ */ bool tryBuildHook = true; Worker(Store & store, Store & evalStore); ~Worker(); - /* Make a goal (with caching). */ + /** + * Make a goal (with caching). + */ - /* derivation goal */ + /** + * @ref DerivationGoal "derivation goal" + */ private: std::shared_ptr makeDerivationGoalCommon( const StorePath & drvPath, const OutputsSpec & wantedOutputs, @@ -150,56 +195,88 @@ public: const StorePath & drvPath, const BasicDerivation & drv, const OutputsSpec & wantedOutputs, BuildMode buildMode = bmNormal); - /* substitution goal */ + /** + * @ref SubstitutionGoal "substitution goal" + */ std::shared_ptr makePathSubstitutionGoal(const StorePath & storePath, RepairFlag repair = NoRepair, std::optional ca = std::nullopt); std::shared_ptr makeDrvOutputSubstitutionGoal(const DrvOutput & id, RepairFlag repair = NoRepair, std::optional ca = std::nullopt); - /* Remove a dead goal. */ + /** + * Make a goal corresponding to the `DerivedPath`. + * + * It will be a `DerivationGoal` for a `DerivedPath::Built` or + * a `SubstitutionGoal` for a `DerivedPath::Opaque`. + */ + GoalPtr makeGoal(const DerivedPath & req, BuildMode buildMode = bmNormal); + + /** + * Remove a dead goal. + */ void removeGoal(GoalPtr goal); - /* Wake up a goal (i.e., there is something for it to do). */ + /** + * Wake up a goal (i.e., there is something for it to do). + */ void wakeUp(GoalPtr goal); - /* Return the number of local build and substitution processes - currently running (but not remote builds via the build - hook). */ + /** + * Return the number of local build and substitution processes + * currently running (but not remote builds via the build + * hook). + */ unsigned int getNrLocalBuilds(); - /* Registers a running child process. `inBuildSlot' means that - the process counts towards the jobs limit. */ + /** + * Registers a running child process. `inBuildSlot` means that + * the process counts towards the jobs limit. + */ void childStarted(GoalPtr goal, const std::set & fds, bool inBuildSlot, bool respectTimeouts); - /* Unregisters a running child process. `wakeSleepers' should be - false if there is no sense in waking up goals that are sleeping - because they can't run yet (e.g., there is no free build slot, - or the hook would still say `postpone'). */ + /** + * Unregisters a running child process. `wakeSleepers` should be + * false if there is no sense in waking up goals that are sleeping + * because they can't run yet (e.g., there is no free build slot, + * or the hook would still say `postpone`). + */ void childTerminated(Goal * goal, bool wakeSleepers = true); - /* Put `goal' to sleep until a build slot becomes available (which - might be right away). */ + /** + * Put `goal` to sleep until a build slot becomes available (which + * might be right away). + */ void waitForBuildSlot(GoalPtr goal); - /* Wait for any goal to finish. Pretty indiscriminate way to - wait for some resource that some other goal is holding. */ + /** + * Wait for any goal to finish. Pretty indiscriminate way to + * wait for some resource that some other goal is holding. + */ void waitForAnyGoal(GoalPtr goal); - /* Wait for a few seconds and then retry this goal. Used when - waiting for a lock held by another process. This kind of - polling is inefficient, but POSIX doesn't really provide a way - to wait for multiple locks in the main select() loop. */ + /** + * Wait for a few seconds and then retry this goal. Used when + * waiting for a lock held by another process. 
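// Rough usage sketch for the Worker::makeGoal() declared above (and defined in
// worker.cc earlier in this diff): callers hand over a DerivedPath and no
// longer dispatch on its two alternatives themselves. The field names of
// DerivedPath::Built / DerivedPath::Opaque follow their use in that
// definition; OutputsSpec::All, the store path literal and the surrounding
// setup are assumed for illustration.
void buildOne(Worker & worker, Store & store, const StorePath & drvPath)
{
    // A derivation to build (all of its outputs)...
    GoalPtr g1 = worker.makeGoal(
        DerivedPath::Built { .drvPath = drvPath, .outputs = OutputsSpec::All {} },
        bmNormal);

    // ...and a plain store path to substitute; bmRepair maps to a repairing
    // substitution goal, as in the std::visit above.
    GoalPtr g2 = worker.makeGoal(
        DerivedPath::Opaque { .path = store.parseStorePath("/nix/store/...-example") },
        bmRepair);

    worker.run(Goals { g1, g2 });
}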
This kind of + * polling is inefficient, but POSIX doesn't really provide a way + * to wait for multiple locks in the main select() loop. + */ void waitForAWhile(GoalPtr goal); - /* Loop until the specified top-level goals have finished. */ + /** + * Loop until the specified top-level goals have finished. + */ void run(const Goals & topGoals); - /* Wait for input to become available. */ + /** + * Wait for input to become available. + */ void waitForInput(); unsigned int exitStatus(); - /* Check whether the given valid path exists and has the right - contents. */ + /** + * Check whether the given valid path exists and has the right + * contents. + */ bool pathContentsGood(const StorePath & path); void markContentsGood(const StorePath & path); diff --git a/src/libstore/builtins.hh b/src/libstore/builtins.hh index 66597e456..d201fb3ac 100644 --- a/src/libstore/builtins.hh +++ b/src/libstore/builtins.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "derivations.hh" diff --git a/src/libstore/builtins/buildenv.cc b/src/libstore/builtins/buildenv.cc index b1fbda13d..7bba33fb9 100644 --- a/src/libstore/builtins/buildenv.cc +++ b/src/libstore/builtins/buildenv.cc @@ -92,13 +92,11 @@ static void createLinks(State & state, const Path & srcDir, const Path & dstDir, if (S_ISLNK(dstSt.st_mode)) { auto prevPriority = state.priorities[dstFile]; if (prevPriority == priority) - throw Error( - "files '%1%' and '%2%' have the same priority %3%; " - "use 'nix-env --set-flag priority NUMBER INSTALLED_PKGNAME' " - "or type 'nix profile install --help' if using 'nix profile' to find out how " - "to change the priority of one of the conflicting packages" - " (0 being the highest priority)", - srcFile, readLink(dstFile), priority); + throw BuildEnvFileConflictError( + readLink(dstFile), + srcFile, + priority + ); if (prevPriority < priority) continue; if (unlink(dstFile.c_str()) == -1) diff --git a/src/libstore/builtins/buildenv.hh b/src/libstore/builtins/buildenv.hh index 73c0f5f7f..0923c2adb 100644 --- a/src/libstore/builtins/buildenv.hh +++ b/src/libstore/builtins/buildenv.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "derivations.hh" #include "store-api.hh" @@ -12,6 +13,32 @@ struct Package { Package(const Path & path, bool active, int priority) : path{path}, active{active}, priority{priority} {} }; +class BuildEnvFileConflictError : public Error +{ +public: + const Path fileA; + const Path fileB; + int priority; + + BuildEnvFileConflictError( + const Path fileA, + const Path fileB, + int priority + ) + : Error( + "Unable to build profile. 
There is a conflict for the following files:\n" + "\n" + " %1%\n" + " %2%", + fileA, + fileB + ) + , fileA(fileA) + , fileB(fileB) + , priority(priority) + {} +}; + typedef std::vector Packages; void buildProfile(const Path & out, Packages && pkgs); diff --git a/src/libstore/content-address.cc b/src/libstore/content-address.cc index cf32ccdc4..055b216db 100644 --- a/src/libstore/content-address.cc +++ b/src/libstore/content-address.cc @@ -9,7 +9,7 @@ std::string FixedOutputHash::printMethodAlgo() const return makeFileIngestionPrefix(method) + printHashType(hash.type); } -std::string makeFileIngestionPrefix(const FileIngestionMethod m) +std::string makeFileIngestionPrefix(FileIngestionMethod m) { switch (m) { case FileIngestionMethod::Flat: @@ -21,39 +21,35 @@ std::string makeFileIngestionPrefix(const FileIngestionMethod m) } } -std::string makeFixedOutputCA(FileIngestionMethod method, const Hash & hash) -{ - return "fixed:" - + makeFileIngestionPrefix(method) - + hash.to_string(Base32, true); -} - -std::string renderContentAddress(ContentAddress ca) +std::string ContentAddress::render() const { return std::visit(overloaded { - [](TextHash & th) { - return "text:" + th.hash.to_string(Base32, true); + [](const TextHash & th) { + return "text:" + + th.hash.to_string(Base32, true); }, - [](FixedOutputHash & fsh) { - return makeFixedOutputCA(fsh.method, fsh.hash); + [](const FixedOutputHash & fsh) { + return "fixed:" + + makeFileIngestionPrefix(fsh.method) + + fsh.hash.to_string(Base32, true); } - }, ca); + }, raw); } -std::string renderContentAddressMethod(ContentAddressMethod cam) +std::string ContentAddressMethod::render() const { return std::visit(overloaded { - [](TextHashMethod & th) { + [](const TextHashMethod & th) { return std::string{"text:"} + printHashType(htSHA256); }, - [](FixedOutputHashMethod & fshm) { + [](const FixedOutputHashMethod & fshm) { return "fixed:" + makeFileIngestionPrefix(fshm.fileIngestionMethod) + printHashType(fshm.hashType); } - }, cam); + }, raw); } -/* - Parses content address strings up to the hash. +/** + * Parses content address strings up to the hash. */ static ContentAddressMethod parseContentAddressMethodPrefix(std::string_view & rest) { @@ -97,7 +93,7 @@ static ContentAddressMethod parseContentAddressMethodPrefix(std::string_view & r throw UsageError("content address prefix '%s' is unrecognized. Recogonized prefixes are 'text' or 'fixed'", prefix); } -ContentAddress parseContentAddress(std::string_view rawCa) { +ContentAddress ContentAddress::parse(std::string_view rawCa) { auto rest = rawCa; ContentAddressMethod caMethod = parseContentAddressMethodPrefix(rest); @@ -115,10 +111,10 @@ ContentAddress parseContentAddress(std::string_view rawCa) { .hash = Hash::parseNonSRIUnprefixed(rest, std::move(fohMethod.hashType)), }); }, - }, caMethod); + }, caMethod.raw); } -ContentAddressMethod parseContentAddressMethod(std::string_view caMethod) +ContentAddressMethod ContentAddressMethod::parse(std::string_view caMethod) { std::string asPrefix = std::string{caMethod} + ":"; // parseContentAddressMethodPrefix takes its argument by reference @@ -126,26 +122,55 @@ ContentAddressMethod parseContentAddressMethod(std::string_view caMethod) return parseContentAddressMethodPrefix(asPrefixView); } -std::optional parseContentAddressOpt(std::string_view rawCaOpt) +std::optional ContentAddress::parseOpt(std::string_view rawCaOpt) { - return rawCaOpt == "" ? std::optional() : parseContentAddress(rawCaOpt); + return rawCaOpt == "" + ? 
std::nullopt + : std::optional { ContentAddress::parse(rawCaOpt) }; }; std::string renderContentAddress(std::optional ca) { - return ca ? renderContentAddress(*ca) : ""; + return ca ? ca->render() : ""; } -Hash getContentAddressHash(const ContentAddress & ca) +const Hash & ContentAddress::getHash() const { return std::visit(overloaded { - [](const TextHash & th) { + [](const TextHash & th) -> auto & { return th.hash; }, - [](const FixedOutputHash & fsh) { + [](const FixedOutputHash & fsh) -> auto & { return fsh.hash; - } - }, ca); + }, + }, raw); +} + +bool StoreReferences::empty() const +{ + return !self && others.empty(); +} + +size_t StoreReferences::size() const +{ + return (self ? 1 : 0) + others.size(); +} + +ContentAddressWithReferences ContentAddressWithReferences::withoutRefs(const ContentAddress & ca) { + return std::visit(overloaded { + [&](const TextHash & h) -> ContentAddressWithReferences { + return TextInfo { + .hash = h, + .references = {}, + }; + }, + [&](const FixedOutputHash & h) -> ContentAddressWithReferences { + return FixedOutputInfo { + .hash = h, + .references = {}, + }; + }, + }, ca.raw); } } diff --git a/src/libstore/content-address.hh b/src/libstore/content-address.hh index f6a6f5140..2f98950fb 100644 --- a/src/libstore/content-address.hh +++ b/src/libstore/content-address.hh @@ -1,77 +1,253 @@ #pragma once +///@file #include #include "hash.hh" +#include "path.hh" +#include "comparator.hh" namespace nix { +/* + * Content addressing method + */ + +/* We only have one way to hash text with references, so this is a single-value + type, mainly useful with std::variant. +*/ + +/** + * The single way we can serialize "text" file system objects. + * + * Somewhat obscure, used by \ref Derivation derivations and + * `builtins.toFile` currently. + */ +struct TextHashMethod : std::monostate { }; + +/** + * An enumeration of the main ways we can serialize file system + * objects. + */ enum struct FileIngestionMethod : uint8_t { + /** + * Flat-file hashing. Directly ingest the contents of a single file + */ Flat = false, + /** + * Recursive (or NAR) hashing. Serializes the file-system object in Nix + * Archive format and ingest that + */ Recursive = true }; -struct TextHash { - Hash hash; +/** + * Compute the prefix to the hash algorithm which indicates how the + * files were ingested. + */ +std::string makeFileIngestionPrefix(FileIngestionMethod m); + +struct FixedOutputHashMethod { + FileIngestionMethod fileIngestionMethod; + HashType hashType; + + GENERATE_CMP(FixedOutputHashMethod, me->fileIngestionMethod, me->hashType); }; -/// Pair of a hash, and how the file system was ingested -struct FixedOutputHash { - FileIngestionMethod method; - Hash hash; - std::string printMethodAlgo() const; +/** + * An enumeration of all the ways we can serialize file system objects. + * + * Just the type of a content address. Combine with the hash itself, and + * we have a `ContentAddress` as defined below. Combine that, in turn, + * with info on references, and we have `ContentAddressWithReferences`, + * as defined further below. + */ +struct ContentAddressMethod +{ + typedef std::variant< + TextHashMethod, + FixedOutputHashMethod + > Raw; + + Raw raw; + + GENERATE_CMP(ContentAddressMethod, me->raw); + + /* The moral equivalent of `using Raw::Raw;` */ + ContentAddressMethod(auto &&... arg) + : raw(std::forward(arg)...) 
+ { } + + static ContentAddressMethod parse(std::string_view rawCaMethod); + + std::string render() const; }; + /* - We've accumulated several types of content-addressed paths over the years; - fixed-output derivations support multiple hash algorithms and serialisation - methods (flat file vs NAR). Thus, ‘ca’ has one of the following forms: + * Mini content address + */ - * ‘text:sha256:’: For paths - computed by makeTextPath() / addTextToStore(). +/** + * Somewhat obscure, used by \ref Derivation derivations and + * `builtins.toFile` currently. + */ +struct TextHash { + /** + * Hash of the contents of the text/file. + */ + Hash hash; - * ‘fixed:::’: For paths computed by - makeFixedOutputPath() / addToStore(). -*/ -typedef std::variant< - TextHash, // for paths computed by makeTextPath() / addTextToStore - FixedOutputHash // for path computed by makeFixedOutputPath -> ContentAddress; + GENERATE_CMP(TextHash, me->hash); +}; -/* Compute the prefix to the hash algorithm which indicates how the files were - ingested. */ -std::string makeFileIngestionPrefix(const FileIngestionMethod m); +/** + * Used by most store objects that are content-addressed. + */ +struct FixedOutputHash { + /** + * How the file system objects are serialized + */ + FileIngestionMethod method; + /** + * Hash of that serialization + */ + Hash hash; -/* Compute the content-addressability assertion (ValidPathInfo::ca) - for paths created by makeFixedOutputPath() / addToStore(). */ -std::string makeFixedOutputCA(FileIngestionMethod method, const Hash & hash); + std::string printMethodAlgo() const; -std::string renderContentAddress(ContentAddress ca); + GENERATE_CMP(FixedOutputHash, me->method, me->hash); +}; + +/** + * We've accumulated several types of content-addressed paths over the + * years; fixed-output derivations support multiple hash algorithms and + * serialisation methods (flat file vs NAR). Thus, ‘ca’ has one of the + * following forms: + * + * - ‘text:sha256:’: For paths + * computed by Store::makeTextPath() / Store::addTextToStore(). + * + * - ‘fixed:::’: For paths computed by + * Store::makeFixedOutputPath() / Store::addToStore(). + */ +struct ContentAddress +{ + typedef std::variant< + TextHash, + FixedOutputHash + > Raw; + + Raw raw; + + GENERATE_CMP(ContentAddress, me->raw); + + /* The moral equivalent of `using Raw::Raw;` */ + ContentAddress(auto &&... arg) + : raw(std::forward(arg)...) + { } + + /** + * Compute the content-addressability assertion (ValidPathInfo::ca) for + * paths created by Store::makeFixedOutputPath() / Store::addToStore(). + */ + std::string render() const; + + static ContentAddress parse(std::string_view rawCa); + + static std::optional parseOpt(std::string_view rawCaOpt); + + const Hash & getHash() const; +}; std::string renderContentAddress(std::optional ca); -ContentAddress parseContentAddress(std::string_view rawCa); - -std::optional parseContentAddressOpt(std::string_view rawCaOpt); - -Hash getContentAddressHash(const ContentAddress & ca); /* - We only have one way to hash text with references, so this is single-value - type is only useful in std::variant. -*/ -struct TextHashMethod { }; -struct FixedOutputHashMethod { - FileIngestionMethod fileIngestionMethod; - HashType hashType; + * Full content address + * + * See the schema for store paths in store-api.cc + */ + +/** + * A set of references to other store objects. + * + * References to other store objects are tracked with store paths, self + * references however are tracked with a boolean. 
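// Round-trip sketch for the ContentAddress interface introduced above, using
// only members declared in this header (parse, render, getHash). The literal
// CA string is made up but follows the 'fixed:r:<algo>:<base32>' form
// documented in the comment above.
#include <iostream>

void demoContentAddress()
{
    auto ca = ContentAddress::parse(
        "fixed:r:sha256:1b4sb93wp679q4zx9k1ignby1yna3z7c4c2ri3wphylbc2dwsys0");

    std::cout << ca.render() << "\n";      // prints the same "fixed:r:sha256:..." string
    std::cout << ca.getHash().to_string(Base32, true) << "\n"; // "sha256:<base32>"
}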
+ */ +struct StoreReferences { + /** + * References to other store objects + */ + StorePathSet others; + + /** + * Reference to this store object + */ + bool self = false; + + /** + * @return true iff no references, i.e. others is empty and self is + * false. + */ + bool empty() const; + + /** + * Returns the numbers of references, i.e. the size of others + 1 + * iff self is true. + */ + size_t size() const; + + GENERATE_CMP(StoreReferences, me->self, me->others); }; -typedef std::variant< - TextHashMethod, - FixedOutputHashMethod - > ContentAddressMethod; +// This matches the additional info that we need for makeTextPath +struct TextInfo { + TextHash hash; + /** + * References to other store objects only; self references + * disallowed + */ + StorePathSet references; -ContentAddressMethod parseContentAddressMethod(std::string_view rawCaMethod); + GENERATE_CMP(TextInfo, me->hash, me->references); +}; -std::string renderContentAddressMethod(ContentAddressMethod caMethod); +struct FixedOutputInfo { + FixedOutputHash hash; + /** + * References to other store objects or this one. + */ + StoreReferences references; + + GENERATE_CMP(FixedOutputInfo, me->hash, me->references); +}; + +/** + * Ways of content addressing but not a complete ContentAddress. + * + * A ContentAddress without a Hash. + */ +struct ContentAddressWithReferences +{ + typedef std::variant< + TextInfo, + FixedOutputInfo + > Raw; + + Raw raw; + + GENERATE_CMP(ContentAddressWithReferences, me->raw); + + /* The moral equivalent of `using Raw::Raw;` */ + ContentAddressWithReferences(auto &&... arg) + : raw(std::forward(arg)...) + { } + + /** + * Create a ContentAddressWithReferences from a mere ContentAddress, by + * assuming no references in all cases. + */ + static ContentAddressWithReferences withoutRefs(const ContentAddress &); +}; } diff --git a/src/libstore/crypto.hh b/src/libstore/crypto.hh index 03f85c103..35216d470 100644 --- a/src/libstore/crypto.hh +++ b/src/libstore/crypto.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "types.hh" @@ -11,8 +12,10 @@ struct Key std::string name; std::string key; - /* Construct Key from a string in the format - ‘:’. */ + /** + * Construct Key from a string in the format + * ‘:’. + */ Key(std::string_view s); std::string to_string() const; @@ -28,7 +31,9 @@ struct SecretKey : Key { SecretKey(std::string_view s); - /* Return a detached signature of the given string. */ + /** + * Return a detached signature of the given string. + */ std::string signDetached(std::string_view s) const; PublicKey toPublicKey() const; @@ -52,8 +57,10 @@ private: typedef std::map PublicKeys; -/* Return true iff ‘sig’ is a correct signature over ‘data’ using one - of the given public keys. */ +/** + * @return true iff ‘sig’ is a correct signature over ‘data’ using one + * of the given public keys. 
+ */ bool verifyDetached(const std::string & data, const std::string & sig, const PublicKeys & publicKeys); diff --git a/src/libstore/daemon.cc b/src/libstore/daemon.cc index 5e6fd011f..af9a76f1e 100644 --- a/src/libstore/daemon.cc +++ b/src/libstore/daemon.cc @@ -67,12 +67,12 @@ struct TunnelLogger : public Logger state->pendingMsgs.push_back(s); } - void log(Verbosity lvl, const FormatOrString & fs) override + void log(Verbosity lvl, std::string_view s) override { if (lvl > verbosity) return; StringSink buf; - buf << STDERR_NEXT << (fs.s + "\n"); + buf << STDERR_NEXT << (s + "\n"); enqueueMsg(buf.s); } @@ -231,10 +231,10 @@ struct ClientSettings try { if (name == "ssh-auth-sock") // obsolete ; - else if (name == settings.experimentalFeatures.name) { + else if (name == experimentalFeatureSettings.experimentalFeatures.name) { // We don’t want to forward the experimental features to // the daemon, as that could cause some pretty weird stuff - if (parseFeatures(tokenizeString(value)) != settings.experimentalFeatures.get()) + if (parseFeatures(tokenizeString(value)) != experimentalFeatureSettings.experimentalFeatures.get()) debug("Ignoring the client-specified experimental features"); } else if (name == settings.pluginFiles.name) { if (tokenizeString(value) != settings.pluginFiles.get()) @@ -401,21 +401,21 @@ static void performOp(TunnelLogger * logger, ref store, logger->startWork(); auto pathInfo = [&]() { // NB: FramedSource must be out of scope before logger->stopWork(); - ContentAddressMethod contentAddressMethod = parseContentAddressMethod(camStr); + ContentAddressMethod contentAddressMethod = ContentAddressMethod::parse(camStr); FramedSource source(from); // TODO this is essentially RemoteStore::addCAToStore. Move it up to Store. return std::visit(overloaded { - [&](TextHashMethod &) { + [&](const TextHashMethod &) { // We could stream this by changing Store std::string contents = source.drain(); auto path = store->addTextToStore(name, contents, refs, repair); return store->queryPathInfo(path); }, - [&](FixedOutputHashMethod & fohm) { + [&](const FixedOutputHashMethod & fohm) { auto path = store->addToStoreFromDump(source, name, fohm.fileIngestionMethod, fohm.hashType, repair, refs); return store->queryPathInfo(path); }, - }, contentAddressMethod); + }, contentAddressMethod.raw); }(); logger->stopWork(); @@ -637,7 +637,10 @@ static void performOp(TunnelLogger * logger, ref store, to << res.timesBuilt << res.isNonDeterministic << res.startTime << res.stopTime; } if (GET_PROTOCOL_MINOR(clientVersion) >= 28) { - worker_proto::write(*store, to, res.builtOutputs); + DrvOutputs builtOutputs; + for (auto & [output, realisation] : res.builtOutputs) + builtOutputs.insert_or_assign(realisation.id, realisation); + worker_proto::write(*store, to, builtOutputs); } break; } @@ -880,7 +883,7 @@ static void performOp(TunnelLogger * logger, ref store, info.references = worker_proto::read(*store, from, Phantom {}); from >> info.registrationTime >> info.narSize >> info.ultimate; info.sigs = readStrings(from); - info.ca = parseContentAddressOpt(readString(from)); + info.ca = ContentAddress::parseOpt(readString(from)); from >> repair >> dontCheckSigs; if (!trusted && dontCheckSigs) dontCheckSigs = false; @@ -1032,6 +1035,15 @@ void processConnection( if (GET_PROTOCOL_MINOR(clientVersion) >= 33) to << nixVersion; + if (GET_PROTOCOL_MINOR(clientVersion) >= 35) { + // We and the underlying store both need to trust the client for + // it to be trusted. + auto temp = trusted + ? 
store->isTrustedClient() + : std::optional { NotTrusted }; + worker_proto::write(*store, to, temp); + } + /* Send startup error messages to the client. */ tunnelLogger->startWork(); @@ -1055,6 +1067,8 @@ void processConnection( opCount++; + debug("performing daemon worker op: %d", op); + try { performOp(tunnelLogger, store, trusted, recursive, clientVersion, from, to, op); } catch (Error & e) { diff --git a/src/libstore/daemon.hh b/src/libstore/daemon.hh index 8c765615c..1964c0d99 100644 --- a/src/libstore/daemon.hh +++ b/src/libstore/daemon.hh @@ -1,11 +1,11 @@ #pragma once +///@file #include "serialise.hh" #include "store-api.hh" namespace nix::daemon { -enum TrustedFlag : bool { NotTrusted = false, Trusted = true }; enum RecursiveFlag : bool { NotRecursive = false, Recursive = true }; void processConnection( diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc index 05dc9a3cc..15f3908ed 100644 --- a/src/libstore/derivations.cc +++ b/src/libstore/derivations.cc @@ -36,8 +36,8 @@ std::optional DerivationOutput::path(const Store & store, std::string StorePath DerivationOutput::CAFixed::path(const Store & store, std::string_view drvName, std::string_view outputName) const { return store.makeFixedOutputPath( - hash.method, hash.hash, - outputPathName(drvName, outputName)); + outputPathName(drvName, outputName), + { hash, {} }); } @@ -221,7 +221,7 @@ static DerivationOutput parseDerivationOutput(const Store & store, } const auto hashType = parseHashType(hashAlgo); if (hash == "impure") { - settings.requireExperimentalFeature(Xp::ImpureDerivations); + experimentalFeatureSettings.require(Xp::ImpureDerivations); assert(pathS == ""); return DerivationOutput::Impure { .method = std::move(method), @@ -236,7 +236,7 @@ static DerivationOutput parseDerivationOutput(const Store & store, }, }; } else { - settings.requireExperimentalFeature(Xp::CaDerivations); + experimentalFeatureSettings.require(Xp::CaDerivations); assert(pathS == ""); return DerivationOutput::CAFloating { .method = std::move(method), @@ -313,6 +313,15 @@ Derivation parseDerivation(const Store & store, std::string && s, std::string_vi } +/** + * Print a derivation string literal to an `std::string`. + * + * This syntax does not generalize to the expression language, which needs to + * escape `$`. + * + * @param res Where to print to + * @param s Which logical string to print + */ static void printString(std::string & res, std::string_view s) { boost::container::small_vector buffer; @@ -889,6 +898,67 @@ std::optional Derivation::tryResolve( return resolved; } + +void Derivation::checkInvariants(Store & store, const StorePath & drvPath) const +{ + assert(drvPath.isDerivation()); + std::string drvName(drvPath.name()); + drvName = drvName.substr(0, drvName.size() - drvExtension.size()); + + if (drvName != name) { + throw Error("Derivation '%s' has name '%s' which does not match its path", store.printStorePath(drvPath), name); + } + + auto envHasRightPath = [&](const StorePath & actual, const std::string & varName) + { + auto j = env.find(varName); + if (j == env.end() || store.parseStorePath(j->second) != actual) + throw Error("derivation '%s' has incorrect environment variable '%s', should be '%s'", + store.printStorePath(drvPath), varName, store.printStorePath(actual)); + }; + + + // Don't need the answer, but do this anyways to assert is proper + // combination. The code below is more general and naturally allows + // combinations that are currently prohibited. 
+ type(); + + std::optional hashesModulo; + for (auto & i : outputs) { + std::visit(overloaded { + [&](const DerivationOutput::InputAddressed & doia) { + if (!hashesModulo) { + // somewhat expensive so we do lazily + hashesModulo = hashDerivationModulo(store, *this, true); + } + auto currentOutputHash = get(hashesModulo->hashes, i.first); + if (!currentOutputHash) + throw Error("derivation '%s' has unexpected output '%s' (local-store / hashesModulo) named '%s'", + store.printStorePath(drvPath), store.printStorePath(doia.path), i.first); + StorePath recomputed = store.makeOutputPath(i.first, *currentOutputHash, drvName); + if (doia.path != recomputed) + throw Error("derivation '%s' has incorrect output '%s', should be '%s'", + store.printStorePath(drvPath), store.printStorePath(doia.path), store.printStorePath(recomputed)); + envHasRightPath(doia.path, i.first); + }, + [&](const DerivationOutput::CAFixed & dof) { + StorePath path = store.makeFixedOutputPath(drvName, { dof.hash, {} }); + envHasRightPath(path, i.first); + }, + [&](const DerivationOutput::CAFloating &) { + /* Nothing to check */ + }, + [&](const DerivationOutput::Deferred &) { + /* Nothing to check */ + }, + [&](const DerivationOutput::Impure &) { + /* Nothing to check */ + }, + }, i.second.raw()); + } +} + + const Hash impureOutputHash = hashString(htSHA256, "impure"); nlohmann::json DerivationOutput::toJSON( @@ -916,10 +986,82 @@ nlohmann::json DerivationOutput::toJSON( return res; } + +DerivationOutput DerivationOutput::fromJSON( + const Store & store, std::string_view drvName, std::string_view outputName, + const nlohmann::json & _json, + const ExperimentalFeatureSettings & xpSettings) +{ + std::set keys; + auto json = (std::map) _json; + + for (const auto & [key, _] : json) + keys.insert(key); + + auto methodAlgo = [&]() -> std::pair { + std::string hashAlgo = json["hashAlgo"]; + auto method = FileIngestionMethod::Flat; + if (hashAlgo.substr(0, 2) == "r:") { + method = FileIngestionMethod::Recursive; + hashAlgo = hashAlgo.substr(2); + } + auto hashType = parseHashType(hashAlgo); + return { method, hashType }; + }; + + if (keys == (std::set { "path" })) { + return DerivationOutput::InputAddressed { + .path = store.parseStorePath((std::string) json["path"]), + }; + } + + else if (keys == (std::set { "path", "hashAlgo", "hash" })) { + auto [method, hashType] = methodAlgo(); + auto dof = DerivationOutput::CAFixed { + .hash = { + .method = method, + .hash = Hash::parseNonSRIUnprefixed((std::string) json["hash"], hashType), + }, + }; + if (dof.path(store, drvName, outputName) != store.parseStorePath((std::string) json["path"])) + throw Error("Path doesn't match derivation output"); + return dof; + } + + else if (keys == (std::set { "hashAlgo" })) { + xpSettings.require(Xp::CaDerivations); + auto [method, hashType] = methodAlgo(); + return DerivationOutput::CAFloating { + .method = method, + .hashType = hashType, + }; + } + + else if (keys == (std::set { })) { + return DerivationOutput::Deferred {}; + } + + else if (keys == (std::set { "hashAlgo", "impure" })) { + xpSettings.require(Xp::ImpureDerivations); + auto [method, hashType] = methodAlgo(); + return DerivationOutput::Impure { + .method = method, + .hashType = hashType, + }; + } + + else { + throw Error("invalid JSON for derivation output"); + } +} + + nlohmann::json Derivation::toJSON(const Store & store) const { nlohmann::json res = nlohmann::json::object(); + res["name"] = name; + { nlohmann::json & outputsObj = res["outputs"]; outputsObj = nlohmann::json::object(); @@ 
-950,4 +1092,43 @@ nlohmann::json Derivation::toJSON(const Store & store) const return res; } + +Derivation Derivation::fromJSON( + const Store & store, + const nlohmann::json & json) +{ + Derivation res; + + res.name = json["name"]; + + { + auto & outputsObj = json["outputs"]; + for (auto & [outputName, output] : outputsObj.items()) { + res.outputs.insert_or_assign( + outputName, + DerivationOutput::fromJSON(store, res.name, outputName, output)); + } + } + + { + auto & inputsList = json["inputSrcs"]; + for (auto & input : inputsList) + res.inputSrcs.insert(store.parseStorePath(static_cast(input))); + } + + { + auto & inputDrvsObj = json["inputDrvs"]; + for (auto & [inputDrvPath, inputOutputs] : inputDrvsObj.items()) + res.inputDrvs[store.parseStorePath(inputDrvPath)] = + static_cast(inputOutputs); + } + + res.platform = json["system"]; + res.builder = json["builder"]; + res.args = json["args"]; + res.env = json["env"]; + + return res; +} + } diff --git a/src/libstore/derivations.hh b/src/libstore/derivations.hh index 8456b29e7..d00b23b6d 100644 --- a/src/libstore/derivations.hh +++ b/src/libstore/derivations.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "path.hh" #include "types.hh" @@ -6,6 +7,7 @@ #include "content-address.hh" #include "repair-flag.hh" #include "sync.hh" +#include "comparator.hh" #include #include @@ -17,43 +19,83 @@ class Store; /* Abstract syntax of derivations. */ -/* The traditional non-fixed-output derivation type. */ +/** + * The traditional non-fixed-output derivation type. + */ struct DerivationOutputInputAddressed { StorePath path; + + GENERATE_CMP(DerivationOutputInputAddressed, me->path); }; -/* Fixed-output derivations, whose output paths are content addressed - according to that fixed output. */ +/** + * Fixed-output derivations, whose output paths are content + * addressed according to that fixed output. + */ struct DerivationOutputCAFixed { - FixedOutputHash hash; /* hash used for expected hash computation */ + /** + * hash used for expected hash computation + */ + FixedOutputHash hash; + + /** + * Return the \ref StorePath "store path" corresponding to this output + * + * @param drvName The name of the derivation this is an output of, without the `.drv`. + * @param outputName The name of this output. + */ StorePath path(const Store & store, std::string_view drvName, std::string_view outputName) const; + + GENERATE_CMP(DerivationOutputCAFixed, me->hash); }; -/* Floating-output derivations, whose output paths are content addressed, but - not fixed, and so are dynamically calculated from whatever the output ends - up being. */ +/** + * Floating-output derivations, whose output paths are content + * addressed, but not fixed, and so are dynamically calculated from + * whatever the output ends up being. + * */ struct DerivationOutputCAFloating { - /* information used for expected hash computation */ + /** + * How the file system objects will be serialized for hashing + */ FileIngestionMethod method; + + /** + * How the serialization will be hashed + */ HashType hashType; + + GENERATE_CMP(DerivationOutputCAFloating, me->method, me->hashType); }; -/* Input-addressed output which depends on a (CA) derivation whose hash isn't - * known yet. +/** + * Input-addressed output which depends on a (CA) derivation whose hash + * isn't known yet. 
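Editor's note: the new `Derivation::fromJSON` above is the inverse of `toJSON`. A minimal round-trip sketch, assuming a `store` reference and an already-parsed `drv` are available (the `roundTrip` helper is illustrative, not part of this patch):

```cpp
#include <cassert>
#include <nlohmann/json.hpp>
#include "derivations.hh"

// Round-trip a derivation through the JSON representation introduced above.
void roundTrip(nix::Store & store, const nix::Derivation & drv)
{
    nlohmann::json j = drv.toJSON(store);
    nix::Derivation drv2 = nix::Derivation::fromJSON(store, j);
    // GENERATE_CMP(Derivation, ...) added in this patch makes equality well-defined.
    assert(drv == drv2);
}
```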
*/ -struct DerivationOutputDeferred {}; +struct DerivationOutputDeferred { + GENERATE_CMP(DerivationOutputDeferred); +}; -/* Impure output which is moved to a content-addressed location (like - CAFloating) but isn't registered as a realization. +/** + * Impure output which is moved to a content-addressed location (like + * CAFloating) but isn't registered as a realization. */ struct DerivationOutputImpure { - /* information used for expected hash computation */ + /** + * How the file system objects will be serialized for hashing + */ FileIngestionMethod method; + + /** + * How the serialization will be hashed + */ HashType hashType; + + GENERATE_CMP(DerivationOutputImpure, me->method, me->hashType); }; typedef std::variant< @@ -64,6 +106,9 @@ typedef std::variant< DerivationOutputImpure > _DerivationOutputRaw; +/** + * A single output of a BasicDerivation (and Derivation). + */ struct DerivationOutput : _DerivationOutputRaw { using Raw = _DerivationOutputRaw; @@ -75,9 +120,12 @@ struct DerivationOutput : _DerivationOutputRaw using Deferred = DerivationOutputDeferred; using Impure = DerivationOutputImpure; - /* Note, when you use this function you should make sure that you're passing - the right derivation name. When in doubt, you should use the safer - interface provided by BasicDerivation::outputsAndOptPaths */ + /** + * \note when you use this function you should make sure that you're + * passing the right derivation name. When in doubt, you should use + * the safer interface provided by + * BasicDerivation::outputsAndOptPaths + */ std::optional path(const Store & store, std::string_view drvName, std::string_view outputName) const; inline const Raw & raw() const { @@ -88,30 +136,74 @@ struct DerivationOutput : _DerivationOutputRaw const Store & store, std::string_view drvName, std::string_view outputName) const; + /** + * @param xpSettings Stop-gap to avoid globals during unit tests. + */ + static DerivationOutput fromJSON( + const Store & store, + std::string_view drvName, + std::string_view outputName, + const nlohmann::json & json, + const ExperimentalFeatureSettings & xpSettings = experimentalFeatureSettings); }; typedef std::map DerivationOutputs; -/* These are analogues to the previous DerivationOutputs data type, but they - also contains, for each output, the (optional) store path in which it would - be written. To calculate values of these types, see the corresponding - functions in BasicDerivation */ +/** + * These are analogues to the previous DerivationOutputs data type, + * but they also contains, for each output, the (optional) store + * path in which it would be written. To calculate values of these + * types, see the corresponding functions in BasicDerivation. + */ typedef std::map>> DerivationOutputsAndOptPaths; -/* For inputs that are sub-derivations, we specify exactly which - output IDs we are interested in. */ +/** + * For inputs that are sub-derivations, we specify exactly which + * output IDs we are interested in. + */ typedef std::map DerivationInputs; +/** + * Input-addressed derivation types + */ struct DerivationType_InputAddressed { + /** + * True iff the derivation type can't be determined statically, + * for instance because it (transitively) depends on a content-addressed + * derivation. + */ bool deferred; }; +/** + * Content-addressed derivation types + */ struct DerivationType_ContentAddressed { + /** + * Whether the derivation should be built safely inside a sandbox. 
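Editor's note: `DerivationOutput` is a thin wrapper around a `std::variant`, and callers dispatch on `raw()` with the `overloaded` helper, exactly as `checkInvariants()` does above. A small illustrative helper (not part of the patch) following the same pattern:

```cpp
#include <string>
#include <variant>
#include "derivations.hh"
#include "util.hh"  // for nix::overloaded

// Illustrative only: name the variant alternative a given output belongs to.
std::string describeOutput(const nix::DerivationOutput & output)
{
    using Output = nix::DerivationOutput;
    return std::visit(nix::overloaded {
        [](const Output::InputAddressed &) { return "input-addressed"; },
        [](const Output::CAFixed &)        { return "fixed content-addressed"; },
        [](const Output::CAFloating &)     { return "floating content-addressed"; },
        [](const Output::Deferred &)       { return "deferred"; },
        [](const Output::Impure &)         { return "impure"; },
    }, output.raw());
}
```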
+ */ bool sandboxed; + /** + * Whether the derivation's outputs' content-addresses are "fixed" + * or "floating. + * + * - Fixed: content-addresses are written down as part of the + * derivation itself. If the outputs don't end up matching the + * build fails. + * + * - Floating: content-addresses are not written down, we do not + * know them until we perform the build. + */ bool fixed; }; +/** + * Impure derivation type + * + * This is similar at buil-time to the content addressed, not standboxed, not fixed + * type, but has some restrictions on its usage. + */ struct DerivationType_Impure { }; @@ -128,30 +220,38 @@ struct DerivationType : _DerivationTypeRaw { using ContentAddressed = DerivationType_ContentAddressed; using Impure = DerivationType_Impure; - /* Do the outputs of the derivation have paths calculated from their content, - or from the derivation itself? */ + /** + * Do the outputs of the derivation have paths calculated from their + * content, or from the derivation itself? + */ bool isCA() const; - /* Is the content of the outputs fixed a-priori via a hash? Never true for - non-CA derivations. */ + /** + * Is the content of the outputs fixed a priori via a hash? + * Never true for non-CA derivations. + */ bool isFixed() const; - /* Whether the derivation is fully sandboxed. If false, the - sandbox is opened up, e.g. the derivation has access to the - network. Note that whether or not we actually sandbox the - derivation is controlled separately. Always true for non-CA - derivations. */ + /** + * Whether the derivation is fully sandboxed. If false, the sandbox + * is opened up, e.g. the derivation has access to the network. Note + * that whether or not we actually sandbox the derivation is + * controlled separately. Always true for non-CA derivations. + */ bool isSandboxed() const; - /* Whether the derivation is expected to produce the same result - every time, and therefore it only needs to be built once. This - is only false for derivations that have the attribute '__impure - = true'. */ + /** + * Whether the derivation is expected to produce the same result + * every time, and therefore it only needs to be built once. This is + * only false for derivations that have the attribute '__impure = + * true'. + */ bool isPure() const; - /* Does the derivation knows its own output paths? - Only true when there's no floating-ca derivation involved in the - closure, or if fixed output. + /** + * Does the derivation knows its own output paths? + * Only true when there's no floating-ca derivation involved in the + * closure, or if fixed output. */ bool hasKnownOutputPaths() const; @@ -162,8 +262,14 @@ struct DerivationType : _DerivationTypeRaw { struct BasicDerivation { - DerivationOutputs outputs; /* keyed on symbolic IDs */ - StorePathSet inputSrcs; /* inputs that are sources */ + /** + * keyed on symbolic IDs + */ + DerivationOutputs outputs; + /** + * inputs that are sources + */ + StorePathSet inputSrcs; std::string platform; Path builder; Strings args; @@ -175,128 +281,195 @@ struct BasicDerivation bool isBuiltin() const; - /* Return true iff this is a fixed-output derivation. */ + /** + * Return true iff this is a fixed-output derivation. + */ DerivationType type() const; - /* Return the output names of a derivation. */ + /** + * Return the output names of a derivation. + */ StringSet outputNames() const; - /* Calculates the maps that contains all the DerivationOutputs, but - augmented with knowledge of the Store paths they would be written - into. 
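Editor's note: the `DerivationType` predicates above are what the rest of the code base queries instead of inspecting the variant directly. A purely illustrative helper combining two of them (the function name is invented):

```cpp
#include "derivations.hh"

// Illustrative only: a pure derivation whose output paths are known in
// advance is one whose paths can be reported without building anything.
bool outputsKnownStatically(const nix::Derivation & drv)
{
    auto type = drv.type();
    return type.isPure() && type.hasKnownOutputPaths();
}
```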
*/ + /** + * Calculates the maps that contains all the DerivationOutputs, but + * augmented with knowledge of the Store paths they would be written + * into. + */ DerivationOutputsAndOptPaths outputsAndOptPaths(const Store & store) const; static std::string_view nameFromPath(const StorePath & storePath); + + GENERATE_CMP(BasicDerivation, + me->outputs, + me->inputSrcs, + me->platform, + me->builder, + me->args, + me->env, + me->name); }; struct Derivation : BasicDerivation { - DerivationInputs inputDrvs; /* inputs that are sub-derivations */ + /** + * inputs that are sub-derivations + */ + DerivationInputs inputDrvs; - /* Print a derivation. */ + /** + * Print a derivation. + */ std::string unparse(const Store & store, bool maskOutputs, std::map * actualInputs = nullptr) const; - /* Return the underlying basic derivation but with these changes: - - 1. Input drvs are emptied, but the outputs of them that were used are - added directly to input sources. - - 2. Input placeholders are replaced with realized input store paths. */ + /** + * Return the underlying basic derivation but with these changes: + * + * 1. Input drvs are emptied, but the outputs of them that were used + * are added directly to input sources. + * + * 2. Input placeholders are replaced with realized input store + * paths. + */ std::optional tryResolve(Store & store) const; - /* Like the above, but instead of querying the Nix database for - realisations, uses a given mapping from input derivation paths - + output names to actual output store paths. */ + /** + * Like the above, but instead of querying the Nix database for + * realisations, uses a given mapping from input derivation paths + + * output names to actual output store paths. + */ std::optional tryResolve( Store & store, const std::map, StorePath> & inputDrvOutputs) const; + /* Check that the derivation is valid and does not present any + illegal states. + + This is mainly a matter of checking the outputs, where our C++ + representation supports all sorts of combinations we do not yet + allow. */ + void checkInvariants(Store & store, const StorePath & drvPath) const; + Derivation() = default; Derivation(const BasicDerivation & bd) : BasicDerivation(bd) { } Derivation(BasicDerivation && bd) : BasicDerivation(std::move(bd)) { } nlohmann::json toJSON(const Store & store) const; + static Derivation fromJSON( + const Store & store, + const nlohmann::json & json); + + GENERATE_CMP(Derivation, + static_cast(*me), + me->inputDrvs); }; class Store; -/* Write a derivation to the Nix store, and return its path. */ +/** + * Write a derivation to the Nix store, and return its path. + */ StorePath writeDerivation(Store & store, const Derivation & drv, RepairFlag repair = NoRepair, bool readOnly = false); -/* Read a derivation from a file. */ +/** + * Read a derivation from a file. + */ Derivation parseDerivation(const Store & store, std::string && s, std::string_view name); -// FIXME: remove +/** + * \todo Remove. + * + * Use Path::isDerivation instead. + */ bool isDerivation(std::string_view fileName); -/* Calculate the name that will be used for the store path for this - output. - - This is usually -, but is just when - the output name is "out". */ +/** + * Calculate the name that will be used for the store path for this + * output. + * + * This is usually -, but is just when + * the output name is "out". + */ std::string outputPathName(std::string_view drvName, std::string_view outputName); -// The hashes modulo of a derivation. 
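Editor's note: `outputPathName` is documented above as producing `<drvName>-<outputName>`, except that the output named `out` maps to just `<drvName>`. A sketch of that documented behaviour (not the actual implementation from `derivations.cc`):

```cpp
#include <string>
#include <string_view>

// Sketch of the documented behaviour of outputPathName().
std::string outputPathNameSketch(std::string_view drvName, std::string_view outputName)
{
    if (outputName == "out")
        return std::string(drvName);
    return std::string(drvName) + "-" + std::string(outputName);
}
```

For example, the `dev` output of a derivation named `hello-2.12` would contribute the store path name `hello-2.12-dev`, while its `out` output keeps the plain `hello-2.12`.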
-// -// Each output is given a hash, although in practice only the content-addressed -// derivations (fixed-output or not) will have a different hash for each -// output. +/** + * The hashes modulo of a derivation. + * + * Each output is given a hash, although in practice only the content-addressed + * derivations (fixed-output or not) will have a different hash for each + * output. + */ struct DrvHash { + /** + * Map from output names to hashes + */ std::map hashes; enum struct Kind : bool { - // Statically determined derivations. - // This hash will be directly used to compute the output paths + /** + * Statically determined derivations. + * This hash will be directly used to compute the output paths + */ Regular, - // Floating-output derivations (and their reverse dependencies). + + /** + * Floating-output derivations (and their reverse dependencies). + */ Deferred, }; + /** + * The kind of derivation this is, simplified for just "derivation hash + * modulo" purposes. + */ Kind kind; }; void operator |= (DrvHash::Kind & self, const DrvHash::Kind & other) noexcept; -/* Returns hashes with the details of fixed-output subderivations - expunged. - - A fixed-output derivation is a derivation whose outputs have a - specified content hash and hash algorithm. (Currently they must have - exactly one output (`out'), which is specified using the `outputHash' - and `outputHashAlgo' attributes, but the algorithm doesn't assume - this.) We don't want changes to such derivations to propagate upwards - through the dependency graph, changing output paths everywhere. - - For instance, if we change the url in a call to the `fetchurl' - function, we do not want to rebuild everything depending on it---after - all, (the hash of) the file being downloaded is unchanged. So the - *output paths* should not change. On the other hand, the *derivation - paths* should change to reflect the new dependency graph. - - For fixed-output derivations, this returns a map from the name of - each output to its hash, unique up to the output's contents. - - For regular derivations, it returns a single hash of the derivation - ATerm, after subderivations have been likewise expunged from that - derivation. +/** + * Returns hashes with the details of fixed-output subderivations + * expunged. + * + * A fixed-output derivation is a derivation whose outputs have a + * specified content hash and hash algorithm. (Currently they must have + * exactly one output (`out`), which is specified using the `outputHash` + * and `outputHashAlgo` attributes, but the algorithm doesn't assume + * this.) We don't want changes to such derivations to propagate upwards + * through the dependency graph, changing output paths everywhere. + * + * For instance, if we change the url in a call to the `fetchurl` + * function, we do not want to rebuild everything depending on it---after + * all, (the hash of) the file being downloaded is unchanged. So the + * *output paths* should not change. On the other hand, the *derivation + * paths* should change to reflect the new dependency graph. + * + * For fixed-output derivations, this returns a map from the name of + * each output to its hash, unique up to the output's contents. + * + * For regular derivations, it returns a single hash of the derivation + * ATerm, after subderivations have been likewise expunged from that + * derivation. 
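Editor's note: a usage sketch of the `DrvHash` structure above together with `hashDerivationModulo` (declared just below); `store` and `drv` are assumed to be supplied by the caller:

```cpp
#include "derivations.hh"
#include "util.hh"  // for nix::get

// Illustrative only: inspect the hash-modulo result for a derivation.
void inspectDrvHash(nix::Store & store, const nix::Derivation & drv)
{
    nix::DrvHash drvHash = nix::hashDerivationModulo(store, drv, /* maskOutputs */ true);

    // Deferred: this derivation is floating content-addressed, or depends on one.
    bool deferred = drvHash.kind == nix::DrvHash::Kind::Deferred;

    // get() returns a pointer, or nullptr if there is no output named "out".
    if (auto * h = nix::get(drvHash.hashes, "out"))
        /* use *h, e.g. to compute an output path */;

    (void) deferred;
}
```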
*/ DrvHash hashDerivationModulo(Store & store, const Derivation & drv, bool maskOutputs); -/* - Return a map associating each output to a hash that uniquely identifies its - derivation (modulo the self-references). - - FIXME: what is the Hash in this map? +/** + * Return a map associating each output to a hash that uniquely identifies its + * derivation (modulo the self-references). + * + * \todo What is the Hash in this map? */ std::map staticOutputHashes(Store & store, const Derivation & drv); -/* Memoisation of hashDerivationModulo(). */ +/** + * Memoisation of hashDerivationModulo(). + */ typedef std::map DrvHashes; // FIXME: global, though at least thread-safe. @@ -308,21 +481,25 @@ struct Sink; Source & readDerivation(Source & in, const Store & store, BasicDerivation & drv, std::string_view name); void writeDerivation(Sink & out, const Store & store, const BasicDerivation & drv); -/* This creates an opaque and almost certainly unique string - deterministically from the output name. - - It is used as a placeholder to allow derivations to refer to their - own outputs without needing to use the hash of a derivation in - itself, making the hash near-impossible to calculate. */ +/** + * This creates an opaque and almost certainly unique string + * deterministically from the output name. + * + * It is used as a placeholder to allow derivations to refer to their + * own outputs without needing to use the hash of a derivation in + * itself, making the hash near-impossible to calculate. + */ std::string hashPlaceholder(const std::string_view outputName); -/* This creates an opaque and almost certainly unique string - deterministically from a derivation path and output name. - - It is used as a placeholder to allow derivations to refer to - content-addressed paths whose content --- and thus the path - themselves --- isn't yet known. This occurs when a derivation has a - dependency which is a CA derivation. */ +/** + * This creates an opaque and almost certainly unique string + * deterministically from a derivation path and output name. + * + * It is used as a placeholder to allow derivations to refer to + * content-addressed paths whose content --- and thus the path + * themselves --- isn't yet known. This occurs when a derivation has a + * dependency which is a CA derivation. + */ std::string downstreamPlaceholder(const Store & store, const StorePath & drvPath, std::string_view outputName); extern const Hash impureOutputHash; diff --git a/src/libstore/derived-path.cc b/src/libstore/derived-path.cc index e0d86a42f..9a2ffda39 100644 --- a/src/libstore/derived-path.cc +++ b/src/libstore/derived-path.cc @@ -62,15 +62,31 @@ std::string DerivedPath::Opaque::to_string(const Store & store) const std::string DerivedPath::Built::to_string(const Store & store) const { return store.printStorePath(drvPath) - + "!" + + '^' + + outputs.to_string(); +} + +std::string DerivedPath::Built::to_string_legacy(const Store & store) const +{ + return store.printStorePath(drvPath) + + '!' 
+ outputs.to_string(); } std::string DerivedPath::to_string(const Store & store) const { - return std::visit( - [&](const auto & req) { return req.to_string(store); }, - this->raw()); + return std::visit(overloaded { + [&](const DerivedPath::Built & req) { return req.to_string(store); }, + [&](const DerivedPath::Opaque & req) { return req.to_string(store); }, + }, this->raw()); +} + +std::string DerivedPath::to_string_legacy(const Store & store) const +{ + return std::visit(overloaded { + [&](const DerivedPath::Built & req) { return req.to_string_legacy(store); }, + [&](const DerivedPath::Opaque & req) { return req.to_string(store); }, + }, this->raw()); } @@ -87,14 +103,24 @@ DerivedPath::Built DerivedPath::Built::parse(const Store & store, std::string_vi }; } -DerivedPath DerivedPath::parse(const Store & store, std::string_view s) +static inline DerivedPath parseWith(const Store & store, std::string_view s, std::string_view separator) { - size_t n = s.find("!"); + size_t n = s.find(separator); return n == s.npos ? (DerivedPath) DerivedPath::Opaque::parse(store, s) : (DerivedPath) DerivedPath::Built::parse(store, s.substr(0, n), s.substr(n + 1)); } +DerivedPath DerivedPath::parse(const Store & store, std::string_view s) +{ + return parseWith(store, s, "^"); +} + +DerivedPath DerivedPath::parseLegacy(const Store & store, std::string_view s) +{ + return parseWith(store, s, "!"); +} + RealisedPath::Set BuiltPath::toRealisedPaths(Store & store) const { RealisedPath::Set res; @@ -105,7 +131,7 @@ RealisedPath::Set BuiltPath::toRealisedPaths(Store & store) const auto drvHashes = staticOutputHashes(store, store.readDerivation(p.drvPath)); for (auto& [outputName, outputPath] : p.outputs) { - if (settings.isExperimentalFeatureEnabled( + if (experimentalFeatureSettings.isEnabled( Xp::CaDerivations)) { auto drvOutput = get(drvHashes, outputName); if (!drvOutput) diff --git a/src/libstore/derived-path.hh b/src/libstore/derived-path.hh index 9e0cce377..5f7acbebc 100644 --- a/src/libstore/derived-path.hh +++ b/src/libstore/derived-path.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "util.hh" #include "path.hh" @@ -47,8 +48,18 @@ struct DerivedPathBuilt { StorePath drvPath; OutputsSpec outputs; + /** + * Uses `^` as the separator + */ std::string to_string(const Store & store) const; - static DerivedPathBuilt parse(const Store & store, std::string_view, std::string_view); + /** + * Uses `!` as the separator + */ + std::string to_string_legacy(const Store & store) const; + /** + * The caller splits on the separator, so it works for both variants. + */ + static DerivedPathBuilt parse(const Store & store, std::string_view drvPath, std::string_view outputs); nlohmann::json toJSON(ref store) const; GENERATE_CMP(DerivedPathBuilt, me->drvPath, me->outputs); @@ -80,8 +91,22 @@ struct DerivedPath : _DerivedPathRaw { return static_cast(*this); } + /** + * Uses `^` as the separator + */ std::string to_string(const Store & store) const; + /** + * Uses `!` as the separator + */ + std::string to_string_legacy(const Store & store) const; + /** + * Uses `^` as the separator + */ static DerivedPath parse(const Store & store, std::string_view); + /** + * Uses `!` as the separator + */ + static DerivedPath parseLegacy(const Store & store, std::string_view); }; /** @@ -105,7 +130,7 @@ using _BuiltPathRaw = std::variant< >; /** - * A built path. Similar to a `DerivedPath`, but enriched with the corresponding + * A built path. Similar to a DerivedPath, but enriched with the corresponding * output path(s). 
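Editor's note: the split between `to_string`/`parse` (new `^` separator) and `to_string_legacy`/`parseLegacy` (old `!` separator) can be seen in a small sketch; `store` and a `DerivedPath::Built` value `req` are assumed to be in scope:

```cpp
#include <string>
#include "derived-path.hh"

// Illustrative only: render and re-parse a build request in both notations.
void showSeparators(const nix::Store & store, const nix::DerivedPath::Built & req)
{
    std::string modern = req.to_string(store);         // e.g. ".../foo.drv^out"
    std::string legacy = req.to_string_legacy(store);  // e.g. ".../foo.drv!out"

    // Each textual form is parsed back with the matching function; the
    // legacy form is presumably kept for backwards compatibility.
    nix::DerivedPath p1 = nix::DerivedPath::parse(store, modern);
    nix::DerivedPath p2 = nix::DerivedPath::parseLegacy(store, legacy);

    (void) p1; (void) p2;
}
```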
*/ struct BuiltPath : _BuiltPathRaw { diff --git a/src/libstore/dummy-store.cc b/src/libstore/dummy-store.cc index b4fbe0b70..74d6ed3b5 100644 --- a/src/libstore/dummy-store.cc +++ b/src/libstore/dummy-store.cc @@ -7,6 +7,13 @@ struct DummyStoreConfig : virtual StoreConfig { using StoreConfig::StoreConfig; const std::string name() override { return "Dummy Store"; } + + std::string doc() override + { + return + #include "dummy-store.md" + ; + } }; struct DummyStore : public virtual DummyStoreConfig, public virtual Store @@ -32,6 +39,14 @@ struct DummyStore : public virtual DummyStoreConfig, public virtual Store callback(nullptr); } + /** + * The dummy store is incapable of *not* trusting! :) + */ + virtual std::optional isTrustedClient() override + { + return Trusted; + } + static std::set uriSchemes() { return {"dummy"}; } @@ -56,6 +71,9 @@ struct DummyStore : public virtual DummyStoreConfig, public virtual Store void queryRealisationUncached(const DrvOutput &, Callback> callback) noexcept override { callback(nullptr); } + + virtual ref getFSAccessor() override + { unsupported("getFSAccessor"); } }; static RegisterStoreImplementation regDummyStore; diff --git a/src/libstore/dummy-store.md b/src/libstore/dummy-store.md new file mode 100644 index 000000000..eb7b4ba0d --- /dev/null +++ b/src/libstore/dummy-store.md @@ -0,0 +1,13 @@ +R"( + +**Store URL format**: `dummy://` + +This store type represents a store that contains no store paths and +cannot be written to. It's useful when you want to use the Nix +evaluator when no actual Nix store exists, e.g. + +```console +# nix eval --store dummy:// --expr '1 + 2' +``` + +)" diff --git a/src/libstore/export-import.cc b/src/libstore/export-import.cc index 9875da909..4eb838b68 100644 --- a/src/libstore/export-import.cc +++ b/src/libstore/export-import.cc @@ -16,7 +16,7 @@ void Store::exportPaths(const StorePathSet & paths, Sink & sink) //logger->incExpected(doneLabel, sorted.size()); for (auto & path : sorted) { - //Activity act(*logger, lvlInfo, format("exporting path '%s'") % path); + //Activity act(*logger, lvlInfo, "exporting path '%s'", path); sink << 1; exportPath(path, sink); //logger->incProgress(doneLabel); @@ -71,7 +71,7 @@ StorePaths Store::importPaths(Source & source, CheckSigsFlag checkSigs) auto path = parseStorePath(readString(source)); - //Activity act(*logger, lvlInfo, format("importing path '%s'") % info.path); + //Activity act(*logger, lvlInfo, "importing path '%s'", info.path); auto references = worker_proto::read(*this, source, Phantom {}); auto deriver = readString(source); diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index b25089ec3..2346accbe 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -88,6 +88,10 @@ struct curlFileTransfer : public FileTransfer {request.uri}, request.parentAct) , callback(std::move(callback)) , finalSink([this](std::string_view data) { + if (errorSink) { + (*errorSink)(data); + } + if (this->request.dataCallback) { auto httpStatus = getHTTPStatus(); @@ -163,8 +167,6 @@ struct curlFileTransfer : public FileTransfer } } - if (errorSink) - (*errorSink)({(char *) contents, realSize}); (*decompressionSink)({(char *) contents, realSize}); return realSize; @@ -183,7 +185,7 @@ struct curlFileTransfer : public FileTransfer { size_t realSize = size * nmemb; std::string line((char *) contents, realSize); - printMsg(lvlVomit, format("got header for '%s': %s") % request.uri % trim(line)); + printMsg(lvlVomit, "got header for '%s': %s", request.uri, 
trim(line)); static std::regex statusLine("HTTP/[^ ]+ +[0-9]+(.*)", std::regex::extended | std::regex::icase); std::smatch match; if (std::regex_match(line, match, statusLine)) { @@ -207,7 +209,7 @@ struct curlFileTransfer : public FileTransfer long httpStatus = 0; curl_easy_getinfo(req, CURLINFO_RESPONSE_CODE, &httpStatus); if (result.etag == request.expectedETag && httpStatus == 200) { - debug(format("shutting down on 200 HTTP response with expected ETag")); + debug("shutting down on 200 HTTP response with expected ETag"); return 0; } } else if (name == "content-encoding") @@ -316,7 +318,7 @@ struct curlFileTransfer : public FileTransfer if (request.verifyTLS) { if (settings.caFile != "") - curl_easy_setopt(req, CURLOPT_CAINFO, settings.caFile.c_str()); + curl_easy_setopt(req, CURLOPT_CAINFO, settings.caFile.get().c_str()); } else { curl_easy_setopt(req, CURLOPT_SSL_VERIFYPEER, 0); curl_easy_setopt(req, CURLOPT_SSL_VERIFYHOST, 0); @@ -405,6 +407,10 @@ struct curlFileTransfer : public FileTransfer err = Misc; } else { // Don't bother retrying on certain cURL errors either + + // Allow selecting a subset of enum values + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wswitch-enum" switch (code) { case CURLE_FAILED_INIT: case CURLE_URL_MALFORMAT: @@ -425,6 +431,7 @@ struct curlFileTransfer : public FileTransfer default: // Shut up warnings break; } + #pragma GCC diagnostic pop } attempt++; @@ -829,7 +836,7 @@ void FileTransfer::download(FileTransferRequest && request, Sink & sink) { auto state(_state->lock()); - while (state->data.empty()) { + if (state->data.empty()) { if (state->quit) { if (state->exc) std::rethrow_exception(state->exc); @@ -837,6 +844,8 @@ void FileTransfer::download(FileTransferRequest && request, Sink & sink) } state.wait(state->avail); + + if (state->data.empty()) continue; } chunk = std::move(state->data); diff --git a/src/libstore/filetransfer.hh b/src/libstore/filetransfer.hh index 07d58f53a..378c6ff78 100644 --- a/src/libstore/filetransfer.hh +++ b/src/libstore/filetransfer.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "types.hh" #include "hash.hh" @@ -87,39 +88,56 @@ struct FileTransfer { virtual ~FileTransfer() { } - /* Enqueue a data transfer request, returning a future to the result of - the download. The future may throw a FileTransferError - exception. */ + /** + * Enqueue a data transfer request, returning a future to the result of + * the download. The future may throw a FileTransferError + * exception. + */ virtual void enqueueFileTransfer(const FileTransferRequest & request, Callback callback) = 0; std::future enqueueFileTransfer(const FileTransferRequest & request); - /* Synchronously download a file. */ + /** + * Synchronously download a file. + */ FileTransferResult download(const FileTransferRequest & request); - /* Synchronously upload a file. */ + /** + * Synchronously upload a file. + */ FileTransferResult upload(const FileTransferRequest & request); - /* Download a file, writing its data to a sink. The sink will be - invoked on the thread of the caller. */ + /** + * Download a file, writing its data to a sink. The sink will be + * invoked on the thread of the caller. + */ void download(FileTransferRequest && request, Sink & sink); enum Error { NotFound, Forbidden, Misc, Transient, Interrupted }; }; -/* Return a shared FileTransfer object. Using this object is preferred - because it enables connection reuse and HTTP/2 multiplexing. */ +/** + * @return a shared FileTransfer object. 
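Editor's note: a usage sketch for the streaming interface documented above, using the shared `FileTransfer` object; it assumes libstore has already been initialised by the caller:

```cpp
#include <iostream>
#include "filetransfer.hh"
#include "serialise.hh"  // for nix::StringSink

// Illustrative only: download a small file synchronously into a string.
void fetchCacheInfo()
{
    nix::FileTransferRequest request("https://cache.nixos.org/nix-cache-info");

    nix::StringSink sink;
    // The sink is invoked on the calling thread, per the comment above.
    nix::getFileTransfer()->download(std::move(request), sink);

    std::cout << sink.s << std::endl;
}
```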
+ * + * Using this object is preferred because it enables connection reuse + * and HTTP/2 multiplexing. + */ ref getFileTransfer(); -/* Return a new FileTransfer object. */ +/** + * @return a new FileTransfer object + * + * Prefer getFileTransfer() to this; see its docs for why. + */ ref makeFileTransfer(); class FileTransferError : public Error { public: FileTransfer::Error error; - std::optional response; // intentionally optional + /// intentionally optional + std::optional response; template FileTransferError(FileTransfer::Error error, std::optional response, const Args & ... args); diff --git a/src/libstore/fs-accessor.hh b/src/libstore/fs-accessor.hh index c825e84f2..1df19e647 100644 --- a/src/libstore/fs-accessor.hh +++ b/src/libstore/fs-accessor.hh @@ -1,11 +1,14 @@ #pragma once +///@file #include "types.hh" namespace nix { -/* An abstract class for accessing a filesystem-like structure, such - as a (possibly remote) Nix store or the contents of a NAR file. */ +/** + * An abstract class for accessing a filesystem-like structure, such + * as a (possibly remote) Nix store or the contents of a NAR file. + */ class FSAccessor { public: @@ -14,8 +17,17 @@ public: struct Stat { Type type = tMissing; - uint64_t fileSize = 0; // regular files only + /** + * regular files only + */ + uint64_t fileSize = 0; + /** + * regular files only + */ bool isExecutable = false; // regular files only + /** + * regular files only + */ uint64_t narOffset = 0; // regular files only }; diff --git a/src/libstore/gc-store.hh b/src/libstore/gc-store.hh index b3cbbad74..2c26c65c4 100644 --- a/src/libstore/gc-store.hh +++ b/src/libstore/gc-store.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "store-api.hh" @@ -11,19 +12,20 @@ typedef std::unordered_map> Roots; struct GCOptions { - /* Garbage collector operation: - - - `gcReturnLive': return the set of paths reachable from - (i.e. in the closure of) the roots. - - - `gcReturnDead': return the set of paths not reachable from - the roots. - - - `gcDeleteDead': actually delete the latter set. - - - `gcDeleteSpecific': delete the paths listed in - `pathsToDelete', insofar as they are not reachable. - */ + /** + * Garbage collector operation: + * + * - `gcReturnLive`: return the set of paths reachable from + * (i.e. in the closure of) the roots. + * + * - `gcReturnDead`: return the set of paths not reachable from + * the roots. + * + * - `gcDeleteDead`: actually delete the latter set. + * + * - `gcDeleteSpecific`: delete the paths listed in + * `pathsToDelete`, insofar as they are not reachable. + */ typedef enum { gcReturnLive, gcReturnDead, @@ -33,28 +35,38 @@ struct GCOptions GCAction action{gcDeleteDead}; - /* If `ignoreLiveness' is set, then reachability from the roots is - ignored (dangerous!). However, the paths must still be - unreferenced *within* the store (i.e., there can be no other - store paths that depend on them). */ + /** + * If `ignoreLiveness` is set, then reachability from the roots is + * ignored (dangerous!). However, the paths must still be + * unreferenced *within* the store (i.e., there can be no other + * store paths that depend on them). + */ bool ignoreLiveness{false}; - /* For `gcDeleteSpecific', the paths to delete. */ + /** + * For `gcDeleteSpecific`, the paths to delete. + */ StorePathSet pathsToDelete; - /* Stop after at least `maxFreed' bytes have been freed. */ + /** + * Stop after at least `maxFreed` bytes have been freed. 
+ */ uint64_t maxFreed{std::numeric_limits::max()}; }; struct GCResults { - /* Depending on the action, the GC roots, or the paths that would - be or have been deleted. */ + /** + * Depending on the action, the GC roots, or the paths that would + * be or have been deleted. + */ PathSet paths; - /* For `gcReturnDead', `gcDeleteDead' and `gcDeleteSpecific', the - number of bytes that would be or was freed. */ + /** + * For `gcReturnDead`, `gcDeleteDead` and `gcDeleteSpecific`, the + * number of bytes that would be or was freed. + */ uint64_t bytesFreed = 0; }; @@ -63,21 +75,27 @@ struct GcStore : public virtual Store { inline static std::string operationName = "Garbage collection"; - /* Add an indirect root, which is merely a symlink to `path' from - /nix/var/nix/gcroots/auto/. `path' is supposed - to be a symlink to a store path. The garbage collector will - automatically remove the indirect root when it finds that - `path' has disappeared. */ + /** + * Add an indirect root, which is merely a symlink to `path` from + * `/nix/var/nix/gcroots/auto/`. `path` is supposed + * to be a symlink to a store path. The garbage collector will + * automatically remove the indirect root when it finds that + * `path` has disappeared. + */ virtual void addIndirectRoot(const Path & path) = 0; - /* Find the roots of the garbage collector. Each root is a pair - (link, storepath) where `link' is the path of the symlink - outside of the Nix store that point to `storePath'. If - 'censor' is true, privacy-sensitive information about roots - found in /proc is censored. */ + /** + * Find the roots of the garbage collector. Each root is a pair + * `(link, storepath)` where `link` is the path of the symlink + * outside of the Nix store that point to `storePath`. If + * `censor` is true, privacy-sensitive information about roots + * found in `/proc` is censored. + */ virtual Roots findRoots(bool censor) = 0; - /* Perform a garbage collection. */ + /** + * Perform a garbage collection. + */ virtual void collectGarbage(const GCOptions & options, GCResults & results) = 0; }; diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc index 996f26a95..0038ec802 100644 --- a/src/libstore/gc.cc +++ b/src/libstore/gc.cc @@ -34,8 +34,7 @@ static void makeSymlink(const Path & link, const Path & target) createDirs(dirOf(link)); /* Create the new symlink. */ - Path tempLink = (format("%1%.tmp-%2%-%3%") - % link % getpid() % random()).str(); + Path tempLink = fmt("%1%.tmp-%2%-%3%", link, getpid(), random()); createSymlink(target, tempLink); /* Atomically replace the old one. */ @@ -197,7 +196,7 @@ void LocalStore::findTempRoots(Roots & tempRoots, bool censor) pid_t pid = std::stoi(i.name); - debug(format("reading temporary root file '%1%'") % path); + debug("reading temporary root file '%1%'", path); AutoCloseFD fd(open(path.c_str(), O_CLOEXEC | O_RDWR, 0666)); if (!fd) { /* It's okay if the file has disappeared. 
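Editor's note: a usage sketch of the `GcStore` interface documented above. Not every `Store` implements garbage collection, so callers first cast to the interface; the helper below is illustrative only:

```cpp
#include <iostream>
#include "gc-store.hh"

// Illustrative only: report which paths are dead without deleting anything.
void reportDeadPaths(nix::Store & store)
{
    auto * gcStore = dynamic_cast<nix::GcStore *>(&store);
    if (!gcStore)
        return;  // this store type does not support garbage collection

    nix::GCOptions options;
    options.action = nix::GCOptions::gcReturnDead;

    nix::GCResults results;
    gcStore->collectGarbage(options, results);

    std::cout << results.paths.size() << " dead paths, "
              << results.bytesFreed << " bytes reclaimable" << std::endl;
}
```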
*/ @@ -263,7 +262,7 @@ void LocalStore::findRoots(const Path & path, unsigned char type, Roots & roots) target = absPath(target, dirOf(path)); if (!pathExists(target)) { if (isInDir(path, stateDir + "/" + gcRootsDir + "/auto")) { - printInfo(format("removing stale link from '%1%' to '%2%'") % path % target); + printInfo("removing stale link from '%1%' to '%2%'", path, target); unlink(path.c_str()); } } else { @@ -372,29 +371,29 @@ void LocalStore::findRuntimeRoots(Roots & roots, bool censor) while (errno = 0, ent = readdir(procDir.get())) { checkInterrupt(); if (std::regex_match(ent->d_name, digitsRegex)) { - readProcLink(fmt("/proc/%s/exe" ,ent->d_name), unchecked); - readProcLink(fmt("/proc/%s/cwd", ent->d_name), unchecked); - - auto fdStr = fmt("/proc/%s/fd", ent->d_name); - auto fdDir = AutoCloseDir(opendir(fdStr.c_str())); - if (!fdDir) { - if (errno == ENOENT || errno == EACCES) - continue; - throw SysError("opening %1%", fdStr); - } - struct dirent * fd_ent; - while (errno = 0, fd_ent = readdir(fdDir.get())) { - if (fd_ent->d_name[0] != '.') - readProcLink(fmt("%s/%s", fdStr, fd_ent->d_name), unchecked); - } - if (errno) { - if (errno == ESRCH) - continue; - throw SysError("iterating /proc/%1%/fd", ent->d_name); - } - fdDir.reset(); - try { + readProcLink(fmt("/proc/%s/exe" ,ent->d_name), unchecked); + readProcLink(fmt("/proc/%s/cwd", ent->d_name), unchecked); + + auto fdStr = fmt("/proc/%s/fd", ent->d_name); + auto fdDir = AutoCloseDir(opendir(fdStr.c_str())); + if (!fdDir) { + if (errno == ENOENT || errno == EACCES) + continue; + throw SysError("opening %1%", fdStr); + } + struct dirent * fd_ent; + while (errno = 0, fd_ent = readdir(fdDir.get())) { + if (fd_ent->d_name[0] != '.') + readProcLink(fmt("%s/%s", fdStr, fd_ent->d_name), unchecked); + } + if (errno) { + if (errno == ESRCH) + continue; + throw SysError("iterating /proc/%1%/fd", ent->d_name); + } + fdDir.reset(); + auto mapFile = fmt("/proc/%s/maps", ent->d_name); auto mapLines = tokenizeString>(readFile(mapFile), "\n"); for (const auto & line : mapLines) { @@ -863,7 +862,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results) continue; } - printMsg(lvlTalkative, format("deleting unused link '%1%'") % path); + printMsg(lvlTalkative, "deleting unused link '%1%'", path); if (unlink(path.c_str()) == -1) throw SysError("deleting '%1%'", path); diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index 6f21aff06..4c66d08ee 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -7,12 +7,23 @@ #include #include +#include #include #include #include #include +#include + +#ifdef __GLIBC__ +#include +#include +#include +#endif + +#include "config-impl.hh" + namespace nix { @@ -30,28 +41,22 @@ static GlobalConfig::Register rSettings(&settings); Settings::Settings() : nixPrefix(NIX_PREFIX) - , nixStore(canonPath(getEnv("NIX_STORE_DIR").value_or(getEnv("NIX_STORE").value_or(NIX_STORE_DIR)))) - , nixDataDir(canonPath(getEnv("NIX_DATA_DIR").value_or(NIX_DATA_DIR))) - , nixLogDir(canonPath(getEnv("NIX_LOG_DIR").value_or(NIX_LOG_DIR))) - , nixStateDir(canonPath(getEnv("NIX_STATE_DIR").value_or(NIX_STATE_DIR))) - , nixConfDir(canonPath(getEnv("NIX_CONF_DIR").value_or(NIX_CONF_DIR))) + , nixStore(canonPath(getEnvNonEmpty("NIX_STORE_DIR").value_or(getEnvNonEmpty("NIX_STORE").value_or(NIX_STORE_DIR)))) + , nixDataDir(canonPath(getEnvNonEmpty("NIX_DATA_DIR").value_or(NIX_DATA_DIR))) + , nixLogDir(canonPath(getEnvNonEmpty("NIX_LOG_DIR").value_or(NIX_LOG_DIR))) + , 
nixStateDir(canonPath(getEnvNonEmpty("NIX_STATE_DIR").value_or(NIX_STATE_DIR))) + , nixConfDir(canonPath(getEnvNonEmpty("NIX_CONF_DIR").value_or(NIX_CONF_DIR))) , nixUserConfFiles(getUserConfigFiles()) - , nixBinDir(canonPath(getEnv("NIX_BIN_DIR").value_or(NIX_BIN_DIR))) + , nixBinDir(canonPath(getEnvNonEmpty("NIX_BIN_DIR").value_or(NIX_BIN_DIR))) , nixManDir(canonPath(NIX_MAN_DIR)) - , nixDaemonSocketFile(canonPath(getEnv("NIX_DAEMON_SOCKET_PATH").value_or(nixStateDir + DEFAULT_SOCKET_PATH))) + , nixDaemonSocketFile(canonPath(getEnvNonEmpty("NIX_DAEMON_SOCKET_PATH").value_or(nixStateDir + DEFAULT_SOCKET_PATH))) { buildUsersGroup = getuid() == 0 ? "nixbld" : ""; - lockCPU = getEnv("NIX_AFFINITY_HACK") == "1"; allowSymlinkedStore = getEnv("NIX_IGNORE_SYMLINK_STORE") == "1"; - caFile = getEnv("NIX_SSL_CERT_FILE").value_or(getEnv("SSL_CERT_FILE").value_or("")); - if (caFile == "") { - for (auto & fn : {"/etc/ssl/certs/ca-certificates.crt", "/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"}) - if (pathExists(fn)) { - caFile = fn; - break; - } - } + auto sslOverride = getEnv("NIX_SSL_CERT_FILE").value_or(getEnv("SSL_CERT_FILE").value_or("")); + if (sslOverride != "") + caFile = sslOverride; /* Backwards compatibility. */ auto s = getEnv("NIX_REMOTE_SYSTEMS"); @@ -166,17 +171,6 @@ StringSet Settings::getDefaultExtraPlatforms() return extraPlatforms; } -bool Settings::isExperimentalFeatureEnabled(const ExperimentalFeature & feature) -{ - auto & f = experimentalFeatures.get(); - return std::find(f.begin(), f.end(), feature) != f.end(); -} - -void Settings::requireExperimentalFeature(const ExperimentalFeature & feature) -{ - return; -} - bool Settings::isWSL1() { struct utsname utsbuf; @@ -186,6 +180,13 @@ bool Settings::isWSL1() return hasSuffix(utsbuf.release, "-Microsoft"); } +Path Settings::getDefaultSSLCertFile() +{ + for (auto & fn : {"/etc/ssl/certs/ca-certificates.crt", "/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"}) + if (pathExists(fn)) return fn; + return ""; +} + const std::string nixVersion = PACKAGE_VERSION; NLOHMANN_JSON_SERIALIZE_ENUM(SandboxMode, { @@ -194,18 +195,18 @@ NLOHMANN_JSON_SERIALIZE_ENUM(SandboxMode, { {SandboxMode::smDisabled, false}, }); -template<> void BaseSetting::set(const std::string & str, bool append) +template<> SandboxMode BaseSetting::parse(const std::string & str) const { - if (str == "true") value = smEnabled; - else if (str == "relaxed") value = smRelaxed; - else if (str == "false") value = smDisabled; + if (str == "true") return smEnabled; + else if (str == "relaxed") return smRelaxed; + else if (str == "false") return smDisabled; else throw UsageError("option '%s' has invalid value '%s'", name, str); } -template<> bool BaseSetting::isAppendable() +template<> struct BaseSetting::trait { - return false; -} + static constexpr bool appendable = false; +}; template<> std::string BaseSetting::to_string() const { @@ -237,23 +238,23 @@ template<> void BaseSetting::convertToArg(Args & args, const std::s }); } -void MaxBuildJobsSetting::set(const std::string & str, bool append) +unsigned int MaxBuildJobsSetting::parse(const std::string & str) const { - if (str == "auto") value = std::max(1U, std::thread::hardware_concurrency()); + if (str == "auto") return std::max(1U, std::thread::hardware_concurrency()); else { if (auto n = string2Int(str)) - value = *n; + return *n; else throw UsageError("configuration setting '%s' should be 'auto' or an integer", name); } } -void PluginFilesSetting::set(const std::string & str, bool append) +Paths 
PluginFilesSetting::parse(const std::string & str) const { if (pluginsLoaded) throw UsageError("plugin-files set after plugins were loaded, you may need to move the flag before the subcommand"); - BaseSetting::set(str, append); + return BaseSetting::parse(str); } @@ -290,6 +291,42 @@ void initPlugins() settings.pluginFiles.pluginsLoaded = true; } +static void preloadNSS() +{ + /* builtin:fetchurl can trigger a DNS lookup, which with glibc can trigger a dynamic library load of + one of the glibc NSS libraries in a sandboxed child, which will fail unless the library's already + been loaded in the parent. So we force a lookup of an invalid domain to force the NSS machinery to + load its lookup libraries in the parent before any child gets a chance to. */ + static std::once_flag dns_resolve_flag; + + std::call_once(dns_resolve_flag, []() { +#ifdef __GLIBC__ + /* On linux, glibc will run every lookup through the nss layer. + * That means every lookup goes, by default, through nscd, which acts as a local + * cache. + * Because we run builds in a sandbox, we also remove access to nscd otherwise + * lookups would leak into the sandbox. + * + * But now we have a new problem, we need to make sure the nss_dns backend that + * does the dns lookups when nscd is not available is loaded or available. + * + * We can't make it available without leaking nix's environment, so instead we'll + * load the backend, and configure nss so it does not try to run dns lookups + * through nscd. + * + * This is technically only used for builtins:fetch* functions so we only care + * about dns. + * + * All other platforms are unaffected. + */ + if (!dlopen(LIBNSS_DNS_SO, RTLD_NOW)) + warn("unable to load nss_dns backend"); + // FIXME: get hosts entry from nsswitch.conf. + __nss_configure_lookup("hosts", "files dns"); +#endif + }); +} + static bool initLibStoreDone = false; void assertLibStoreInitialized() { @@ -300,6 +337,24 @@ void assertLibStoreInitialized() { } void initLibStore() { + + initLibUtil(); + + if (sodium_init() == -1) + throw Error("could not initialise libsodium"); + + loadConfFile(); + + preloadNSS(); + + /* On macOS, don't use the per-session TMPDIR (as set e.g. by + sshd). This breaks build users because they don't have access + to the TMPDIR, in particular in ‘nix-store --serve’. */ +#if __APPLE__ + if (hasPrefix(getEnv("TMPDIR").value_or("/tmp"), "/var/folders/")) + unsetenv("TMPDIR"); +#endif + initLibStoreDone = true; } diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh index 0a4912f67..609cf53b8 100644 --- a/src/libstore/globals.hh +++ b/src/libstore/globals.hh @@ -1,9 +1,9 @@ #pragma once +///@file #include "types.hh" #include "config.hh" #include "util.hh" -#include "experimental-features.hh" #include #include @@ -26,7 +26,7 @@ struct MaxBuildJobsSetting : public BaseSetting options->addSetting(this); } - void set(const std::string & str, bool append = false) override; + unsigned int parse(const std::string & str) const override; }; struct PluginFilesSetting : public BaseSetting @@ -43,7 +43,7 @@ struct PluginFilesSetting : public BaseSetting options->addSetting(this); } - void set(const std::string & str, bool append = false) override; + Paths parse(const std::string & str) const override; }; const uint32_t maxIdsPerBuild = @@ -64,40 +64,63 @@ class Settings : public Config { bool isWSL1(); + Path getDefaultSSLCertFile(); + public: Settings(); Path nixPrefix; - /* The directory where we store sources and derived files. 
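Editor's note: with the refactor above, custom setting types override `parse()` (a pure string-to-value conversion) instead of mutating state in `set()`, as `MaxBuildJobsSetting` and `PluginFilesSetting` now do. A hypothetical setting type following the same pattern (the name and semantics are invented for illustration, and the exact `BaseSetting` constructor arguments are assumed):

```cpp
#include "config.hh"
#include "util.hh"  // for nix::string2Int, nix::UsageError

// Hypothetical setting type: accepts "0".."100" and rejects anything else.
struct PercentageSetting : public nix::BaseSetting<unsigned int>
{
    using nix::BaseSetting<unsigned int>::BaseSetting;

    unsigned int parse(const std::string & str) const override
    {
        auto n = nix::string2Int<unsigned int>(str);
        if (!n || *n > 100)
            throw nix::UsageError("setting '%s' expects a percentage between 0 and 100", name);
        return *n;
    }
};
```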
*/ + /** + * The directory where we store sources and derived files. + */ Path nixStore; Path nixDataDir; /* !!! fix */ - /* The directory where we log various operations. */ + /** + * The directory where we log various operations. + */ Path nixLogDir; - /* The directory where state is stored. */ + /** + * The directory where state is stored. + */ Path nixStateDir; - /* The directory where system configuration files are stored. */ + /** + * The directory where system configuration files are stored. + */ Path nixConfDir; - /* A list of user configuration files to load. */ + /** + * A list of user configuration files to load. + */ std::vector nixUserConfFiles; - /* The directory where the main programs are stored. */ + /** + * The directory where the main programs are stored. + */ Path nixBinDir; - /* The directory where the man pages are stored. */ + /** + * The directory where the man pages are stored. + */ Path nixManDir; - /* File name of the socket the daemon listens to. */ + /** + * File name of the socket the daemon listens to. + */ Path nixDaemonSocketFile; Setting storeUri{this, getEnv("NIX_REMOTE").value_or("auto"), "store", - "The default Nix store to use."}; + R"( + The [URL of the Nix store](@docroot@/command-ref/new-cli/nix3-help-stores.md#store-url-format) + to use for most operations. + See [`nix help-stores`](@docroot@/command-ref/new-cli/nix3-help-stores.md) + for supported store types and settings. + )"}; Setting keepFailed{this, false, "keep-failed", "Whether to keep temporary directories of failed builds."}; @@ -114,7 +137,9 @@ public: )", {"build-fallback"}}; - /* Whether to show build log output in real time. */ + /** + * Whether to show build log output in real time. + */ bool verboseBuild = true; Setting logLines{this, 10, "log-lines", @@ -150,8 +175,10 @@ public: )", {"build-cores"}, false}; - /* Read-only mode. Don't copy stuff to the store, don't change - the database. */ + /** + * Read-only mode. Don't copy stuff to the store, don't change + * the database. + */ bool readOnlyMode = false; Setting thisSystem{ @@ -201,7 +228,16 @@ public: {"build-timeout"}}; PathSetting buildHook{this, true, "", "build-hook", - "The path of the helper program that executes builds to remote machines."}; + R"( + The path to the helper program that executes remote builds. + + Nix communicates with the build hook over `stdio` using a custom protocol to request builds that cannot be performed directly by the Nix daemon. + The default value is the internal Nix binary that implements remote building. + + > **Important** + > + > Change this setting only if you really know what you’re doing. + )"}; Setting builders{ this, "@" + nixConfDir + "/machines", "builders", @@ -292,16 +328,6 @@ public: users in `build-users-group`. UIDs are allocated starting at 872415232 (0x34000000) on Linux and 56930 on macOS. - - > **Warning** - > This is an experimental feature. - - To enable it, add the following to [`nix.conf`](#): - - ``` - extra-experimental-features = auto-allocate-uids - auto-allocate-uids = true - ``` )"}; Setting startId{this, @@ -331,16 +357,6 @@ public: Cgroups are required and enabled automatically for derivations that require the `uid-range` system feature. - - > **Warning** - > This is an experimental feature. - - To enable it, add the following to [`nix.conf`](#): - - ``` - extra-experimental-features = cgroups - use-cgroups = true - ``` )"}; #endif @@ -442,9 +458,6 @@ public: )", {"env-keep-derivations"}}; - /* Whether to lock the Nix client and worker to the same CPU. 
*/ - bool lockCPU; - Setting sandboxMode{ this, #if __linux__ @@ -669,8 +682,9 @@ public: Strings{"https://cache.nixos.org/"}, "substituters", R"( - A list of URLs of substituters, separated by whitespace. Substituters - are tried based on their Priority value, which each substituter can set + A list of [URLs of Nix stores](@docroot@/command-ref/new-cli/nix3-help-stores.md#store-url-format) + to be used as substituters, separated by whitespace. + Substituters are tried based on their Priority value, which each substituter can set independently. Lower value means higher priority. The default is `https://cache.nixos.org`, with a Priority of 40. @@ -688,7 +702,8 @@ public: Setting trustedSubstituters{ this, {}, "trusted-substituters", R"( - A list of URLs of substituters, separated by whitespace. These are + A list of [URLs of Nix stores](@docroot@/command-ref/new-cli/nix3-help-stores.md#store-url-format), + separated by whitespace. These are not used by default, but can be enabled by users of the Nix daemon by specifying `--option substituters urls` on the command line. Unprivileged users are only allowed to pass a subset of the @@ -817,8 +832,22 @@ public: > `.netrc`. )"}; - /* Path to the SSL CA file used */ - Path caFile; + Setting caFile{ + this, getDefaultSSLCertFile(), "ssl-cert-file", + R"( + The path of a file containing CA certificates used to + authenticate `https://` downloads. Nix by default will use + the first of the following files that exists: + + 1. `/etc/ssl/certs/ca-certificates.crt` + 2. `/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt` + + The path can be overridden by the following environment + variables, in order of precedence: + + 1. `NIX_SSL_CERT_FILE` + 2. `SSL_CERT_FILE` + )"}; #if __linux__ Setting filterSyscalls{ @@ -923,13 +952,6 @@ public: are loaded as plugins (non-recursively). )"}; - Setting> experimentalFeatures{this, {}, "experimental-features", - "Experimental Nix features to enable."}; - - bool isExperimentalFeatureEnabled(const ExperimentalFeature &); - - void requireExperimentalFeature(const ExperimentalFeature &); - Setting narBufferSize{this, 32 * 1024 * 1024, "nar-buffer-size", "Maximum size of NARs before spilling them to disk."}; @@ -972,8 +994,10 @@ public: // FIXME: don't use a global variable. extern Settings settings; -/* This should be called after settings are initialized, but before - anything else */ +/** + * This should be called after settings are initialized, but before + * anything else + */ void initPlugins(); void loadConfFile(); @@ -983,12 +1007,16 @@ std::vector getUserConfigFiles(); extern const std::string nixVersion; -/* NB: This is not sufficient. You need to call initNix() */ +/** + * NB: This is not sufficient. You need to call initNix() + */ void initLibStore(); -/* It's important to initialize before doing _anything_, which is why we - call upon the programmer to handle this correctly. However, we only add - this in a key locations, so as not to litter the code. */ +/** + * It's important to initialize before doing _anything_, which is why we + * call upon the programmer to handle this correctly. However, we only add + * this in a key locations, so as not to litter the code. 
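Editor's note: a rough sketch of the start-up sequence implied by the comments above for a standalone program using libstore. The real Nix commands perform additional steps (logger, signal handling, CLI parsing), so treat the ordering here as an assumption-laden outline rather than the canonical initialisation path:

```cpp
#include "globals.hh"
#include "store-api.hh"

int main()
{
    // Initialises libutil and libsodium, loads nix.conf, preloads NSS.
    nix::initLibStore();

    // Per the comment above: after settings are initialized, before anything else.
    nix::initPlugins();

    // Any libstore entry point may now assert that initialisation happened.
    auto store = nix::openStore();  // honours the `store` setting / NIX_REMOTE

    return 0;
}
```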
+ */ void assertLibStoreInitialized(); } diff --git a/src/libstore/http-binary-cache-store.cc b/src/libstore/http-binary-cache-store.cc index 1479822a9..85c5eed4c 100644 --- a/src/libstore/http-binary-cache-store.cc +++ b/src/libstore/http-binary-cache-store.cc @@ -12,7 +12,14 @@ struct HttpBinaryCacheStoreConfig : virtual BinaryCacheStoreConfig { using BinaryCacheStoreConfig::BinaryCacheStoreConfig; - const std::string name() override { return "Http Binary Cache Store"; } + const std::string name() override { return "HTTP Binary Cache Store"; } + + std::string doc() override + { + return + #include "http-binary-cache-store.md" + ; + } }; class HttpBinaryCacheStore : public virtual HttpBinaryCacheStoreConfig, public virtual BinaryCacheStore @@ -187,6 +194,18 @@ protected: }}); } + /** + * This isn't actually necessary read only. We support "upsert" now, so we + * have a notion of authentication via HTTP POST/PUT. + * + * For now, we conservatively say we don't know. + * + * \todo try to expose our HTTP authentication status. + */ + std::optional isTrustedClient() override + { + return std::nullopt; + } }; static RegisterStoreImplementation regHttpBinaryCacheStore; diff --git a/src/libstore/http-binary-cache-store.md b/src/libstore/http-binary-cache-store.md new file mode 100644 index 000000000..20c26d0c2 --- /dev/null +++ b/src/libstore/http-binary-cache-store.md @@ -0,0 +1,8 @@ +R"( + +**Store URL format**: `http://...`, `https://...` + +This store allows a binary cache to be accessed via the HTTP +protocol. + +)" diff --git a/src/libstore/legacy-ssh-store.cc b/src/libstore/legacy-ssh-store.cc index 2c9dd2680..2012584e0 100644 --- a/src/libstore/legacy-ssh-store.cc +++ b/src/libstore/legacy-ssh-store.cc @@ -1,3 +1,4 @@ +#include "ssh-store-config.hh" #include "archive.hh" #include "pool.hh" #include "remote-store.hh" @@ -12,17 +13,24 @@ namespace nix { -struct LegacySSHStoreConfig : virtual StoreConfig +struct LegacySSHStoreConfig : virtual CommonSSHStoreConfig { - using StoreConfig::StoreConfig; - const Setting maxConnections{(StoreConfig*) this, 1, "max-connections", "maximum number of concurrent SSH connections"}; - const Setting sshKey{(StoreConfig*) this, "", "ssh-key", "path to an SSH private key"}; - const Setting sshPublicHostKey{(StoreConfig*) this, "", "base64-ssh-public-host-key", "The public half of the host's SSH key"}; - const Setting compress{(StoreConfig*) this, false, "compress", "whether to compress the connection"}; - const Setting remoteProgram{(StoreConfig*) this, "nix-store", "remote-program", "path to the nix-store executable on the remote system"}; - const Setting remoteStore{(StoreConfig*) this, "", "remote-store", "URI of the store on the remote system"}; + using CommonSSHStoreConfig::CommonSSHStoreConfig; - const std::string name() override { return "Legacy SSH Store"; } + const Setting remoteProgram{(StoreConfig*) this, "nix-store", "remote-program", + "Path to the `nix-store` executable on the remote machine."}; + + const Setting maxConnections{(StoreConfig*) this, 1, "max-connections", + "Maximum number of concurrent SSH connections."}; + + const std::string name() override { return "SSH Store"; } + + std::string doc() override + { + return + #include "legacy-ssh-store.md" + ; + } }; struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Store @@ -51,6 +59,7 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor LegacySSHStore(const std::string & scheme, const std::string & host, const Params & params) : 
StoreConfig(params) + , CommonSSHStoreConfig(params) , LegacySSHStoreConfig(params) , Store(params) , host(host) @@ -147,7 +156,7 @@ struct LegacySSHStore : public virtual LegacySSHStoreConfig, public virtual Stor throw Error("NAR hash is now mandatory"); info->narHash = Hash::parseAnyPrefixed(s); } - info->ca = parseContentAddressOpt(readString(conn->from)); + info->ca = ContentAddress::parseOpt(readString(conn->from)); info->sigs = readStrings(conn->from); auto s = readString(conn->from); @@ -278,19 +287,18 @@ public: conn->to.flush(); - BuildResult status { - .path = DerivedPath::Built { - .drvPath = drvPath, - .outputs = OutputsSpec::All { }, - }, - }; + BuildResult status; status.status = (BuildResult::Status) readInt(conn->from); conn->from >> status.errorMsg; if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 3) conn->from >> status.timesBuilt >> status.isNonDeterministic >> status.startTime >> status.stopTime; if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 6) { - status.builtOutputs = worker_proto::read(*this, conn->from, Phantom {}); + auto builtOutputs = worker_proto::read(*this, conn->from, Phantom {}); + for (auto && [output, realisation] : builtOutputs) + status.builtOutputs.insert_or_assign( + std::move(output.outputName), + std::move(realisation)); } return status; } @@ -321,7 +329,7 @@ public: conn->to.flush(); - BuildResult result { .path = DerivedPath::Opaque { StorePath::dummy } }; + BuildResult result; result.status = (BuildResult::Status) readInt(conn->from); if (!result.success()) { @@ -333,6 +341,9 @@ public: void ensurePath(const StorePath & path) override { unsupported("ensurePath"); } + virtual ref getFSAccessor() override + { unsupported("getFSAccessor"); } + void computeFSClosure(const StorePathSet & paths, StorePathSet & out, bool flipDirection = false, bool includeOutputs = false, bool includeDerivers = false) override @@ -380,6 +391,15 @@ public: return conn->remoteVersion; } + /** + * The legacy ssh protocol doesn't support checking for trusted-user. + * Try using ssh-ng:// instead if you want to know. + */ + std::optional isTrustedClient() override + { + return std::nullopt; + } + void queryRealisationUncached(const DrvOutput &, Callback> callback) noexcept override // TODO: Implement diff --git a/src/libstore/legacy-ssh-store.md b/src/libstore/legacy-ssh-store.md new file mode 100644 index 000000000..043acebd6 --- /dev/null +++ b/src/libstore/legacy-ssh-store.md @@ -0,0 +1,8 @@ +R"( + +**Store URL format**: `ssh://[username@]hostname` + +This store type allows limited access to a remote store on another +machine via SSH. 
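For comparison with the sibling store documents in this diff, which each include a usage example, usage might look like the following (the hostname is a placeholder):

```console
# nix copy --to ssh://example.org nixpkgs#hello
```

The `isTrustedClient` comment above points to `ssh-ng://` as the richer protocol when trust information is needed.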
+ +)" diff --git a/src/libstore/local-binary-cache-store.cc b/src/libstore/local-binary-cache-store.cc index f20b1fa02..5481dd762 100644 --- a/src/libstore/local-binary-cache-store.cc +++ b/src/libstore/local-binary-cache-store.cc @@ -11,6 +11,13 @@ struct LocalBinaryCacheStoreConfig : virtual BinaryCacheStoreConfig using BinaryCacheStoreConfig::BinaryCacheStoreConfig; const std::string name() override { return "Local Binary Cache Store"; } + + std::string doc() override + { + return + #include "local-binary-cache-store.md" + ; + } }; class LocalBinaryCacheStore : public virtual LocalBinaryCacheStoreConfig, public virtual BinaryCacheStore @@ -88,6 +95,10 @@ protected: return paths; } + std::optional isTrustedClient() override + { + return Trusted; + } }; void LocalBinaryCacheStore::init() diff --git a/src/libstore/local-binary-cache-store.md b/src/libstore/local-binary-cache-store.md new file mode 100644 index 000000000..93fddc840 --- /dev/null +++ b/src/libstore/local-binary-cache-store.md @@ -0,0 +1,16 @@ +R"( + +**Store URL format**: `file://`*path* + +This store allows reading and writing a binary cache stored in *path* +in the local filesystem. If *path* does not exist, it will be created. + +For example, the following builds or downloads `nixpkgs#hello` into +the local store and then copies it to the binary cache in +`/tmp/binary-cache`: + +``` +# nix copy --to file:///tmp/binary-cache nixpkgs#hello +``` + +)" diff --git a/src/libstore/local-fs-store.hh b/src/libstore/local-fs-store.hh index 947707341..a03bb88f5 100644 --- a/src/libstore/local-fs-store.hh +++ b/src/libstore/local-fs-store.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "store-api.hh" #include "gc-store.hh" @@ -9,20 +10,28 @@ namespace nix { struct LocalFSStoreConfig : virtual StoreConfig { using StoreConfig::StoreConfig; + // FIXME: the (StoreConfig*) cast works around a bug in gcc that causes // it to omit the call to the Setting constructor. Clang works fine // either way. + const PathSetting rootDir{(StoreConfig*) this, true, "", - "root", "directory prefixed to all other paths"}; + "root", + "Directory prefixed to all other paths."}; + const PathSetting stateDir{(StoreConfig*) this, false, rootDir != "" ? rootDir + "/nix/var/nix" : settings.nixStateDir, - "state", "directory where Nix will store state"}; + "state", + "Directory where Nix will store state."}; + const PathSetting logDir{(StoreConfig*) this, false, rootDir != "" ? rootDir + "/nix/var/log/nix" : settings.nixLogDir, - "log", "directory where Nix will store state"}; + "log", + "directory where Nix will store log files."}; + const PathSetting realStoreDir{(StoreConfig*) this, false, rootDir != "" ? rootDir + "/nix/store" : storeDir, "real", - "physical path to the Nix store"}; + "Physical path of the Nix store."}; }; class LocalFSStore : public virtual LocalFSStoreConfig, @@ -39,7 +48,9 @@ public: void narFromPath(const StorePath & path, Sink & sink) override; ref getFSAccessor() override; - /* Register a permanent GC root. */ + /** + * Register a permanent GC root. 
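For the `addPermRoot` declaration that follows, a small usage sketch (illustrative only; `./result` is an arbitrary link name):

```cpp
#include "local-fs-store.hh"
#include "util.hh"   // absPath()

// Registers ./result as a permanent GC root pointing at `path` and
// returns the path of the created root link.
nix::Path makeResultLink(nix::ref<nix::LocalFSStore> store, const nix::StorePath & path)
{
    return store->addPermRoot(path, nix::absPath("./result"));
}
```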
+ */ Path addPermRoot(const StorePath & storePath, const Path & gcRoot); virtual Path getRealStoreDir() { return realStoreDir; } diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index 82edaa9bf..7fb312c37 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -44,6 +44,13 @@ namespace nix { +std::string LocalStoreConfig::doc() +{ + return + #include "local-store.md" + ; +} + struct LocalStore::State::Stmts { /* Some precompiled SQLite statements. */ SQLiteStmt RegisterValidPath; @@ -280,7 +287,7 @@ LocalStore::LocalStore(const Params & params) else if (curSchema == 0) { /* new store */ curSchema = nixSchemaVersion; openDB(*state, true); - writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str(), 0666, true); + writeFile(schemaPath, fmt("%1%", nixSchemaVersion), 0666, true); } else if (curSchema < nixSchemaVersion) { @@ -329,14 +336,14 @@ LocalStore::LocalStore(const Params & params) txn.commit(); } - writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str(), 0666, true); + writeFile(schemaPath, fmt("%1%", nixSchemaVersion), 0666, true); lockFile(globalLock.get(), ltRead, true); } else openDB(*state, false); - if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) { + if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) { migrateCASchema(state->db, dbDir + "/ca-schema", globalLock); } @@ -366,7 +373,7 @@ LocalStore::LocalStore(const Params & params) state->stmts->QueryPathFromHashPart.create(state->db, "select path from ValidPaths where path >= ? limit 1;"); state->stmts->QueryValidPaths.create(state->db, "select path from ValidPaths"); - if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) { + if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) { state->stmts->RegisterRealisedOutput.create(state->db, R"( insert into Realisations (drvPath, outputName, outputPath, signatures) @@ -413,6 +420,13 @@ LocalStore::LocalStore(const Params & params) } +LocalStore::LocalStore(std::string scheme, std::string path, const Params & params) + : LocalStore(params) +{ + throw UnimplementedError("LocalStore"); +} + + AutoCloseFD LocalStore::openGCLock() { Path fnGCLock = stateDir + "/gc.lock"; @@ -697,64 +711,9 @@ void canonicalisePathMetaData(const Path & path, } -void LocalStore::checkDerivationOutputs(const StorePath & drvPath, const Derivation & drv) -{ - assert(drvPath.isDerivation()); - std::string drvName(drvPath.name()); - drvName = drvName.substr(0, drvName.size() - drvExtension.size()); - - auto envHasRightPath = [&](const StorePath & actual, const std::string & varName) - { - auto j = drv.env.find(varName); - if (j == drv.env.end() || parseStorePath(j->second) != actual) - throw Error("derivation '%s' has incorrect environment variable '%s', should be '%s'", - printStorePath(drvPath), varName, printStorePath(actual)); - }; - - - // Don't need the answer, but do this anyways to assert is proper - // combination. The code below is more general and naturally allows - // combinations that are currently prohibited. 
- drv.type(); - - std::optional hashesModulo; - for (auto & i : drv.outputs) { - std::visit(overloaded { - [&](const DerivationOutput::InputAddressed & doia) { - if (!hashesModulo) { - // somewhat expensive so we do lazily - hashesModulo = hashDerivationModulo(*this, drv, true); - } - auto currentOutputHash = get(hashesModulo->hashes, i.first); - if (!currentOutputHash) - throw Error("derivation '%s' has unexpected output '%s' (local-store / hashesModulo) named '%s'", - printStorePath(drvPath), printStorePath(doia.path), i.first); - StorePath recomputed = makeOutputPath(i.first, *currentOutputHash, drvName); - if (doia.path != recomputed) - throw Error("derivation '%s' has incorrect output '%s', should be '%s'", - printStorePath(drvPath), printStorePath(doia.path), printStorePath(recomputed)); - envHasRightPath(doia.path, i.first); - }, - [&](const DerivationOutput::CAFixed & dof) { - StorePath path = makeFixedOutputPath(dof.hash.method, dof.hash.hash, drvName); - envHasRightPath(path, i.first); - }, - [&](const DerivationOutput::CAFloating &) { - /* Nothing to check */ - }, - [&](const DerivationOutput::Deferred &) { - /* Nothing to check */ - }, - [&](const DerivationOutput::Impure &) { - /* Nothing to check */ - }, - }, i.second.raw()); - } -} - void LocalStore::registerDrvOutput(const Realisation & info, CheckSigsFlag checkSigs) { - settings.requireExperimentalFeature(Xp::CaDerivations); + experimentalFeatureSettings.require(Xp::CaDerivations); if (checkSigs == NoCheckSigs || !realisationIsUntrusted(info)) registerDrvOutput(info); else @@ -763,7 +722,7 @@ void LocalStore::registerDrvOutput(const Realisation & info, CheckSigsFlag check void LocalStore::registerDrvOutput(const Realisation & info) { - settings.requireExperimentalFeature(Xp::CaDerivations); + experimentalFeatureSettings.require(Xp::CaDerivations); retrySQLite([&]() { auto state(_state.lock()); if (auto oldR = queryRealisation_(*state, info.id)) { @@ -862,7 +821,7 @@ uint64_t LocalStore::addValidPath(State & state, derivations). Note that if this throws an error, then the DB transaction is rolled back, so the path validity registration above is undone. */ - if (checkOutputs) checkDerivationOutputs(info.path, drv); + if (checkOutputs) drv.checkInvariants(*this, info.path); for (auto & i : drv.outputsAndOptPaths(*this)) { /* Floating CA derivations have indeterminate output paths until @@ -930,7 +889,7 @@ std::shared_ptr LocalStore::queryPathInfoInternal(State & s if (s) info->sigs = tokenizeString(s, " "); s = (const char *) sqlite3_column_text(state.stmts->QueryPathInfo, 7); - if (s) info->ca = parseContentAddressOpt(s); + if (s) info->ca = ContentAddress::parseOpt(s); /* Get the references. */ auto useQueryReferences(state.stmts->QueryReferences.use()(info->id)); @@ -1052,7 +1011,7 @@ LocalStore::queryPartialDerivationOutputMap(const StorePath & path_) return outputs; }); - if (!settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) + if (!experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) return outputs; auto drv = readInvalidDerivation(path); @@ -1120,54 +1079,6 @@ StorePathSet LocalStore::querySubstitutablePaths(const StorePathSet & paths) } -// FIXME: move this, it's not specific to LocalStore. -void LocalStore::querySubstitutablePathInfos(const StorePathCAMap & paths, SubstitutablePathInfos & infos) -{ - if (!settings.useSubstitutes) return; - for (auto & sub : getDefaultSubstituters()) { - for (auto & path : paths) { - if (infos.count(path.first)) - // Choose first succeeding substituter. 
- continue; - - auto subPath(path.first); - - // Recompute store path so that we can use a different store root. - if (path.second) { - subPath = makeFixedOutputPathFromCA(path.first.name(), *path.second); - if (sub->storeDir == storeDir) - assert(subPath == path.first); - if (subPath != path.first) - debug("replaced path '%s' with '%s' for substituter '%s'", printStorePath(path.first), sub->printStorePath(subPath), sub->getUri()); - } else if (sub->storeDir != storeDir) continue; - - debug("checking substituter '%s' for path '%s'", sub->getUri(), sub->printStorePath(subPath)); - try { - auto info = sub->queryPathInfo(subPath); - - if (sub->storeDir != storeDir && !(info->isContentAddressed(*sub) && info->references.empty())) - continue; - - auto narInfo = std::dynamic_pointer_cast( - std::shared_ptr(info)); - infos.insert_or_assign(path.first, SubstitutablePathInfo{ - info->deriver, - info->references, - narInfo ? narInfo->fileSize : 0, - info->narSize}); - } catch (InvalidPath &) { - } catch (SubstituterDisabled &) { - } catch (Error & e) { - if (settings.tryFallback) - logError(e.info()); - else - throw; - } - } - } -} - - void LocalStore::registerValidPath(const ValidPathInfo & info) { registerValidPaths({{info.path, info}}); @@ -1209,8 +1120,7 @@ void LocalStore::registerValidPaths(const ValidPathInfos & infos) for (auto & [_, i] : infos) if (i.path.isDerivation()) { // FIXME: inefficient; we already loaded the derivation in addValidPath(). - checkDerivationOutputs(i.path, - readInvalidDerivation(i.path)); + readInvalidDerivation(i.path).checkInvariants(*this, i.path); } /* Do a topological sort of the paths. This will throw an @@ -1312,7 +1222,7 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source, printStorePath(info.path), info.narSize, hashResult.second); if (info.ca) { - if (auto foHash = std::get_if(&*info.ca)) { + if (auto foHash = std::get_if(&info.ca->raw)) { auto actualFoHash = hashCAPath( foHash->method, foHash->hash.type, @@ -1325,7 +1235,7 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source, actualFoHash.hash.to_string(Base32, true)); } } - if (auto textHash = std::get_if(&*info.ca)) { + if (auto textHash = std::get_if(&info.ca->raw)) { auto actualTextHash = hashString(htSHA256, readFile(realPath)); if (textHash->hash != actualTextHash) { throw Error("ca hash mismatch importing path '%s';\n specified: %s\n got: %s", @@ -1411,7 +1321,19 @@ StorePath LocalStore::addToStoreFromDump(Source & source0, std::string_view name auto [hash, size] = hashSink->finish(); - auto dstPath = makeFixedOutputPath(method, hash, name, references); + ContentAddressWithReferences desc = FixedOutputInfo { + .hash = { + .method = method, + .hash = hash, + }, + .references = { + .others = references, + // caller is not capable of creating a self-reference, because this is content-addressed without modulus + .self = false, + }, + }; + + auto dstPath = makeFixedOutputPathFromCA(name, desc); addTempRoot(dstPath); @@ -1431,7 +1353,7 @@ StorePath LocalStore::addToStoreFromDump(Source & source0, std::string_view name autoGC(); if (inMemory) { - StringSource dumpSource { dump }; + StringSource dumpSource { dump }; /* Restore from the NAR in memory. 
*/ if (method == FileIngestionMethod::Recursive) restorePath(realPath, dumpSource); @@ -1455,10 +1377,13 @@ StorePath LocalStore::addToStoreFromDump(Source & source0, std::string_view name optimisePath(realPath, repair); - ValidPathInfo info { dstPath, narHash.first }; + ValidPathInfo info { + *this, + name, + std::move(desc), + narHash.first + }; info.narSize = narHash.second; - info.references = references; - info.ca = FixedOutputHash { .method = method, .hash = hash }; registerValidPath(info); } @@ -1475,7 +1400,10 @@ StorePath LocalStore::addTextToStore( const StorePathSet & references, RepairFlag repair) { auto hash = hashString(htSHA256, s); - auto dstPath = makeTextPath(name, hash, references); + auto dstPath = makeTextPath(name, TextInfo { + { .hash = hash }, + references, + }); addTempRoot(dstPath); @@ -1560,7 +1488,7 @@ void LocalStore::invalidatePathChecked(const StorePath & path) bool LocalStore::verifyStore(bool checkContents, RepairFlag repair) { - printInfo(format("reading the Nix store...")); + printInfo("reading the Nix store..."); bool errors = false; @@ -1719,6 +1647,11 @@ unsigned int LocalStore::getProtocol() return PROTOCOL_VERSION; } +std::optional LocalStore::isTrustedClient() +{ + return Trusted; +} + #if defined(FS_IOC_SETFLAGS) && defined(FS_IOC_GETFLAGS) && defined(FS_IMMUTABLE_FL) @@ -1950,5 +1883,6 @@ std::optional LocalStore::getVersion() return nixVersion; } +static RegisterStoreImplementation regLocalStore; } // namespace nix diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh index a84eb7c26..55add18dd 100644 --- a/src/libstore/local-store.hh +++ b/src/libstore/local-store.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "sqlite.hh" @@ -18,10 +19,14 @@ namespace nix { -/* Nix store and database schema version. Version 1 (or 0) was Nix <= - 0.7. Version 2 was Nix 0.8 and 0.9. Version 3 is Nix 0.10. - Version 4 is Nix 0.11. Version 5 is Nix 0.12-0.16. Version 6 is - Nix 1.0. Version 7 is Nix 1.3. Version 10 is 2.0. */ +/** + * Nix store and database schema version. + * + * Version 1 (or 0) was Nix <= + * 0.7. Version 2 was Nix 0.8 and 0.9. Version 3 is Nix 0.10. + * Version 4 is Nix 0.11. Version 5 is Nix 0.12-0.16. Version 6 is + * Nix 1.0. Version 7 is Nix 1.3. Version 10 is 2.0. + */ const int nixSchemaVersion = 10; @@ -38,40 +43,52 @@ struct LocalStoreConfig : virtual LocalFSStoreConfig Setting requireSigs{(StoreConfig*) this, settings.requireSigs, - "require-sigs", "whether store paths should have a trusted signature on import"}; + "require-sigs", + "Whether store paths copied into this store should have a trusted signature."}; const std::string name() override { return "Local Store"; } -}; + std::string doc() override; +}; class LocalStore : public virtual LocalStoreConfig, public virtual LocalFSStore, public virtual GcStore { private: - /* Lock file used for upgrading. */ + /** + * Lock file used for upgrading. + */ AutoCloseFD globalLock; struct State { - /* The SQLite database object. */ + /** + * The SQLite database object. + */ SQLite db; struct Stmts; std::unique_ptr stmts; - /* The last time we checked whether to do an auto-GC, or an - auto-GC finished. */ + /** + * The last time we checked whether to do an auto-GC, or an + * auto-GC finished. + */ std::chrono::time_point lastGCCheck; - /* Whether auto-GC is running. If so, get gcFuture to wait for - the GC to finish. */ + /** + * Whether auto-GC is running. If so, get gcFuture to wait for + * the GC to finish. 
+ */ bool gcRunning = false; std::shared_future gcFuture; - /* How much disk space was available after the previous - auto-GC. If the current available disk space is below - minFree but not much below availAfterGC, then there is no - point in starting a new GC. */ + /** + * How much disk space was available after the previous + * auto-GC. If the current available disk space is below + * minFree but not much below availAfterGC, then there is no + * point in starting a new GC. + */ uint64_t availAfterGC = std::numeric_limits::max(); std::unique_ptr publicKeys; @@ -94,16 +111,26 @@ private: public: - // Hack for build-remote.cc. + /** + * Hack for build-remote.cc. + */ PathSet locksHeld; - /* Initialise the local store, upgrading the schema if - necessary. */ + /** + * Initialise the local store, upgrading the schema if + * necessary. + */ LocalStore(const Params & params); + LocalStore(std::string scheme, std::string path, const Params & params); ~LocalStore(); - /* Implementations of abstract store API methods. */ + static std::set uriSchemes() + { return {}; } + + /** + * Implementations of abstract store API methods. + */ std::string getUri() override; @@ -127,9 +154,6 @@ public: StorePathSet querySubstitutablePaths(const StorePathSet & paths) override; - void querySubstitutablePathInfos(const StorePathCAMap & paths, - SubstitutablePathInfos & infos) override; - bool pathInfoIsUntrusted(const ValidPathInfo &) override; bool realisationIsUntrusted(const Realisation & ) override; @@ -151,13 +175,19 @@ private: void createTempRootsFile(); - /* The file to which we write our temporary roots. */ + /** + * The file to which we write our temporary roots. + */ Sync _fdTempRoots; - /* The global GC lock. */ + /** + * The global GC lock. + */ Sync _fdGCLock; - /* Connection to the garbage collector. */ + /** + * Connection to the garbage collector. + */ Sync _fdRootsSocket; public: @@ -176,42 +206,54 @@ public: void collectGarbage(const GCOptions & options, GCResults & results) override; - /* Optimise the disk space usage of the Nix store by hard-linking - files with the same contents. */ + /** + * Optimise the disk space usage of the Nix store by hard-linking + * files with the same contents. + */ void optimiseStore(OptimiseStats & stats); void optimiseStore() override; - /* Optimise a single store path. Optionally, test the encountered - symlinks for corruption. */ + /** + * Optimise a single store path. Optionally, test the encountered + * symlinks for corruption. + */ void optimisePath(const Path & path, RepairFlag repair); bool verifyStore(bool checkContents, RepairFlag repair) override; - /* Register the validity of a path, i.e., that `path' exists, that - the paths referenced by it exists, and in the case of an output - path of a derivation, that it has been produced by a successful - execution of the derivation (or something equivalent). Also - register the hash of the file system contents of the path. The - hash must be a SHA-256 hash. */ + /** + * Register the validity of a path, i.e., that `path` exists, that + * the paths referenced by it exists, and in the case of an output + * path of a derivation, that it has been produced by a successful + * execution of the derivation (or something equivalent). Also + * register the hash of the file system contents of the path. The + * hash must be a SHA-256 hash. 
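A sketch of calling the `registerValidPath` declared just below, for a path already present on disk (not code from this diff; the caller must supply the SHA-256 NAR hash and size, as the comment above requires):

```cpp
#include "local-store.hh"
#include <ctime>

void registerExisting(nix::LocalStore & store,
    const nix::StorePath & path, const nix::Hash & narHash, uint64_t narSize)
{
    nix::ValidPathInfo info { path, narHash };  // narHash: SHA-256 hash of the NAR serialisation
    info.narSize = narSize;
    info.registrationTime = std::time(nullptr);
    store.registerValidPath(info);
}
```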
+ */ void registerValidPath(const ValidPathInfo & info); void registerValidPaths(const ValidPathInfos & infos); unsigned int getProtocol() override; + std::optional isTrustedClient() override; + void vacuumDB(); void repairPath(const StorePath & path) override; void addSignatures(const StorePath & storePath, const StringSet & sigs) override; - /* If free disk space in /nix/store if below minFree, delete - garbage until it exceeds maxFree. */ + /** + * If free disk space in /nix/store if below minFree, delete + * garbage until it exceeds maxFree. + */ void autoGC(bool sync = true); - /* Register the store path 'output' as the output named 'outputName' of - derivation 'deriver'. */ + /** + * Register the store path 'output' as the output named 'outputName' of + * derivation 'deriver'. + */ void registerDrvOutput(const Realisation & info) override; void registerDrvOutput(const Realisation & info, CheckSigsFlag checkSigs) override; void cacheDrvOutputMapping( @@ -241,7 +283,9 @@ private: void invalidatePath(State & state, const StorePath & path); - /* Delete a path from the Nix store. */ + /** + * Delete a path from the Nix store. + */ void invalidatePathChecked(const StorePath & path); void verifyPath(const Path & path, const StringSet & store, @@ -264,8 +308,6 @@ private: std::pair createTempDirInStore(); - void checkDerivationOutputs(const StorePath & drvPath, const Derivation & drv); - typedef std::unordered_set InodeHash; InodeHash loadInodeHash(); @@ -276,8 +318,10 @@ private: bool isValidPath_(State & state, const StorePath & path); void queryReferrers(State & state, const StorePath & path, StorePathSet & referrers); - /* Add signatures to a ValidPathInfo or Realisation using the secret keys - specified by the ‘secret-key-files’ option. */ + /** + * Add signatures to a ValidPathInfo or Realisation using the secret keys + * specified by the ‘secret-key-files’ option. + */ void signPathInfo(ValidPathInfo & info); void signRealisation(Realisation &); @@ -307,18 +351,23 @@ typedef std::pair Inode; typedef std::set InodesSeen; -/* "Fix", or canonicalise, the meta-data of the files in a store path - after it has been built. In particular: - - the last modification date on each file is set to 1 (i.e., - 00:00:01 1/1/1970 UTC) - - the permissions are set of 444 or 555 (i.e., read-only with or - without execute permission; setuid bits etc. are cleared) - - the owner and group are set to the Nix user and group, if we're - running as root. - If uidRange is not empty, this function will throw an error if it - encounters files owned by a user outside of the closed interval - [uidRange->first, uidRange->second]. -*/ +/** + * "Fix", or canonicalise, the meta-data of the files in a store path + * after it has been built. In particular: + * + * - the last modification date on each file is set to 1 (i.e., + * 00:00:01 1/1/1970 UTC) + * + * - the permissions are set of 444 or 555 (i.e., read-only with or + * without execute permission; setuid bits etc. are cleared) + * + * - the owner and group are set to the Nix user and group, if we're + * running as root. + * + * If uidRange is not empty, this function will throw an error if it + * encounters files owned by a user outside of the closed interval + * [uidRange->first, uidRange->second]. 
+ */ void canonicalisePathMetaData( const Path & path, std::optional> uidRange, diff --git a/src/libstore/local-store.md b/src/libstore/local-store.md new file mode 100644 index 000000000..8174df839 --- /dev/null +++ b/src/libstore/local-store.md @@ -0,0 +1,39 @@ +R"( + +**Store URL format**: `local`, *root* + +This store type accesses a Nix store in the local filesystem directly +(i.e. not via the Nix daemon). *root* is an absolute path that is +prefixed to other directories such as the Nix store directory. The +store pseudo-URL `local` denotes a store that uses `/` as its root +directory. + +A store that uses a *root* other than `/` is called a *chroot +store*. With such stores, the store directory is "logically" still +`/nix/store`, so programs stored in them can only be built and +executed by `chroot`-ing into *root*. Chroot stores only support +building and running on Linux when [`mount namespaces`](https://man7.org/linux/man-pages/man7/mount_namespaces.7.html) and [`user namespaces`](https://man7.org/linux/man-pages/man7/user_namespaces.7.html) are +enabled. + +For example, the following uses `/tmp/root` as the chroot environment +to build or download `nixpkgs#hello` and then execute it: + +```console +# nix run --store /tmp/root nixpkgs#hello +Hello, world! +``` + +Here, the "physical" store location is `/tmp/root/nix/store`, and +Nix's store metadata is in `/tmp/root/nix/var/nix/db`. + +It is also possible, but not recommended, to change the "logical" +location of the Nix store from its default of `/nix/store`. This makes +it impossible to use default substituters such as +`https://cache.nixos.org/`, and thus you may have to build everything +locally. Here is an example: + +```console +# nix build --store 'local?store=/tmp/my-nix/store&state=/tmp/my-nix/state&log=/tmp/my-nix/log' nixpkgs#hello +``` + +)" diff --git a/src/libstore/lock.cc b/src/libstore/lock.cc index 4fe1fcf56..7202a64b3 100644 --- a/src/libstore/lock.cc +++ b/src/libstore/lock.cc @@ -129,7 +129,7 @@ struct AutoUserLock : UserLock useUserNamespace = false; #endif - settings.requireExperimentalFeature(Xp::AutoAllocateUids); + experimentalFeatureSettings.require(Xp::AutoAllocateUids); assert(settings.startId > 0); assert(settings.uidCount % maxIdsPerBuild == 0); assert((uint64_t) settings.startId + (uint64_t) settings.uidCount <= std::numeric_limits::max()); diff --git a/src/libstore/lock.hh b/src/libstore/lock.hh index 7f1934510..1c268e1fb 100644 --- a/src/libstore/lock.hh +++ b/src/libstore/lock.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "types.hh" @@ -12,14 +13,18 @@ struct UserLock { virtual ~UserLock() { } - /* Get the first and last UID. */ + /** + * Get the first and last UID. + */ std::pair getUIDRange() { auto first = getUID(); return {first, first + getUIDCount() - 1}; } - /* Get the first UID. */ + /** + * Get the first UID. + */ virtual uid_t getUID() = 0; virtual uid_t getUIDCount() = 0; @@ -29,8 +34,10 @@ struct UserLock virtual std::vector getSupplementaryGIDs() = 0; }; -/* Acquire a user lock for a UID range of size `nrIds`. Note that this - may return nullptr if no user is available. */ +/** + * Acquire a user lock for a UID range of size `nrIds`. Note that this + * may return nullptr if no user is available. 
+ */ std::unique_ptr acquireUserLock(uid_t nrIds, bool useUserNamespace); bool useBuildUsers(); diff --git a/src/libstore/log-store.hh b/src/libstore/log-store.hh index e4d95bab6..a84f7dbeb 100644 --- a/src/libstore/log-store.hh +++ b/src/libstore/log-store.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "store-api.hh" @@ -9,8 +10,10 @@ struct LogStore : public virtual Store { inline static std::string operationName = "Build log storage and retrieval"; - /* Return the build log of the specified store path, if available, - or null otherwise. */ + /** + * Return the build log of the specified store path, if available, + * or null otherwise. + */ std::optional getBuildLog(const StorePath & path); virtual std::optional getBuildLogExact(const StorePath & path) = 0; diff --git a/src/libstore/machines.hh b/src/libstore/machines.hh index 834626de9..1adeaf1f0 100644 --- a/src/libstore/machines.hh +++ b/src/libstore/machines.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "types.hh" diff --git a/src/libstore/make-content-addressed.cc b/src/libstore/make-content-addressed.cc index 64d172918..53fe04704 100644 --- a/src/libstore/make-content-addressed.cc +++ b/src/libstore/make-content-addressed.cc @@ -27,18 +27,17 @@ std::map makeContentAddressed( StringMap rewrites; - StorePathSet references; - bool hasSelfReference = false; + StoreReferences refs; for (auto & ref : oldInfo->references) { if (ref == path) - hasSelfReference = true; + refs.self = true; else { auto i = remappings.find(ref); auto replacement = i != remappings.end() ? i->second : ref; // FIXME: warn about unremapped paths? if (replacement != ref) rewrites.insert_or_assign(srcStore.printStorePath(ref), srcStore.printStorePath(replacement)); - references.insert(std::move(replacement)); + refs.others.insert(std::move(replacement)); } } @@ -49,24 +48,28 @@ std::map makeContentAddressed( auto narModuloHash = hashModuloSink.finish().first; - auto dstPath = dstStore.makeFixedOutputPath( - FileIngestionMethod::Recursive, narModuloHash, path.name(), references, hasSelfReference); + ValidPathInfo info { + dstStore, + path.name(), + FixedOutputInfo { + .hash = { + .method = FileIngestionMethod::Recursive, + .hash = narModuloHash, + }, + .references = std::move(refs), + }, + Hash::dummy, + }; - printInfo("rewriting '%s' to '%s'", pathS, srcStore.printStorePath(dstPath)); + printInfo("rewriting '%s' to '%s'", pathS, dstStore.printStorePath(info.path)); StringSink sink2; - RewritingSink rsink2(oldHashPart, std::string(dstPath.hashPart()), sink2); + RewritingSink rsink2(oldHashPart, std::string(info.path.hashPart()), sink2); rsink2(sink.s); rsink2.flush(); - ValidPathInfo info { dstPath, hashString(htSHA256, sink2.s) }; - info.references = std::move(references); - if (hasSelfReference) info.references.insert(info.path); + info.narHash = hashString(htSHA256, sink2.s); info.narSize = sink.s.size(); - info.ca = FixedOutputHash { - .method = FileIngestionMethod::Recursive, - .hash = narModuloHash, - }; StringSource source(sink2.s); dstStore.addToStore(info, source); diff --git a/src/libstore/make-content-addressed.hh b/src/libstore/make-content-addressed.hh index c4a66ed41..2ce6ec7bc 100644 --- a/src/libstore/make-content-addressed.hh +++ b/src/libstore/make-content-addressed.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "store-api.hh" diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc index b28768459..89148d415 100644 --- a/src/libstore/misc.cc +++ b/src/libstore/misc.cc @@ -326,7 +326,7 @@ OutputPathMap resolveDerivedPath(Store & store, 
const DerivedPath::Built & bfd, throw Error( "the derivation '%s' doesn't have an output named '%s'", store.printStorePath(bfd.drvPath), output); - if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) { + if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) { DrvOutput outputId { *outputHash, output }; auto realisation = store.queryRealisation(outputId); if (!realisation) diff --git a/src/libstore/names.hh b/src/libstore/names.hh index 3977fc6cc..d82b99bb4 100644 --- a/src/libstore/names.hh +++ b/src/libstore/names.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include diff --git a/src/libstore/nar-accessor.cc b/src/libstore/nar-accessor.cc index 9a0003588..f0dfcb19b 100644 --- a/src/libstore/nar-accessor.cc +++ b/src/libstore/nar-accessor.cc @@ -275,6 +275,7 @@ json listNar(ref accessor, const Path & path, bool recurse) obj["type"] = "symlink"; obj["target"] = accessor->readLink(path); break; + case FSAccessor::Type::tMissing: default: throw Error("path '%s' does not exist in NAR", path); } diff --git a/src/libstore/nar-accessor.hh b/src/libstore/nar-accessor.hh index 7d998ae0b..5e19bd3c7 100644 --- a/src/libstore/nar-accessor.hh +++ b/src/libstore/nar-accessor.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include @@ -9,24 +10,30 @@ namespace nix { struct Source; -/* Return an object that provides access to the contents of a NAR - file. */ +/** + * Return an object that provides access to the contents of a NAR + * file. + */ ref makeNarAccessor(std::string && nar); ref makeNarAccessor(Source & source); -/* Create a NAR accessor from a NAR listing (in the format produced by - listNar()). The callback getNarBytes(offset, length) is used by the - readFile() method of the accessor to get the contents of files - inside the NAR. */ +/** + * Create a NAR accessor from a NAR listing (in the format produced by + * listNar()). The callback getNarBytes(offset, length) is used by the + * readFile() method of the accessor to get the contents of files + * inside the NAR. + */ typedef std::function GetNarBytes; ref makeLazyNarAccessor( const std::string & listing, GetNarBytes getNarBytes); -/* Write a JSON representation of the contents of a NAR (except file - contents). */ +/** + * Write a JSON representation of the contents of a NAR (except file + * contents). + */ nlohmann::json listNar(ref accessor, const Path & path, bool recurse); } diff --git a/src/libstore/nar-info-disk-cache.cc b/src/libstore/nar-info-disk-cache.cc index 2645f468b..c7176d30f 100644 --- a/src/libstore/nar-info-disk-cache.cc +++ b/src/libstore/nar-info-disk-cache.cc @@ -273,7 +273,7 @@ public: narInfo->deriver = StorePath(queryNAR.getStr(9)); for (auto & sig : tokenizeString(queryNAR.getStr(10), " ")) narInfo->sigs.insert(sig); - narInfo->ca = parseContentAddressOpt(queryNAR.getStr(11)); + narInfo->ca = ContentAddress::parseOpt(queryNAR.getStr(11)); return {oValid, narInfo}; }); diff --git a/src/libstore/nar-info-disk-cache.hh b/src/libstore/nar-info-disk-cache.hh index 4877f56d8..bbd1d05d5 100644 --- a/src/libstore/nar-info-disk-cache.hh +++ b/src/libstore/nar-info-disk-cache.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "ref.hh" #include "nar-info.hh" @@ -42,8 +43,10 @@ public: const std::string & uri, const DrvOutput & id) = 0; }; -/* Return a singleton cache object that can be used concurrently by - multiple threads. */ +/** + * Return a singleton cache object that can be used concurrently by + * multiple threads. 
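Stepping back to the `nar-accessor.hh` declarations above, a sketch of how they fit together to dump a store path's NAR listing as JSON (illustrative; assumes a valid store and path):

```cpp
#include "nar-accessor.hh"
#include "serialise.hh"    // StringSink
#include "store-api.hh"
#include <nlohmann/json.hpp>
#include <iostream>

void printListing(nix::ref<nix::Store> store, const nix::StorePath & path)
{
    nix::StringSink sink;
    store->narFromPath(path, sink);                      // serialise the path as a NAR
    auto accessor = nix::makeNarAccessor(std::move(sink.s));
    auto listing = nix::listNar(accessor, "/", /* recurse */ true);
    std::cout << listing.dump(2) << std::endl;           // JSON, minus file contents
}
```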
+ */ ref getNarInfoDiskCache(); ref getTestNarInfoDiskCache(Path dbPath); diff --git a/src/libstore/nar-info.cc b/src/libstore/nar-info.cc index 071d8355e..d17253741 100644 --- a/src/libstore/nar-info.cc +++ b/src/libstore/nar-info.cc @@ -7,15 +7,18 @@ namespace nix { NarInfo::NarInfo(const Store & store, const std::string & s, const std::string & whence) : ValidPathInfo(StorePath(StorePath::dummy), Hash(Hash::dummy)) // FIXME: hack { - auto corrupt = [&]() { - return Error("NAR info file '%1%' is corrupt", whence); + unsigned line = 1; + + auto corrupt = [&](const char * reason) { + return Error("NAR info file '%1%' is corrupt: %2%", whence, + std::string(reason) + (line > 0 ? " at line " + std::to_string(line) : "")); }; auto parseHashField = [&](const std::string & s) { try { return Hash::parseAnyPrefixed(s); } catch (BadHash &) { - throw corrupt(); + throw corrupt("bad hash"); } }; @@ -26,12 +29,12 @@ NarInfo::NarInfo(const Store & store, const std::string & s, const std::string & while (pos < s.size()) { size_t colon = s.find(':', pos); - if (colon == std::string::npos) throw corrupt(); + if (colon == std::string::npos) throw corrupt("expecting ':'"); std::string name(s, pos, colon - pos); size_t eol = s.find('\n', colon + 2); - if (eol == std::string::npos) throw corrupt(); + if (eol == std::string::npos) throw corrupt("expecting '\\n'"); std::string value(s, colon + 2, eol - colon - 2); @@ -47,7 +50,7 @@ NarInfo::NarInfo(const Store & store, const std::string & s, const std::string & fileHash = parseHashField(value); else if (name == "FileSize") { auto n = string2Int(value); - if (!n) throw corrupt(); + if (!n) throw corrupt("invalid FileSize"); fileSize = *n; } else if (name == "NarHash") { @@ -56,12 +59,12 @@ NarInfo::NarInfo(const Store & store, const std::string & s, const std::string & } else if (name == "NarSize") { auto n = string2Int(value); - if (!n) throw corrupt(); + if (!n) throw corrupt("invalid NarSize"); narSize = *n; } else if (name == "References") { auto refs = tokenizeString(value, " "); - if (!references.empty()) throw corrupt(); + if (!references.empty()) throw corrupt("extra References"); for (auto & r : refs) references.insert(StorePath(r)); } @@ -72,17 +75,26 @@ NarInfo::NarInfo(const Store & store, const std::string & s, const std::string & else if (name == "Sig") sigs.insert(value); else if (name == "CA") { - if (ca) throw corrupt(); + if (ca) throw corrupt("extra CA"); // FIXME: allow blank ca or require skipping field? - ca = parseContentAddressOpt(value); + ca = ContentAddress::parseOpt(value); } pos = eol + 1; + line += 1; } if (compression == "") compression = "bzip2"; - if (!havePath || !haveNarHash || url.empty() || narSize == 0) throw corrupt(); + if (!havePath || !haveNarHash || url.empty() || narSize == 0) { + line = 0; // don't include line information in the error + throw corrupt( + !havePath ? "StorePath missing" : + !haveNarHash ? "NarHash missing" : + url.empty() ? "URL missing" : + narSize == 0 ? 
"NarSize missing or zero" + : "?"); + } } std::string NarInfo::to_string(const Store & store) const diff --git a/src/libstore/nar-info.hh b/src/libstore/nar-info.hh index 01683ec73..5dbdafac3 100644 --- a/src/libstore/nar-info.hh +++ b/src/libstore/nar-info.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "types.hh" #include "hash.hh" @@ -16,6 +17,9 @@ struct NarInfo : ValidPathInfo uint64_t fileSize = 0; NarInfo() = delete; + NarInfo(const Store & store, std::string && name, ContentAddressWithReferences && ca, Hash narHash) + : ValidPathInfo(store, std::move(name), std::move(ca), narHash) + { } NarInfo(StorePath && path, Hash narHash) : ValidPathInfo(std::move(path), narHash) { } NarInfo(const ValidPathInfo & info) : ValidPathInfo(info) { } NarInfo(const Store & store, const std::string & s, const std::string & whence); diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc index 4d2781180..4a79cf4a1 100644 --- a/src/libstore/optimise-store.cc +++ b/src/libstore/optimise-store.cc @@ -55,7 +55,7 @@ LocalStore::InodeHash LocalStore::loadInodeHash() } if (errno) throw SysError("reading directory '%1%'", linksDir); - printMsg(lvlTalkative, format("loaded %1% hash inodes") % inodeHash.size()); + printMsg(lvlTalkative, "loaded %1% hash inodes", inodeHash.size()); return inodeHash; } @@ -73,7 +73,7 @@ Strings LocalStore::readDirectoryIgnoringInodes(const Path & path, const InodeHa checkInterrupt(); if (inodeHash.count(dirent->d_ino)) { - debug(format("'%1%' is already linked") % dirent->d_name); + debug("'%1%' is already linked", dirent->d_name); continue; } @@ -102,7 +102,7 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats, if (std::regex_search(path, std::regex("\\.app/Contents/.+$"))) { - debug(format("'%1%' is not allowed to be linked in macOS") % path); + debug("'%1%' is not allowed to be linked in macOS", path); return; } #endif @@ -146,7 +146,7 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats, contents of the symlink (i.e. the result of readlink()), not the contents of the target (which may not even exist). */ Hash hash = hashPath(htSHA256, path).first; - debug(format("'%1%' has hash '%2%'") % path % hash.to_string(Base32, true)); + debug("'%1%' has hash '%2%'", path, hash.to_string(Base32, true)); /* Check if this is a known hash. */ Path linkPath = linksDir + "/" + hash.to_string(Base32, false); @@ -196,11 +196,11 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats, auto stLink = lstat(linkPath); if (st.st_ino == stLink.st_ino) { - debug(format("'%1%' is already linked to '%2%'") % path % linkPath); + debug("'%1%' is already linked to '%2%'", path, linkPath); return; } - printMsg(lvlTalkative, format("linking '%1%' to '%2%'") % path % linkPath); + printMsg(lvlTalkative, "linking '%1%' to '%2%'", path, linkPath); /* Make the containing directory writable, but only if it's not the store itself (we don't want or need to mess with its @@ -213,8 +213,7 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats, its timestamp back to 0. */ MakeReadOnly makeReadOnly(mustToggle ? dirOfPath : ""); - Path tempLink = (format("%1%/.tmp-link-%2%-%3%") - % realStoreDir % getpid() % random()).str(); + Path tempLink = fmt("%1%/.tmp-link-%2%-%3%", realStoreDir, getpid(), random()); if (link(linkPath.c_str(), tempLink.c_str()) == -1) { if (errno == EMLINK) { @@ -222,7 +221,7 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats, systems). This is likely to happen with empty files. 
Just shrug and ignore. */ if (st.st_size) - printInfo(format("'%1%' has maximum number of links") % linkPath); + printInfo("'%1%' has maximum number of links", linkPath); return; } throw SysError("cannot link '%1%' to '%2%'", tempLink, linkPath); diff --git a/src/libstore/outputs-spec.hh b/src/libstore/outputs-spec.hh index 46bc35ebc..5a726fe90 100644 --- a/src/libstore/outputs-spec.hh +++ b/src/libstore/outputs-spec.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include #include @@ -9,6 +10,9 @@ namespace nix { +/** + * A non-empty set of outputs, specified by name + */ struct OutputNames : std::set { using std::set::set; @@ -18,6 +22,9 @@ struct OutputNames : std::set { : std::set(s) { assert(!empty()); } + /** + * Needs to be "inherited manually" + */ OutputNames(std::set && s) : std::set(s) { assert(!empty()); } @@ -28,6 +35,9 @@ struct OutputNames : std::set { OutputNames() = delete; }; +/** + * The set of all outputs, without needing to name them explicitly + */ struct AllOutputs : std::monostate { }; typedef std::variant _OutputsSpecRaw; @@ -36,7 +46,9 @@ struct OutputsSpec : _OutputsSpecRaw { using Raw = _OutputsSpecRaw; using Raw::Raw; - /* Force choosing a variant */ + /** + * Force choosing a variant + */ OutputsSpec() = delete; using Names = OutputNames; @@ -52,14 +64,20 @@ struct OutputsSpec : _OutputsSpecRaw { bool contains(const std::string & output) const; - /* Create a new OutputsSpec which is the union of this and that. */ + /** + * Create a new OutputsSpec which is the union of this and that. + */ OutputsSpec union_(const OutputsSpec & that) const; - /* Whether this OutputsSpec is a subset of that. */ + /** + * Whether this OutputsSpec is a subset of that. + */ bool isSubsetOf(const OutputsSpec & outputs) const; - /* Parse a string of the form 'output1,...outputN' or - '*', returning the outputs spec. */ + /** + * Parse a string of the form 'output1,...outputN' or '*', returning + * the outputs spec. + */ static OutputsSpec parse(std::string_view s); static std::optional parseOpt(std::string_view s); @@ -81,8 +99,10 @@ struct ExtendedOutputsSpec : _ExtendedOutputsSpecRaw { return static_cast(*this); } - /* Parse a string of the form 'prefix^output1,...outputN' or - 'prefix^*', returning the prefix and the extended outputs spec. */ + /** + * Parse a string of the form 'prefix^output1,...outputN' or + * 'prefix^*', returning the prefix and the extended outputs spec. + */ static std::pair parse(std::string_view s); static std::optional> parseOpt(std::string_view s); diff --git a/src/libstore/parsed-derivations.hh b/src/libstore/parsed-derivations.hh index bfb3857c0..71085a604 100644 --- a/src/libstore/parsed-derivations.hh +++ b/src/libstore/parsed-derivations.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "derivations.hh" #include "store-api.hh" diff --git a/src/libstore/path-info.cc b/src/libstore/path-info.cc index bd55a9d06..e60d7abe0 100644 --- a/src/libstore/path-info.cc +++ b/src/libstore/path-info.cc @@ -21,25 +21,45 @@ void ValidPathInfo::sign(const Store & store, const SecretKey & secretKey) sigs.insert(secretKey.signDetached(fingerprint(store))); } - -bool ValidPathInfo::isContentAddressed(const Store & store) const +std::optional ValidPathInfo::contentAddressWithReferences() const { - if (! ca) return false; + if (! 
ca) + return std::nullopt; - auto caPath = std::visit(overloaded { - [&](const TextHash & th) { - return store.makeTextPath(path.name(), th.hash, references); + return std::visit(overloaded { + [&](const TextHash & th) -> ContentAddressWithReferences { + assert(references.count(path) == 0); + return TextInfo { + .hash = th, + .references = references, + }; }, - [&](const FixedOutputHash & fsh) { + [&](const FixedOutputHash & foh) -> ContentAddressWithReferences { auto refs = references; bool hasSelfReference = false; if (refs.count(path)) { hasSelfReference = true; refs.erase(path); } - return store.makeFixedOutputPath(fsh.method, fsh.hash, path.name(), refs, hasSelfReference); - } - }, *ca); + return FixedOutputInfo { + .hash = foh, + .references = { + .others = std::move(refs), + .self = hasSelfReference, + }, + }; + }, + }, ca->raw); +} + +bool ValidPathInfo::isContentAddressed(const Store & store) const +{ + auto fullCaOpt = contentAddressWithReferences(); + + if (! fullCaOpt) + return false; + + auto caPath = store.makeFixedOutputPathFromCA(path.name(), *fullCaOpt); bool res = caPath == path; @@ -77,6 +97,29 @@ Strings ValidPathInfo::shortRefs() const } +ValidPathInfo::ValidPathInfo( + const Store & store, + std::string_view name, + ContentAddressWithReferences && ca, + Hash narHash) + : path(store.makeFixedOutputPathFromCA(name, ca)) + , narHash(narHash) +{ + std::visit(overloaded { + [this](TextInfo && ti) { + this->references = std::move(ti.references); + this->ca = std::move((TextHash &&) ti); + }, + [this](FixedOutputInfo && foi) { + this->references = std::move(foi.references.others); + if (foi.references.self) + this->references.insert(path); + this->ca = std::move((FixedOutputHash &&) foi); + }, + }, std::move(ca).raw); +} + + ValidPathInfo ValidPathInfo::read(Source & source, const Store & store, unsigned int format) { return read(source, store, format, store.parseStorePath(readString(source))); @@ -93,7 +136,7 @@ ValidPathInfo ValidPathInfo::read(Source & source, const Store & store, unsigned if (format >= 16) { source >> info.ultimate; info.sigs = readStrings(source); - info.ca = parseContentAddressOpt(readString(source)); + info.ca = ContentAddress::parseOpt(readString(source)); } return info; } diff --git a/src/libstore/path-info.hh b/src/libstore/path-info.hh index a7fcbd232..221523622 100644 --- a/src/libstore/path-info.hh +++ b/src/libstore/path-info.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "crypto.hh" #include "path.hh" @@ -18,8 +19,14 @@ struct SubstitutablePathInfo { std::optional deriver; StorePathSet references; - uint64_t downloadSize; /* 0 = unknown or inapplicable */ - uint64_t narSize; /* 0 = unknown */ + /** + * 0 = unknown or inapplicable + */ + uint64_t downloadSize; + /** + * 0 = unknown + */ + uint64_t narSize; }; typedef std::map SubstitutablePathInfos; @@ -29,35 +36,40 @@ struct ValidPathInfo { StorePath path; std::optional deriver; - // TODO document this + /** + * \todo document this + */ Hash narHash; StorePathSet references; time_t registrationTime = 0; uint64_t narSize = 0; // 0 = unknown uint64_t id; // internal use only - /* Whether the path is ultimately trusted, that is, it's a - derivation output that was built locally. */ + /** + * Whether the path is ultimately trusted, that is, it's a + * derivation output that was built locally. 
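A brief sketch using the `contentAddressWithReferences` helper added in `path-info.cc` above to distinguish content-addressed from input-addressed store objects (illustrative only):

```cpp
#include "path-info.hh"
#include <iostream>

void reportAddressing(const nix::ValidPathInfo & info)
{
    std::cout << info.path.to_string() << ": "
              << (info.contentAddressWithReferences()
                  ? "content-addressed" : "input-addressed")
              << std::endl;
}
```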
+ */ bool ultimate = false; StringSet sigs; // note: not necessarily verified - /* If non-empty, an assertion that the path is content-addressed, - i.e., that the store path is computed from a cryptographic hash - of the contents of the path, plus some other bits of data like - the "name" part of the path. Such a path doesn't need - signatures, since we don't have to trust anybody's claim that - the path is the output of a particular derivation. (In the - extensional store model, we have to trust that the *contents* - of an output path of a derivation were actually produced by - that derivation. In the intensional model, we have to trust - that a particular output path was produced by a derivation; the - path then implies the contents.) - - Ideally, the content-addressability assertion would just be a Boolean, - and the store path would be computed from the name component, ‘narHash’ - and ‘references’. However, we support many types of content addresses. - */ + /** + * If non-empty, an assertion that the path is content-addressed, + * i.e., that the store path is computed from a cryptographic hash + * of the contents of the path, plus some other bits of data like + * the "name" part of the path. Such a path doesn't need + * signatures, since we don't have to trust anybody's claim that + * the path is the output of a particular derivation. (In the + * extensional store model, we have to trust that the *contents* + * of an output path of a derivation were actually produced by + * that derivation. In the intensional model, we have to trust + * that a particular output path was produced by a derivation; the + * path then implies the contents.) + * + * Ideally, the content-addressability assertion would just be a Boolean, + * and the store path would be computed from the name component, ‘narHash’ + * and ‘references’. However, we support many types of content addresses. + */ std::optional ca; bool operator == (const ValidPathInfo & i) const @@ -68,27 +80,42 @@ struct ValidPathInfo && references == i.references; } - /* Return a fingerprint of the store path to be used in binary - cache signatures. It contains the store path, the base-32 - SHA-256 hash of the NAR serialisation of the path, the size of - the NAR, and the sorted references. The size field is strictly - speaking superfluous, but might prevent endless/excessive data - attacks. */ + /** + * Return a fingerprint of the store path to be used in binary + * cache signatures. It contains the store path, the base-32 + * SHA-256 hash of the NAR serialisation of the path, the size of + * the NAR, and the sorted references. The size field is strictly + * speaking superfluous, but might prevent endless/excessive data + * attacks. + */ std::string fingerprint(const Store & store) const; void sign(const Store & store, const SecretKey & secretKey); - /* Return true iff the path is verifiably content-addressed. */ + /** + * @return The `ContentAddressWithReferences` that determines the + * store path for a content-addressed store object, `std::nullopt` + * for an input-addressed store object. + */ + std::optional contentAddressWithReferences() const; + + /** + * @return true iff the path is verifiably content-addressed. + */ bool isContentAddressed(const Store & store) const; static const size_t maxSigs = std::numeric_limits::max(); - /* Return the number of signatures on this .narinfo that were - produced by one of the specified keys, or maxSigs if the path - is content-addressed. 
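For reference, the fingerprint described a few lines above is, in current Nix, a single semicolon-separated string of roughly the following shape (shown schematically, not taken from this diff):

```
1;<store path>;sha256:<base-32 NAR hash>;<NAR size>;<ref 1>,<ref 2>,...
```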
*/ + /** + * Return the number of signatures on this .narinfo that were + * produced by one of the specified keys, or maxSigs if the path + * is content-addressed. + */ size_t checkSignatures(const Store & store, const PublicKeys & publicKeys) const; - /* Verify a single signature. */ + /** + * Verify a single signature. + */ bool checkSignature(const Store & store, const PublicKeys & publicKeys, const std::string & sig) const; Strings shortRefs() const; @@ -98,6 +125,9 @@ struct ValidPathInfo ValidPathInfo(StorePath && path, Hash narHash) : path(std::move(path)), narHash(narHash) { }; ValidPathInfo(const StorePath & path, Hash narHash) : path(path), narHash(narHash) { }; + ValidPathInfo(const Store & store, + std::string_view name, ContentAddressWithReferences && ca, Hash narHash); + virtual ~ValidPathInfo() { } static ValidPathInfo read(Source & source, const Store & store, unsigned int format); diff --git a/src/libstore/path-regex.hh b/src/libstore/path-regex.hh index 6893c3876..4f8dc4c1f 100644 --- a/src/libstore/path-regex.hh +++ b/src/libstore/path-regex.hh @@ -1,4 +1,5 @@ #pragma once +///@file namespace nix { diff --git a/src/libstore/path-with-outputs.hh b/src/libstore/path-with-outputs.hh index 5d25656a5..d75850868 100644 --- a/src/libstore/path-with-outputs.hh +++ b/src/libstore/path-with-outputs.hh @@ -1,17 +1,19 @@ #pragma once +///@file #include "path.hh" #include "derived-path.hh" namespace nix { -/* This is a deprecated old type just for use by the old CLI, and older - versions of the RPC protocols. In new code don't use it; you want - `DerivedPath` instead. - - `DerivedPath` is better because it handles more cases, and does so more - explicitly without devious punning tricks. -*/ +/** + * This is a deprecated old type just for use by the old CLI, and older + * versions of the RPC protocols. In new code don't use it; you want + * `DerivedPath` instead. + * + * `DerivedPath` is better because it handles more cases, and does so more + * explicitly without devious punning tricks. + */ struct StorePathWithOutputs { StorePath path; @@ -30,9 +32,11 @@ std::pair parsePathWithOutputs(std::string_view s); class Store; -/* Split a string specifying a derivation and a set of outputs - (/nix/store/hash-foo!out1,out2,...) into the derivation path - and the outputs. */ +/** + * Split a string specifying a derivation and a set of outputs + * (/nix/store/hash-foo!out1,out2,...) into the derivation path + * and the outputs. + */ StorePathWithOutputs parsePathWithOutputs(const Store & store, std::string_view pathWithOutputs); StorePathWithOutputs followLinksToStorePathWithOutputs(const Store & store, std::string_view pathWithOutputs); diff --git a/src/libstore/path.hh b/src/libstore/path.hh index 1e5579b90..4ca6747b3 100644 --- a/src/libstore/path.hh +++ b/src/libstore/path.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include @@ -8,13 +9,22 @@ namespace nix { struct Hash; +/** + * \ref StorePath "Store path" is the fundamental reference type of Nix. + * A store paths refers to a Store object. + * + * See glossary.html#gloss-store-path for more information on a + * conceptual level. + */ class StorePath { std::string baseName; public: - /* Size of the hash part of store paths, in base-32 characters. */ + /** + * Size of the hash part of store paths, in base-32 characters. + */ constexpr static size_t HashLen = 32; // i.e. 
160 bits constexpr static size_t MaxPathLen = 211; @@ -45,8 +55,9 @@ public: return baseName != other.baseName; } - /* Check whether a file name ends with the extension for - derivations. */ + /** + * Check whether a file name ends with the extension for derivations. + */ bool isDerivation() const; std::string_view name() const @@ -67,7 +78,10 @@ public: typedef std::set StorePathSet; typedef std::vector StorePaths; -/* Extension of derivations in the Nix store. */ +/** + * The file extension of \ref Derivation derivations when serialized + * into store objects. + */ const std::string drvExtension = ".drv"; } diff --git a/src/libstore/pathlocks.cc b/src/libstore/pathlocks.cc index 42023cd0a..adc763e6a 100644 --- a/src/libstore/pathlocks.cc +++ b/src/libstore/pathlocks.cc @@ -96,7 +96,7 @@ bool PathLocks::lockPaths(const PathSet & paths, checkInterrupt(); Path lockPath = path + ".lock"; - debug(format("locking path '%1%'") % path); + debug("locking path '%1%'", path); AutoCloseFD fd; @@ -118,7 +118,7 @@ bool PathLocks::lockPaths(const PathSet & paths, } } - debug(format("lock acquired on '%1%'") % lockPath); + debug("lock acquired on '%1%'", lockPath); /* Check that the lock file hasn't become stale (i.e., hasn't been unlinked). */ @@ -130,7 +130,7 @@ bool PathLocks::lockPaths(const PathSet & paths, a lock on a deleted file. This means that other processes may create and acquire a lock on `lockPath', and proceed. So we must retry. */ - debug(format("open lock file '%1%' has become stale") % lockPath); + debug("open lock file '%1%' has become stale", lockPath); else break; } @@ -163,7 +163,7 @@ void PathLocks::unlock() "error (ignored): cannot close lock file on '%1%'", i.second); - debug(format("lock released on '%1%'") % i.second); + debug("lock released on '%1%'", i.second); } fds.clear(); diff --git a/src/libstore/pathlocks.hh b/src/libstore/pathlocks.hh index 5e3a734b4..4921df352 100644 --- a/src/libstore/pathlocks.hh +++ b/src/libstore/pathlocks.hh @@ -1,15 +1,20 @@ #pragma once +///@file #include "util.hh" namespace nix { -/* Open (possibly create) a lock file and return the file descriptor. - -1 is returned if create is false and the lock could not be opened - because it doesn't exist. Any other error throws an exception. */ +/** + * Open (possibly create) a lock file and return the file descriptor. + * -1 is returned if create is false and the lock could not be opened + * because it doesn't exist. Any other error throws an exception. + */ AutoCloseFD openLockFile(const Path & path, bool create); -/* Delete an open lock file. */ +/** + * Delete an open lock file. 
+ */ void deleteLockFile(const Path & path, int fd); enum LockType { ltRead, ltWrite, ltNone }; diff --git a/src/libstore/profiles.cc b/src/libstore/profiles.cc index c551c5f3e..ba5c8583f 100644 --- a/src/libstore/profiles.cc +++ b/src/libstore/profiles.cc @@ -64,7 +64,7 @@ std::pair> findGenerations(Path pro static void makeName(const Path & profile, GenerationNumber num, Path & outLink) { - Path prefix = (format("%1%-%2%") % profile % num).str(); + Path prefix = fmt("%1%-%2%", profile, num); outLink = prefix + "-link"; } @@ -269,7 +269,7 @@ void switchGeneration( void lockProfile(PathLocks & lock, const Path & profile) { - lock.lockPaths({profile}, (format("waiting for lock on profile '%1%'") % profile).str()); + lock.lockPaths({profile}, fmt("waiting for lock on profile '%1%'", profile)); lock.setDeletion(true); } @@ -282,28 +282,48 @@ std::string optimisticLockProfile(const Path & profile) Path profilesDir() { - auto profileRoot = createNixStateDir() + "/profiles"; + auto profileRoot = + (getuid() == 0) + ? rootProfilesDir() + : createNixStateDir() + "/profiles"; createDirs(profileRoot); return profileRoot; } +Path rootProfilesDir() +{ + return settings.nixStateDir + "/profiles/per-user/root"; +} + Path getDefaultProfile() { Path profileLink = settings.useXDGBaseDirectories ? createNixStateDir() + "/profile" : getHome() + "/.nix-profile"; try { - auto profile = - getuid() == 0 - ? settings.nixStateDir + "/profiles/default" - : profilesDir() + "/profile"; + auto profile = profilesDir() + "/profile"; if (!pathExists(profileLink)) { replaceSymlink(profile, profileLink); } + // Backwards compatibiliy measure: Make root's profile available as + // `.../default` as it's what NixOS and most of the init scripts expect + Path globalProfileLink = settings.nixStateDir + "/profiles/default"; + if (getuid() == 0 && !pathExists(globalProfileLink)) { + replaceSymlink(profile, globalProfileLink); + } return absPath(readLink(profileLink), dirOf(profileLink)); } catch (Error &) { return profileLink; } } +Path defaultChannelsDir() +{ + return profilesDir() + "/channels"; +} + +Path rootChannelsDir() +{ + return rootProfilesDir() + "/channels"; +} } diff --git a/src/libstore/profiles.hh b/src/libstore/profiles.hh index fbf95b850..4e1f42e83 100644 --- a/src/libstore/profiles.hh +++ b/src/libstore/profiles.hh @@ -1,6 +1,7 @@ #pragma once +///@file -#include "types.hh" + #include "types.hh" #include "pathlocks.hh" #include @@ -23,9 +24,11 @@ struct Generation typedef std::list Generations; -/* Returns the list of currently present generations for the specified - profile, sorted by generation number. Also returns the number of - the current generation. */ +/** + * Returns the list of currently present generations for the specified + * profile, sorted by generation number. Also returns the number of + * the current generation. + */ std::pair> findGenerations(Path profile); class LocalFSStore; @@ -46,35 +49,60 @@ void deleteGenerationsOlderThan(const Path & profile, std::string_view timeSpec, void switchLink(Path link, Path target); -/* Roll back a profile to the specified generation, or to the most - recent one older than the current. */ +/** + * Roll back a profile to the specified generation, or to the most + * recent one older than the current. + */ void switchGeneration( const Path & profile, std::optional dstGen, bool dryRun); -/* Ensure exclusive access to a profile. Any command that modifies - the profile first acquires this lock. */ +/** + * Ensure exclusive access to a profile. 
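As a quick summary of the directory selection above, here is a compact standalone sketch of the uid-based branching; `stateDir` and `userStateDir` are stand-ins for `settings.nixStateDir` and the result of `createNixStateDir()`, and the paths are illustrative only.

```cpp
// Compact sketch of the profile-directory selection introduced above.
// stateDir stands in for settings.nixStateDir, userStateDir for the result
// of createNixStateDir(); this is an illustration, not the real code.
#include <string>
#include <unistd.h>

std::string rootProfilesDirFor(const std::string & stateDir)
{
    return stateDir + "/profiles/per-user/root";
}

std::string profilesDirFor(const std::string & stateDir,
    const std::string & userStateDir)
{
    // root keeps its profiles under the global state directory; other users
    // get a directory under their per-user state directory.
    return getuid() == 0
        ? rootProfilesDirFor(stateDir)
        : userStateDir + "/profiles";
}

std::string channelsDirFor(const std::string & stateDir,
    const std::string & userStateDir)
{
    return profilesDirFor(stateDir, userStateDir) + "/channels";
}
```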
Any command that modifies + * the profile first acquires this lock. + */ void lockProfile(PathLocks & lock, const Path & profile); -/* Optimistic locking is used by long-running operations like `nix-env - -i'. Instead of acquiring the exclusive lock for the entire - duration of the operation, we just perform the operation - optimistically (without an exclusive lock), and check at the end - whether the profile changed while we were busy (i.e., the symlink - target changed). If so, the operation is restarted. Restarting is - generally cheap, since the build results are still in the Nix - store. Most of the time, only the user environment has to be - rebuilt. */ +/** + * Optimistic locking is used by long-running operations like `nix-env + * -i'. Instead of acquiring the exclusive lock for the entire + * duration of the operation, we just perform the operation + * optimistically (without an exclusive lock), and check at the end + * whether the profile changed while we were busy (i.e., the symlink + * target changed). If so, the operation is restarted. Restarting is + * generally cheap, since the build results are still in the Nix + * store. Most of the time, only the user environment has to be + * rebuilt. + */ std::string optimisticLockProfile(const Path & profile); -/* Creates and returns the path to a directory suitable for storing the user’s - profiles. */ +/** + * Create and return the path to a directory suitable for storing the user’s + * profiles. + */ Path profilesDir(); -/* Resolve the default profile (~/.nix-profile by default, $XDG_STATE_HOME/ - nix/profile if XDG Base Directory Support is enabled), and create if doesn't - exist */ +/** + * Return the path to the profile directory for root (but don't try creating it) + */ +Path rootProfilesDir(); + +/** + * Create and return the path to the file used for storing the users's channels + */ +Path defaultChannelsDir(); + +/** + * Return the path to the channel directory for root (but don't try creating it) + */ +Path rootChannelsDir(); + +/** + * Resolve the default profile (~/.nix-profile by default, + * $XDG_STATE_HOME/nix/profile if XDG Base Directory Support is enabled), + * and create if doesn't exist + */ Path getDefaultProfile(); } diff --git a/src/libstore/realisation.hh b/src/libstore/realisation.hh index 48d0283de..3922d1267 100644 --- a/src/libstore/realisation.hh +++ b/src/libstore/realisation.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include @@ -12,9 +13,25 @@ namespace nix { class Store; +/** + * A general `Realisation` key. + * + * This is similar to a `DerivedPath::Opaque`, but the derivation is + * identified by its "hash modulo" instead of by its store path. + */ struct DrvOutput { - // The hash modulo of the derivation + /** + * The hash modulo of the derivation. + * + * Computed from the derivation itself for most types of + * derivations, but computed from the (fixed) content address of the + * output for fixed-output derivations. + */ Hash drvHash; + + /** + * The name of the output. + */ std::string outputName; std::string to_string() const; @@ -59,6 +76,21 @@ struct Realisation { GENERATE_CMP(Realisation, me->id, me->outPath); }; +/** + * Collection type for a single derivation's outputs' `Realisation`s. + * + * Since these are the outputs of a single derivation, we know the + * output names are unique so we can use them as the map key. + */ +typedef std::map SingleDrvOutputs; + +/** + * Collection type for multiple derivations' outputs' `Realisation`s. 
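The optimistic-locking description above boils down to a compare-and-retry loop around the profile symlink. Here is a hedged sketch of that pattern; `performUpdate` is a hypothetical stand-in for the real update step, and this is not the exact `nix-env` code path.

```cpp
// Sketch of the optimistic-locking pattern described above. The token
// returned by optimisticLockProfile() reflects the profile symlink target;
// if it changed while we worked, the whole operation is simply retried.
// performUpdate() is hypothetical and stands in for the real update step.
#include "profiles.hh"
#include <string>

void performUpdate(const nix::Path & profile);   // hypothetical update step

void updateProfileOptimistically(const nix::Path & profile)
{
    while (true) {
        std::string before = nix::optimisticLockProfile(profile);

        performUpdate(profile);                  // do the work without holding a lock

        std::string after = nix::optimisticLockProfile(profile);
        if (after == before)
            return;                              // nobody else touched the profile
        // Otherwise retry; the build results are still in the store, so the
        // retry mostly just recreates the user environment.
    }
}
```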
+ * + * `DrvOutput` is used because in general the derivations are not all + * the same, so we need to identify firstly which derivation, and + * secondly which output of that derivation. + */ typedef std::map DrvOutputs; struct OpaquePath { diff --git a/src/libstore/references.cc b/src/libstore/references.cc index 3bb297fc8..345f4528b 100644 --- a/src/libstore/references.cc +++ b/src/libstore/references.cc @@ -39,8 +39,7 @@ static void search( if (!match) continue; std::string ref(s.substr(i, refLength)); if (hashes.erase(ref)) { - debug(format("found reference to '%1%' at offset '%2%'") - % ref % i); + debug("found reference to '%1%' at offset '%2%'", ref, i); seen.insert(ref); } ++i; diff --git a/src/libstore/references.hh b/src/libstore/references.hh index 6f381f96c..52d71b333 100644 --- a/src/libstore/references.hh +++ b/src/libstore/references.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "hash.hh" #include "path.hh" diff --git a/src/libstore/remote-fs-accessor.hh b/src/libstore/remote-fs-accessor.hh index 99f5544ef..e2673b6f6 100644 --- a/src/libstore/remote-fs-accessor.hh +++ b/src/libstore/remote-fs-accessor.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "fs-accessor.hh" #include "ref.hh" diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc index d1296627a..a6e8b9577 100644 --- a/src/libstore/remote-store.cc +++ b/src/libstore/remote-store.cc @@ -42,9 +42,43 @@ void write(const Store & store, Sink & out, const StorePath & storePath) } +std::optional read(const Store & store, Source & from, Phantom> _) +{ + auto temp = readNum(from); + switch (temp) { + case 0: + return std::nullopt; + case 1: + return { Trusted }; + case 2: + return { NotTrusted }; + default: + throw Error("Invalid trusted status from remote"); + } +} + +void write(const Store & store, Sink & out, const std::optional & optTrusted) +{ + if (!optTrusted) + out << (uint8_t)0; + else { + switch (*optTrusted) { + case Trusted: + out << (uint8_t)1; + break; + case NotTrusted: + out << (uint8_t)2; + break; + default: + assert(false); + }; + } +} + + ContentAddress read(const Store & store, Source & from, Phantom _) { - return parseContentAddress(readString(from)); + return ContentAddress::parse(readString(from)); } void write(const Store & store, Sink & out, const ContentAddress & ca) @@ -56,12 +90,12 @@ void write(const Store & store, Sink & out, const ContentAddress & ca) DerivedPath read(const Store & store, Source & from, Phantom _) { auto s = readString(from); - return DerivedPath::parse(store, s); + return DerivedPath::parseLegacy(store, s); } void write(const Store & store, Sink & out, const DerivedPath & req) { - out << req.to_string(store); + out << req.to_string_legacy(store); } @@ -91,10 +125,26 @@ void write(const Store & store, Sink & out, const DrvOutput & drvOutput) } -BuildResult read(const Store & store, Source & from, Phantom _) +KeyedBuildResult read(const Store & store, Source & from, Phantom _) { auto path = worker_proto::read(store, from, Phantom {}); - BuildResult res { .path = path }; + auto br = worker_proto::read(store, from, Phantom {}); + return KeyedBuildResult { + std::move(br), + /* .path = */ std::move(path), + }; +} + +void write(const Store & store, Sink & to, const KeyedBuildResult & res) +{ + worker_proto::write(store, to, res.path); + worker_proto::write(store, to, static_cast(res)); +} + + +BuildResult read(const Store & store, Source & from, Phantom _) +{ + BuildResult res; res.status = (BuildResult::Status) readInt(from); from >> res.errorMsg @@ -102,13 
+152,16 @@ BuildResult read(const Store & store, Source & from, Phantom _) >> res.isNonDeterministic >> res.startTime >> res.stopTime; - res.builtOutputs = worker_proto::read(store, from, Phantom {}); + auto builtOutputs = worker_proto::read(store, from, Phantom {}); + for (auto && [output, realisation] : builtOutputs) + res.builtOutputs.insert_or_assign( + std::move(output.outputName), + std::move(realisation)); return res; } void write(const Store & store, Sink & to, const BuildResult & res) { - worker_proto::write(store, to, res.path); to << res.status << res.errorMsg @@ -116,7 +169,10 @@ void write(const Store & store, Sink & to, const BuildResult & res) << res.isNonDeterministic << res.startTime << res.stopTime; - worker_proto::write(store, to, res.builtOutputs); + DrvOutputs builtOutputs; + for (auto & [output, realisation] : res.builtOutputs) + builtOutputs.insert_or_assign(realisation.id, realisation); + worker_proto::write(store, to, builtOutputs); } @@ -134,7 +190,7 @@ void write(const Store & store, Sink & out, const std::optional & sto std::optional read(const Store & store, Source & from, Phantom> _) { - return parseContentAddressOpt(readString(from)); + return ContentAddress::parseOpt(readString(from)); } void write(const Store & store, Sink & out, const std::optional & caOpt) @@ -226,6 +282,13 @@ void RemoteStore::initConnection(Connection & conn) conn.daemonNixVersion = readString(conn.from); } + if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 35) { + conn.remoteTrustsUs = worker_proto::read(*this, conn.from, Phantom> {}); + } else { + // We don't know the answer; protocol to old. + conn.remoteTrustsUs = std::nullopt; + } + auto ex = conn.processStderr(); if (ex) std::rethrow_exception(ex); } @@ -265,7 +328,7 @@ void RemoteStore::setOptions(Connection & conn) overrides.erase(settings.buildCores.name); overrides.erase(settings.useSubstitutes.name); overrides.erase(loggerSettings.showTrace.name); - overrides.erase(settings.experimentalFeatures.name); + overrides.erase(experimentalFeatureSettings.experimentalFeatures.name); overrides.erase(settings.pluginFiles.name); conn.to << overrides.size(); for (auto & i : overrides) @@ -545,7 +608,7 @@ ref RemoteStore::addCAToStore( conn->to << wopAddToStore << name - << renderContentAddressMethod(caMethod); + << caMethod.render(); worker_proto::write(*this, conn->to, references); conn->to << repair; @@ -603,7 +666,7 @@ ref RemoteStore::addCAToStore( } } - }, caMethod); + }, caMethod.raw); auto path = parseStorePath(readString(conn->from)); // Release our connection to prevent a deadlock in queryPathInfo(). conn_.reset(); @@ -824,7 +887,7 @@ void RemoteStore::buildPaths(const std::vector & drvPaths, BuildMod readInt(conn->from); } -std::vector RemoteStore::buildPathsWithResults( +std::vector RemoteStore::buildPathsWithResults( const std::vector & paths, BuildMode buildMode, std::shared_ptr evalStore) @@ -839,7 +902,7 @@ std::vector RemoteStore::buildPathsWithResults( writeDerivedPaths(*this, conn, paths); conn->to << buildMode; conn.processStderr(); - return worker_proto::read(*this, conn->from, Phantom> {}); + return worker_proto::read(*this, conn->from, Phantom> {}); } else { // Avoid deadlock. conn_.reset(); @@ -848,21 +911,25 @@ std::vector RemoteStore::buildPathsWithResults( // fails, but meh. 
buildPaths(paths, buildMode, evalStore); - std::vector results; + std::vector results; for (auto & path : paths) { std::visit( overloaded { [&](const DerivedPath::Opaque & bo) { - results.push_back(BuildResult { - .status = BuildResult::Substituted, - .path = bo, + results.push_back(KeyedBuildResult { + { + .status = BuildResult::Substituted, + }, + /* .path = */ bo, }); }, [&](const DerivedPath::Built & bfd) { - BuildResult res { - .status = BuildResult::Built, - .path = bfd, + KeyedBuildResult res { + { + .status = BuildResult::Built + }, + /* .path = */ bfd, }; OutputPathMap outputs; @@ -876,15 +943,15 @@ std::vector RemoteStore::buildPathsWithResults( "the derivation '%s' doesn't have an output named '%s'", printStorePath(bfd.drvPath), output); auto outputId = DrvOutput{ *outputHash, output }; - if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) { + if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) { auto realisation = queryRealisation(outputId); if (!realisation) throw MissingRealisation(outputId); - res.builtOutputs.emplace(realisation->id, *realisation); + res.builtOutputs.emplace(output, *realisation); } else { res.builtOutputs.emplace( - outputId, + output, Realisation { .id = outputId, .outPath = outputPath, @@ -911,12 +978,7 @@ BuildResult RemoteStore::buildDerivation(const StorePath & drvPath, const BasicD writeDerivation(conn->to, *this, drv); conn->to << buildMode; conn.processStderr(); - BuildResult res { - .path = DerivedPath::Built { - .drvPath = drvPath, - .outputs = OutputsSpec::All { }, - }, - }; + BuildResult res; res.status = (BuildResult::Status) readInt(conn->from); conn->from >> res.errorMsg; if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 29) { @@ -924,7 +986,10 @@ BuildResult RemoteStore::buildDerivation(const StorePath & drvPath, const BasicD } if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 28) { auto builtOutputs = worker_proto::read(*this, conn->from, Phantom {}); - res.builtOutputs = builtOutputs; + for (auto && [output, realisation] : builtOutputs) + res.builtOutputs.insert_or_assign( + std::move(output.outputName), + std::move(realisation)); } return res; } @@ -1082,6 +1147,11 @@ unsigned int RemoteStore::getProtocol() return conn->daemonVersion; } +std::optional RemoteStore::isTrustedClient() +{ + auto conn(getConnection()); + return conn->remoteTrustsUs; +} void RemoteStore::flushBadConnections() { diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh index 11d089cd2..a30466647 100644 --- a/src/libstore/remote-store.hh +++ b/src/libstore/remote-store.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include #include @@ -22,15 +23,19 @@ struct RemoteStoreConfig : virtual StoreConfig { using StoreConfig::StoreConfig; - const Setting maxConnections{(StoreConfig*) this, 1, - "max-connections", "maximum number of concurrent connections to the Nix daemon"}; + const Setting maxConnections{(StoreConfig*) this, 1, "max-connections", + "Maximum number of concurrent connections to the Nix daemon."}; - const Setting maxConnectionAge{(StoreConfig*) this, std::numeric_limits::max(), - "max-connection-age", "number of seconds to reuse a connection"}; + const Setting maxConnectionAge{(StoreConfig*) this, + std::numeric_limits::max(), + "max-connection-age", + "Maximum age of a connection before it is closed."}; }; -/* FIXME: RemoteStore is a misnomer - should be something like - DaemonStore. */ +/** + * \todo RemoteStore is a misnomer - should be something like + * DaemonStore. 
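The trust-status handshake added above uses a small integer encoding on the wire. Below is a standalone round-trip sketch of that 0/1/2 scheme, with a plain integer standing in for the worker-protocol `Source`/`Sink` plumbing; the `TrustedFlag` enum is copied from `store-api.hh` so the snippet compiles on its own.

```cpp
// Standalone round-trip sketch of the optional<TrustedFlag> encoding used in
// the worker protocol above: 0 = unknown, 1 = trusted, 2 = not trusted.
// A plain integer stands in for the Source/Sink serialisation plumbing.
#include <cstdint>
#include <optional>
#include <stdexcept>

enum TrustedFlag : bool { NotTrusted = false, Trusted = true };

uint64_t encodeTrusted(std::optional<TrustedFlag> t)
{
    if (!t) return 0;
    return *t == Trusted ? 1 : 2;
}

std::optional<TrustedFlag> decodeTrusted(uint64_t n)
{
    switch (n) {
        case 0: return std::nullopt;
        case 1: return Trusted;
        case 2: return NotTrusted;
        default: throw std::invalid_argument("invalid trusted status from remote");
    }
}
```

The "unknown" case exists because daemons speaking an older protocol never report their trust decision, as the version check above shows.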
+ */ class RemoteStore : public virtual RemoteStoreConfig, public virtual Store, public virtual GcStore, @@ -38,8 +43,6 @@ class RemoteStore : public virtual RemoteStoreConfig, { public: - virtual bool sameMachine() = 0; - RemoteStore(const Params & params); /* Implementations of abstract store API methods. */ @@ -68,7 +71,9 @@ public: void querySubstitutablePathInfos(const StorePathCAMap & paths, SubstitutablePathInfos & infos) override; - /* Add a content-addressable store path. `dump` will be drained. */ + /** + * Add a content-addressable store path. `dump` will be drained. + */ ref addCAToStore( Source & dump, std::string_view name, @@ -76,7 +81,9 @@ public: const StorePathSet & references, RepairFlag repair); - /* Add a content-addressable store path. Does not support references. `dump` will be drained. */ + /** + * Add a content-addressable store path. Does not support references. `dump` will be drained. + */ StorePath addToStoreFromDump(Source & dump, std::string_view name, FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair, const StorePathSet & references = StorePathSet()) override; @@ -107,7 +114,7 @@ public: void buildPaths(const std::vector & paths, BuildMode buildMode, std::shared_ptr evalStore) override; - std::vector buildPathsWithResults( + std::vector buildPathsWithResults( const std::vector & paths, BuildMode buildMode, std::shared_ptr evalStore) override; @@ -143,6 +150,8 @@ public: unsigned int getProtocol() override; + std::optional isTrustedClient() override; + void flushBadConnections(); struct Connection @@ -150,6 +159,7 @@ public: FdSink to; FdSource from; unsigned int daemonVersion; + std::optional remoteTrustsUs; std::optional daemonNixVersion; std::chrono::time_point startTime; diff --git a/src/libstore/repair-flag.hh b/src/libstore/repair-flag.hh index a13cda312..f412d6a20 100644 --- a/src/libstore/repair-flag.hh +++ b/src/libstore/repair-flag.hh @@ -1,4 +1,5 @@ #pragma once +///@file namespace nix { diff --git a/src/libstore/s3-binary-cache-store.cc b/src/libstore/s3-binary-cache-store.cc index 8d76eee99..d2fc6abaf 100644 --- a/src/libstore/s3-binary-cache-store.cc +++ b/src/libstore/s3-binary-cache-store.cc @@ -40,12 +40,12 @@ struct S3Error : public Error /* Helper: given an Outcome, return R in case of success, or throw an exception in case of an error. 
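The `checkAws` helper above follows a common unwrap-or-throw shape. The sketch below reproduces that shape without the AWS SDK; the `FakeOutcome` type is hypothetical and only mimics the `IsSuccess()`/`GetError()`/`GetResultWithOwnership()` interface the helper relies on.

```cpp
// Hypothetical stand-in for the SDK outcome type, reduced to the members the
// helper uses. Not the real Aws::Utils::Outcome.
#include <stdexcept>
#include <string>
#include <string_view>
#include <utility>

template<typename R>
struct FakeOutcome
{
    bool ok;
    R result;
    std::string error;

    bool IsSuccess() const { return ok; }
    const std::string & GetError() const { return error; }
    R && GetResultWithOwnership() { return std::move(result); }
};

// Same unwrap-or-throw pattern as checkAws above: hand back the result on
// success, otherwise turn the error into an exception with some context.
template<typename R>
R && checkOutcome(std::string_view what, FakeOutcome<R> && outcome)
{
    if (!outcome.IsSuccess())
        throw std::runtime_error(std::string(what) + ": " + outcome.GetError());
    return outcome.GetResultWithOwnership();
}
```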
*/ template -R && checkAws(const FormatOrString & fs, Aws::Utils::Outcome && outcome) +R && checkAws(std::string_view s, Aws::Utils::Outcome && outcome) { if (!outcome.IsSuccess()) throw S3Error( outcome.GetError().GetErrorType(), - fs.s + ": " + outcome.GetError().GetMessage()); + s + ": " + outcome.GetError().GetMessage()); return outcome.GetResultWithOwnership(); } @@ -192,19 +192,72 @@ S3BinaryCacheStore::S3BinaryCacheStore(const Params & params) struct S3BinaryCacheStoreConfig : virtual BinaryCacheStoreConfig { using BinaryCacheStoreConfig::BinaryCacheStoreConfig; - const Setting profile{(StoreConfig*) this, "", "profile", "The name of the AWS configuration profile to use."}; - const Setting region{(StoreConfig*) this, Aws::Region::US_EAST_1, "region", {"aws-region"}}; - const Setting scheme{(StoreConfig*) this, "", "scheme", "The scheme to use for S3 requests, https by default."}; - const Setting endpoint{(StoreConfig*) this, "", "endpoint", "An optional override of the endpoint to use when talking to S3."}; - const Setting narinfoCompression{(StoreConfig*) this, "", "narinfo-compression", "compression method for .narinfo files"}; - const Setting lsCompression{(StoreConfig*) this, "", "ls-compression", "compression method for .ls files"}; - const Setting logCompression{(StoreConfig*) this, "", "log-compression", "compression method for log/* files"}; + + const Setting profile{(StoreConfig*) this, "", "profile", + R"( + The name of the AWS configuration profile to use. By default + Nix will use the `default` profile. + )"}; + + const Setting region{(StoreConfig*) this, Aws::Region::US_EAST_1, "region", + R"( + The region of the S3 bucket. If your bucket is not in + `us–east-1`, you should always explicitly specify the region + parameter. + )"}; + + const Setting scheme{(StoreConfig*) this, "", "scheme", + R"( + The scheme used for S3 requests, `https` (default) or `http`. This + option allows you to disable HTTPS for binary caches which don't + support it. + + > **Note** + > + > HTTPS should be used if the cache might contain sensitive + > information. + )"}; + + const Setting endpoint{(StoreConfig*) this, "", "endpoint", + R"( + The URL of the endpoint of an S3-compatible service such as MinIO. + Do not specify this setting if you're using Amazon S3. + + > **Note** + > + > This endpoint must support HTTPS and will use path-based + > addressing instead of virtual host based addressing. + )"}; + + const Setting narinfoCompression{(StoreConfig*) this, "", "narinfo-compression", + "Compression method for `.narinfo` files."}; + + const Setting lsCompression{(StoreConfig*) this, "", "ls-compression", + "Compression method for `.ls` files."}; + + const Setting logCompression{(StoreConfig*) this, "", "log-compression", + R"( + Compression method for `log/*` files. It is recommended to + use a compression method supported by most web browsers + (e.g. `brotli`). 
+ )"}; + const Setting multipartUpload{ - (StoreConfig*) this, false, "multipart-upload", "whether to use multi-part uploads"}; + (StoreConfig*) this, false, "multipart-upload", + "Whether to use multi-part uploads."}; + const Setting bufferSize{ - (StoreConfig*) this, 5 * 1024 * 1024, "buffer-size", "size (in bytes) of each part in multi-part uploads"}; + (StoreConfig*) this, 5 * 1024 * 1024, "buffer-size", + "Size (in bytes) of each part in multi-part uploads."}; const std::string name() override { return "S3 Binary Cache Store"; } + + std::string doc() override + { + return + #include "s3-binary-cache-store.md" + ; + } }; struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStoreConfig, public virtual S3BinaryCacheStore @@ -430,9 +483,9 @@ struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStoreConfig, public virtual std::string marker; do { - debug(format("listing bucket 's3://%s' from key '%s'...") % bucketName % marker); + debug("listing bucket 's3://%s' from key '%s'...", bucketName, marker); - auto res = checkAws(format("AWS error listing bucket '%s'") % bucketName, + auto res = checkAws(fmt("AWS error listing bucket '%s'", bucketName), s3Helper.client->ListObjects( Aws::S3::Model::ListObjectsRequest() .WithBucket(bucketName) @@ -441,8 +494,8 @@ struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStoreConfig, public virtual auto & contents = res.GetContents(); - debug(format("got %d keys, next marker '%s'") - % contents.size() % res.GetNextMarker()); + debug("got %d keys, next marker '%s'", + contents.size(), res.GetNextMarker()); for (auto object : contents) { auto & key = object.GetKey(); @@ -456,6 +509,16 @@ struct S3BinaryCacheStoreImpl : virtual S3BinaryCacheStoreConfig, public virtual return paths; } + /** + * For now, we conservatively say we don't know. + * + * \todo try to expose our S3 authentication status. + */ + std::optional isTrustedClient() override + { + return std::nullopt; + } + static std::set uriSchemes() { return {"s3"}; } }; diff --git a/src/libstore/s3-binary-cache-store.hh b/src/libstore/s3-binary-cache-store.hh index bce828b11..c62ea5147 100644 --- a/src/libstore/s3-binary-cache-store.hh +++ b/src/libstore/s3-binary-cache-store.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "binary-cache-store.hh" diff --git a/src/libstore/s3-binary-cache-store.md b/src/libstore/s3-binary-cache-store.md new file mode 100644 index 000000000..70fe0eb09 --- /dev/null +++ b/src/libstore/s3-binary-cache-store.md @@ -0,0 +1,8 @@ +R"( + +**Store URL format**: `s3://`*bucket-name* + +This store allows reading and writing a binary cache stored in an AWS +S3 bucket. 
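For context, these settings are normally supplied as query parameters on the store URL. A hedged usage sketch follows; the bucket name and region are purely illustrative, and `openStore`/`copyStorePath` are used in their generic `store-api.hh` forms.

```cpp
// Illustrative only: open the S3 binary cache store documented above through
// the generic store API, passing settings as store-URL query parameters.
// The bucket name and region are made up for this example.
#include "store-api.hh"

void pushToCache(nix::Store & localStore, const nix::StorePath & path)
{
    auto cache = nix::openStore("s3://example-nix-cache?region=eu-west-1");

    // Copy one store path into the cache.
    nix::copyStorePath(localStore, *cache, path);
}
```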
+ +)" diff --git a/src/libstore/s3.hh b/src/libstore/s3.hh index cdb3e5908..f0aeb3bed 100644 --- a/src/libstore/s3.hh +++ b/src/libstore/s3.hh @@ -1,4 +1,5 @@ #pragma once +///@file #if ENABLE_S3 diff --git a/src/libstore/serve-protocol.hh b/src/libstore/serve-protocol.hh index 3f76baa82..553fd3a09 100644 --- a/src/libstore/serve-protocol.hh +++ b/src/libstore/serve-protocol.hh @@ -1,4 +1,5 @@ #pragma once +///@file namespace nix { diff --git a/src/libstore/sqlite.cc b/src/libstore/sqlite.cc index 871f2f3be..df334c23c 100644 --- a/src/libstore/sqlite.cc +++ b/src/libstore/sqlite.cc @@ -239,14 +239,11 @@ SQLiteTxn::~SQLiteTxn() } } -void handleSQLiteBusy(const SQLiteBusy & e) +void handleSQLiteBusy(const SQLiteBusy & e, time_t & nextWarning) { - static std::atomic lastWarned{0}; - time_t now = time(0); - - if (now > lastWarned + 10) { - lastWarned = now; + if (now > nextWarning) { + nextWarning = now + 10; logWarning({ .msg = hintfmt(e.what()) }); diff --git a/src/libstore/sqlite.hh b/src/libstore/sqlite.hh index 1853731a2..6e14852cb 100644 --- a/src/libstore/sqlite.hh +++ b/src/libstore/sqlite.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include #include @@ -10,7 +11,9 @@ struct sqlite3_stmt; namespace nix { -/* RAII wrapper to close a SQLite database automatically. */ +/** + * RAII wrapper to close a SQLite database automatically. + */ struct SQLite { sqlite3 * db = 0; @@ -22,7 +25,9 @@ struct SQLite ~SQLite(); operator sqlite3 * () { return db; } - /* Disable synchronous mode, set truncate journal mode. */ + /** + * Disable synchronous mode, set truncate journal mode. + */ void isCache(); void exec(const std::string & stmt); @@ -30,7 +35,9 @@ struct SQLite uint64_t getLastInsertedRowId(); }; -/* RAII wrapper to create and destroy SQLite prepared statements. */ +/** + * RAII wrapper to create and destroy SQLite prepared statements. + */ struct SQLiteStmt { sqlite3 * db = 0; @@ -42,7 +49,9 @@ struct SQLiteStmt ~SQLiteStmt(); operator sqlite3_stmt * () { return stmt; } - /* Helper for binding / executing statements. */ + /** + * Helper for binding / executing statements. + */ class Use { friend struct SQLiteStmt; @@ -55,7 +64,9 @@ struct SQLiteStmt ~Use(); - /* Bind the next parameter. */ + /** + * Bind the next parameter. + */ Use & operator () (std::string_view value, bool notNull = true); Use & operator () (const unsigned char * data, size_t len, bool notNull = true); Use & operator () (int64_t value, bool notNull = true); @@ -63,11 +74,15 @@ struct SQLiteStmt int step(); - /* Execute a statement that does not return rows. */ + /** + * Execute a statement that does not return rows. + */ void exec(); - /* For statements that return 0 or more rows. Returns true iff - a row is available. */ + /** + * For statements that return 0 or more rows. Returns true iff + * a row is available. + */ bool next(); std::string getStr(int col); @@ -81,8 +96,10 @@ struct SQLiteStmt } }; -/* RAII helper that ensures transactions are aborted unless explicitly - committed. */ +/** + * RAII helper that ensures transactions are aborted unless explicitly + * committed. + */ struct SQLiteTxn { bool active = false; @@ -122,18 +139,22 @@ protected: MakeError(SQLiteBusy, SQLiteError); -void handleSQLiteBusy(const SQLiteBusy & e); +void handleSQLiteBusy(const SQLiteBusy & e, time_t & nextWarning); -/* Convenience function for retrying a SQLite transaction when the - database is busy. */ +/** + * Convenience function for retrying a SQLite transaction when the + * database is busy. 
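A usage sketch for the retry helper above. The statement is illustrative and `getInt` is assumed to be the usual column accessor alongside `getStr`; the point is that the lambda is simply re-run while SQLite reports the database as busy.

```cpp
// Usage sketch for retrySQLite: the lambda is retried for as long as the
// statement raises SQLiteBusy. stmtCount is assumed to be a prepared
// "SELECT count(*) ..." statement; getInt() is assumed to exist alongside
// the getStr() accessor shown above.
#include "sqlite.hh"
#include <cstdint>

int64_t countRows(nix::SQLiteStmt & stmtCount)
{
    return nix::retrySQLite<int64_t>([&]() -> int64_t {
        auto use(stmtCount.use());
        if (!use.next()) return 0;
        return use.getInt(0);
    });
}
```

Keeping `nextWarning` per call site, as the change above does, means each retry loop rate-limits its own "database is busy" warning instead of sharing one global timestamp.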
+ */ template T retrySQLite(F && fun) { + time_t nextWarning = time(0) + 1; + while (true) { try { return fun(); } catch (SQLiteBusy & e) { - handleSQLiteBusy(e); + handleSQLiteBusy(e, nextWarning); } } } diff --git a/src/libstore/ssh-store-config.hh b/src/libstore/ssh-store-config.hh new file mode 100644 index 000000000..c27a5d00f --- /dev/null +++ b/src/libstore/ssh-store-config.hh @@ -0,0 +1,29 @@ +#pragma once +///@file + +#include "store-api.hh" + +namespace nix { + +struct CommonSSHStoreConfig : virtual StoreConfig +{ + using StoreConfig::StoreConfig; + + const Setting sshKey{(StoreConfig*) this, "", "ssh-key", + "Path to the SSH private key used to authenticate to the remote machine."}; + + const Setting sshPublicHostKey{(StoreConfig*) this, "", "base64-ssh-public-host-key", + "The public host key of the remote machine."}; + + const Setting compress{(StoreConfig*) this, false, "compress", + "Whether to enable SSH compression."}; + + const Setting remoteStore{(StoreConfig*) this, "", "remote-store", + R"( + [Store URL](@docroot@/command-ref/new-cli/nix3-help-stores.md#store-url-format) + to be used on the remote machine. The default is `auto` + (i.e. use the Nix daemon or `/nix/store` directly). + )"}; +}; + +} diff --git a/src/libstore/ssh-store.cc b/src/libstore/ssh-store.cc index a1d4daafd..962221ad2 100644 --- a/src/libstore/ssh-store.cc +++ b/src/libstore/ssh-store.cc @@ -1,3 +1,4 @@ +#include "ssh-store-config.hh" #include "store-api.hh" #include "remote-store.hh" #include "remote-fs-accessor.hh" @@ -8,17 +9,22 @@ namespace nix { -struct SSHStoreConfig : virtual RemoteStoreConfig +struct SSHStoreConfig : virtual RemoteStoreConfig, virtual CommonSSHStoreConfig { using RemoteStoreConfig::RemoteStoreConfig; + using CommonSSHStoreConfig::CommonSSHStoreConfig; - const Setting sshKey{(StoreConfig*) this, "", "ssh-key", "path to an SSH private key"}; - const Setting sshPublicHostKey{(StoreConfig*) this, "", "base64-ssh-public-host-key", "The public half of the host's SSH key"}; - const Setting compress{(StoreConfig*) this, false, "compress", "whether to compress the connection"}; - const Setting remoteProgram{(StoreConfig*) this, "nix-daemon", "remote-program", "path to the nix-daemon executable on the remote system"}; - const Setting remoteStore{(StoreConfig*) this, "", "remote-store", "URI of the store on the remote system"}; + const Setting remoteProgram{(StoreConfig*) this, "nix-daemon", "remote-program", + "Path to the `nix-daemon` executable on the remote machine."}; - const std::string name() override { return "SSH Store"; } + const std::string name() override { return "Experimental SSH Store"; } + + std::string doc() override + { + return + #include "ssh-store.md" + ; + } }; class SSHStore : public virtual SSHStoreConfig, public virtual RemoteStore @@ -28,6 +34,7 @@ public: SSHStore(const std::string & scheme, const std::string & host, const Params & params) : StoreConfig(params) , RemoteStoreConfig(params) + , CommonSSHStoreConfig(params) , SSHStoreConfig(params) , Store(params) , RemoteStore(params) @@ -49,9 +56,6 @@ public: return *uriSchemes().begin() + "://" + host; } - bool sameMachine() override - { return false; } - // FIXME extend daemon protocol, move implementation to RemoteStore std::optional getBuildLogExact(const StorePath & path) override { unsupported("getBuildLogExact"); } diff --git a/src/libstore/ssh-store.md b/src/libstore/ssh-store.md new file mode 100644 index 000000000..881537e71 --- /dev/null +++ b/src/libstore/ssh-store.md @@ -0,0 +1,8 @@ +R"( + 
+**Store URL format**: `ssh-ng://[username@]hostname` + +Experimental store type that allows full access to a Nix store on a +remote machine. + +)" diff --git a/src/libstore/ssh.cc b/src/libstore/ssh.cc index 69bfe3418..6f6deda51 100644 --- a/src/libstore/ssh.cc +++ b/src/libstore/ssh.cc @@ -1,4 +1,5 @@ #include "ssh.hh" +#include "finally.hh" namespace nix { @@ -35,6 +36,9 @@ void SSHMaster::addCommonSSHOpts(Strings & args) } if (compress) args.push_back("-C"); + + args.push_back("-oPermitLocalCommand=yes"); + args.push_back("-oLocalCommand=echo started"); } std::unique_ptr SSHMaster::startCommand(const std::string & command) @@ -49,6 +53,11 @@ std::unique_ptr SSHMaster::startCommand(const std::string ProcessOptions options; options.dieWithParent = false; + if (!fakeSSH && !useMaster) { + logger->pause(); + } + Finally cleanup = [&]() { logger->resume(); }; + conn->sshPid = startProcess([&]() { restoreProcessContext(); @@ -86,6 +95,18 @@ std::unique_ptr SSHMaster::startCommand(const std::string in.readSide = -1; out.writeSide = -1; + // Wait for the SSH connection to be established, + // So that we don't overwrite the password prompt with our progress bar. + if (!fakeSSH && !useMaster) { + std::string reply; + try { + reply = readLine(out.readSide.get()); + } catch (EndOfFile & e) { } + + if (reply != "started") + throw Error("failed to start SSH connection to '%s'", host); + } + conn->out = std::move(out.readSide); conn->in = std::move(in.writeSide); @@ -109,6 +130,9 @@ Path SSHMaster::startMaster() ProcessOptions options; options.dieWithParent = false; + logger->pause(); + Finally cleanup = [&]() { logger->resume(); }; + state->sshMaster = startProcess([&]() { restoreProcessContext(); @@ -117,11 +141,7 @@ Path SSHMaster::startMaster() if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1) throw SysError("duping over stdout"); - Strings args = - { "ssh", host.c_str(), "-M", "-N", "-S", state->socketPath - , "-o", "LocalCommand=echo started" - , "-o", "PermitLocalCommand=yes" - }; + Strings args = { "ssh", host.c_str(), "-M", "-N", "-S", state->socketPath }; if (verbosity >= lvlChatty) args.push_back("-v"); addCommonSSHOpts(args); diff --git a/src/libstore/ssh.hh b/src/libstore/ssh.hh index dabbcedda..c86a8a986 100644 --- a/src/libstore/ssh.hh +++ b/src/libstore/ssh.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "util.hh" #include "sync.hh" diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index 601efa1cc..5bee1af9f 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -7,6 +7,7 @@ #include "nar-info-disk-cache.hh" #include "thread-pool.hh" #include "url.hh" +#include "references.hh" #include "archive.hh" #include "callback.hh" #include "remote-store.hh" @@ -98,10 +99,12 @@ StorePath Store::followLinksToStorePath(std::string_view path) const silly, but it's done that way for compatibility). is the name of the output (usually, "out"). -

<h2> = base-16 representation of a SHA-256 hash of: +

= base-16 representation of a SHA-256 hash of + + = if = "text:...": the string written to the resulting store path - if = "source": + if = "source:...": the serialisation of the path from which this store path is copied, as returned by hashPath() if = "output:": @@ -162,63 +165,63 @@ StorePath Store::makeOutputPath(std::string_view id, } +/* Stuff the references (if any) into the type. This is a bit + hacky, but we can't put them in, say, (per the grammar above) + since that would be ambiguous. */ static std::string makeType( const Store & store, std::string && type, - const StorePathSet & references, - bool hasSelfReference = false) + const StoreReferences & references) { - for (auto & i : references) { + for (auto & i : references.others) { type += ":"; type += store.printStorePath(i); } - if (hasSelfReference) type += ":self"; + if (references.self) type += ":self"; return std::move(type); } -StorePath Store::makeFixedOutputPath( - FileIngestionMethod method, - const Hash & hash, - std::string_view name, - const StorePathSet & references, - bool hasSelfReference) const +StorePath Store::makeFixedOutputPath(std::string_view name, const FixedOutputInfo & info) const { - if (hash.type == htSHA256 && method == FileIngestionMethod::Recursive) { - return makeStorePath(makeType(*this, "source", references, hasSelfReference), hash, name); + if (info.hash.hash.type == htSHA256 && info.hash.method == FileIngestionMethod::Recursive) { + return makeStorePath(makeType(*this, "source", info.references), info.hash.hash, name); } else { - assert(references.empty()); + assert(info.references.size() == 0); return makeStorePath("output:out", hashString(htSHA256, "fixed:out:" - + makeFileIngestionPrefix(method) - + hash.to_string(Base16, true) + ":"), + + makeFileIngestionPrefix(info.hash.method) + + info.hash.hash.to_string(Base16, true) + ":"), name); } } -StorePath Store::makeFixedOutputPathFromCA(std::string_view name, ContentAddress ca, - const StorePathSet & references, bool hasSelfReference) const + +StorePath Store::makeTextPath(std::string_view name, const TextInfo & info) const +{ + assert(info.hash.hash.type == htSHA256); + return makeStorePath( + makeType(*this, "text", StoreReferences { + .others = info.references, + .self = false, + }), + info.hash.hash, + name); +} + + +StorePath Store::makeFixedOutputPathFromCA(std::string_view name, const ContentAddressWithReferences & ca) const { // New template return std::visit(overloaded { - [&](const TextHash & th) { - return makeTextPath(name, th.hash, references); + [&](const TextInfo & ti) { + return makeTextPath(name, ti); }, - [&](const FixedOutputHash & fsh) { - return makeFixedOutputPath(fsh.method, fsh.hash, name, references, hasSelfReference); + [&](const FixedOutputInfo & foi) { + return makeFixedOutputPath(name, foi); } - }, ca); -} - -StorePath Store::makeTextPath(std::string_view name, const Hash & hash, - const StorePathSet & references) const -{ - assert(hash.type == htSHA256); - /* Stuff the references (if any) into the type. This is a bit - hacky, but we can't put them in `s' since that would be - ambiguous. */ - return makeStorePath(makeType(*this, "text", references), hash, name); + }, ca.raw); } @@ -228,7 +231,14 @@ std::pair Store::computeStorePathForPath(std::string_view name, Hash h = method == FileIngestionMethod::Recursive ? 
hashPath(hashAlgo, srcPath, filter).first : hashFile(hashAlgo, srcPath); - return std::make_pair(makeFixedOutputPath(method, h, name), h); + FixedOutputInfo caInfo { + .hash = { + .method = method, + .hash = h, + }, + .references = {}, + }; + return std::make_pair(makeFixedOutputPath(name, caInfo), h); } @@ -237,7 +247,10 @@ StorePath Store::computeStorePathForText( std::string_view s, const StorePathSet & references) const { - return makeTextPath(name, hashString(htSHA256, s), references); + return makeTextPath(name, TextInfo { + { .hash = hashString(htSHA256, s) }, + references, + }); } @@ -425,11 +438,18 @@ ValidPathInfo Store::addToStoreSlow(std::string_view name, const Path & srcPath, throw Error("hash mismatch for '%s'", srcPath); ValidPathInfo info { - makeFixedOutputPath(method, hash, name), + *this, + name, + FixedOutputInfo { + .hash = { + .method = method, + .hash = hash, + }, + .references = {}, + }, narHash, }; info.narSize = narSize; - info.ca = FixedOutputHash { .method = method, .hash = hash }; if (!isValidPath(info.path)) { auto source = sinkToSource([&](Sink & scratchpadSink) { @@ -445,10 +465,10 @@ StringSet StoreConfig::getDefaultSystemFeatures() { auto res = settings.systemFeatures.get(); - if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) + if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) res.insert("ca-derivations"); - if (settings.isExperimentalFeatureEnabled(Xp::RecursiveNix)) + if (experimentalFeatureSettings.isEnabled(Xp::RecursiveNix)) res.insert("recursive-nix"); return res; @@ -507,6 +527,57 @@ StorePathSet Store::queryDerivationOutputs(const StorePath & path) return outputPaths; } + +void Store::querySubstitutablePathInfos(const StorePathCAMap & paths, SubstitutablePathInfos & infos) +{ + if (!settings.useSubstitutes) return; + for (auto & sub : getDefaultSubstituters()) { + for (auto & path : paths) { + if (infos.count(path.first)) + // Choose first succeeding substituter. + continue; + + auto subPath(path.first); + + // Recompute store path so that we can use a different store root. + if (path.second) { + subPath = makeFixedOutputPathFromCA( + path.first.name(), + ContentAddressWithReferences::withoutRefs(*path.second)); + if (sub->storeDir == storeDir) + assert(subPath == path.first); + if (subPath != path.first) + debug("replaced path '%s' with '%s' for substituter '%s'", printStorePath(path.first), sub->printStorePath(subPath), sub->getUri()); + } else if (sub->storeDir != storeDir) continue; + + debug("checking substituter '%s' for path '%s'", sub->getUri(), sub->printStorePath(subPath)); + try { + auto info = sub->queryPathInfo(subPath); + + if (sub->storeDir != storeDir && !(info->isContentAddressed(*sub) && info->references.empty())) + continue; + + auto narInfo = std::dynamic_pointer_cast( + std::shared_ptr(info)); + infos.insert_or_assign(path.first, SubstitutablePathInfo{ + .deriver = info->deriver, + .references = info->references, + .downloadSize = narInfo ? 
narInfo->fileSize : 0, + .narSize = info->narSize, + }); + } catch (InvalidPath &) { + } catch (SubstituterDisabled &) { + } catch (Error & e) { + if (settings.tryFallback) + logError(e.info()); + else + throw; + } + } + } +} + + bool Store::isValidPath(const StorePath & storePath) { { @@ -790,13 +861,13 @@ std::string Store::makeValidityRegistration(const StorePathSet & paths, if (showHash) { s += info->narHash.to_string(Base16, false) + "\n"; - s += (format("%1%\n") % info->narSize).str(); + s += fmt("%1%\n", info->narSize); } auto deriver = showDerivers && info->deriver ? printStorePath(*info->deriver) : ""; s += deriver + "\n"; - s += (format("%1%\n") % info->references.size()).str(); + s += fmt("%1%\n", info->references.size()); for (auto & j : info->references) s += printStorePath(j) + "\n"; @@ -855,6 +926,7 @@ json Store::pathInfoToJSON(const StorePathSet & storePaths, auto info = queryPathInfo(storePath); jsonPath["path"] = printStorePath(info->path); + jsonPath["valid"] = true; jsonPath["narHash"] = info->narHash.to_string(hashBase, true); jsonPath["narSize"] = info->narSize; @@ -976,7 +1048,9 @@ void copyStorePath( // recompute store path on the chance dstStore does it differently if (info->ca && info->references.empty()) { auto info2 = make_ref(*info); - info2->path = dstStore.makeFixedOutputPathFromCA(info->path.name(), *info->ca); + info2->path = dstStore.makeFixedOutputPathFromCA( + info->path.name(), + info->contentAddressWithReferences().value()); if (dstStore.storeDir == srcStore.storeDir) assert(info->path == info2->path); info = info2; @@ -1016,7 +1090,7 @@ std::map copyPaths( for (auto & path : paths) { storePaths.insert(path.path()); if (auto realisation = std::get_if(&path.raw)) { - settings.requireExperimentalFeature(Xp::CaDerivations); + experimentalFeatureSettings.require(Xp::CaDerivations); toplevelRealisations.insert(*realisation); } } @@ -1088,7 +1162,9 @@ std::map copyPaths( auto storePathForSrc = currentPathInfo.path; auto storePathForDst = storePathForSrc; if (currentPathInfo.ca && currentPathInfo.references.empty()) { - storePathForDst = dstStore.makeFixedOutputPathFromCA(storePathForSrc.name(), *currentPathInfo.ca); + storePathForDst = dstStore.makeFixedOutputPathFromCA( + currentPathInfo.path.name(), + currentPathInfo.contentAddressWithReferences().value()); if (dstStore.storeDir == srcStore.storeDir) assert(storePathForDst == storePathForSrc); if (storePathForDst != storePathForSrc) @@ -1100,6 +1176,9 @@ std::map copyPaths( return storePathForDst; }; + // total is accessed by each copy, which are each handled in separate threads + std::atomic total = 0; + for (auto & missingPath : sortedMissing) { auto info = srcStore.queryPathInfo(missingPath); @@ -1120,7 +1199,13 @@ std::map copyPaths( {storePathS, srcUri, dstUri}); PushActivity pact(act.id); - srcStore.narFromPath(missingPath, sink); + LambdaSink progressSink([&](std::string_view data) { + total += data.size(); + act.progress(total, info->narSize); + }); + TeeSink tee { sink, progressSink }; + + srcStore.narFromPath(missingPath, tee); }); pathsToCopy.push_back(std::pair{infoForDst, std::move(source)}); } @@ -1241,7 +1326,7 @@ std::optional Store::getBuildDerivationPath(const StorePath & path) } } - if (!settings.isExperimentalFeatureEnabled(Xp::CaDerivations) || !isValidPath(path)) + if (!experimentalFeatureSettings.isEnabled(Xp::CaDerivations) || !isValidPath(path)) return path; auto drv = readDerivation(path); diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh index 
4d8db3596..c910d1c96 100644 --- a/src/libstore/store-api.hh +++ b/src/libstore/store-api.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "nar-info.hh" #include "realisation.hh" @@ -55,7 +56,10 @@ namespace nix { */ MakeError(SubstError, Error); -MakeError(BuildError, Error); // denotes a permanent build failure +/** + * denotes a permanent build failure + */ +MakeError(BuildError, Error); MakeError(InvalidPath, Error); MakeError(Unsupported, Error); MakeError(SubstituteGone, Error); @@ -78,13 +82,17 @@ enum CheckSigsFlag : bool { NoCheckSigs = false, CheckSigs = true }; enum SubstituteFlag : bool { NoSubstitute = false, Substitute = true }; enum AllowInvalidFlag : bool { DisallowInvalid = false, AllowInvalid = true }; -/* Magic header of exportPath() output (obsolete). */ +/** + * Magic header of exportPath() output (obsolete). + */ const uint32_t exportMagic = 0x4558494e; enum BuildMode { bmNormal, bmRepair, bmCheck }; +enum TrustedFlag : bool { NotTrusted = false, Trusted = true }; struct BuildResult; +struct KeyedBuildResult; typedef std::map> StorePathCAMap; @@ -101,17 +109,41 @@ struct StoreConfig : public Config virtual const std::string name() = 0; + virtual std::string doc() + { + return ""; + } + const PathSetting storeDir_{this, false, settings.nixStore, - "store", "path to the Nix store"}; + "store", + R"( + Logical location of the Nix store, usually + `/nix/store`. Note that you can only copy store paths + between stores if they have the same `store` setting. + )"}; const Path storeDir = storeDir_; - const Setting pathInfoCacheSize{this, 65536, "path-info-cache-size", "size of the in-memory store path information cache"}; + const Setting pathInfoCacheSize{this, 65536, "path-info-cache-size", + "Size of the in-memory store path metadata cache."}; - const Setting isTrusted{this, false, "trusted", "whether paths from this store can be used as substitutes even when they lack trusted signatures"}; + const Setting isTrusted{this, false, "trusted", + R"( + Whether paths from this store can be used as substitutes + even if they are not signed by a key listed in the + [`trusted-public-keys`](@docroot@/command-ref/conf-file.md#conf-trusted-public-keys) + setting. + )"}; - Setting priority{this, 0, "priority", "priority of this substituter (lower value means higher priority)"}; + Setting priority{this, 0, "priority", + R"( + Priority of this store when used as a substituter. A lower value means a higher priority. + )"}; - Setting wantMassQuery{this, false, "want-mass-query", "whether this substituter can be queried efficiently for path validity"}; + Setting wantMassQuery{this, false, "want-mass-query", + R"( + Whether this store (when used as a substituter) can be + queried efficiently for path validity. + )"}; Setting systemFeatures{this, getDefaultSystemFeatures(), "system-features", @@ -125,23 +157,30 @@ public: typedef std::map Params; - - protected: struct PathInfoCacheValue { - // Time of cache entry creation or update + /** + * Time of cache entry creation or update + */ std::chrono::time_point time_point = std::chrono::steady_clock::now(); - // Null if missing + /** + * Null if missing + */ std::shared_ptr value; - // Whether the value is valid as a cache entry. The path may not exist. + /** + * Whether the value is valid as a cache entry. The path may not + * exist. 
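The `PathInfoCacheValue` fields above encode three distinct lookup outcomes, which are easy to mix up. A small classification sketch follows; the `CacheOutcome` enum and the template are introduced purely for illustration and are not part of the store API.

```cpp
// Illustration of how a lookup interprets a PathInfoCacheValue: an expired
// entry means "ask the store again", a fresh entry whose value is null is a
// cached negative result, and a fresh non-null value is a cached hit.
// CacheOutcome is invented for this sketch.
enum class CacheOutcome { Unknown, KnownMissing, KnownValid };

template<typename CacheValue>
CacheOutcome classifyCacheEntry(CacheValue & entry)
{
    if (!entry.isKnownNow())         // entry is too old to be trusted
        return CacheOutcome::Unknown;
    return entry.didExist()          // i.e. value != nullptr
        ? CacheOutcome::KnownValid
        : CacheOutcome::KnownMissing;
}
```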
+ */ bool isKnownNow(); - // Past tense, because a path can only be assumed to exists when - // isKnownNow() && didExist() + /** + * Past tense, because a path can only be assumed to exists when + * isKnownNow() && didExist() + */ inline bool didExist() { return value != nullptr; } @@ -175,35 +214,53 @@ public: std::string printStorePath(const StorePath & path) const; - // FIXME: remove + /** + * Deprecated + * + * \todo remove + */ StorePathSet parseStorePathSet(const PathSet & paths) const; PathSet printStorePathSet(const StorePathSet & path) const; - /* Display a set of paths in human-readable form (i.e., between quotes - and separated by commas). */ + /** + * Display a set of paths in human-readable form (i.e., between quotes + * and separated by commas). + */ std::string showPaths(const StorePathSet & paths); - /* Return true if ‘path’ is in the Nix store (but not the Nix - store itself). */ + /** + * @return true if ‘path’ is in the Nix store (but not the Nix + * store itself). + */ bool isInStore(PathView path) const; - /* Return true if ‘path’ is a store path, i.e. a direct child of - the Nix store. */ + /** + * @return true if ‘path’ is a store path, i.e. a direct child of the + * Nix store. + */ bool isStorePath(std::string_view path) const; - /* Split a path like /nix/store/-/ into - /nix/store/- and /. */ + /** + * Split a path like /nix/store/-/ into + * /nix/store/- and /. + */ std::pair toStorePath(PathView path) const; - /* Follow symlinks until we end up with a path in the Nix store. */ + /** + * Follow symlinks until we end up with a path in the Nix store. + */ Path followLinksToStore(std::string_view path) const; - /* Same as followLinksToStore(), but apply toStorePath() to the - result. */ + /** + * Same as followLinksToStore(), but apply toStorePath() to the + * result. + */ StorePath followLinksToStorePath(std::string_view path) const; - /* Constructs a unique store path name. */ + /** + * Constructs a unique store path name. + */ StorePath makeStorePath(std::string_view type, std::string_view hash, std::string_view name) const; StorePath makeStorePath(std::string_view type, @@ -212,45 +269,46 @@ public: StorePath makeOutputPath(std::string_view id, const Hash & hash, std::string_view name) const; - StorePath makeFixedOutputPath(FileIngestionMethod method, - const Hash & hash, std::string_view name, - const StorePathSet & references = {}, - bool hasSelfReference = false) const; + StorePath makeFixedOutputPath(std::string_view name, const FixedOutputInfo & info) const; - StorePath makeTextPath(std::string_view name, const Hash & hash, - const StorePathSet & references = {}) const; + StorePath makeTextPath(std::string_view name, const TextInfo & info) const; - StorePath makeFixedOutputPathFromCA(std::string_view name, ContentAddress ca, - const StorePathSet & references = {}, - bool hasSelfReference = false) const; + StorePath makeFixedOutputPathFromCA(std::string_view name, const ContentAddressWithReferences & ca) const; - /* This is the preparatory part of addToStore(); it computes the - store path to which srcPath is to be copied. Returns the store - path and the cryptographic hash of the contents of srcPath. */ + /** + * Preparatory part of addToStore(). + * + * @return the store path to which srcPath is to be copied + * and the cryptographic hash of the contents of srcPath. 
+ */ std::pair computeStorePathForPath(std::string_view name, const Path & srcPath, FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, PathFilter & filter = defaultPathFilter) const; - /* Preparatory part of addTextToStore(). - - !!! Computation of the path should take the references given to - addTextToStore() into account, otherwise we have a (relatively - minor) security hole: a caller can register a source file with - bogus references. If there are too many references, the path may - not be garbage collected when it has to be (not really a problem, - the caller could create a root anyway), or it may be garbage - collected when it shouldn't be (more serious). - - Hashing the references would solve this (bogus references would - simply yield a different store path, so other users wouldn't be - affected), but it has some backwards compatibility issues (the - hashing scheme changes), so I'm not doing that for now. */ + /** + * Preparatory part of addTextToStore(). + * + * !!! Computation of the path should take the references given to + * addTextToStore() into account, otherwise we have a (relatively + * minor) security hole: a caller can register a source file with + * bogus references. If there are too many references, the path may + * not be garbage collected when it has to be (not really a problem, + * the caller could create a root anyway), or it may be garbage + * collected when it shouldn't be (more serious). + * + * Hashing the references would solve this (bogus references would + * simply yield a different store path, so other users wouldn't be + * affected), but it has some backwards compatibility issues (the + * hashing scheme changes), so I'm not doing that for now. + */ StorePath computeStorePathForText( std::string_view name, std::string_view s, const StorePathSet & references) const; - /* Check whether a path is valid. */ + /** + * Check whether a path is valid. + */ bool isValidPath(const StorePath & path); protected: @@ -259,53 +317,68 @@ protected: public: - /* If requested, substitute missing paths. This - implements nix-copy-closure's --use-substitutes - flag. */ + /** + * If requested, substitute missing paths. This + * implements nix-copy-closure's --use-substitutes + * flag. + */ void substitutePaths(const StorePathSet & paths); - /* Query which of the given paths is valid. Optionally, try to - substitute missing paths. */ + /** + * Query which of the given paths is valid. Optionally, try to + * substitute missing paths. + */ virtual StorePathSet queryValidPaths(const StorePathSet & paths, SubstituteFlag maybeSubstitute = NoSubstitute); - /* Query the set of all valid paths. Note that for some store - backends, the name part of store paths may be replaced by 'x' - (i.e. you'll get /nix/store/-x rather than - /nix/store/-). Use queryPathInfo() to obtain the - full store path. FIXME: should return a set of - std::variant to get rid of this hack. */ + /** + * Query the set of all valid paths. Note that for some store + * backends, the name part of store paths may be replaced by 'x' + * (i.e. you'll get /nix/store/-x rather than + * /nix/store/-). Use queryPathInfo() to obtain the + * full store path. FIXME: should return a set of + * std::variant to get rid of this hack. + */ virtual StorePathSet queryAllValidPaths() { unsupported("queryAllValidPaths"); } constexpr static const char * MissingName = "x"; - /* Query information about a valid path. It is permitted to omit - the name part of the store path. 
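A hedged usage sketch for `queryPathInfo()`: the synchronous overload returns path metadata or throws `InvalidPath`, so callers that only want to probe a path typically wrap it in a try/catch (or check `isValidPath()` first). The helper name and return type are illustrative.

```cpp
// Illustrative helper: look up the NAR size of a path, mapping the
// InvalidPath exception thrown by queryPathInfo() to an empty optional.
#include "store-api.hh"
#include <cstdint>
#include <optional>

std::optional<uint64_t> queryNarSize(nix::Store & store, const nix::StorePath & path)
{
    try {
        auto info = store.queryPathInfo(path);   // ref<const ValidPathInfo>
        return info->narSize;
    } catch (nix::InvalidPath &) {
        return std::nullopt;                     // not a valid path in this store
    }
}
```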
*/ + /** + * Query information about a valid path. It is permitted to omit + * the name part of the store path. + */ ref queryPathInfo(const StorePath & path); - /* Asynchronous version of queryPathInfo(). */ + /** + * Asynchronous version of queryPathInfo(). + */ void queryPathInfo(const StorePath & path, Callback> callback) noexcept; - /* Query the information about a realisation. */ + /** + * Query the information about a realisation. + */ std::shared_ptr queryRealisation(const DrvOutput &); - /* Asynchronous version of queryRealisation(). */ + /** + * Asynchronous version of queryRealisation(). + */ void queryRealisation(const DrvOutput &, Callback> callback) noexcept; - /* Check whether the given valid path info is sufficiently attested, by - either being signed by a trusted public key or content-addressed, in - order to be included in the given store. - - These same checks would be performed in addToStore, but this allows an - earlier failure in the case where dependencies need to be added too, but - the addToStore wouldn't fail until those dependencies are added. Also, - we don't really want to add the dependencies listed in a nar info we - don't trust anyyways. - */ + /** + * Check whether the given valid path info is sufficiently attested, by + * either being signed by a trusted public key or content-addressed, in + * order to be included in the given store. + * + * These same checks would be performed in addToStore, but this allows an + * earlier failure in the case where dependencies need to be added too, but + * the addToStore wouldn't fail until those dependencies are added. Also, + * we don't really want to add the dependencies listed in a nar info we + * don't trust anyyways. + */ virtual bool pathInfoIsUntrusted(const ValidPathInfo &) { return true; @@ -325,53 +398,77 @@ protected: public: - /* Queries the set of incoming FS references for a store path. - The result is not cleared. */ + /** + * Queries the set of incoming FS references for a store path. + * The result is not cleared. + */ virtual void queryReferrers(const StorePath & path, StorePathSet & referrers) { unsupported("queryReferrers"); } - /* Return all currently valid derivations that have `path' as an - output. (Note that the result of `queryDeriver()' is the - derivation that was actually used to produce `path', which may - not exist anymore.) */ + /** + * @return all currently valid derivations that have `path` as an + * output. + * + * (Note that the result of `queryDeriver()` is the derivation that + * was actually used to produce `path`, which may not exist + * anymore.) + */ virtual StorePathSet queryValidDerivers(const StorePath & path) { return {}; }; - /* Query the outputs of the derivation denoted by `path'. */ + /** + * Query the outputs of the derivation denoted by `path`. + */ virtual StorePathSet queryDerivationOutputs(const StorePath & path); - /* Query the mapping outputName => outputPath for the given derivation. All - outputs are mentioned so ones mising the mapping are mapped to - `std::nullopt`. */ + /** + * Query the mapping outputName => outputPath for the given + * derivation. All outputs are mentioned so ones mising the mapping + * are mapped to `std::nullopt`. + */ virtual std::map> queryPartialDerivationOutputMap(const StorePath & path); - /* Query the mapping outputName=>outputPath for the given derivation. - Assume every output has a mapping and throw an exception otherwise. */ + /** + * Query the mapping outputName=>outputPath for the given derivation. 
+ * Assume every output has a mapping and throw an exception otherwise. + */ OutputPathMap queryDerivationOutputMap(const StorePath & path); - /* Query the full store path given the hash part of a valid store - path, or empty if the path doesn't exist. */ + /** + * Query the full store path given the hash part of a valid store + * path, or empty if the path doesn't exist. + */ virtual std::optional queryPathFromHashPart(const std::string & hashPart) = 0; - /* Query which of the given paths have substitutes. */ + /** + * Query which of the given paths have substitutes. + */ virtual StorePathSet querySubstitutablePaths(const StorePathSet & paths) { return {}; }; - /* Query substitute info (i.e. references, derivers and download - sizes) of a map of paths to their optional ca values. The info - of the first succeeding substituter for each path will be - returned. If a path does not have substitute info, it's omitted - from the resulting ‘infos’ map. */ + /** + * Query substitute info (i.e. references, derivers and download + * sizes) of a map of paths to their optional ca values. The info of + * the first succeeding substituter for each path will be returned. + * If a path does not have substitute info, it's omitted from the + * resulting ‘infos’ map. + */ virtual void querySubstitutablePathInfos(const StorePathCAMap & paths, - SubstitutablePathInfos & infos) { return; }; + SubstitutablePathInfos & infos); - /* Import a path into the store. */ + /** + * Import a path into the store. + */ virtual void addToStore(const ValidPathInfo & info, Source & narSource, RepairFlag repair = NoRepair, CheckSigsFlag checkSigs = CheckSigs) = 0; - // A list of paths infos along with a source providing the content of the - // associated store path + /** + * A list of paths infos along with a source providing the content + * of the associated store path + */ using PathsSource = std::vector>>; - /* Import multiple paths into the store. */ + /** + * Import multiple paths into the store. + */ virtual void addMultipleToStore( Source & source, RepairFlag repair = NoRepair, @@ -383,10 +480,14 @@ public: RepairFlag repair = NoRepair, CheckSigsFlag checkSigs = CheckSigs); - /* Copy the contents of a path to the store and register the - validity the resulting path. The resulting path is returned. - The function object `filter' can be used to exclude files (see - libutil/archive.hh). */ + /** + * Copy the contents of a path to the store and register the + * validity the resulting path. + * + * @return The resulting path is returned. + * @param filter This function can be used to exclude files (see + * libutil/archive.hh). + */ virtual StorePath addToStore( std::string_view name, const Path & srcPath, @@ -396,26 +497,33 @@ public: RepairFlag repair = NoRepair, const StorePathSet & references = StorePathSet()); - /* Copy the contents of a path to the store and register the - validity the resulting path, using a constant amount of - memory. */ + /** + * Copy the contents of a path to the store and register the + * validity the resulting path, using a constant amount of + * memory. + */ ValidPathInfo addToStoreSlow(std::string_view name, const Path & srcPath, FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, std::optional expectedCAHash = {}); - /* Like addToStore(), but the contents of the path are contained - in `dump', which is either a NAR serialisation (if recursive == - true) or simply the contents of a regular file (if recursive == - false). 
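A minimal sketch (not from the patch) of `addToStore()` with the `filter` parameter described above, assuming this revision's argument order; the source directory, store name, and filter rule are made up for illustration.

```cpp
#include "store-api.hh"

#include <iostream>

using namespace nix;

int main()
{
    auto store = openStore();

    // Include everything except paths that contain a ".git" component.
    PathFilter filter = [](const Path & p) {
        return p.find("/.git") == std::string::npos;
    };

    // Recursively serialise ./src as a NAR, add it to the store, and
    // register the validity of the resulting path.
    auto storePath = store->addToStore(
        "example-source",                   // name part of the store path
        "./src",                            // placeholder source path
        FileIngestionMethod::Recursive,
        htSHA256,
        filter);

    std::cout << store->printStorePath(storePath) << "\n";
}
```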
- `dump` may be drained */ - // FIXME: remove? + /** + * Like addToStore(), but the contents of the path are contained + * in `dump`, which is either a NAR serialisation (if recursive == + * true) or simply the contents of a regular file (if recursive == + * false). + * `dump` may be drained + * + * \todo remove? + */ virtual StorePath addToStoreFromDump(Source & dump, std::string_view name, FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair, const StorePathSet & references = StorePathSet()) { unsupported("addToStoreFromDump"); } - /* Like addToStore, but the contents written to the output path is - a regular file containing the given string. */ + /** + * Like addToStore, but the contents written to the output path is a + * regular file containing the given string. + */ virtual StorePath addTextToStore( std::string_view name, std::string_view s, @@ -436,140 +544,179 @@ public: virtual void registerDrvOutput(const Realisation & output, CheckSigsFlag checkSigs) { return registerDrvOutput(output); } - /* Write a NAR dump of a store path. */ + /** + * Write a NAR dump of a store path. + */ virtual void narFromPath(const StorePath & path, Sink & sink) = 0; - /* For each path, if it's a derivation, build it. Building a - derivation means ensuring that the output paths are valid. If - they are already valid, this is a no-op. Otherwise, validity - can be reached in two ways. First, if the output paths is - substitutable, then build the path that way. Second, the - output paths can be created by running the builder, after - recursively building any sub-derivations. For inputs that are - not derivations, substitute them. */ + /** + * For each path, if it's a derivation, build it. Building a + * derivation means ensuring that the output paths are valid. If + * they are already valid, this is a no-op. Otherwise, validity + * can be reached in two ways. First, if the output paths is + * substitutable, then build the path that way. Second, the + * output paths can be created by running the builder, after + * recursively building any sub-derivations. For inputs that are + * not derivations, substitute them. + */ virtual void buildPaths( const std::vector & paths, BuildMode buildMode = bmNormal, std::shared_ptr evalStore = nullptr); - /* Like `buildPaths()`, but return a vector of `BuildResult`s - corresponding to each element in `paths`. Note that in case of - a build/substitution error, this function won't throw an - exception, but return a `BuildResult` containing an error - message. */ - virtual std::vector buildPathsWithResults( + /** + * Like buildPaths(), but return a vector of \ref BuildResult + * BuildResults corresponding to each element in paths. Note that in + * case of a build/substitution error, this function won't throw an + * exception, but return a BuildResult containing an error message. + */ + virtual std::vector buildPathsWithResults( const std::vector & paths, BuildMode buildMode = bmNormal, std::shared_ptr evalStore = nullptr); - /* Build a single non-materialized derivation (i.e. not from an - on-disk .drv file). - - ‘drvPath’ is used to deduplicate worker goals so it is imperative that - is correct. 
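To make the difference between `buildPaths()` and `buildPathsWithResults()` concrete, here is a hedged sketch (not part of the patch): it requests one derivation and one opaque path and inspects the per-element `BuildResult`s instead of relying on an exception. The store paths are placeholders, and the `DerivedPath::Built`/`OutputsSpec` spellings assume this revision's headers.

```cpp
#include "store-api.hh"
#include "derived-path.hh"

#include <iostream>

using namespace nix;

int main()
{
    auto store = openStore();

    // Placeholder paths; a real caller would get these from evaluation.
    auto drvPath = store->parseStorePath(
        "/nix/store/c015dhfh5l0lp6wxyvdn7bmwhbbr6hr9-example.drv");
    auto srcPath = store->parseStorePath(
        "/nix/store/c015dhfh5l0lp6wxyvdn7bmwhbbr6hr9-example-src");

    std::vector<DerivedPath> request{
        // Build all outputs of a derivation...
        DerivedPath::Built{
            .drvPath = drvPath,
            .outputs = OutputsSpec::All{},
        },
        // ...and make a plain input path valid (substituting it if needed).
        DerivedPath::Opaque{ .path = srcPath },
    };

    // Unlike buildPaths(), failures are reported per element rather than thrown.
    for (auto & result : store->buildPathsWithResults(request))
        if (!result.success())
            std::cerr << "failed: " << result.errorMsg << "\n";
}
```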
That said, it doesn't literally need to be store path that - would be calculated from writing this derivation to the store: it is OK - if it instead is that of a Derivation which would resolve to this (by - taking the outputs of it's input derivations and adding them as input - sources) such that the build time referenceable-paths are the same. - - In the input-addressed case, we usually *do* use an "original" - unresolved derivations's path, as that is what will be used in the - `buildPaths` case. Also, the input-addressed output paths are verified - only by that contents of that specific unresolved derivation, so it is - nice to keep that information around so if the original derivation is - ever obtained later, it can be verified whether the trusted user in fact - used the proper output path. - - In the content-addressed case, we want to always use the - resolved drv path calculated from the provided derivation. This serves - two purposes: - - - It keeps the operation trustless, by ruling out a maliciously - invalid drv path corresponding to a non-resolution-equivalent - derivation. - - - For the floating case in particular, it ensures that the derivation - to output mapping respects the resolution equivalence relation, so - one cannot choose different resolution-equivalent derivations to - subvert dependency coherence (i.e. the property that one doesn't end - up with multiple different versions of dependencies without - explicitly choosing to allow it). - */ + /** + * Build a single non-materialized derivation (i.e. not from an + * on-disk .drv file). + * + * @param drvPath This is used to deduplicate worker goals so it is + * imperative that is correct. That said, it doesn't literally need + * to be store path that would be calculated from writing this + * derivation to the store: it is OK if it instead is that of a + * Derivation which would resolve to this (by taking the outputs of + * it's input derivations and adding them as input sources) such + * that the build time referenceable-paths are the same. + * + * In the input-addressed case, we usually *do* use an "original" + * unresolved derivations's path, as that is what will be used in the + * buildPaths case. Also, the input-addressed output paths are verified + * only by that contents of that specific unresolved derivation, so it is + * nice to keep that information around so if the original derivation is + * ever obtained later, it can be verified whether the trusted user in fact + * used the proper output path. + * + * In the content-addressed case, we want to always use the resolved + * drv path calculated from the provided derivation. This serves two + * purposes: + * + * - It keeps the operation trustless, by ruling out a maliciously + * invalid drv path corresponding to a non-resolution-equivalent + * derivation. + * + * - For the floating case in particular, it ensures that the derivation + * to output mapping respects the resolution equivalence relation, so + * one cannot choose different resolution-equivalent derivations to + * subvert dependency coherence (i.e. the property that one doesn't end + * up with multiple different versions of dependencies without + * explicitly choosing to allow it). + */ virtual BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv, BuildMode buildMode = bmNormal); - /* Ensure that a path is valid. If it is not currently valid, it - may be made valid by running a substitute (if defined for the - path). */ + /** + * Ensure that a path is valid. 
If it is not currently valid, it + * may be made valid by running a substitute (if defined for the + * path). + */ virtual void ensurePath(const StorePath & path); - /* Add a store path as a temporary root of the garbage collector. - The root disappears as soon as we exit. */ + /** + * Add a store path as a temporary root of the garbage collector. + * The root disappears as soon as we exit. + */ virtual void addTempRoot(const StorePath & path) { debug("not creating temporary root, store doesn't support GC"); } - /* Return a string representing information about the path that - can be loaded into the database using `nix-store --load-db' or - `nix-store --register-validity'. */ + /** + * @return a string representing information about the path that + * can be loaded into the database using `nix-store --load-db` or + * `nix-store --register-validity`. + */ std::string makeValidityRegistration(const StorePathSet & paths, bool showDerivers, bool showHash); - /* Write a JSON representation of store path metadata, such as the - hash and the references. If ‘includeImpureInfo’ is true, - variable elements such as the registration time are - included. If ‘showClosureSize’ is true, the closure size of - each path is included. */ + /** + * Write a JSON representation of store path metadata, such as the + * hash and the references. + * + * @param includeImpureInfo If true, variable elements such as the + * registration time are included. + * + * @param showClosureSize If true, the closure size of each path is + * included. + */ nlohmann::json pathInfoToJSON(const StorePathSet & storePaths, bool includeImpureInfo, bool showClosureSize, Base hashBase = Base32, AllowInvalidFlag allowInvalid = DisallowInvalid); - /* Return the size of the closure of the specified path, that is, - the sum of the size of the NAR serialisation of each path in - the closure. */ + /** + * @return the size of the closure of the specified path, that is, + * the sum of the size of the NAR serialisation of each path in the + * closure. + */ std::pair getClosureSize(const StorePath & storePath); - /* Optimise the disk space usage of the Nix store by hard-linking files - with the same contents. */ + /** + * Optimise the disk space usage of the Nix store by hard-linking files + * with the same contents. + */ virtual void optimiseStore() { }; - /* Check the integrity of the Nix store. Returns true if errors - remain. */ + /** + * Check the integrity of the Nix store. + * + * @return true if errors remain. + */ virtual bool verifyStore(bool checkContents, RepairFlag repair = NoRepair) { return false; }; - /* Return an object to access files in the Nix store. */ - virtual ref getFSAccessor() - { unsupported("getFSAccessor"); } + /** + * @return An object to access files in the Nix store. + */ + virtual ref getFSAccessor() = 0; - /* Repair the contents of the given path by redownloading it using - a substituter (if available). */ + /** + * Repair the contents of the given path by redownloading it using + * a substituter (if available). + */ virtual void repairPath(const StorePath & path) { unsupported("repairPath"); } - /* Add signatures to the specified store path. The signatures are - not verified. */ + /** + * Add signatures to the specified store path. The signatures are + * not verified. + */ virtual void addSignatures(const StorePath & storePath, const StringSet & sigs) { unsupported("addSignatures"); } /* Utility functions. */ - /* Read a derivation, after ensuring its existence through - ensurePath(). 
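A short sketch (not part of the patch) of `pathInfoToJSON()` as documented above, dumping metadata plus closure sizes for one placeholder path; setup and error handling are elided.

```cpp
#include "store-api.hh"

#include <iostream>
#include <nlohmann/json.hpp>

using namespace nix;

int main()
{
    auto store = openStore();
    auto path = store->parseStorePath(
        "/nix/store/c015dhfh5l0lp6wxyvdn7bmwhbbr6hr9-example");  // placeholder

    // includeImpureInfo pulls in registration time and signatures;
    // showClosureSize adds the closure size of each path.
    auto json = store->pathInfoToJSON(
        {path},
        /* includeImpureInfo */ true,
        /* showClosureSize */ true);

    std::cout << json.dump(2) << std::endl;
}
```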
*/ + /** + * Read a derivation, after ensuring its existence through + * ensurePath(). + */ Derivation derivationFromPath(const StorePath & drvPath); - /* Read a derivation (which must already be valid). */ + /** + * Read a derivation (which must already be valid). + */ Derivation readDerivation(const StorePath & drvPath); - /* Read a derivation from a potentially invalid path. */ + /** + * Read a derivation from a potentially invalid path. + */ Derivation readInvalidDerivation(const StorePath & drvPath); - /* Place in `out' the set of all store paths in the file system - closure of `storePath'; that is, all paths than can be directly - or indirectly reached from it. `out' is not cleared. If - `flipDirection' is true, the set of paths that can reach - `storePath' is returned; that is, the closures under the - `referrers' relation instead of the `references' relation is - returned. */ + /** + * @param [out] out Place in here the set of all store paths in the + * file system closure of `storePath`; that is, all paths than can + * be directly or indirectly reached from it. `out` is not cleared. + * + * @param flipDirection If true, the set of paths that can reach + * `storePath` is returned; that is, the closures under the + * `referrers` relation instead of the `references` relation is + * returned. + */ virtual void computeFSClosure(const StorePathSet & paths, StorePathSet & out, bool flipDirection = false, bool includeOutputs = false, bool includeDerivers = false); @@ -578,27 +725,34 @@ public: StorePathSet & out, bool flipDirection = false, bool includeOutputs = false, bool includeDerivers = false); - /* Given a set of paths that are to be built, return the set of - derivations that will be built, and the set of output paths - that will be substituted. */ + /** + * Given a set of paths that are to be built, return the set of + * derivations that will be built, and the set of output paths that + * will be substituted. + */ virtual void queryMissing(const std::vector & targets, StorePathSet & willBuild, StorePathSet & willSubstitute, StorePathSet & unknown, uint64_t & downloadSize, uint64_t & narSize); - /* Sort a set of paths topologically under the references - relation. If p refers to q, then p precedes q in this list. */ + /** + * Sort a set of paths topologically under the references + * relation. If p refers to q, then p precedes q in this list. + */ StorePaths topoSortPaths(const StorePathSet & paths); - /* Export multiple paths in the format expected by ‘nix-store - --import’. */ + /** + * Export multiple paths in the format expected by ‘nix-store + * --import’. + */ void exportPaths(const StorePathSet & paths, Sink & sink); void exportPath(const StorePath & path, Sink & sink); - /* Import a sequence of NAR dumps created by exportPaths() into - the Nix store. Optionally, the contents of the NARs are - preloaded into the specified FS accessor to speed up subsequent - access. */ + /** + * Import a sequence of NAR dumps created by exportPaths() into the + * Nix store. Optionally, the contents of the NARs are preloaded + * into the specified FS accessor to speed up subsequent access. + */ StorePaths importPaths(Source & source, CheckSigsFlag checkSigs = CheckSigs); struct Stats @@ -620,8 +774,9 @@ public: const Stats & getStats(); - /* Computes the full closure of of a set of store-paths for e.g. - derivations that need this information for `exportReferencesGraph`. + /** + * Computes the full closure of of a set of store-paths for e.g. 
+ * derivations that need this information for `exportReferencesGraph`. */ StorePathSet exportReferences(const StorePathSet & storePaths, const StorePathSet & inputPaths); @@ -632,23 +787,40 @@ public: */ std::optional getBuildDerivationPath(const StorePath &); - /* Hack to allow long-running processes like hydra-queue-runner to - occasionally flush their path info cache. */ + /** + * Hack to allow long-running processes like hydra-queue-runner to + * occasionally flush their path info cache. + */ void clearPathInfoCache() { state.lock()->pathInfoCache.clear(); } - /* Establish a connection to the store, for store types that have - a notion of connection. Otherwise this is a no-op. */ + /** + * Establish a connection to the store, for store types that have + * a notion of connection. Otherwise this is a no-op. + */ virtual void connect() { }; - /* Get the protocol version of this store or it's connection. */ + /** + * Get the protocol version of this store or it's connection. + */ virtual unsigned int getProtocol() { return 0; }; + /** + * @return/ whether store trusts *us*. + * + * `std::nullopt` means we do not know. + * + * @note This is the opposite of the StoreConfig::isTrusted + * store setting. That is about whether *we* trust the store. + */ + virtual std::optional isTrustedClient() = 0; + + virtual Path toRealPath(const Path & storePath) { return storePath; @@ -659,7 +831,7 @@ public: return toRealPath(printStorePath(storePath)); } - /* + /** * Synchronises the options of the client with those of the daemon * (a no-op when there’s no daemon) */ @@ -671,7 +843,13 @@ protected: Stats stats; - /* Unsupported methods. */ + /** + * Helper for methods that are not unsupported: this is used for + * default definitions for virtual methods that are meant to be overriden. + * + * \todo Using this should be a last resort. It is better to make + * the method "virtual pure" and/or move it to a subclass. + */ [[noreturn]] void unsupported(const std::string & op) { throw Unsupported("operation '%s' is not supported by store '%s'", op, getUri()); @@ -680,7 +858,9 @@ protected: }; -/* Copy a path from one store to another. */ +/** + * Copy a path from one store to another. + */ void copyStorePath( Store & srcStore, Store & dstStore, @@ -689,12 +869,14 @@ void copyStorePath( CheckSigsFlag checkSigs = CheckSigs); -/* Copy store paths from one store to another. The paths may be copied - in parallel. They are copied in a topologically sorted order (i.e. - if A is a reference of B, then A is copied before B), but the set - of store paths is not automatically closed; use copyClosure() for - that. Returns a map of what each path was copied to the dstStore - as. */ +/** + * Copy store paths from one store to another. The paths may be copied + * in parallel. They are copied in a topologically sorted order (i.e. if + * A is a reference of B, then A is copied before B), but the set of + * store paths is not automatically closed; use copyClosure() for that. + * + * @return a map of what each path was copied to the dstStore as. + */ std::map copyPaths( Store & srcStore, Store & dstStore, const RealisedPath::Set &, @@ -709,7 +891,9 @@ std::map copyPaths( CheckSigsFlag checkSigs = CheckSigs, SubstituteFlag substitute = NoSubstitute); -/* Copy the closure of `paths` from `srcStore` to `dstStore`. */ +/** + * Copy the closure of `paths` from `srcStore` to `dstStore`. 
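As a rough illustration of the copy functions documented here (not part of the patch): copy the closure of one placeholder path from the local store to a local binary cache. Both store URLs and the path are made up; `copyClosure()` also has a `RealisedPath::Set` overload, shown just below.

```cpp
#include "store-api.hh"

using namespace nix;

int main()
{
    // Source and destination stores (URLs are placeholders; see the
    // openStore() documentation further down in this header).
    auto src = openStore("auto");
    auto dst = openStore("file:///tmp/example-cache");

    auto path = src->parseStorePath(
        "/nix/store/c015dhfh5l0lp6wxyvdn7bmwhbbr6hr9-example");  // placeholder

    // Copies the path plus everything it references, in topological order.
    StorePathSet roots{path};
    copyClosure(*src, *dst, roots);
}
```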
+ */ void copyClosure( Store & srcStore, Store & dstStore, const RealisedPath::Set & paths, @@ -724,52 +908,61 @@ void copyClosure( CheckSigsFlag checkSigs = CheckSigs, SubstituteFlag substitute = NoSubstitute); -/* Remove the temporary roots file for this process. Any temporary - root becomes garbage after this point unless it has been registered - as a (permanent) root. */ +/** + * Remove the temporary roots file for this process. Any temporary + * root becomes garbage after this point unless it has been registered + * as a (permanent) root. + */ void removeTempRoots(); -/* Resolve the derived path completely, failing if any derivation output - is unknown. */ +/** + * Resolve the derived path completely, failing if any derivation output + * is unknown. + */ OutputPathMap resolveDerivedPath(Store &, const DerivedPath::Built &, Store * evalStore = nullptr); -/* Return a Store object to access the Nix store denoted by - ‘uri’ (slight misnomer...). Supported values are: - - * ‘local’: The Nix store in /nix/store and database in - /nix/var/nix/db, accessed directly. - - * ‘daemon’: The Nix store accessed via a Unix domain socket - connection to nix-daemon. - - * ‘unix://’: The Nix store accessed via a Unix domain socket - connection to nix-daemon, with the socket located at . - - * ‘auto’ or ‘’: Equivalent to ‘local’ or ‘daemon’ depending on - whether the user has write access to the local Nix - store/database. - - * ‘file://’: A binary cache stored in . - - * ‘https://’: A binary cache accessed via HTTP. - - * ‘s3://’: A writable binary cache stored on Amazon's Simple - Storage Service. - - * ‘ssh://[user@]’: A remote Nix store accessed by running - ‘nix-store --serve’ via SSH. - - You can pass parameters to the store implementation by appending - ‘?key=value&key=value&...’ to the URI. -*/ +/** + * @return a Store object to access the Nix store denoted by + * ‘uri’ (slight misnomer...). + * + * @param uri Supported values are: + * + * - ‘local’: The Nix store in /nix/store and database in + * /nix/var/nix/db, accessed directly. + * + * - ‘daemon’: The Nix store accessed via a Unix domain socket + * connection to nix-daemon. + * + * - ‘unix://’: The Nix store accessed via a Unix domain socket + * connection to nix-daemon, with the socket located at . + * + * - ‘auto’ or ‘’: Equivalent to ‘local’ or ‘daemon’ depending on + * whether the user has write access to the local Nix + * store/database. + * + * - ‘file://’: A binary cache stored in . + * + * - ‘https://’: A binary cache accessed via HTTP. + * + * - ‘s3://’: A writable binary cache stored on Amazon's Simple + * Storage Service. + * + * - ‘ssh://[user@]’: A remote Nix store accessed by running + * ‘nix-store --serve’ via SSH. + * + * You can pass parameters to the store implementation by appending + * ‘?key=value&key=value&...’ to the URI. + */ ref openStore(const std::string & uri = settings.storeUri.get(), const Store::Params & extraParams = Store::Params()); -/* Return the default substituter stores, defined by the - ‘substituters’ option and various legacy options. */ +/** + * @return the default substituter stores, defined by the + * ‘substituters’ option and various legacy options. + */ std::list> getDefaultSubstituters(); struct StoreFactory @@ -812,8 +1005,10 @@ struct RegisterStoreImplementation }; -/* Display a set of paths in human-readable form (i.e., between quotes - and separated by commas). */ +/** + * Display a set of paths in human-readable form (i.e., between quotes + * and separated by commas). 
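A small sketch (not part of the patch) of `openStore()` with a few of the URI forms listed above; the socket path matches the documented default, while the `file://` URL and its `?compression=...` parameter are placeholders for illustration.

```cpp
#include "store-api.hh"

using namespace nix;

int main()
{
    // "auto": local store or daemon, depending on write access.
    auto local = openStore();

    // Explicit Unix domain socket connection to nix-daemon.
    auto daemon = openStore("unix:///nix/var/nix/daemon-socket/socket");

    // A binary cache in a directory, with a store parameter appended
    // as ?key=value (parameter name and value are illustrative only).
    auto cache = openStore("file:///tmp/example-cache?compression=none");
}
```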
+ */ std::string showPaths(const PathSet & paths); @@ -822,7 +1017,9 @@ std::optional decodeValidPathInfo( std::istream & str, std::optional hashGiven = std::nullopt); -/* Split URI into protocol+hierarchy part and its parameter set. */ +/** + * Split URI into protocol+hierarchy part and its parameter set. + */ std::pair splitUriAndParams(const std::string & uri); std::optional getDerivationCA(const BasicDerivation & drv); diff --git a/src/libstore/store-cast.hh b/src/libstore/store-cast.hh index ff62fc359..2473e72c5 100644 --- a/src/libstore/store-cast.hh +++ b/src/libstore/store-cast.hh @@ -1,9 +1,17 @@ #pragma once +///@file #include "store-api.hh" namespace nix { +/** + * Helper to try downcasting a Store with a nice method if it fails. + * + * This is basically an alternative to the user-facing part of + * Store::unsupported that allows us to still have a nice message but + * better interface design. + */ template T & require(Store & store) { diff --git a/src/libstore/tests/derivation.cc b/src/libstore/tests/derivation.cc index 12be8504d..6f94904dd 100644 --- a/src/libstore/tests/derivation.cc +++ b/src/libstore/tests/derivation.cc @@ -1,6 +1,7 @@ #include #include +#include "experimental-features.hh" #include "derivations.hh" #include "tests/libstore.hh" @@ -9,17 +10,54 @@ namespace nix { class DerivationTest : public LibStoreTest { +public: + /** + * We set these in tests rather than the regular globals so we don't have + * to worry about race conditions if the tests run concurrently. + */ + ExperimentalFeatureSettings mockXpSettings; }; -#define TEST_JSON(TYPE, NAME, STR, VAL, ...) \ - TEST_F(DerivationTest, TYPE ## _ ## NAME ## _to_json) { \ - using nlohmann::literals::operator "" _json; \ - ASSERT_EQ( \ - STR ## _json, \ - (TYPE { VAL }).toJSON(*store __VA_OPT__(,) __VA_ARGS__)); \ +class CaDerivationTest : public DerivationTest +{ + void SetUp() override + { + mockXpSettings.set("experimental-features", "ca-derivations"); + } +}; + +class ImpureDerivationTest : public DerivationTest +{ + void SetUp() override + { + mockXpSettings.set("experimental-features", "impure-derivations"); + } +}; + +#define TEST_JSON(FIXTURE, NAME, STR, VAL, DRV_NAME, OUTPUT_NAME) \ + TEST_F(FIXTURE, DerivationOutput_ ## NAME ## _to_json) { \ + using nlohmann::literals::operator "" _json; \ + ASSERT_EQ( \ + STR ## _json, \ + (DerivationOutput { VAL }).toJSON( \ + *store, \ + DRV_NAME, \ + OUTPUT_NAME)); \ + } \ + \ + TEST_F(FIXTURE, DerivationOutput_ ## NAME ## _from_json) { \ + using nlohmann::literals::operator "" _json; \ + ASSERT_EQ( \ + DerivationOutput { VAL }, \ + DerivationOutput::fromJSON( \ + *store, \ + DRV_NAME, \ + OUTPUT_NAME, \ + STR ## _json, \ + mockXpSettings)); \ } -TEST_JSON(DerivationOutput, inputAddressed, +TEST_JSON(DerivationTest, inputAddressed, R"({ "path": "/nix/store/c015dhfh5l0lp6wxyvdn7bmwhbbr6hr9-drv-name-output-name" })", @@ -28,7 +66,7 @@ TEST_JSON(DerivationOutput, inputAddressed, }), "drv-name", "output-name") -TEST_JSON(DerivationOutput, caFixed, +TEST_JSON(DerivationTest, caFixed, R"({ "hashAlgo": "r:sha256", "hash": "894517c9163c896ec31a2adbd33c0681fd5f45b2c0ef08a64c92a03fb97f390f", @@ -42,7 +80,7 @@ TEST_JSON(DerivationOutput, caFixed, }), "drv-name", "output-name") -TEST_JSON(DerivationOutput, caFloating, +TEST_JSON(CaDerivationTest, caFloating, R"({ "hashAlgo": "r:sha256" })", @@ -52,12 +90,12 @@ TEST_JSON(DerivationOutput, caFloating, }), "drv-name", "output-name") -TEST_JSON(DerivationOutput, deferred, +TEST_JSON(DerivationTest, deferred, R"({ })", 
DerivationOutput::Deferred { }, "drv-name", "output-name") -TEST_JSON(DerivationOutput, impure, +TEST_JSON(ImpureDerivationTest, impure, R"({ "hashAlgo": "r:sha256", "impure": true @@ -68,8 +106,28 @@ TEST_JSON(DerivationOutput, impure, }), "drv-name", "output-name") -TEST_JSON(Derivation, impure, +#undef TEST_JSON + +#define TEST_JSON(NAME, STR, VAL, DRV_NAME) \ + TEST_F(DerivationTest, Derivation_ ## NAME ## _to_json) { \ + using nlohmann::literals::operator "" _json; \ + ASSERT_EQ( \ + STR ## _json, \ + (Derivation { VAL }).toJSON(*store)); \ + } \ + \ + TEST_F(DerivationTest, Derivation_ ## NAME ## _from_json) { \ + using nlohmann::literals::operator "" _json; \ + ASSERT_EQ( \ + Derivation { VAL }, \ + Derivation::fromJSON( \ + *store, \ + STR ## _json)); \ + } + +TEST_JSON(simple, R"({ + "name": "my-derivation", "inputSrcs": [ "/nix/store/c015dhfh5l0lp6wxyvdn7bmwhbbr6hr9-dep1" ], @@ -92,6 +150,7 @@ TEST_JSON(Derivation, impure, })", ({ Derivation drv; + drv.name = "my-derivation"; drv.inputSrcs = { store->parseStorePath("/nix/store/c015dhfh5l0lp6wxyvdn7bmwhbbr6hr9-dep1"), }; @@ -117,7 +176,8 @@ TEST_JSON(Derivation, impure, }, }; drv; - })) + }), + "drv-name") #undef TEST_JSON diff --git a/src/libstore/tests/derived-path.cc b/src/libstore/tests/derived-path.cc index d1ac2c5e7..e6d32dbd0 100644 --- a/src/libstore/tests/derived-path.cc +++ b/src/libstore/tests/derived-path.cc @@ -51,6 +51,14 @@ TEST_F(DerivedPathTest, force_init) { } +RC_GTEST_FIXTURE_PROP( + DerivedPathTest, + prop_legacy_round_rip, + (const DerivedPath & o)) +{ + RC_ASSERT(o == DerivedPath::parseLegacy(*store, o.to_string_legacy(*store))); +} + RC_GTEST_FIXTURE_PROP( DerivedPathTest, prop_round_rip, diff --git a/src/libstore/tests/derived-path.hh b/src/libstore/tests/derived-path.hh index 3bc812440..506f3ccb1 100644 --- a/src/libstore/tests/derived-path.hh +++ b/src/libstore/tests/derived-path.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include diff --git a/src/libstore/tests/libstore.hh b/src/libstore/tests/libstore.hh index 05397659b..ef93457b5 100644 --- a/src/libstore/tests/libstore.hh +++ b/src/libstore/tests/libstore.hh @@ -1,3 +1,6 @@ +#pragma once +///@file + #include #include diff --git a/src/libstore/tests/outputs-spec.hh b/src/libstore/tests/outputs-spec.hh index 2d455c817..ded331b33 100644 --- a/src/libstore/tests/outputs-spec.hh +++ b/src/libstore/tests/outputs-spec.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include diff --git a/src/libstore/tests/path.hh b/src/libstore/tests/path.hh index d7f1a8988..21cb62310 100644 --- a/src/libstore/tests/path.hh +++ b/src/libstore/tests/path.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include diff --git a/src/libstore/uds-remote-store.cc b/src/libstore/uds-remote-store.cc index 5c38323cd..0fb7c38e9 100644 --- a/src/libstore/uds-remote-store.cc +++ b/src/libstore/uds-remote-store.cc @@ -26,9 +26,9 @@ UDSRemoteStore::UDSRemoteStore(const Params & params) UDSRemoteStore::UDSRemoteStore( - const std::string scheme, - std::string socket_path, - const Params & params) + const std::string scheme, + std::string socket_path, + const Params & params) : UDSRemoteStore(params) { path.emplace(socket_path); diff --git a/src/libstore/uds-remote-store.hh b/src/libstore/uds-remote-store.hh index f8dfcca70..bd1dcb67c 100644 --- a/src/libstore/uds-remote-store.hh +++ b/src/libstore/uds-remote-store.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "remote-store.hh" #include "local-fs-store.hh" @@ -15,6 +16,13 @@ struct UDSRemoteStoreConfig : virtual LocalFSStoreConfig, virtual 
RemoteStoreCon } const std::string name() override { return "Local Daemon Store"; } + + std::string doc() override + { + return + #include "uds-remote-store.md" + ; + } }; class UDSRemoteStore : public virtual UDSRemoteStoreConfig, public virtual LocalFSStore, public virtual RemoteStore @@ -29,9 +37,6 @@ public: static std::set uriSchemes() { return {"unix"}; } - bool sameMachine() override - { return true; } - ref getFSAccessor() override { return LocalFSStore::getFSAccessor(); } diff --git a/src/libstore/uds-remote-store.md b/src/libstore/uds-remote-store.md new file mode 100644 index 000000000..8df0bd6ff --- /dev/null +++ b/src/libstore/uds-remote-store.md @@ -0,0 +1,9 @@ +R"( + +**Store URL format**: `daemon`, `unix://`*path* + +This store type accesses a Nix store by talking to a Nix daemon +listening on the Unix domain socket *path*. The store pseudo-URL +`daemon` is equivalent to `unix:///nix/var/nix/daemon-socket/socket`. + +)" diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh index 87088a3ac..34b2fc17b 100644 --- a/src/libstore/worker-protocol.hh +++ b/src/libstore/worker-protocol.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "store-api.hh" #include "serialise.hh" @@ -9,11 +10,15 @@ namespace nix { #define WORKER_MAGIC_1 0x6e697863 #define WORKER_MAGIC_2 0x6478696f -#define PROTOCOL_VERSION (1 << 8 | 34) +#define PROTOCOL_VERSION (1 << 8 | 35) #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00) #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff) +/** + * Enumeration of all the request types for the "worker protocol", used + * by unix:// and ssh-ng:// stores. + */ typedef enum { wopIsValidPath = 1, wopHasSubstitutes = 3, @@ -74,7 +79,12 @@ typedef enum { class Store; struct Source; -/* To guide overloading */ +/** + * Used to guide overloading + * + * See https://en.cppreference.com/w/cpp/language/adl for the broader + * concept of what is going on here. + */ template struct Phantom {}; @@ -93,6 +103,8 @@ MAKE_WORKER_PROTO(, DerivedPath); MAKE_WORKER_PROTO(, Realisation); MAKE_WORKER_PROTO(, DrvOutput); MAKE_WORKER_PROTO(, BuildResult); +MAKE_WORKER_PROTO(, KeyedBuildResult); +MAKE_WORKER_PROTO(, std::optional); MAKE_WORKER_PROTO(template, std::vector); MAKE_WORKER_PROTO(template, std::set); @@ -103,18 +115,19 @@ MAKE_WORKER_PROTO(X_, Y_); #undef X_ #undef Y_ -/* These use the empty string for the null case, relying on the fact - that the underlying types never serialize to the empty string. - - We do this instead of a generic std::optional instance because - ordinal tags (0 or 1, here) are a bit of a compatability hazard. For - the same reason, we don't have a std::variant instances (ordinal - tags 0...n). - - We could the generic instances and then these as specializations for - compatability, but that's proven a bit finnicky, and also makes the - worker protocol harder to implement in other languages where such - specializations may not be allowed. +/** + * These use the empty string for the null case, relying on the fact + * that the underlying types never serialize to the empty string. + * + * We do this instead of a generic std::optional instance because + * ordinal tags (0 or 1, here) are a bit of a compatability hazard. For + * the same reason, we don't have a std::variant instances (ordinal + * tags 0...n). 
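The `Phantom` comment above is about using an empty tag type so that overload resolution (and ADL) can pick a deserialiser by result type. Below is a self-contained toy illustration of that trick, deliberately simplified: it uses `std::istream` and whitespace-separated tokens instead of the worker protocol's `Source`/`Sink` framing, so none of it is the actual protocol code.

```cpp
#include <cassert>
#include <cstdint>
#include <sstream>
#include <string>
#include <vector>

// The tag carries the type we want to read; the wire data itself is untyped.
template<typename T>
struct Phantom {};

// One overload per wire type.
std::uint64_t read(std::istream & in, Phantom<std::uint64_t>)
{
    std::uint64_t n;
    in >> n;
    return n;
}

std::string read(std::istream & in, Phantom<std::string>)
{
    std::string s;
    in >> s;
    return s;
}

// A generic container instance recurses through the tag, so
// read(in, Phantom<std::vector<T>>{}) works for any readable T.
template<typename T>
std::vector<T> read(std::istream & in, Phantom<std::vector<T>>)
{
    std::vector<T> result;
    auto n = read(in, Phantom<std::uint64_t>{});
    while (n--)
        result.push_back(read(in, Phantom<T>{}));
    return result;
}

int main()
{
    std::istringstream in("2 foo bar");
    auto v = read(in, Phantom<std::vector<std::string>>{});
    assert(v.size() == 2 && v[0] == "foo" && v[1] == "bar");
}
```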
+ * + * We could the generic instances and then these as specializations for + * compatability, but that's proven a bit finnicky, and also makes the + * worker protocol harder to implement in other languages where such + * specializations may not be allowed. */ MAKE_WORKER_PROTO(, std::optional); MAKE_WORKER_PROTO(, std::optional); diff --git a/src/libutil/abstract-setting-to-json.hh b/src/libutil/abstract-setting-to-json.hh index 2d82b54e7..7b6c3fcb5 100644 --- a/src/libutil/abstract-setting-to-json.hh +++ b/src/libutil/abstract-setting-to-json.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include #include "config.hh" diff --git a/src/libutil/ansicolor.hh b/src/libutil/ansicolor.hh index 38305e71c..86becafa6 100644 --- a/src/libutil/ansicolor.hh +++ b/src/libutil/ansicolor.hh @@ -1,8 +1,12 @@ #pragma once +/** + * @file + * + * @brief Some ANSI escape sequences. + */ namespace nix { -/* Some ANSI escape sequences. */ #define ANSI_NORMAL "\e[0m" #define ANSI_BOLD "\e[1m" #define ANSI_FAINT "\e[2m" diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc index 0e2b9d12c..268a798d9 100644 --- a/src/libutil/archive.cc +++ b/src/libutil/archive.cc @@ -87,7 +87,7 @@ static time_t dump(const Path & path, Sink & sink, PathFilter & filter) std::string name(i.name); size_t pos = i.name.find(caseHackSuffix); if (pos != std::string::npos) { - debug(format("removing case hack suffix from '%1%'") % (path + "/" + i.name)); + debug("removing case hack suffix from '%1%'", path + "/" + i.name); name.erase(pos); } if (!unhacked.emplace(name, i.name).second) @@ -262,7 +262,7 @@ static void parse(ParseSink & sink, Source & source, const Path & path) if (archiveSettings.useCaseHack) { auto i = names.find(name); if (i != names.end()) { - debug(format("case collision between '%1%' and '%2%'") % i->first % name); + debug("case collision between '%1%' and '%2%'", i->first, name); name += caseHackSuffix; name += std::to_string(++i->second); } else diff --git a/src/libutil/archive.hh b/src/libutil/archive.hh index e42dea540..2cf164a41 100644 --- a/src/libutil/archive.hh +++ b/src/libutil/archive.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "types.hh" #include "serialise.hh" @@ -7,54 +8,73 @@ namespace nix { -/* dumpPath creates a Nix archive of the specified path. The format - is as follows: - - IF path points to a REGULAR FILE: - dump(path) = attrs( - [ ("type", "regular") - , ("contents", contents(path)) - ]) - - IF path points to a DIRECTORY: - dump(path) = attrs( - [ ("type", "directory") - , ("entries", concat(map(f, sort(entries(path))))) - ]) - where f(fn) = attrs( - [ ("name", fn) - , ("file", dump(path + "/" + fn)) - ]) - - where: - - attrs(as) = concat(map(attr, as)) + encN(0) - attrs((a, b)) = encS(a) + encS(b) - - encS(s) = encN(len(s)) + s + (padding until next 64-bit boundary) - - encN(n) = 64-bit little-endian encoding of n. - - contents(path) = the contents of a regular file. - - sort(strings) = lexicographic sort by 8-bit value (strcmp). - - entries(path) = the entries of a directory, without `.' and - `..'. - - `+' denotes string concatenation. */ - - +/** + * dumpPath creates a Nix archive of the specified path. + * + * @param path the file system data to dump. Dumping is recursive so if + * this is a directory we dump it and all its children. + * + * @param [out] sink The serialised archive is fed into this sink. + * + * @param filter Can be used to skip certain files. 
+ * + * The format is as follows: + * + * ``` + * IF path points to a REGULAR FILE: + * dump(path) = attrs( + * [ ("type", "regular") + * , ("contents", contents(path)) + * ]) + * + * IF path points to a DIRECTORY: + * dump(path) = attrs( + * [ ("type", "directory") + * , ("entries", concat(map(f, sort(entries(path))))) + * ]) + * where f(fn) = attrs( + * [ ("name", fn) + * , ("file", dump(path + "/" + fn)) + * ]) + * + * where: + * + * attrs(as) = concat(map(attr, as)) + encN(0) + * attrs((a, b)) = encS(a) + encS(b) + * + * encS(s) = encN(len(s)) + s + (padding until next 64-bit boundary) + * + * encN(n) = 64-bit little-endian encoding of n. + * + * contents(path) = the contents of a regular file. + * + * sort(strings) = lexicographic sort by 8-bit value (strcmp). + * + * entries(path) = the entries of a directory, without `.` and + * `..`. + * + * `+` denotes string concatenation. + * ``` + */ void dumpPath(const Path & path, Sink & sink, PathFilter & filter = defaultPathFilter); -/* Same as `void dumpPath()`, but returns the last modified date of the path */ +/** + * Same as dumpPath(), but returns the last modified date of the path. + */ time_t dumpPathAndGetMtime(const Path & path, Sink & sink, PathFilter & filter = defaultPathFilter); +/** + * Dump an archive with a single file with these contents. + * + * @param s Contents of the file. + */ void dumpString(std::string_view s, Sink & sink); -/* FIXME: fix this API, it sucks. */ +/** + * \todo Fix this API, it sucks. + */ struct ParseSink { virtual void createDirectory(const Path & path) { }; @@ -68,8 +88,10 @@ struct ParseSink virtual void createSymlink(const Path & path, const std::string & target) { }; }; -/* If the NAR archive contains a single file at top-level, then save - the contents of the file to `s'. Otherwise barf. */ +/** + * If the NAR archive contains a single file at top-level, then save + * the contents of the file to `s`. Otherwise barf. + */ struct RetrieveRegularNARSink : ParseSink { bool regular = true; @@ -97,7 +119,9 @@ void parseDump(ParseSink & sink, Source & source); void restorePath(const Path & path, Source & source); -/* Read a NAR from 'source' and write it to 'sink'. */ +/** + * Read a NAR from 'source' and write it to 'sink'. 
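A hedged usage sketch (not from the patch) of `dumpPath()`, the `filter` parameter, and `restorePath()`: serialise a directory to an in-memory NAR and unpack it elsewhere. The directory names are placeholders and error handling is omitted.

```cpp
#include "archive.hh"
#include "serialise.hh"

#include <iostream>

using namespace nix;

int main()
{
    // Skip editor backup files; returning true means "include this entry".
    PathFilter filter = [](const Path & p) {
        return p.empty() || p.back() != '~';
    };

    // Serialise ./some-dir (placeholder) into an in-memory NAR.
    StringSink nar;
    dumpPath("./some-dir", nar, filter);
    std::cout << "NAR size: " << nar.s.size() << " bytes\n";

    // Round-trip: unpack the same bytes at another location.
    StringSource source(nar.s);
    restorePath("./copy-of-some-dir", source);
}
```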
+ */ void copyNAR(Source & source, Sink & sink); void copyPath(const Path & from, const Path & to); diff --git a/src/libutil/args.cc b/src/libutil/args.cc index 35686a8aa..081dbeb28 100644 --- a/src/libutil/args.cc +++ b/src/libutil/args.cc @@ -52,7 +52,7 @@ std::shared_ptr completions; std::string completionMarker = "___COMPLETE___"; -std::optional needsCompletion(std::string_view s) +static std::optional needsCompletion(std::string_view s) { if (!completions) return {}; auto i = s.find(completionMarker); @@ -120,6 +120,12 @@ void Args::parseCmdline(const Strings & _cmdline) if (!argsSeen) initialFlagsProcessed(); + + /* Now that we are done parsing, make sure that any experimental + * feature required by the flags is enabled */ + for (auto & f : flagExperimentalFeatures) + experimentalFeatureSettings.require(f); + } bool Args::processFlag(Strings::iterator & pos, Strings::iterator end) @@ -128,12 +134,18 @@ bool Args::processFlag(Strings::iterator & pos, Strings::iterator end) auto process = [&](const std::string & name, const Flag & flag) -> bool { ++pos; + + if (auto & f = flag.experimentalFeature) + flagExperimentalFeatures.insert(*f); + std::vector args; bool anyCompleted = false; for (size_t n = 0 ; n < flag.handler.arity; ++n) { if (pos == end) { if (flag.handler.arity == ArityAny || anyCompleted) break; - throw UsageError("flag '%s' requires %d argument(s)", name, flag.handler.arity); + throw UsageError( + "flag '%s' requires %d argument(s), but only %d were given", + name, flag.handler.arity, n); } if (auto prefix = needsCompletion(*pos)) { anyCompleted = true; @@ -152,7 +164,11 @@ bool Args::processFlag(Strings::iterator & pos, Strings::iterator end) for (auto & [name, flag] : longFlags) { if (!hiddenCategories.count(flag->category) && hasPrefix(name, std::string(*prefix, 2))) + { + if (auto & f = flag->experimentalFeature) + flagExperimentalFeatures.insert(*f); completions->add("--" + name, flag->description); + } } return false; } @@ -172,7 +188,8 @@ bool Args::processFlag(Strings::iterator & pos, Strings::iterator end) if (prefix == "-") { completions->add("--"); for (auto & [flagName, flag] : shortFlags) - completions->add(std::string("-") + flagName, flag->description); + if (experimentalFeatureSettings.isEnabled(flag->experimentalFeature)) + completions->add(std::string("-") + flagName, flag->description); } } @@ -230,6 +247,11 @@ nlohmann::json Args::toJSON() j["arity"] = flag->handler.arity; if (!flag->labels.empty()) j["labels"] = flag->labels; + // TODO With C++23 use `std::optional::tranform` + if (auto & xp = flag->experimentalFeature) + j["experimental-feature"] = showExperimentalFeature(*xp); + else + j["experimental-feature"] = nullptr; flags[name] = std::move(j); } @@ -326,6 +348,11 @@ Strings argvToStrings(int argc, char * * argv) return args; } +std::optional Command::experimentalFeature () +{ + return { Xp::NixCommand }; +} + MultiCommand::MultiCommand(const Commands & commands_) : commands(commands_) { @@ -389,6 +416,11 @@ nlohmann::json MultiCommand::toJSON() cat["id"] = command->category(); cat["description"] = trim(categories[command->category()]); j["category"] = std::move(cat); + // TODO With C++23 use `std::optional::tranform` + if (auto xp = command->experimentalFeature()) + cat["experimental-feature"] = showExperimentalFeature(*xp); + else + cat["experimental-feature"] = nullptr; cmds[name] = std::move(j); } diff --git a/src/libutil/args.hh b/src/libutil/args.hh index 84866f12b..d90129796 100644 --- a/src/libutil/args.hh +++ b/src/libutil/args.hh @@ 
-1,4 +1,5 @@ #pragma once +///@file #include #include @@ -18,16 +19,22 @@ class Args { public: - /* Parse the command line, throwing a UsageError if something goes - wrong. */ + /** + * Parse the command line, throwing a UsageError if something goes + * wrong. + */ void parseCmdline(const Strings & cmdline); - /* Return a short one-line description of the command. */ + /** + * Return a short one-line description of the command. + */ virtual std::string description() { return ""; } virtual bool forceImpureByDefault() { return false; } - /* Return documentation about this command, in Markdown format. */ + /** + * Return documentation about this command, in Markdown format. + */ virtual std::string doc() { return ""; } protected: @@ -117,6 +124,8 @@ protected: Handler handler; std::function completer; + std::optional experimentalFeature; + static Flag mkHashTypeFlag(std::string && longName, HashType * ht); static Flag mkHashTypeOptFlag(std::string && longName, std::optional * oht); }; @@ -144,13 +153,17 @@ protected: std::set hiddenCategories; - /* Called after all command line flags before the first non-flag - argument (if any) have been processed. */ + /** + * Called after all command line flags before the first non-flag + * argument (if any) have been processed. + */ virtual void initialFlagsProcessed() {} - /* Called after the command line has been processed if we need to generate - completions. Useful for commands that need to know the whole command line - in order to know what completions to generate. */ + /** + * Called after the command line has been processed if we need to generate + * completions. Useful for commands that need to know the whole command line + * in order to know what completions to generate. + */ virtual void completionHook() { } public: @@ -164,7 +177,9 @@ public: expectedArgs.emplace_back(std::move(arg)); } - /* Expect a string argument. */ + /** + * Expect a string argument. + */ void expectArg(const std::string & label, std::string * dest, bool optional = false) { expectArgs({ @@ -174,7 +189,9 @@ public: }); } - /* Expect 0 or more arguments. */ + /** + * Expect 0 or more arguments. + */ void expectArgs(const std::string & label, std::vector * dest) { expectArgs({ @@ -188,30 +205,48 @@ public: friend class MultiCommand; MultiCommand * parent = nullptr; + +private: + + /** + * Experimental features needed when parsing args. These are checked + * after flag parsing is completed in order to support enabling + * experimental features coming after the flag that needs the + * experimental feature. + */ + std::set flagExperimentalFeatures; }; -/* A command is an argument parser that can be executed by calling its - run() method. */ +/** + * A command is an argument parser that can be executed by calling its + * run() method. + */ struct Command : virtual public Args { friend class MultiCommand; virtual ~Command() { } - virtual void prepare() { }; + /** + * Entry point to the command + */ virtual void run() = 0; typedef int Category; static constexpr Category catDefault = 0; + virtual std::optional experimentalFeature (); + virtual Category category() { return catDefault; } }; typedef std::map()>> Commands; -/* An argument parser that supports multiple subcommands, - i.e. ‘ ’. */ +/** + * An argument parser that supports multiple subcommands, + * i.e. ‘ ’. + */ class MultiCommand : virtual public Args { public: @@ -219,7 +254,9 @@ public: std::map categories; - // Selected command, if any. + /** + * Selected command, if any. 
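To make the `Args`/`Flag` interface above concrete, here is a hedged sketch of a tiny parser (not part of the patch): one string flag, one boolean flag, and a mandatory positional argument. The flag and argument names are invented, and the `Handler` initialiser forms assume this revision's `args.hh`.

```cpp
#include "args.hh"

#include <cctype>
#include <iostream>

using namespace nix;

struct MyArgs : Args
{
    std::string greeting = "hello";
    std::string target;
    bool shout = false;

    MyArgs()
    {
        addFlag({
            .longName = "greeting",
            .description = "Phrase to greet with.",
            .labels = {"phrase"},
            .handler = {&greeting},
        });

        addFlag({
            .longName = "shout",
            .shortName = 's',
            .description = "Print the greeting in upper case.",
            .handler = {&shout, true},
        });

        // One mandatory positional argument.
        expectArg("target", &target);
    }
};

int main(int argc, char * * argv)
{
    MyArgs args;
    args.parseCmdline(argvToStrings(argc, argv));

    auto msg = args.greeting + ", " + args.target + "!";
    if (args.shout)
        for (auto & c : msg) c = std::toupper((unsigned char) c);
    std::cout << msg << std::endl;
}
```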
+ */ std::optional>> command; MultiCommand(const Commands & commands); @@ -254,8 +291,6 @@ enum CompletionType { }; extern CompletionType completionType; -std::optional needsCompletion(std::string_view s); - void completePath(size_t, std::string_view prefix); void completeDir(size_t, std::string_view prefix); diff --git a/src/libutil/callback.hh b/src/libutil/callback.hh index ef31794be..3710d1239 100644 --- a/src/libutil/callback.hh +++ b/src/libutil/callback.hh @@ -1,13 +1,16 @@ #pragma once +///@file #include #include namespace nix { -/* A callback is a wrapper around a lambda that accepts a valid of - type T or an exception. (We abuse std::future to pass the value or - exception.) */ +/** + * A callback is a wrapper around a lambda that accepts a valid of + * type T or an exception. (We abuse std::future to pass the value or + * exception.) + */ template class Callback { diff --git a/src/libutil/canon-path.cc b/src/libutil/canon-path.cc index b132b4262..040464532 100644 --- a/src/libutil/canon-path.cc +++ b/src/libutil/canon-path.cc @@ -13,6 +13,11 @@ CanonPath::CanonPath(std::string_view raw, const CanonPath & root) : path(absPath((Path) raw, root.abs())) { } +CanonPath CanonPath::fromCwd(std::string_view path) +{ + return CanonPath(unchecked_t(), absPath((Path) path)); +} + std::optional CanonPath::parent() const { if (isRoot()) return std::nullopt; @@ -100,4 +105,30 @@ std::ostream & operator << (std::ostream & stream, const CanonPath & path) return stream; } +std::string CanonPath::makeRelative(const CanonPath & path) const +{ + auto p1 = begin(); + auto p2 = path.begin(); + + for (; p1 != end() && p2 != path.end() && *p1 == *p2; ++p1, ++p2) ; + + if (p1 == end() && p2 == path.end()) + return "."; + else if (p1 == end()) + return std::string(p2.remaining); + else { + std::string res; + while (p1 != end()) { + ++p1; + if (!res.empty()) res += '/'; + res += ".."; + } + if (p2 != path.end()) { + if (!res.empty()) res += '/'; + res += p2.remaining; + } + return res; + } +} + } diff --git a/src/libutil/canon-path.hh b/src/libutil/canon-path.hh index 9d5984584..eefe05ed5 100644 --- a/src/libutil/canon-path.hh +++ b/src/libutil/canon-path.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include #include @@ -8,28 +9,31 @@ namespace nix { -/* A canonical representation of a path. It ensures the following: - - - It always starts with a slash. - - - It never ends with a slash, except if the path is "/". - - - A slash is never followed by a slash (i.e. no empty components). - - - There are no components equal to '.' or '..'. - - Note that the path does not need to correspond to an actually - existing path, and there is no guarantee that symlinks are - resolved. -*/ +/** + * A canonical representation of a path. It ensures the following: + * + * - It always starts with a slash. + * + * - It never ends with a slash, except if the path is "/". + * + * - A slash is never followed by a slash (i.e. no empty components). + * + * - There are no components equal to '.' or '..'. + * + * Note that the path does not need to correspond to an actually + * existing path, and there is no guarantee that symlinks are + * resolved. + */ class CanonPath { std::string path; public: - /* Construct a canon path from a non-canonical path. Any '.', '..' - or empty components are removed. */ + /** + * Construct a canon path from a non-canonical path. Any '.', '..' + * or empty components are removed. 
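A small self-contained sketch (not part of the patch) exercising the `CanonPath` behaviour described here: normalisation on construction, the rooted constructor, and `makeRelative()` as defined in the `canon-path.cc` hunk above. The concrete paths are arbitrary examples.

```cpp
#include "canon-path.hh"

#include <cassert>

using namespace nix;

int main()
{
    // '.', '..' and empty components are removed on construction.
    CanonPath p("/foo//bar/./baz/..");
    assert(p.abs() == "/foo/bar");

    // A relative string is interpreted against a root path.
    CanonPath q("etc/nix", CanonPath::root);
    assert(q.abs() == "/etc/nix");

    // makeRelative() produces a path leading from *this to its argument.
    CanonPath base("/foo");
    assert(base.makeRelative(CanonPath("/foo/bar/baz")) == "bar/baz");
    assert(base.makeRelative(CanonPath("/quux")) == "../quux");
}
```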
+ */ CanonPath(std::string_view raw); explicit CanonPath(const char * raw) @@ -42,11 +46,15 @@ public: : path(std::move(path)) { } + static CanonPath fromCwd(std::string_view path = "."); + static CanonPath root; - /* If `raw` starts with a slash, return - `CanonPath(raw)`. Otherwise return a `CanonPath` representing - `root + "/" + raw`. */ + /** + * If `raw` starts with a slash, return + * `CanonPath(raw)`. Otherwise return a `CanonPath` representing + * `root + "/" + raw`. + */ CanonPath(std::string_view raw, const CanonPath & root); bool isRoot() const @@ -58,8 +66,10 @@ public: const std::string & abs() const { return path; } - /* Like abs(), but return an empty string if this path is - '/'. Thus the returned string never ends in a slash. */ + /** + * Like abs(), but return an empty string if this path is + * '/'. Thus the returned string never ends in a slash. + */ const std::string & absOrEmpty() const { const static std::string epsilon; @@ -85,6 +95,9 @@ public: bool operator != (const Iterator & x) const { return remaining.data() != x.remaining.data(); } + bool operator == (const Iterator & x) const + { return !(*this != x); } + const std::string_view operator * () const { return remaining.substr(0, slash); } @@ -104,7 +117,9 @@ public: std::optional parent() const; - /* Remove the last component. Panics if this path is the root. */ + /** + * Remove the last component. Panics if this path is the root. + */ void pop(); std::optional dirOf() const @@ -125,10 +140,12 @@ public: bool operator != (const CanonPath & x) const { return path != x.path; } - /* Compare paths lexicographically except that path separators - are sorted before any other character. That is, in the sorted order - a directory is always followed directly by its children. For - instance, 'foo' < 'foo/bar' < 'foo!'. */ + /** + * Compare paths lexicographically except that path separators + * are sorted before any other character. That is, in the sorted order + * a directory is always followed directly by its children. For + * instance, 'foo' < 'foo/bar' < 'foo!'. + */ bool operator < (const CanonPath & x) const { auto i = path.begin(); @@ -144,28 +161,44 @@ public: return i == path.end() && j != x.path.end(); } - /* Return true if `this` is equal to `parent` or a child of - `parent`. */ + /** + * Return true if `this` is equal to `parent` or a child of + * `parent`. + */ bool isWithin(const CanonPath & parent) const; CanonPath removePrefix(const CanonPath & prefix) const; - /* Append another path to this one. */ + /** + * Append another path to this one. + */ void extend(const CanonPath & x); - /* Concatenate two paths. */ + /** + * Concatenate two paths. + */ CanonPath operator + (const CanonPath & x) const; - /* Add a path component to this one. It must not contain any slashes. */ + /** + * Add a path component to this one. It must not contain any slashes. + */ void push(std::string_view c); CanonPath operator + (std::string_view c) const; - /* Check whether access to this path is allowed, which is the case - if 1) `this` is within any of the `allowed` paths; or 2) any of - the `allowed` paths are within `this`. (The latter condition - ensures access to the parents of allowed paths.) */ + /** + * Check whether access to this path is allowed, which is the case + * if 1) `this` is within any of the `allowed` paths; or 2) any of + * the `allowed` paths are within `this`. (The latter condition + * ensures access to the parents of allowed paths.) 
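The ordering and `isAllowed()` semantics documented above can be demonstrated with a short sketch (not from the patch); the directory names are placeholders.

```cpp
#include "canon-path.hh"

#include <cassert>
#include <set>

using namespace nix;

int main()
{
    // A directory sorts directly before its children: 'foo' < 'foo/bar' < 'foo!'.
    assert(CanonPath("/foo") < CanonPath("/foo/bar"));
    assert(CanonPath("/foo/bar") < CanonPath("/foo!"));

    std::set<CanonPath> allowed{CanonPath("/src/project")};

    // Paths inside an allowed prefix are accessible...
    assert(CanonPath("/src/project/flake.nix").isAllowed(allowed));
    // ...as are the parents needed to reach it...
    assert(CanonPath("/src").isAllowed(allowed));
    // ...but unrelated siblings are not.
    assert(!CanonPath("/src/other").isAllowed(allowed));
}
```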
+ */ bool isAllowed(const std::set & allowed) const; + + /** + * Return a representation `x` of `path` relative to `this`, i.e. + * `CanonPath(this.makeRelative(x), this) == path`. + */ + std::string makeRelative(const CanonPath & path) const; }; std::ostream & operator << (std::ostream & stream, const CanonPath & path); diff --git a/src/libutil/cgroup.hh b/src/libutil/cgroup.hh index d08c8ad29..574ae8e5b 100644 --- a/src/libutil/cgroup.hh +++ b/src/libutil/cgroup.hh @@ -1,4 +1,5 @@ #pragma once +///@file #if __linux__ @@ -18,10 +19,12 @@ struct CgroupStats std::optional cpuUser, cpuSystem; }; -/* Destroy the cgroup denoted by 'path'. The postcondition is that - 'path' does not exist, and thus any processes in the cgroup have - been killed. Also return statistics from the cgroup just before - destruction. */ +/** + * Destroy the cgroup denoted by 'path'. The postcondition is that + * 'path' does not exist, and thus any processes in the cgroup have + * been killed. Also return statistics from the cgroup just before + * destruction. + */ CgroupStats destroyCgroup(const Path & cgroup); } diff --git a/src/libutil/chunked-vector.hh b/src/libutil/chunked-vector.hh index 0a4f0b400..d914e2542 100644 --- a/src/libutil/chunked-vector.hh +++ b/src/libutil/chunked-vector.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include #include @@ -7,20 +8,24 @@ namespace nix { -/* Provides an indexable container like vector<> with memory overhead - guarantees like list<> by allocating storage in chunks of ChunkSize - elements instead of using a contiguous memory allocation like vector<> - does. Not using a single vector that is resized reduces memory overhead - on large data sets by on average (growth factor)/2, mostly - eliminates copies within the vector during resizing, and provides stable - references to its elements. */ +/** + * Provides an indexable container like vector<> with memory overhead + * guarantees like list<> by allocating storage in chunks of ChunkSize + * elements instead of using a contiguous memory allocation like vector<> + * does. Not using a single vector that is resized reduces memory overhead + * on large data sets by on average (growth factor)/2, mostly + * eliminates copies within the vector during resizing, and provides stable + * references to its elements. + */ template class ChunkedVector { private: uint32_t size_ = 0; std::vector> chunks; - /* keep this out of the ::add hot path */ + /** + * Keep this out of the ::add hot path + */ [[gnu::noinline]] auto & addChunk() { diff --git a/src/libutil/closure.hh b/src/libutil/closure.hh index 779b9b2d5..16e3b93e4 100644 --- a/src/libutil/closure.hh +++ b/src/libutil/closure.hh @@ -1,3 +1,6 @@ +#pragma once +///@file + #include #include #include "sync.hh" diff --git a/src/libutil/comparator.hh b/src/libutil/comparator.hh index eecd5b819..9f661c5c3 100644 --- a/src/libutil/comparator.hh +++ b/src/libutil/comparator.hh @@ -1,6 +1,8 @@ #pragma once +///@file -/* Awfull hacky generation of the comparison operators by doing a lexicographic +/** + * Awful hacky generation of the comparison operators by doing a lexicographic * comparison between the choosen fields. * * ``` @@ -15,12 +17,12 @@ * } * ``` */ -#define GENERATE_ONE_CMP(COMPARATOR, MY_TYPE, FIELDS...) \ +#define GENERATE_ONE_CMP(COMPARATOR, MY_TYPE, ...) 
\ bool operator COMPARATOR(const MY_TYPE& other) const { \ - const MY_TYPE* me = this; \ - auto fields1 = std::make_tuple( FIELDS ); \ - me = &other; \ - auto fields2 = std::make_tuple( FIELDS ); \ + __VA_OPT__(const MY_TYPE* me = this;) \ + auto fields1 = std::make_tuple( __VA_ARGS__ ); \ + __VA_OPT__(me = &other;) \ + auto fields2 = std::make_tuple( __VA_ARGS__ ); \ return fields1 COMPARATOR fields2; \ } #define GENERATE_EQUAL(args...) GENERATE_ONE_CMP(==, args) diff --git a/src/libutil/compression.cc b/src/libutil/compression.cc index 89180e7a7..ba0847cde 100644 --- a/src/libutil/compression.cc +++ b/src/libutil/compression.cc @@ -23,7 +23,7 @@ struct ChunkedCompressionSink : CompressionSink { uint8_t outbuf[32 * 1024]; - void write(std::string_view data) override + void writeUnbuffered(std::string_view data) override { const size_t CHUNK_SIZE = sizeof(outbuf) << 2; while (!data.empty()) { @@ -103,7 +103,7 @@ struct ArchiveCompressionSink : CompressionSink throw Error(reason, archive_error_string(this->archive)); } - void write(std::string_view data) override + void writeUnbuffered(std::string_view data) override { ssize_t result = archive_write_data(archive, data.data(), data.length()); if (result <= 0) check(result); @@ -136,7 +136,7 @@ struct NoneSink : CompressionSink warn("requested compression level '%d' not supported by compression method 'none'", level); } void finish() override { flush(); } - void write(std::string_view data) override { nextSink(data); } + void writeUnbuffered(std::string_view data) override { nextSink(data); } }; struct BrotliDecompressionSink : ChunkedCompressionSink diff --git a/src/libutil/compression.hh b/src/libutil/compression.hh index c470b82a5..4e53a7b3c 100644 --- a/src/libutil/compression.hh +++ b/src/libutil/compression.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "ref.hh" #include "types.hh" @@ -11,7 +12,7 @@ namespace nix { struct CompressionSink : BufferedSink, FinishSink { using BufferedSink::operator (); - using BufferedSink::write; + using BufferedSink::writeUnbuffered; using FinishSink::finish; }; diff --git a/src/libutil/compute-levels.hh b/src/libutil/compute-levels.hh index 8ded295f9..093e7a915 100644 --- a/src/libutil/compute-levels.hh +++ b/src/libutil/compute-levels.hh @@ -1,3 +1,6 @@ +#pragma once +///@file + #include "types.hh" namespace nix { diff --git a/src/libutil/config-impl.hh b/src/libutil/config-impl.hh new file mode 100644 index 000000000..b6cae5ec3 --- /dev/null +++ b/src/libutil/config-impl.hh @@ -0,0 +1,71 @@ +#pragma once +/** + * @file + * + * Template implementations (as opposed to mere declarations). + * + * One only needs to include this when one is declaring a + * `BaseClass` setting, or as derived class of such an + * instantiation. 
+ */ + +#include "config.hh" + +namespace nix { + +template<> struct BaseSetting::trait +{ + static constexpr bool appendable = true; +}; +template<> struct BaseSetting::trait +{ + static constexpr bool appendable = true; +}; +template<> struct BaseSetting::trait +{ + static constexpr bool appendable = true; +}; +template<> struct BaseSetting>::trait +{ + static constexpr bool appendable = true; +}; + +template +struct BaseSetting::trait +{ + static constexpr bool appendable = false; +}; + +template +bool BaseSetting::isAppendable() +{ + return trait::appendable; +} + +template<> void BaseSetting::appendOrSet(Strings && newValue, bool append); +template<> void BaseSetting::appendOrSet(StringSet && newValue, bool append); +template<> void BaseSetting::appendOrSet(StringMap && newValue, bool append); +template<> void BaseSetting>::appendOrSet(std::set && newValue, bool append); + +template +void BaseSetting::appendOrSet(T && newValue, bool append) +{ + static_assert(!trait::appendable, "using default `appendOrSet` implementation with an appendable type"); + assert(!append); + value = std::move(newValue); +} + +template +void BaseSetting::set(const std::string & str, bool append) +{ + if (experimentalFeatureSettings.isEnabled(experimentalFeature)) + appendOrSet(parse(str), append); + else { + assert(experimentalFeature); + warn("Ignoring setting '%s' because experimental feature '%s' is not enabled", + name, + showExperimentalFeature(*experimentalFeature)); + } +} + +} diff --git a/src/libutil/config.cc b/src/libutil/config.cc index b349f2d80..085a884dc 100644 --- a/src/libutil/config.cc +++ b/src/libutil/config.cc @@ -3,6 +3,8 @@ #include "abstract-setting-to-json.hh" #include "experimental-features.hh" +#include "config-impl.hh" + #include namespace nix { @@ -80,6 +82,8 @@ void Config::getSettings(std::map & res, bool overridd void AbstractConfig::applyConfig(const std::string & contents, const std::string & path) { unsigned int pos = 0; + std::vector> parsedContents; + while (pos < contents.size()) { std::string line; while (pos < contents.size() && contents[pos] != '\n') @@ -125,8 +129,21 @@ void AbstractConfig::applyConfig(const std::string & contents, const std::string auto i = tokens.begin(); advance(i, 2); - set(name, concatStringsSep(" ", Strings(i, tokens.end()))); // FIXME: slow + parsedContents.push_back({ + name, + concatStringsSep(" ", Strings(i, tokens.end())), + }); }; + + // First apply experimental-feature related settings + for (auto & [name, value] : parsedContents) + if (name == "experimental-features" || name == "extra-experimental-features") + set(name, value); + + // Then apply other settings + for (auto & [name, value] : parsedContents) + if (name != "experimental-features" && name != "extra-experimental-features") + set(name, value); } void AbstractConfig::applyConfigFile(const Path & path) @@ -147,9 +164,8 @@ nlohmann::json Config::toJSON() { auto res = nlohmann::json::object(); for (auto & s : _settings) - if (!s.second.isAlias) { + if (!s.second.isAlias) res.emplace(s.first, s.second.setting->toJSON()); - } return res; } @@ -157,24 +173,28 @@ std::string Config::toKeyValue() { auto res = std::string(); for (auto & s : _settings) - if (!s.second.isAlias) { + if (s.second.isAlias) res += fmt("%s = %s\n", s.first, s.second.setting->to_string()); - } return res; } void Config::convertToArgs(Args & args, const std::string & category) { - for (auto & s : _settings) + for (auto & s : _settings) { if (!s.second.isAlias) s.second.setting->convertToArg(args, category); + } 
} AbstractSetting::AbstractSetting( const std::string & name, const std::string & description, - const std::set & aliases) - : name(name), description(stripIndentation(description)), aliases(aliases) + const std::set & aliases, + std::optional experimentalFeature) + : name(name) + , description(stripIndentation(description)) + , aliases(aliases) + , experimentalFeature(experimentalFeature) { } @@ -188,6 +208,10 @@ std::map AbstractSetting::toJSONObject() std::map obj; obj.emplace("description", description); obj.emplace("aliases", aliases); + if (experimentalFeature) + obj.emplace("experimentalFeature", *experimentalFeature); + else + obj.emplace("experimentalFeature", nullptr); return obj; } @@ -195,12 +219,6 @@ void AbstractSetting::convertToArg(Args & args, const std::string & category) { } -template -bool BaseSetting::isAppendable() -{ - return false; -} - template void BaseSetting::convertToArg(Args & args, const std::string & category) { @@ -210,6 +228,7 @@ void BaseSetting::convertToArg(Args & args, const std::string & category) .category = category, .labels = {"value"}, .handler = {[this](std::string s) { overridden = true; set(s); }}, + .experimentalFeature = experimentalFeature, }); if (isAppendable()) @@ -219,12 +238,13 @@ void BaseSetting::convertToArg(Args & args, const std::string & category) .category = category, .labels = {"value"}, .handler = {[this](std::string s) { overridden = true; set(s, true); }}, + .experimentalFeature = experimentalFeature, }); } -template<> void BaseSetting::set(const std::string & str, bool append) +template<> std::string BaseSetting::parse(const std::string & str) const { - value = str; + return str; } template<> std::string BaseSetting::to_string() const @@ -233,11 +253,11 @@ template<> std::string BaseSetting::to_string() const } template -void BaseSetting::set(const std::string & str, bool append) +T BaseSetting::parse(const std::string & str) const { static_assert(std::is_integral::value, "Integer required."); if (auto n = string2Int(str)) - value = *n; + return *n; else throw UsageError("setting '%s' has invalid value '%s'", name, str); } @@ -249,12 +269,12 @@ std::string BaseSetting::to_string() const return std::to_string(value); } -template<> void BaseSetting::set(const std::string & str, bool append) +template<> bool BaseSetting::parse(const std::string & str) const { if (str == "true" || str == "yes" || str == "1") - value = true; + return true; else if (str == "false" || str == "no" || str == "0") - value = false; + return false; else throw UsageError("Boolean setting '%s' has invalid value '%s'", name, str); } @@ -270,26 +290,27 @@ template<> void BaseSetting::convertToArg(Args & args, const std::string & .longName = name, .description = fmt("Enable the `%s` setting.", name), .category = category, - .handler = {[this]() { override(true); }} + .handler = {[this]() { override(true); }}, + .experimentalFeature = experimentalFeature, }); args.addFlag({ .longName = "no-" + name, .description = fmt("Disable the `%s` setting.", name), .category = category, - .handler = {[this]() { override(false); }} + .handler = {[this]() { override(false); }}, + .experimentalFeature = experimentalFeature, }); } -template<> void BaseSetting::set(const std::string & str, bool append) +template<> Strings BaseSetting::parse(const std::string & str) const { - auto ss = tokenizeString(str); - if (!append) value.clear(); - for (auto & s : ss) value.push_back(std::move(s)); + return tokenizeString(str); } -template<> bool BaseSetting::isAppendable() +template<> 
void BaseSetting::appendOrSet(Strings && newValue, bool append) { - return true; + if (!append) value.clear(); + for (auto && s : std::move(newValue)) value.push_back(std::move(s)); } template<> std::string BaseSetting::to_string() const @@ -297,16 +318,16 @@ template<> std::string BaseSetting::to_string() const return concatStringsSep(" ", value); } -template<> void BaseSetting::set(const std::string & str, bool append) +template<> StringSet BaseSetting::parse(const std::string & str) const { - if (!append) value.clear(); - for (auto & s : tokenizeString(str)) - value.insert(s); + return tokenizeString(str); } -template<> bool BaseSetting::isAppendable() +template<> void BaseSetting::appendOrSet(StringSet && newValue, bool append) { - return true; + if (!append) value.clear(); + for (auto && s : std::move(newValue)) + value.insert(s); } template<> std::string BaseSetting::to_string() const @@ -314,21 +335,24 @@ template<> std::string BaseSetting::to_string() const return concatStringsSep(" ", value); } -template<> void BaseSetting>::set(const std::string & str, bool append) +template<> std::set BaseSetting>::parse(const std::string & str) const { - if (!append) value.clear(); + std::set res; for (auto & s : tokenizeString(str)) { auto thisXpFeature = parseExperimentalFeature(s); if (thisXpFeature) - value.insert(thisXpFeature.value()); + res.insert(thisXpFeature.value()); else warn("unknown experimental feature '%s'", s); } + return res; } -template<> bool BaseSetting>::isAppendable() +template<> void BaseSetting>::appendOrSet(std::set && newValue, bool append) { - return true; + if (!append) value.clear(); + for (auto && s : std::move(newValue)) + value.insert(s); } template<> std::string BaseSetting>::to_string() const @@ -339,20 +363,23 @@ template<> std::string BaseSetting>::to_string() c return concatStringsSep(" ", stringifiedXpFeatures); } -template<> void BaseSetting::set(const std::string & str, bool append) +template<> StringMap BaseSetting::parse(const std::string & str) const { - if (!append) value.clear(); + StringMap res; for (auto & s : tokenizeString(str)) { auto eq = s.find_first_of('='); if (std::string::npos != eq) - value.emplace(std::string(s, 0, eq), std::string(s, eq + 1)); + res.emplace(std::string(s, 0, eq), std::string(s, eq + 1)); // else ignored } + return res; } -template<> bool BaseSetting::isAppendable() +template<> void BaseSetting::appendOrSet(StringMap && newValue, bool append) { - return true; + if (!append) value.clear(); + for (auto && [k, v] : std::move(newValue)) + value.emplace(std::move(k), std::move(v)); } template<> std::string BaseSetting::to_string() const @@ -376,15 +403,15 @@ template class BaseSetting; template class BaseSetting; template class BaseSetting>; -void PathSetting::set(const std::string & str, bool append) +Path PathSetting::parse(const std::string & str) const { if (str == "") { if (allowEmpty) - value = ""; + return ""; else throw UsageError("setting '%s' cannot be empty", name); } else - value = canonPath(str); + return canonPath(str); } bool GlobalConfig::set(const std::string & name, const std::string & value) @@ -444,4 +471,30 @@ GlobalConfig::Register::Register(Config * config) configRegistrations->emplace_back(config); } +ExperimentalFeatureSettings experimentalFeatureSettings; + +static GlobalConfig::Register rSettings(&experimentalFeatureSettings); + +bool ExperimentalFeatureSettings::isEnabled(const ExperimentalFeature & feature) const +{ + auto & f = experimentalFeatures.get(); + return std::find(f.begin(), f.end(), 
feature) != f.end(); +} + +void ExperimentalFeatureSettings::require(const ExperimentalFeature & feature) const +{ + if (!isEnabled(feature)) + throw MissingExperimentalFeature(feature); +} + +bool ExperimentalFeatureSettings::isEnabled(const std::optional & feature) const +{ + return !feature || isEnabled(*feature); +} + +void ExperimentalFeatureSettings::require(const std::optional & feature) const +{ + if (feature) require(*feature); +} + } diff --git a/src/libutil/config.hh b/src/libutil/config.hh index 7ac43c854..2675baed7 100644 --- a/src/libutil/config.hh +++ b/src/libutil/config.hh @@ -1,12 +1,14 @@ +#pragma once +///@file + #include #include #include -#include "types.hh" - #include -#pragma once +#include "types.hh" +#include "experimental-features.hh" namespace nix { @@ -123,21 +125,21 @@ public: void reapplyUnknownSettings(); }; -/* A class to simplify providing configuration settings. The typical - use is to inherit Config and add Setting members: - - class MyClass : private Config - { - Setting foo{this, 123, "foo", "the number of foos to use"}; - Setting bar{this, "blabla", "bar", "the name of the bar"}; - - MyClass() : Config(readConfigFile("/etc/my-app.conf")) - { - std::cout << foo << "\n"; // will print 123 unless overridden - } - }; -*/ - +/** + * A class to simplify providing configuration settings. The typical + * use is to inherit Config and add Setting members: + * + * class MyClass : private Config + * { + * Setting foo{this, 123, "foo", "the number of foos to use"}; + * Setting bar{this, "blabla", "bar", "the name of the bar"}; + * + * MyClass() : Config(readConfigFile("/etc/my-app.conf")) + * { + * std::cout << foo << "\n"; // will print 123 unless overridden + * } + * }; + */ class Config : public AbstractConfig { friend class AbstractSetting; @@ -194,12 +196,15 @@ public: bool overridden = false; + std::optional experimentalFeature; + protected: AbstractSetting( const std::string & name, const std::string & description, - const std::set & aliases); + const std::set & aliases, + std::optional experimentalFeature = std::nullopt); virtual ~AbstractSetting() { @@ -210,8 +215,11 @@ protected: virtual void set(const std::string & value, bool append = false) = 0; - virtual bool isAppendable() - { return false; } + /** + * Whether the type is appendable; i.e. whether the `append` + * parameter to `set()` is allowed to be `true`. + */ + virtual bool isAppendable() = 0; virtual std::string to_string() const = 0; @@ -224,7 +232,9 @@ protected: bool isOverridden() const { return overridden; } }; -/* A setting of type T. */ +/** + * A setting of type T. + */ template class BaseSetting : public AbstractSetting { @@ -234,14 +244,32 @@ protected: const T defaultValue; const bool documentDefault; + /** + * Parse the string into a `T`. + * + * Used by `set()`. + */ + virtual T parse(const std::string & str) const; + + /** + * Append or overwrite `value` with `newValue`. + * + * Some types to do not support appending in which case `append` + * should never be passed. The default handles this case. + * + * @param append Whether to append or overwrite. 
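As a rough illustration of the `parse()`/`appendOrSet()` split documented here: `set()` first parses the string, then either replaces or extends the current value. The sketch below assumes the `Config`/`Setting` declarations from this patch and the usual `extra-<name>` append path in `Config::set()`; the setting name `demo-paths` is made up.

```c++
#include "config.hh"

#include <cassert>

using namespace nix;

struct DemoConfig : Config
{
    // "demo-paths" is a hypothetical setting, used only for this sketch.
    Setting<Strings> paths{this, Strings{}, "demo-paths",
        "An appendable, list-valued demo setting."};
};

int main()
{
    DemoConfig cfg;

    // set() uses parse() to tokenize the string, then appendOrSet() with
    // append = false, which replaces any previous value:
    cfg.set("demo-paths", "/a /b");
    assert(cfg.paths.get() == (Strings{"/a", "/b"}));

    // The "extra-" prefixed form reaches the same setting with
    // append = true, so appendOrSet() extends the list instead:
    cfg.applyConfig("extra-demo-paths = /c\n", "demo.conf");
    assert(cfg.paths.get() == (Strings{"/a", "/b", "/c"}));
}
```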
+ */ + virtual void appendOrSet(T && newValue, bool append); + public: BaseSetting(const T & def, const bool documentDefault, const std::string & name, const std::string & description, - const std::set & aliases = {}) - : AbstractSetting(name, description, aliases) + const std::set & aliases = {}, + std::optional experimentalFeature = std::nullopt) + : AbstractSetting(name, description, aliases, experimentalFeature) , value(def) , defaultValue(def) , documentDefault(documentDefault) @@ -260,9 +288,25 @@ public: template void setDefault(const U & v) { if (!overridden) value = v; } - void set(const std::string & str, bool append = false) override; + /** + * Require any experimental feature the setting depends on + * + * Uses `parse()` to get the value from `str`, and `appendOrSet()` + * to set it. + */ + void set(const std::string & str, bool append = false) override final; - bool isAppendable() override; + /** + * C++ trick; This is template-specialized to compile-time indicate whether + * the type is appendable. + */ + struct trait; + + /** + * Always defined based on the C++ magic + * with `trait` above. + */ + bool isAppendable() override final; virtual void override(const T & v) { @@ -296,8 +340,9 @@ public: const std::string & name, const std::string & description, const std::set & aliases = {}, - const bool documentDefault = true) - : BaseSetting(def, documentDefault, name, description, aliases) + const bool documentDefault = true, + std::optional experimentalFeature = std::nullopt) + : BaseSetting(def, documentDefault, name, description, aliases, experimentalFeature) { options->addSetting(this); } @@ -305,8 +350,10 @@ public: void operator =(const T & v) { this->assign(v); } }; -/* A special setting for Paths. These are automatically canonicalised - (e.g. "/foo//bar/" becomes "/foo/bar"). */ +/** + * A special setting for Paths. These are automatically canonicalised + * (e.g. "/foo//bar/" becomes "/foo/bar"). + */ class PathSetting : public BaseSetting { bool allowEmpty; @@ -325,7 +372,7 @@ public: options->addSetting(this); } - void set(const std::string & str, bool append = false) override; + Path parse(const std::string & str) const override; Path operator +(const char * p) const { return value + p; } @@ -357,4 +404,52 @@ struct GlobalConfig : public AbstractConfig extern GlobalConfig globalConfig; + +struct ExperimentalFeatureSettings : Config { + + Setting> experimentalFeatures{ + this, {}, "experimental-features", + R"( + Experimental features that are enabled. + + Example: + + ``` + experimental-features = nix-command flakes + ``` + + The following experimental features are available: + + {{#include experimental-features-shortlist.md}} + + Experimental features are [further documented in the manual](@docroot@/contributing/experimental-features.md). + )"}; + + /** + * Check whether the given experimental feature is enabled. + */ + bool isEnabled(const ExperimentalFeature &) const; + + /** + * Require an experimental feature be enabled, throwing an error if it is + * not. + */ + void require(const ExperimentalFeature &) const; + + /** + * `std::nullopt` pointer means no feature, which means there is nothing that could be + * disabled, and so the function returns true in that case. + */ + bool isEnabled(const std::optional &) const; + + /** + * `std::nullopt` pointer means no feature, which means there is nothing that could be + * disabled, and so the function does nothing in that case. 
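The combination of the new `experimentalFeature` constructor argument and `BaseSetting<T>::set()` in `config-impl.hh` means that, while the feature is disabled, assignments to the setting become warn-and-ignore no-ops. A small sketch, using a made-up setting name and assuming only the declarations in this patch:

```c++
#include "config.hh"
#include "experimental-features.hh"

#include <cassert>

using namespace nix;

struct GatedConfig : Config
{
    // "demo-use-cgroups" is a hypothetical setting gated on Xp::Cgroups.
    Setting<bool> useCgroups{this, false, "demo-use-cgroups",
        "Demo setting gated on the `cgroups` experimental feature.",
        {}, true, Xp::Cgroups};
};

int main()
{
    GatedConfig cfg;

    // With the feature disabled, set() only warns and keeps the default:
    cfg.set("demo-use-cgroups", "true");
    assert(cfg.useCgroups.get() == false);

    // Once the feature is enabled, the same assignment takes effect:
    experimentalFeatureSettings.experimentalFeatures = {Xp::Cgroups};
    cfg.set("demo-use-cgroups", "true");
    assert(cfg.useCgroups.get() == true);
}
```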
+ */ + void require(const std::optional &) const; +}; + +// FIXME: don't use a global variable. +extern ExperimentalFeatureSettings experimentalFeatureSettings; + } diff --git a/src/libutil/error.cc b/src/libutil/error.cc index e4f0d4677..c9d61942a 100644 --- a/src/libutil/error.cc +++ b/src/libutil/error.cc @@ -302,14 +302,14 @@ std::ostream & showErrorInfo(std::ostream & out, const ErrorInfo & einfo, bool s if (!einfo.traces.empty()) { size_t count = 0; for (const auto & trace : einfo.traces) { + if (trace.hint.str().empty()) continue; + if (frameOnly && !trace.frame) continue; + if (!showTrace && count > 3) { oss << "\n" << ANSI_WARNING "(stack trace truncated; use '--show-trace' to show the full trace)" ANSI_NORMAL << "\n"; break; } - if (trace.hint.str().empty()) continue; - if (frameOnly && !trace.frame) continue; - count++; frameOnly = trace.frame; diff --git a/src/libutil/error.hh b/src/libutil/error.hh index 0ebeaba61..6a0923081 100644 --- a/src/libutil/error.hh +++ b/src/libutil/error.hh @@ -1,4 +1,19 @@ #pragma once +/** + * @file + * + * @brief This file defines two main structs/classes used in nix error handling. + * + * ErrorInfo provides a standard payload of error information, with conversion to string + * happening in the logger rather than at the call site. + * + * BaseError is the ancestor of nix specific exceptions (and Interrupted), and contains + * an ErrorInfo. + * + * ErrorInfo structs are sent to the logger as part of an exception, or directly with the + * logError or logWarning macros. + * See libutil/tests/logging.cc for usage examples. + */ #include "suggestions.hh" #include "ref.hh" @@ -26,22 +41,6 @@ namespace nix { -/* - - This file defines two main structs/classes used in nix error handling. - - ErrorInfo provides a standard payload of error information, with conversion to string - happening in the logger rather than at the call site. - - BaseError is the ancestor of nix specific exceptions (and Interrupted), and contains - an ErrorInfo. - - ErrorInfo structs are sent to the logger as part of an exception, or directly with the - logError or logWarning macros. - - See libutil/tests/logging.cc for usage examples. - - */ typedef enum { lvlError = 0, @@ -54,20 +53,26 @@ typedef enum { lvlVomit } Verbosity; -// the lines of code surrounding an error. +/** + * The lines of code surrounding an error. + */ struct LinesOfCode { std::optional prevLineOfCode; std::optional errLineOfCode; std::optional nextLineOfCode; }; -/* An abstract type that represents a location in a source file. */ +/** + * An abstract type that represents a location in a source file. + */ struct AbstractPos { uint32_t line = 0; uint32_t column = 0; - /* Return the contents of the source file. */ + /** + * Return the contents of the source file. + */ virtual std::optional getSource() const { return std::nullopt; }; @@ -104,8 +109,10 @@ struct ErrorInfo { std::ostream & showErrorInfo(std::ostream & out, const ErrorInfo & einfo, bool showTrace); -/* BaseError should generally not be caught, as it has Interrupted as - a subclass. Catch Error instead. */ +/** + * BaseError should generally not be caught, as it has Interrupted as + * a subclass. Catch Error instead. 
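As a sketch of the conventions this header comment describes (not taken verbatim from the Nix sources): exceptions derived from `Error` carry an `ErrorInfo` that the logger renders, and an `ErrorInfo` can also be handed to the logger directly through the `logError`/`logWarning` macros.

```c++
#include "error.hh"
#include "logging.hh"

using namespace nix;

static void mightFail(bool ok)
{
    if (!ok)
        // The format arguments end up in the ErrorInfo's message and are
        // rendered by the logger, not at the throw site.
        throw Error("operation failed with code %d", 42);
}

int main()
{
    try {
        mightFail(false);
    } catch (Error & e) {            // catch Error, not BaseError (see above)
        e.addTrace({}, "while running the error-handling sketch");
        logError(e.info());          // hand the ErrorInfo to the logger
    }

    // An ErrorInfo can also go straight to the logger, without an exception:
    logWarning({
        .msg = hintfmt("this is only a demo warning about '%s'", "nothing")
    });
}
```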
+ */ class BaseError : public std::exception { protected: diff --git a/src/libutil/experimental-features.cc b/src/libutil/experimental-features.cc index 58d762ebb..bd1899662 100644 --- a/src/libutil/experimental-features.cc +++ b/src/libutil/experimental-features.cc @@ -5,29 +5,219 @@ namespace nix { -std::map stringifiedXpFeatures = { - { Xp::CaDerivations, "ca-derivations" }, - { Xp::ImpureDerivations, "impure-derivations" }, - { Xp::Flakes, "flakes" }, - { Xp::NixCommand, "nix-command" }, - { Xp::RecursiveNix, "recursive-nix" }, - { Xp::NoUrlLiterals, "no-url-literals" }, - { Xp::FetchClosure, "fetch-closure" }, - { Xp::ReplFlake, "repl-flake" }, - { Xp::AutoAllocateUids, "auto-allocate-uids" }, - { Xp::Cgroups, "cgroups" }, - { Xp::DiscardReferences, "discard-references" }, +struct ExperimentalFeatureDetails +{ + ExperimentalFeature tag; + std::string_view name; + std::string_view description; }; +constexpr std::array xpFeatureDetails = {{ + { + .tag = Xp::CaDerivations, + .name = "ca-derivations", + .description = R"( + Allow derivations to be content-addressed in order to prevent + rebuilds when changes to the derivation do not result in changes to + the derivation's output. See + [__contentAddressed](@docroot@/language/advanced-attributes.md#adv-attr-__contentAddressed) + for details. + )", + }, + { + .tag = Xp::ImpureDerivations, + .name = "impure-derivations", + .description = R"( + Allow derivations to produce non-fixed outputs by setting the + `__impure` derivation attribute to `true`. An impure derivation can + have differing outputs each time it is built. + + Example: + + ``` + derivation { + name = "impure"; + builder = /bin/sh; + __impure = true; # mark this derivation as impure + args = [ "-c" "read -n 10 random < /dev/random; echo $random > $out" ]; + system = builtins.currentSystem; + } + ``` + + Each time this derivation is built, it can produce a different + output (as the builder outputs random bytes to `$out`). Impure + derivations also have access to the network, and only fixed-output + or other impure derivations can rely on impure derivations. Finally, + an impure derivation cannot also be + [content-addressed](#xp-feature-ca-derivations). + )", + }, + { + .tag = Xp::Flakes, + .name = "flakes", + .description = R"( + Enable flakes. See the manual entry for [`nix + flake`](@docroot@/command-ref/new-cli/nix3-flake.md) for details. + )", + }, + { + .tag = Xp::NixCommand, + .name = "nix-command", + .description = R"( + Enable the new `nix` subcommands. See the manual on + [`nix`](@docroot@/command-ref/new-cli/nix.md) for details. + )", + }, + { + .tag = Xp::RecursiveNix, + .name = "recursive-nix", + .description = R"( + Allow derivation builders to call Nix, and thus build derivations + recursively. + + Example: + + ``` + with import {}; + + runCommand "foo" + { + buildInputs = [ nix jq ]; + NIX_PATH = "nixpkgs=${}"; + } + '' + hello=$(nix-build -E '(import {}).hello.overrideDerivation (args: { name = "recursive-hello"; })') + + mkdir -p $out/bin + ln -s $hello/bin/hello $out/bin/hello + '' + ``` + + An important restriction on recursive builders is disallowing + arbitrary substitutions. For example, running + + ``` + nix-store -r /nix/store/kmwd1hq55akdb9sc7l3finr175dajlby-hello-2.10 + ``` + + in the above `runCommand` script would be disallowed, as this could + lead to derivations with hidden dependencies or breaking + reproducibility by relying on the current state of the Nix store. 
An + exception would be if + `/nix/store/kmwd1hq55akdb9sc7l3finr175dajlby-hello-2.10` were + already in the build inputs or built by a previous recursive Nix + call. + )", + }, + { + .tag = Xp::NoUrlLiterals, + .name = "no-url-literals", + .description = R"( + Disallow unquoted URLs as part of the Nix language syntax. The Nix + language allows for URL literals, like so: + + ``` + $ nix repl + Welcome to Nix 2.15.0. Type :? for help. + + nix-repl> http://foo + "http://foo" + ``` + + But enabling this experimental feature will cause the Nix parser to + throw an error when encountering a URL literal: + + ``` + $ nix repl --extra-experimental-features 'no-url-literals' + Welcome to Nix 2.15.0. Type :? for help. + + nix-repl> http://foo + error: URL literals are disabled + + at «string»:1:1: + + 1| http://foo + | ^ + + ``` + + While this is currently an experimental feature, unquoted URLs are + being deprecated and their usage is discouraged. + + The reason is that, as opposed to path literals, URLs have no + special properties that distinguish them from regular strings, URLs + containing parameters have to be quoted anyway, and unquoted URLs + may confuse external tooling. + )", + }, + { + .tag = Xp::FetchClosure, + .name = "fetch-closure", + .description = R"( + Enable the use of the [`fetchClosure`](@docroot@/language/builtins.md#builtins-fetchClosure) built-in function in the Nix language. + )", + }, + { + .tag = Xp::ReplFlake, + .name = "repl-flake", + .description = R"( + Allow passing [installables](@docroot@/command-ref/new-cli/nix.md#installables) to `nix repl`, making its interface consistent with the other experimental commands. + )", + }, + { + .tag = Xp::AutoAllocateUids, + .name = "auto-allocate-uids", + .description = R"( + Allows Nix to automatically pick UIDs for builds, rather than creating + `nixbld*` user accounts. See the [`auto-allocate-uids`](#conf-auto-allocate-uids) setting for details. + )", + }, + { + .tag = Xp::Cgroups, + .name = "cgroups", + .description = R"( + Allows Nix to execute builds inside cgroups. See + the [`use-cgroups`](#conf-use-cgroups) setting for details. + )", + }, + { + .tag = Xp::DiscardReferences, + .name = "discard-references", + .description = R"( + Allow the use of the [`unsafeDiscardReferences`](@docroot@/language/advanced-attributes.html#adv-attr-unsafeDiscardReferences) attribute in derivations + that use [structured attributes](@docroot@/language/advanced-attributes.html#adv-attr-structuredAttrs). This disables scanning of outputs for + runtime dependencies. + )", + }, + { + .tag = Xp::DaemonTrustOverride, + .name = "daemon-trust-override", + .description = R"( + Allow forcing trusting or not trusting clients with + `nix-daemon`. This is useful for testing, but possibly also + useful for various experiments with `nix-daemon --stdio` + networking. 
+ )", + }, +}}; + +static_assert( + []() constexpr { + for (auto [index, feature] : enumerate(xpFeatureDetails)) + if (index != (size_t)feature.tag) + return false; + return true; + }(), + "array order does not match enum tag order"); + const std::optional parseExperimentalFeature(const std::string_view & name) { using ReverseXpMap = std::map; - static auto reverseXpMap = []() - { + static std::unique_ptr reverseXpMap = []() { auto reverseXpMap = std::make_unique(); - for (auto & [feature, name] : stringifiedXpFeatures) - (*reverseXpMap)[name] = feature; + for (auto & xpFeature : xpFeatureDetails) + (*reverseXpMap)[xpFeature.name] = xpFeature.tag; return reverseXpMap; }(); @@ -37,20 +227,27 @@ const std::optional parseExperimentalFeature(const std::str return std::nullopt; } -std::string_view showExperimentalFeature(const ExperimentalFeature feature) +std::string_view showExperimentalFeature(const ExperimentalFeature tag) { - const auto ret = get(stringifiedXpFeatures, feature); - assert(ret); - return *ret; + assert((size_t)tag < xpFeatureDetails.size()); + return xpFeatureDetails[(size_t)tag].name; +} + +nlohmann::json documentExperimentalFeatures() +{ + StringMap res; + for (auto & xpFeature : xpFeatureDetails) + res[std::string { xpFeature.name }] = + trim(stripIndentation(xpFeature.description)); + return (nlohmann::json) res; } std::set parseFeatures(const std::set & rawFeatures) { std::set res; - for (auto & rawFeature : rawFeatures) { + for (auto & rawFeature : rawFeatures) if (auto feature = parseExperimentalFeature(rawFeature)) res.insert(*feature); - } return res; } diff --git a/src/libutil/experimental-features.hh b/src/libutil/experimental-features.hh index ac372e03e..3c00bc4e5 100644 --- a/src/libutil/experimental-features.hh +++ b/src/libutil/experimental-features.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "comparator.hh" #include "error.hh" @@ -10,9 +11,10 @@ namespace nix { /** * The list of available experimental features. * - * If you update this, don’t forget to also change the map defining their - * string representation in the corresponding `.cc` file. - **/ + * If you update this, don’t forget to also change the map defining + * their string representation and documentation in the corresponding + * `.cc` file as well. + */ enum struct ExperimentalFeature { CaDerivations, @@ -26,6 +28,7 @@ enum struct ExperimentalFeature AutoAllocateUids, Cgroups, DiscardReferences, + DaemonTrustOverride, }; /** @@ -33,26 +36,52 @@ enum struct ExperimentalFeature */ using Xp = ExperimentalFeature; +/** + * Parse an experimental feature (enum value) from its name. Experimental + * feature flag names are hyphenated and do not contain spaces. + */ const std::optional parseExperimentalFeature( const std::string_view & name); + +/** + * Show the name of an experimental feature. This is the opposite of + * parseExperimentalFeature(). + */ std::string_view showExperimentalFeature(const ExperimentalFeature); +/** + * Compute the documentation of all experimental features. + * + * See `doc/manual` for how this information is used. + */ +nlohmann::json documentExperimentalFeatures(); + +/** + * Shorthand for `str << showExperimentalFeature(feature)`. + */ std::ostream & operator<<( std::ostream & str, const ExperimentalFeature & feature); /** - * Parse a set of strings to the corresponding set of experimental features, - * ignoring (but warning for) any unkwown feature. 
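A small sketch of the lookup functions built on top of `xpFeatureDetails`, assuming the declarations above and nlohmann JSON for the documentation helper:

```c++
#include "experimental-features.hh"

#include <nlohmann/json.hpp>
#include <cassert>

using namespace nix;

int main()
{
    // parseExperimentalFeature() and showExperimentalFeature() are inverses:
    auto f = parseExperimentalFeature("ca-derivations");
    assert(f && *f == Xp::CaDerivations);
    assert(showExperimentalFeature(Xp::Flakes) == "flakes");

    // Unknown names yield std::nullopt rather than throwing:
    assert(!parseExperimentalFeature("definitely-not-a-feature"));

    // documentExperimentalFeatures() returns the name -> description map
    // consumed by the manual build (see doc/manual):
    auto doc = documentExperimentalFeatures();
    assert(doc.count("flakes") == 1);
}
```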
+ * Parse a set of strings to the corresponding set of experimental + * features, ignoring (but warning for) any unknown feature. */ std::set parseFeatures(const std::set &); +/** + * An experimental feature was required for some (experimental) + * operation, but was not enabled. + */ class MissingExperimentalFeature : public Error { public: + /** + * The experimental feature that was required but not enabled. + */ ExperimentalFeature missingFeature; - MissingExperimentalFeature(ExperimentalFeature); + MissingExperimentalFeature(ExperimentalFeature missingFeature); }; /** diff --git a/src/libutil/filesystem.cc b/src/libutil/filesystem.cc index 3a732cff8..56be76ecc 100644 --- a/src/libutil/filesystem.cc +++ b/src/libutil/filesystem.cc @@ -15,9 +15,9 @@ static Path tempName(Path tmpRoot, const Path & prefix, bool includePid, { tmpRoot = canonPath(tmpRoot.empty() ? getEnv("TMPDIR").value_or("/tmp") : tmpRoot, true); if (includePid) - return (format("%1%/%2%-%3%-%4%") % tmpRoot % prefix % getpid() % counter++).str(); + return fmt("%1%/%2%-%3%-%4%", tmpRoot, prefix, getpid(), counter++); else - return (format("%1%/%2%-%3%") % tmpRoot % prefix % counter++).str(); + return fmt("%1%/%2%-%3%", tmpRoot, prefix, counter++); } Path createTempDir(const Path & tmpRoot, const Path & prefix, diff --git a/src/libutil/finally.hh b/src/libutil/finally.hh index dee2e8d2f..db654301f 100644 --- a/src/libutil/finally.hh +++ b/src/libutil/finally.hh @@ -1,6 +1,9 @@ #pragma once +///@file -/* A trivial class to run a function at the end of a scope. */ +/** + * A trivial class to run a function at the end of a scope. + */ template class Finally { diff --git a/src/libutil/fmt.hh b/src/libutil/fmt.hh index e879fd3b8..727255b45 100644 --- a/src/libutil/fmt.hh +++ b/src/libutil/fmt.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include #include @@ -8,30 +9,25 @@ namespace nix { -/* Inherit some names from other namespaces for convenience. */ +/** + * Inherit some names from other namespaces for convenience. + */ using boost::format; -/* A variadic template that does nothing. Useful to call a function - for all variadic arguments but ignoring the result. */ +/** + * A variadic template that does nothing. Useful to call a function + * for all variadic arguments but ignoring the result. + */ struct nop { template nop(T...) {} }; -struct FormatOrString -{ - std::string s; - FormatOrString(std::string s) : s(std::move(s)) { }; - template - FormatOrString(const F & f) : s(f.str()) { }; - FormatOrString(const char * s) : s(s) { }; -}; - - -/* A helper for formatting strings. ‘fmt(format, a_0, ..., a_n)’ is - equivalent to ‘boost::format(format) % a_0 % ... % - ... a_n’. However, ‘fmt(s)’ is equivalent to ‘s’ (so no %-expansion - takes place). */ - +/** + * A helper for formatting strings. ‘fmt(format, a_0, ..., a_n)’ is + * equivalent to ‘boost::format(format) % a_0 % ... % + * ... a_n’. However, ‘fmt(s)’ is equivalent to ‘s’ (so no %-expansion + * takes place). + */ template inline void formatHelper(F & f) { @@ -53,11 +49,6 @@ inline std::string fmt(const char * s) return s; } -inline std::string fmt(const FormatOrString & fs) -{ - return fs.s; -} - template inline std::string fmt(const std::string & fs, const Args & ... args) { diff --git a/src/libutil/git.hh b/src/libutil/git.hh index cb13ef0e5..bf2b9a286 100644 --- a/src/libutil/git.hh +++ b/src/libutil/git.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include #include @@ -8,21 +9,23 @@ namespace nix { namespace git { -// A line from the output of `git ls-remote --symref`. 
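Stepping back to the `fmt.hh` hunk above: with `FormatOrString` gone, `fmt()` is the single formatting entry point. A minimal sketch of its behaviour, assuming only libutil's `fmt.hh`:

```c++
#include "fmt.hh"

#include <cassert>
#include <string>

using namespace nix;

int main()
{
    // With arguments, fmt() behaves like boost::format(...) % a % b:
    assert(fmt("copying %d paths to '%s'", 3, "/nix/store")
        == "copying 3 paths to '/nix/store'");

    // With a single plain string no %-expansion takes place, which is what
    // callers previously relied on FormatOrString for:
    assert(fmt("100% done") == "100% done");
}
```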
-// -// These can be of two kinds: -// -// - Symbolic references of the form -// -// ref: {target} {reference} -// -// where {target} is itself a reference and {reference} is optional -// -// - Object references of the form -// -// {target} {reference} -// -// where {target} is a commit id and {reference} is mandatory +/** + * A line from the output of `git ls-remote --symref`. + * + * These can be of two kinds: + * + * - Symbolic references of the form + * + * ref: {target} {reference} + * + * where {target} is itself a reference and {reference} is optional + * + * - Object references of the form + * + * {target} {reference} + * + * where {target} is a commit id and {reference} is mandatory + */ struct LsRemoteRefLine { enum struct Kind { Symbolic, diff --git a/src/libutil/hash.cc b/src/libutil/hash.cc index d2fd0c15a..2c36d9d94 100644 --- a/src/libutil/hash.cc +++ b/src/libutil/hash.cc @@ -1,6 +1,7 @@ #include #include +#include #include #include @@ -16,7 +17,6 @@ namespace nix { - static size_t regularHashSize(HashType type) { switch (type) { case htMD5: return md5HashSize; @@ -71,12 +71,13 @@ const std::string base16Chars = "0123456789abcdef"; static std::string printHash16(const Hash & hash) { - char buf[hash.hashSize * 2]; + std::string buf; + buf.reserve(hash.hashSize * 2); for (unsigned int i = 0; i < hash.hashSize; i++) { - buf[i * 2] = base16Chars[hash.hash[i] >> 4]; - buf[i * 2 + 1] = base16Chars[hash.hash[i] & 0x0f]; + buf.push_back(base16Chars[hash.hash[i] >> 4]); + buf.push_back(base16Chars[hash.hash[i] & 0x0f]); } - return std::string(buf, hash.hashSize * 2); + return buf; } @@ -130,7 +131,7 @@ std::string Hash::to_string(Base base, bool includeType) const break; case Base64: case SRI: - s += base64Encode(std::string((const char *) hash, hashSize)); + s += base64Encode(std::string_view((const char *) hash, hashSize)); break; } return s; @@ -342,7 +343,7 @@ HashSink::~HashSink() delete ctx; } -void HashSink::write(std::string_view data) +void HashSink::writeUnbuffered(std::string_view data) { bytes += data.size(); update(ht, *ctx, data); @@ -403,7 +404,7 @@ HashType parseHashType(std::string_view s) throw UsageError("unknown hash algorithm '%1%'", s); } -std::string printHashType(HashType ht) +std::string_view printHashType(HashType ht) { switch (ht) { case htMD5: return "md5"; diff --git a/src/libutil/hash.hh b/src/libutil/hash.hh index 00f70a572..ae3ee40f4 100644 --- a/src/libutil/hash.hh +++ b/src/libutil/hash.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "types.hh" #include "serialise.hh" @@ -33,62 +34,86 @@ struct Hash HashType type; - /* Create a zero-filled hash object. */ + /** + * Create a zero-filled hash object. + */ Hash(HashType type); - /* Parse the hash from a string representation in the format - "[:]" or "-" (a - Subresource Integrity hash expression). If the 'type' argument - is not present, then the hash type must be specified in the - string. */ + /** + * Parse the hash from a string representation in the format + * "[:]" or "-" (a + * Subresource Integrity hash expression). If the 'type' argument + * is not present, then the hash type must be specified in the + * string. + */ static Hash parseAny(std::string_view s, std::optional type); - /* Parse a hash from a string representation like the above, except the - type prefix is mandatory is there is no separate arguement. */ + /** + * Parse a hash from a string representation like the above, except the + * type prefix is mandatory is there is no separate arguement. 
+ */ static Hash parseAnyPrefixed(std::string_view s); - /* Parse a plain hash that musst not have any prefix indicating the type. - The type is passed in to disambiguate. */ + /** + * Parse a plain hash that musst not have any prefix indicating the type. + * The type is passed in to disambiguate. + */ static Hash parseNonSRIUnprefixed(std::string_view s, HashType type); static Hash parseSRI(std::string_view original); private: - /* The type must be provided, the string view must not include - prefix. `isSRI` helps disambigate the various base-* encodings. */ + /** + * The type must be provided, the string view must not include + * prefix. `isSRI` helps disambigate the various base-* encodings. + */ Hash(std::string_view s, HashType type, bool isSRI); public: - /* Check whether two hash are equal. */ + /** + * Check whether two hash are equal. + */ bool operator == (const Hash & h2) const; - /* Check whether two hash are not equal. */ + /** + * Check whether two hash are not equal. + */ bool operator != (const Hash & h2) const; - /* For sorting. */ + /** + * For sorting. + */ bool operator < (const Hash & h) const; - /* Returns the length of a base-16 representation of this hash. */ + /** + * Returns the length of a base-16 representation of this hash. + */ size_t base16Len() const { return hashSize * 2; } - /* Returns the length of a base-32 representation of this hash. */ + /** + * Returns the length of a base-32 representation of this hash. + */ size_t base32Len() const { return (hashSize * 8 - 1) / 5 + 1; } - /* Returns the length of a base-64 representation of this hash. */ + /** + * Returns the length of a base-64 representation of this hash. + */ size_t base64Len() const { return ((4 * hashSize / 3) + 3) & ~3; } - /* Return a string representation of the hash, in base-16, base-32 - or base-64. By default, this is prefixed by the hash type - (e.g. "sha256:"). */ + /** + * Return a string representation of the hash, in base-16, base-32 + * or base-64. By default, this is prefixed by the hash type + * (e.g. "sha256:"). + */ std::string to_string(Base base, bool includeType) const; std::string gitRev() const @@ -104,36 +129,54 @@ public: static Hash dummy; }; -/* Helper that defaults empty hashes to the 0 hash. */ +/** + * Helper that defaults empty hashes to the 0 hash. + */ Hash newHashAllowEmpty(std::string_view hashStr, std::optional ht); -/* Print a hash in base-16 if it's MD5, or base-32 otherwise. */ +/** + * Print a hash in base-16 if it's MD5, or base-32 otherwise. + */ std::string printHash16or32(const Hash & hash); -/* Compute the hash of the given string. */ +/** + * Compute the hash of the given string. + */ Hash hashString(HashType ht, std::string_view s); -/* Compute the hash of the given file. */ +/** + * Compute the hash of the given file. + */ Hash hashFile(HashType ht, const Path & path); -/* Compute the hash of the given path. The hash is defined as - (essentially) hashString(ht, dumpPath(path)). */ +/** + * Compute the hash of the given path. The hash is defined as + * (essentially) hashString(ht, dumpPath(path)). + */ typedef std::pair HashResult; HashResult hashPath(HashType ht, const Path & path, PathFilter & filter = defaultPathFilter); -/* Compress a hash to the specified number of bytes by cyclically - XORing bytes together. */ +/** + * Compress a hash to the specified number of bytes by cyclically + * XORing bytes together. + */ Hash compressHash(const Hash & hash, unsigned int newSize); -/* Parse a string representing a hash type. 
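A usage sketch of the hashing helpers declared in this header, assuming libutil's `hash.hh` (note that `printHashType()` now returns a `std::string_view`):

```c++
#include "hash.hh"

#include <cassert>
#include <iostream>

using namespace nix;

int main()
{
    Hash h = hashString(htSHA256, "hello");

    // Base-16, base-32 and SRI renderings of the same digest; `true`
    // prefixes the hash type (e.g. "sha256:"):
    std::cout << h.to_string(Base16, true) << "\n";
    std::cout << h.to_string(Base32, false) << "\n";
    std::cout << h.to_string(SRI, true) << "\n";

    // parseHashType() and printHashType() are inverses:
    assert(parseHashType("sha256") == htSHA256);
    assert(printHashType(htSHA256) == "sha256");

    // Round-trip through the SRI representation:
    Hash h2 = Hash::parseAny(h.to_string(SRI, true), std::nullopt);
    assert(h2 == h);
}
```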
*/ +/** + * Parse a string representing a hash type. + */ HashType parseHashType(std::string_view s); -/* Will return nothing on parse error */ +/** + * Will return nothing on parse error + */ std::optional parseHashTypeOpt(std::string_view s); -/* And the reverse. */ -std::string printHashType(HashType ht); +/** + * And the reverse. + */ +std::string_view printHashType(HashType ht); union Ctx; @@ -154,7 +197,7 @@ public: HashSink(HashType ht); HashSink(const HashSink & h); ~HashSink(); - void write(std::string_view data) override; + void writeUnbuffered(std::string_view data) override; HashResult finish() override; HashResult currentHash(); }; diff --git a/src/libutil/hilite.hh b/src/libutil/hilite.hh index f8bdbfc55..2d5cf7c6f 100644 --- a/src/libutil/hilite.hh +++ b/src/libutil/hilite.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include #include @@ -6,11 +7,13 @@ namespace nix { -/* Highlight all the given matches in the given string `s` by wrapping - them between `prefix` and `postfix`. - - If some matches overlap, then their union will be wrapped rather - than the individual matches. */ +/** + * Highlight all the given matches in the given string `s` by wrapping + * them between `prefix` and `postfix`. + * + * If some matches overlap, then their union will be wrapped rather + * than the individual matches. + */ std::string hiliteMatches( std::string_view s, std::vector matches, diff --git a/src/libutil/json-impls.hh b/src/libutil/json-impls.hh index bd75748ad..b26163a04 100644 --- a/src/libutil/json-impls.hh +++ b/src/libutil/json-impls.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "nlohmann/json_fwd.hpp" diff --git a/src/libutil/json-utils.hh b/src/libutil/json-utils.hh index b8a031227..eb00e954f 100644 --- a/src/libutil/json-utils.hh +++ b/src/libutil/json-utils.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include diff --git a/src/libutil/logging.cc b/src/libutil/logging.cc index 904ba6ebe..5a2dd99af 100644 --- a/src/libutil/logging.cc +++ b/src/libutil/logging.cc @@ -32,7 +32,8 @@ void Logger::warn(const std::string & msg) void Logger::writeToStdout(std::string_view s) { - std::cout << s << "\n"; + writeFull(STDOUT_FILENO, s); + writeFull(STDOUT_FILENO, "\n"); } class SimpleLogger : public Logger @@ -53,7 +54,7 @@ public: return printBuildLogs; } - void log(Verbosity lvl, const FormatOrString & fs) override + void log(Verbosity lvl, std::string_view s) override { if (lvl > verbosity) return; @@ -64,14 +65,15 @@ public: switch (lvl) { case lvlError: c = '3'; break; case lvlWarn: c = '4'; break; - case lvlInfo: c = '5'; break; + case lvlNotice: case lvlInfo: c = '5'; break; case lvlTalkative: case lvlChatty: c = '6'; break; - default: c = '7'; + case lvlDebug: case lvlVomit: c = '7'; + default: c = '7'; break; // should not happen, and missing enum case is reported by -Werror=switch-enum } prefix = std::string("<") + c + ">"; } - writeToStderr(prefix + filterANSIEscapes(fs.s, !tty) + "\n"); + writeToStderr(prefix + filterANSIEscapes(s, !tty) + "\n"); } void logEI(const ErrorInfo & ei) override @@ -84,7 +86,7 @@ public: void startActivity(ActivityId act, Verbosity lvl, ActivityType type, const std::string & s, const Fields & fields, ActivityId parent) - override + override { if (lvl <= verbosity && !s.empty()) log(lvl, s + "..."); @@ -173,12 +175,12 @@ struct JSONLogger : Logger { prevLogger.log(lvlError, "@nix " + json.dump(-1, ' ', false, nlohmann::json::error_handler_t::replace)); } - void log(Verbosity lvl, const FormatOrString & fs) override + void log(Verbosity lvl, 
std::string_view s) override { nlohmann::json json; json["action"] = "msg"; json["level"] = lvl; - json["msg"] = fs.s; + json["msg"] = s; write(json); } diff --git a/src/libutil/logging.hh b/src/libutil/logging.hh index 4642c49f7..5aa6bee95 100644 --- a/src/libutil/logging.hh +++ b/src/libutil/logging.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "types.hh" #include "error.hh" @@ -72,14 +73,17 @@ public: virtual void stop() { }; + virtual void pause() { }; + virtual void resume() { }; + // Whether the logger prints the whole build log virtual bool isVerbose() { return false; } - virtual void log(Verbosity lvl, const FormatOrString & fs) = 0; + virtual void log(Verbosity lvl, std::string_view s) = 0; - void log(const FormatOrString & fs) + void log(std::string_view s) { - log(lvlInfo, fs); + log(lvlInfo, s); } virtual void logEI(const ErrorInfo & ei) = 0; @@ -102,11 +106,9 @@ public: virtual void writeToStdout(std::string_view s); template - inline void cout(const std::string & fs, const Args & ... args) + inline void cout(const Args & ... args) { - boost::format f(fs); - formatHelper(f, args...); - writeToStdout(f.str()); + writeToStdout(fmt(args...)); } virtual std::optional ask(std::string_view s) @@ -181,12 +183,17 @@ bool handleJSONLogMessage(const std::string & msg, const Activity & act, std::map & activities, bool trusted); -extern Verbosity verbosity; /* suppress msgs > this */ +/** + * suppress msgs > this + */ +extern Verbosity verbosity; -/* Print a message with the standard ErrorInfo format. - In general, use these 'log' macros for reporting problems that may require user - intervention or that need more explanation. Use the 'print' macros for more - lightweight status messages. */ +/** + * Print a message with the standard ErrorInfo format. + * In general, use these 'log' macros for reporting problems that may require user + * intervention or that need more explanation. Use the 'print' macros for more + * lightweight status messages. + */ #define logErrorInfo(level, errorInfo...) \ do { \ if ((level) <= nix::verbosity) { \ @@ -197,9 +204,11 @@ extern Verbosity verbosity; /* suppress msgs > this */ #define logError(errorInfo...) logErrorInfo(lvlError, errorInfo) #define logWarning(errorInfo...) logErrorInfo(lvlWarn, errorInfo) -/* Print a string message if the current log level is at least the specified - level. Note that this has to be implemented as a macro to ensure that the - arguments are evaluated lazily. */ +/** + * Print a string message if the current log level is at least the specified + * level. Note that this has to be implemented as a macro to ensure that the + * arguments are evaluated lazily. + */ #define printMsgUsing(loggerParam, level, args...) \ do { \ auto __lvl = level; \ @@ -216,7 +225,9 @@ extern Verbosity verbosity; /* suppress msgs > this */ #define debug(args...) printMsg(lvlDebug, args) #define vomit(args...) printMsg(lvlVomit, args) -/* if verbosity >= lvlWarn, print a message with a yellow 'warning:' prefix. */ +/** + * if verbosity >= lvlWarn, print a message with a yellow 'warning:' prefix. + */ template inline void warn(const std::string & fs, const Args & ... args) { diff --git a/src/libutil/lru-cache.hh b/src/libutil/lru-cache.hh index 6ef4a3e06..0e19517ed 100644 --- a/src/libutil/lru-cache.hh +++ b/src/libutil/lru-cache.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include #include @@ -7,7 +8,9 @@ namespace nix { -/* A simple least-recently used cache. Not thread-safe. */ +/** + * A simple least-recently used cache. Not thread-safe. 
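A short sketch of the cache behaviour documented here, assuming libutil's `lru-cache.hh`:

```c++
#include "lru-cache.hh"

#include <cassert>
#include <string>

using namespace nix;

int main()
{
    LRUCache<std::string, int> cache(2);    // capacity of two entries

    cache.upsert("a", 1);
    cache.upsert("b", 2);

    // get() returns std::optional and marks the entry most recently used:
    assert(cache.get("a").value_or(-1) == 1);

    // Inserting a third entry evicts the least recently used one ("b"):
    cache.upsert("c", 3);
    assert(!cache.get("b"));
    assert(cache.get("a") && cache.get("c"));
}
```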
+ */ template class LRUCache { @@ -31,7 +34,9 @@ public: LRUCache(size_t capacity) : capacity(capacity) { } - /* Insert or upsert an item in the cache. */ + /** + * Insert or upsert an item in the cache. + */ void upsert(const Key & key, const Value & value) { if (capacity == 0) return; @@ -39,7 +44,9 @@ public: erase(key); if (data.size() >= capacity) { - /* Retire the oldest item. */ + /** + * Retire the oldest item. + */ auto oldest = lru.begin(); data.erase(*oldest); lru.erase(oldest); @@ -63,14 +70,18 @@ public: return true; } - /* Look up an item in the cache. If it exists, it becomes the most - recently used item. */ + /** + * Look up an item in the cache. If it exists, it becomes the most + * recently used item. + * */ std::optional get(const Key & key) { auto i = data.find(key); if (i == data.end()) return {}; - /* Move this item to the back of the LRU list. */ + /** + * Move this item to the back of the LRU list. + */ lru.erase(i->second.first.it); auto j = lru.insert(lru.end(), i); i->second.first.it = j; diff --git a/src/libutil/monitor-fd.hh b/src/libutil/monitor-fd.hh index 9518cf8aa..86d0115fc 100644 --- a/src/libutil/monitor-fd.hh +++ b/src/libutil/monitor-fd.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include #include diff --git a/src/libutil/namespaces.hh b/src/libutil/namespaces.hh index e82379b9c..0b7eeb66c 100644 --- a/src/libutil/namespaces.hh +++ b/src/libutil/namespaces.hh @@ -1,4 +1,5 @@ #pragma once +///@file namespace nix { diff --git a/src/libutil/pool.hh b/src/libutil/pool.hh index d49067bb9..6247b6125 100644 --- a/src/libutil/pool.hh +++ b/src/libutil/pool.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include #include @@ -11,33 +12,37 @@ namespace nix { -/* This template class implements a simple pool manager of resources - of some type R, such as database connections. It is used as - follows: - - class Connection { ... }; - - Pool pool; - - { - auto conn(pool.get()); - conn->exec("select ..."); - } - - Here, the Connection object referenced by ‘conn’ is automatically - returned to the pool when ‘conn’ goes out of scope. -*/ - +/** + * This template class implements a simple pool manager of resources + * of some type R, such as database connections. It is used as + * follows: + * + * class Connection { ... }; + * + * Pool pool; + * + * { + * auto conn(pool.get()); + * conn->exec("select ..."); + * } + * + * Here, the Connection object referenced by ‘conn’ is automatically + * returned to the pool when ‘conn’ goes out of scope. + */ template class Pool { public: - /* A function that produces new instances of R on demand. */ + /** + * A function that produces new instances of R on demand. + */ typedef std::function()> Factory; - /* A function that checks whether an instance of R is still - usable. Unusable instances are removed from the pool. */ + /** + * A function that checks whether an instance of R is still + * usable. Unusable instances are removed from the pool. + */ typedef std::function &)> Validator; private: diff --git a/src/libutil/ref.hh b/src/libutil/ref.hh index 7d38b059c..af5f8304c 100644 --- a/src/libutil/ref.hh +++ b/src/libutil/ref.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include #include @@ -6,8 +7,10 @@ namespace nix { -/* A simple non-nullable reference-counted pointer. Actually a wrapper - around std::shared_ptr that prevents null constructions. */ +/** + * A simple non-nullable reference-counted pointer. Actually a wrapper + * around std::shared_ptr that prevents null constructions. 
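A sketch of what the non-nullable `ref<T>` buys call sites, assuming libutil's `ref.hh` (which also provides `make_ref`):

```c++
#include "ref.hh"

#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>

using namespace nix;

int main()
{
    // make_ref() constructs the object in place; a ref can never be null,
    // so callees need no nullptr checks:
    ref<std::string> greeting = make_ref<std::string>("hello");
    std::cout << *greeting << " has " << greeting->size() << " chars\n";

    // Converting back to std::shared_ptr always yields a non-null pointer:
    std::shared_ptr<std::string> shared = greeting;
    std::cout << (shared ? "non-null as expected\n" : "impossible\n");

    // Wrapping an empty shared_ptr is rejected up front instead of
    // producing a null "reference":
    try {
        ref<std::string> bad{std::shared_ptr<std::string>()};
    } catch (std::invalid_argument &) {
        std::cout << "null construction rejected\n";
    }
}
```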
+ */ template class ref { diff --git a/src/libutil/regex-combinators.hh b/src/libutil/regex-combinators.hh index 0b997b25a..87d6aa678 100644 --- a/src/libutil/regex-combinators.hh +++ b/src/libutil/regex-combinators.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc index c653db9d0..3d5121a19 100644 --- a/src/libutil/serialise.cc +++ b/src/libutil/serialise.cc @@ -20,7 +20,7 @@ void BufferedSink::operator () (std::string_view data) buffer size. */ if (bufPos + data.size() >= bufSize) { flush(); - write(data); + writeUnbuffered(data); break; } /* Otherwise, copy the bytes to the buffer. Flush the buffer @@ -38,7 +38,7 @@ void BufferedSink::flush() if (bufPos == 0) return; size_t n = bufPos; bufPos = 0; // don't trigger the assert() in ~BufferedSink() - write({buffer.get(), n}); + writeUnbuffered({buffer.get(), n}); } @@ -48,7 +48,7 @@ FdSink::~FdSink() } -void FdSink::write(std::string_view data) +void FdSink::writeUnbuffered(std::string_view data) { written += data.size(); try { @@ -186,6 +186,22 @@ static DefaultStackAllocator defaultAllocatorSingleton; StackAllocator *StackAllocator::defaultAllocator = &defaultAllocatorSingleton; +std::shared_ptr (*create_coro_gc_hook)() = []() -> std::shared_ptr { + return {}; +}; + +/* This class is used for entry and exit hooks on coroutines */ +class CoroutineContext { + /* Disable GC when entering the coroutine without the boehm patch, + * since it doesn't find the main thread stack in this case. + * std::shared_ptr performs type-erasure, so it will call the right + * deleter. */ + const std::shared_ptr coro_gc_hook = create_coro_gc_hook(); +public: + CoroutineContext() {}; + ~CoroutineContext() {}; +}; + std::unique_ptr sourceToSink(std::function fun) { struct SourceToSink : FinishSink @@ -206,7 +222,8 @@ std::unique_ptr sourceToSink(std::function fun) if (in.empty()) return; cur = in; - if (!coro) + if (!coro) { + CoroutineContext ctx; coro = coro_t::push_type(VirtualStackAllocator{}, [&](coro_t::pull_type & yield) { LambdaSource source([&](char *out, size_t out_len) { if (cur.empty()) { @@ -223,17 +240,24 @@ std::unique_ptr sourceToSink(std::function fun) }); fun(source); }); + } if (!*coro) { abort(); } - if (!cur.empty()) (*coro)(false); + if (!cur.empty()) { + CoroutineContext ctx; + (*coro)(false); + } } void finish() override { if (!coro) return; if (!*coro) abort(); - (*coro)(true); + { + CoroutineContext ctx; + (*coro)(true); + } if (*coro) abort(); } }; @@ -264,18 +288,23 @@ std::unique_ptr sinkToSource( size_t read(char * data, size_t len) override { - if (!coro) + if (!coro) { + CoroutineContext ctx; coro = coro_t::pull_type(VirtualStackAllocator{}, [&](coro_t::push_type & yield) { LambdaSink sink([&](std::string_view data) { if (!data.empty()) yield(std::string(data)); }); fun(sink); }); + } if (!*coro) { eof(); abort(); } if (pos == cur.size()) { - if (!cur.empty()) (*coro)(); + if (!cur.empty()) { + CoroutineContext ctx; + (*coro)(); + } cur = coro->get(); pos = 0; } @@ -415,7 +444,7 @@ Error readError(Source & source) auto msg = readString(source); ErrorInfo info { .level = level, - .msg = hintformat(std::move(format("%s") % msg)), + .msg = hintformat(fmt("%s", msg)), }; auto havePos = readNum(source); assert(havePos == 0); @@ -424,7 +453,7 @@ Error readError(Source & source) havePos = readNum(source); assert(havePos == 0); info.traces.push_back(Trace { - .hint = hintformat(std::move(format("%s") % readString(source))) + .hint = hintformat(fmt("%s", 
readString(source))) }); } return Error(std::move(info)); diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh index 7da5b07fd..333c254ea 100644 --- a/src/libutil/serialise.hh +++ b/src/libutil/serialise.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include @@ -10,7 +11,9 @@ namespace boost::context { struct stack_context; } namespace nix { -/* Abstract destination of binary data. */ +/** + * Abstract destination of binary data. + */ struct Sink { virtual ~Sink() { } @@ -18,7 +21,9 @@ struct Sink virtual bool good() { return true; } }; -/* Just throws away data. */ +/** + * Just throws away data. + */ struct NullSink : Sink { void operator () (std::string_view data) override @@ -32,8 +37,10 @@ struct FinishSink : virtual Sink }; -/* A buffered abstract sink. Warning: a BufferedSink should not be - used from multiple threads concurrently. */ +/** + * A buffered abstract sink. Warning: a BufferedSink should not be + * used from multiple threads concurrently. + */ struct BufferedSink : virtual Sink { size_t bufSize, bufPos; @@ -46,23 +53,31 @@ struct BufferedSink : virtual Sink void flush(); - virtual void write(std::string_view data) = 0; +protected: + + virtual void writeUnbuffered(std::string_view data) = 0; }; -/* Abstract source of binary data. */ +/** + * Abstract source of binary data. + */ struct Source { virtual ~Source() { } - /* Store exactly ‘len’ bytes in the buffer pointed to by ‘data’. - It blocks until all the requested data is available, or throws - an error if it is not going to be available. */ + /** + * Store exactly ‘len’ bytes in the buffer pointed to by ‘data’. + * It blocks until all the requested data is available, or throws + * an error if it is not going to be available. + */ void operator () (char * data, size_t len); - /* Store up to ‘len’ in the buffer pointed to by ‘data’, and - return the number of bytes stored. It blocks until at least - one byte is available. */ + /** + * Store up to ‘len’ in the buffer pointed to by ‘data’, and + * return the number of bytes stored. It blocks until at least + * one byte is available. + */ virtual size_t read(char * data, size_t len) = 0; virtual bool good() { return true; } @@ -73,8 +88,10 @@ struct Source }; -/* A buffered abstract source. Warning: a BufferedSource should not be - used from multiple threads concurrently. */ +/** + * A buffered abstract source. Warning: a BufferedSource should not be + * used from multiple threads concurrently. + */ struct BufferedSource : Source { size_t bufSize, bufPosIn, bufPosOut; @@ -88,12 +105,16 @@ struct BufferedSource : Source bool hasData(); protected: - /* Underlying read call, to be overridden. */ + /** + * Underlying read call, to be overridden. + */ virtual size_t readUnbuffered(char * data, size_t len) = 0; }; -/* A sink that writes data to a file descriptor. */ +/** + * A sink that writes data to a file descriptor. + */ struct FdSink : BufferedSink { int fd; @@ -114,7 +135,7 @@ struct FdSink : BufferedSink ~FdSink(); - void write(std::string_view data) override; + void writeUnbuffered(std::string_view data) override; bool good() override; @@ -123,7 +144,9 @@ private: }; -/* A source that reads data from a file descriptor. */ +/** + * A source that reads data from a file descriptor. + */ struct FdSource : BufferedSource { int fd; @@ -149,7 +172,9 @@ private: }; -/* A sink that writes data to a string. */ +/** + * A sink that writes data to a string. 
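The rename of BufferedSink::write to writeUnbuffered in the hunks above is easier to follow with a toy model of the buffering contract: the public call operator accumulates data, and only the protected writeUnbuffered() hook ever touches the underlying destination. The sketch below is a standalone illustration under that assumption, not the actual libutil classes.

#include <cstddef>
#include <iostream>
#include <string>
#include <string_view>

// Toy buffered sink: operator() buffers writes, flush() hands complete
// chunks to the protected writeUnbuffered() hook.
struct ToyBufferedSink
{
    virtual ~ToyBufferedSink() { }

    void operator () (std::string_view data)
    {
        buffer += data;
        if (buffer.size() >= threshold) flush();
    }

    void flush()
    {
        if (buffer.empty()) return;
        std::string chunk = std::move(buffer);
        buffer.clear();
        writeUnbuffered(chunk);
    }

protected:
    // The only place that writes to the real destination.
    virtual void writeUnbuffered(std::string_view data) = 0;

private:
    std::string buffer;
    static constexpr std::size_t threshold = 8;
};

// Concrete sink that appends every chunk to a string.
struct ToyStringSink : ToyBufferedSink
{
    std::string out;
protected:
    void writeUnbuffered(std::string_view data) override { out += data; }
};

int main()
{
    ToyStringSink sink;
    sink("hello ");
    sink("world");                   // crosses the 8-byte threshold and flushes
    sink.flush();                    // push out whatever is still buffered
    std::cout << sink.out << "\n";   // prints "hello world"
}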
+ */ struct StringSink : Sink { std::string s; @@ -163,7 +188,9 @@ struct StringSink : Sink }; -/* A source that reads data from a string. */ +/** + * A source that reads data from a string. + */ struct StringSource : Source { std::string_view s; @@ -173,7 +200,9 @@ struct StringSource : Source }; -/* A sink that writes all incoming data to two other sinks. */ +/** + * A sink that writes all incoming data to two other sinks. + */ struct TeeSink : Sink { Sink & sink1, & sink2; @@ -186,7 +215,9 @@ struct TeeSink : Sink }; -/* Adapter class of a Source that saves all data read to a sink. */ +/** + * Adapter class of a Source that saves all data read to a sink. + */ struct TeeSource : Source { Source & orig; @@ -201,7 +232,9 @@ struct TeeSource : Source } }; -/* A reader that consumes the original Source until 'size'. */ +/** + * A reader that consumes the original Source until 'size'. + */ struct SizedSource : Source { Source & orig; @@ -219,7 +252,9 @@ struct SizedSource : Source return n; } - /* Consume the original source until no remain data is left to consume. */ + /** + * Consume the original source until no remain data is left to consume. + */ size_t drainAll() { std::vector buf(8192); @@ -232,7 +267,9 @@ struct SizedSource : Source } }; -/* A sink that that just counts the number of bytes given to it */ +/** + * A sink that that just counts the number of bytes given to it + */ struct LengthSink : Sink { uint64_t length = 0; @@ -243,7 +280,9 @@ struct LengthSink : Sink } }; -/* Convert a function into a sink. */ +/** + * Convert a function into a sink. + */ struct LambdaSink : Sink { typedef std::function lambda_t; @@ -259,7 +298,9 @@ struct LambdaSink : Sink }; -/* Convert a function into a source. */ +/** + * Convert a function into a source. + */ struct LambdaSource : Source { typedef std::function lambda_t; @@ -274,8 +315,10 @@ struct LambdaSource : Source } }; -/* Chain two sources together so after the first is exhausted, the second is - used */ +/** + * Chain two sources together so after the first is exhausted, the second is + * used + */ struct ChainSource : Source { Source & source1, & source2; @@ -289,8 +332,10 @@ struct ChainSource : Source std::unique_ptr sourceToSink(std::function fun); -/* Convert a function that feeds data into a Sink into a Source. The - Source executes the function as a coroutine. */ +/** + * Convert a function that feeds data into a Sink into a Source. The + * Source executes the function as a coroutine. + */ std::unique_ptr sinkToSource( std::function fun, std::function eof = []() { @@ -376,7 +421,9 @@ Source & operator >> (Source & in, bool & b) Error readError(Source & source); -/* An adapter that converts a std::basic_istream into a source. */ +/** + * An adapter that converts a std::basic_istream into a source. + */ struct StreamToSourceAdapter : Source { std::shared_ptr> istream; @@ -399,13 +446,14 @@ struct StreamToSourceAdapter : Source }; -/* A source that reads a distinct format of concatenated chunks back into its - logical form, in order to guarantee a known state to the original stream, - even in the event of errors. - - Use with FramedSink, which also allows the logical stream to be terminated - in the event of an exception. -*/ +/** + * A source that reads a distinct format of concatenated chunks back into its + * logical form, in order to guarantee a known state to the original stream, + * even in the event of errors. + * + * Use with FramedSink, which also allows the logical stream to be terminated + * in the event of an exception. 
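The FramedSource/FramedSink pair documented further on in this header frames the logical stream as chunks so the reader always knows where the stream ends, even after an error on the writing side. As a hedged illustration of that general idea only — the exact byte layout used by Nix is not reproduced here — a length-prefixed framing with an empty terminating chunk looks like this:

#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>

// Write one chunk as <4-byte little-endian length><payload>; a zero-length
// chunk marks the end of the logical stream.
static void writeFrame(std::ostream & out, const std::string & payload)
{
    uint32_t len = payload.size();
    char header[4];
    for (int i = 0; i < 4; ++i) header[i] = (len >> (8 * i)) & 0xff;
    out.write(header, 4);
    out.write(payload.data(), payload.size());
}

// Read chunks until the zero-length terminator and concatenate the payloads.
static std::string readFramed(std::istream & in)
{
    std::string result;
    while (true) {
        char header[4];
        in.read(header, 4);
        uint32_t len = 0;
        for (int i = 0; i < 4; ++i)
            len |= uint32_t(static_cast<unsigned char>(header[i])) << (8 * i);
        if (len == 0) break;                 // terminator: the logical stream is complete
        std::string payload(len, '\0');
        in.read(&payload[0], len);
        result += payload;
    }
    return result;
}

int main()
{
    std::stringstream wire;
    writeFrame(wire, "hello ");
    writeFrame(wire, "framed world");
    writeFrame(wire, "");                    // terminate the logical stream
    std::cout << readFramed(wire) << "\n";   // prints "hello framed world"
}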
+ */ struct FramedSource : Source { Source & from; @@ -450,11 +498,12 @@ struct FramedSource : Source } }; -/* Write as chunks in the format expected by FramedSource. - - The exception_ptr reference can be used to terminate the stream when you - detect that an error has occurred on the remote end. -*/ +/** + * Write as chunks in the format expected by FramedSource. + * + * The exception_ptr reference can be used to terminate the stream when you + * detect that an error has occurred on the remote end. + */ struct FramedSink : nix::BufferedSink { BufferedSink & to; @@ -473,7 +522,7 @@ struct FramedSink : nix::BufferedSink } } - void write(std::string_view data) override + void writeUnbuffered(std::string_view data) override { /* Don't send more data if the remote has encountered an error. */ @@ -487,18 +536,27 @@ struct FramedSink : nix::BufferedSink }; }; -/* Stack allocation strategy for sinkToSource. - Mutable to avoid a boehm gc dependency in libutil. - - boost::context doesn't provide a virtual class, so we define our own. +/** + * Stack allocation strategy for sinkToSource. + * Mutable to avoid a boehm gc dependency in libutil. + * + * boost::context doesn't provide a virtual class, so we define our own. */ struct StackAllocator { virtual boost::context::stack_context allocate() = 0; virtual void deallocate(boost::context::stack_context sctx) = 0; - /* The stack allocator to use in sinkToSource and potentially elsewhere. - It is reassigned by the initGC() method in libexpr. */ + /** + * The stack allocator to use in sinkToSource and potentially elsewhere. + * It is reassigned by the initGC() method in libexpr. + */ static StackAllocator *defaultAllocator; }; +/* Disabling GC when entering a coroutine (without the boehm patch). + mutable to avoid boehm gc dependency in libutil. + */ +extern std::shared_ptr (*create_coro_gc_hook)(); + + } diff --git a/src/libutil/split.hh b/src/libutil/split.hh index 87a23b13e..3b9b2b83b 100644 --- a/src/libutil/split.hh +++ b/src/libutil/split.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include #include @@ -7,10 +8,12 @@ namespace nix { -// If `separator` is found, we return the portion of the string before the -// separator, and modify the string argument to contain only the part after the -// separator. Otherwise, we return `std::nullopt`, and we leave the argument -// string alone. +/** + * If `separator` is found, we return the portion of the string before the + * separator, and modify the string argument to contain only the part after the + * separator. Otherwise, we return `std::nullopt`, and we leave the argument + * string alone. 
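The splitPrefixTo contract documented above — return the text before the separator and shrink the argument to the text after it, or return std::nullopt and leave the argument untouched — is easiest to see in use. The sketch below is a standalone stand-in with the same documented behaviour, not the header's own function.

#include <iostream>
#include <optional>
#include <string_view>

// If `separator` occurs in `s`, return the part before it and shrink `s`
// to the part after it; otherwise return nullopt and leave `s` unchanged.
static std::optional<std::string_view> splitPrefix(std::string_view & s, char separator)
{
    auto pos = s.find(separator);
    if (pos == std::string_view::npos) return std::nullopt;
    auto prefix = s.substr(0, pos);
    s.remove_prefix(pos + 1);
    return prefix;
}

int main()
{
    std::string_view rest = "github:NixOS/nix";
    if (auto scheme = splitPrefix(rest, ':'))
        std::cout << "scheme=" << *scheme << " rest=" << rest << "\n";
    // prints: scheme=github rest=NixOS/nix

    std::string_view untouched = "no-separator-here";
    if (!splitPrefix(untouched, ':'))
        std::cout << "left alone: " << untouched << "\n";
}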
+ */ static inline std::optional splitPrefixTo(std::string_view & string, char separator) { auto sepInstance = string.find(separator); diff --git a/src/libutil/suggestions.hh b/src/libutil/suggestions.hh index d54dd8e31..9abf5ee5f 100644 --- a/src/libutil/suggestions.hh +++ b/src/libutil/suggestions.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "comparator.hh" #include "types.hh" @@ -13,7 +14,8 @@ int levenshteinDistance(std::string_view first, std::string_view second); */ class Suggestion { public: - int distance; // The smaller the better + /// The smaller the better + int distance; std::string suggestion; std::string to_string() const; @@ -43,7 +45,9 @@ public: std::ostream & operator<<(std::ostream & str, const Suggestion &); std::ostream & operator<<(std::ostream & str, const Suggestions &); -// Either a value of type `T`, or some suggestions +/** + * Either a value of type `T`, or some suggestions + */ template class OrSuggestions { public: diff --git a/src/libutil/sync.hh b/src/libutil/sync.hh index e1d591d77..47e4512b1 100644 --- a/src/libutil/sync.hh +++ b/src/libutil/sync.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include #include @@ -7,22 +8,22 @@ namespace nix { -/* This template class ensures synchronized access to a value of type - T. It is used as follows: - - struct Data { int x; ... }; - - Sync data; - - { - auto data_(data.lock()); - data_->x = 123; - } - - Here, "data" is automatically unlocked when "data_" goes out of - scope. -*/ - +/** + * This template class ensures synchronized access to a value of type + * T. It is used as follows: + * + * struct Data { int x; ... }; + * + * Sync data; + * + * { + * auto data_(data.lock()); + * data_->x = 123; + * } + * + * Here, "data" is automatically unlocked when "data_" goes out of + * scope. 
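The Sync documentation above promises that the protected value is only reachable through a lock and is unlocked again when the guard goes out of scope. A minimal standalone sketch of that shape (illustrative names, and without the condition-variable helpers of the real class) is:

#include <iostream>
#include <mutex>

// Lock-on-access wrapper: the protected value is only reachable through
// lock(), which returns a guard holding the mutex until it goes out of scope.
template<typename T>
class Guarded
{
    T value;
    std::mutex mutex;

public:
    Guarded(T v = T{}) : value(std::move(v)) { }

    class Lock
    {
        std::unique_lock<std::mutex> held;
        T * value;
    public:
        Lock(std::mutex & m, T & v) : held(m), value(&v) { }
        T * operator ->() { return value; }
        T & operator *() { return *value; }
    };

    Lock lock() { return Lock(mutex, value); }
};

struct Data { int x = 0; };

int main()
{
    Guarded<Data> data;
    {
        auto d = data.lock();   // mutex acquired here
        d->x = 123;
    }                           // mutex released when `d` goes out of scope
    std::cout << data.lock()->x << "\n";   // prints 123
}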
+ */ template class Sync { diff --git a/src/libutil/tarfile.cc b/src/libutil/tarfile.cc index 238d0a7a6..5060a8f24 100644 --- a/src/libutil/tarfile.cc +++ b/src/libutil/tarfile.cc @@ -17,7 +17,7 @@ static ssize_t callback_read(struct archive * archive, void * _self, const void *buffer = self->buffer.data(); try { - return self->source->read((char *) self->buffer.data(), 4096); + return self->source->read((char *) self->buffer.data(), self->buffer.size()); } catch (EndOfFile &) { return 0; } catch (std::exception & err) { @@ -39,7 +39,7 @@ void TarArchive::check(int err, const std::string & reason) throw Error(reason, archive_error_string(this->archive)); } -TarArchive::TarArchive(Source & source, bool raw) : buffer(4096) +TarArchive::TarArchive(Source & source, bool raw) : buffer(65536) { this->archive = archive_read_new(); this->source = &source; diff --git a/src/libutil/tarfile.hh b/src/libutil/tarfile.hh index 4d9141fd4..237d18c31 100644 --- a/src/libutil/tarfile.hh +++ b/src/libutil/tarfile.hh @@ -1,3 +1,6 @@ +#pragma once +///@file + #include "serialise.hh" #include @@ -14,13 +17,14 @@ struct TarArchive { TarArchive(const Path & path); - // disable copy constructor + /// disable copy constructor TarArchive(const TarArchive &) = delete; void close(); ~TarArchive(); }; + void unpackTarfile(Source & source, const Path & destDir); void unpackTarfile(const Path & tarFile, const Path & destDir); diff --git a/src/libutil/tests/canon-path.cc b/src/libutil/tests/canon-path.cc index c1c5adadf..fc94ccc3d 100644 --- a/src/libutil/tests/canon-path.cc +++ b/src/libutil/tests/canon-path.cc @@ -107,15 +107,13 @@ namespace nix { } TEST(CanonPath, within) { - { - ASSERT_TRUE(CanonPath("foo").isWithin(CanonPath("foo"))); - ASSERT_FALSE(CanonPath("foo").isWithin(CanonPath("bar"))); - ASSERT_FALSE(CanonPath("foo").isWithin(CanonPath("fo"))); - ASSERT_TRUE(CanonPath("foo/bar").isWithin(CanonPath("foo"))); - ASSERT_FALSE(CanonPath("foo").isWithin(CanonPath("foo/bar"))); - ASSERT_TRUE(CanonPath("/foo/bar/default.nix").isWithin(CanonPath("/"))); - ASSERT_TRUE(CanonPath("/").isWithin(CanonPath("/"))); - } + ASSERT_TRUE(CanonPath("foo").isWithin(CanonPath("foo"))); + ASSERT_FALSE(CanonPath("foo").isWithin(CanonPath("bar"))); + ASSERT_FALSE(CanonPath("foo").isWithin(CanonPath("fo"))); + ASSERT_TRUE(CanonPath("foo/bar").isWithin(CanonPath("foo"))); + ASSERT_FALSE(CanonPath("foo").isWithin(CanonPath("foo/bar"))); + ASSERT_TRUE(CanonPath("/foo/bar/default.nix").isWithin(CanonPath("/"))); + ASSERT_TRUE(CanonPath("/").isWithin(CanonPath("/"))); } TEST(CanonPath, sort) { @@ -127,29 +125,38 @@ namespace nix { } TEST(CanonPath, allowed) { - { - std::set allowed { - CanonPath("foo/bar"), - CanonPath("foo!"), - CanonPath("xyzzy"), - CanonPath("a/b/c"), - }; + std::set allowed { + CanonPath("foo/bar"), + CanonPath("foo!"), + CanonPath("xyzzy"), + CanonPath("a/b/c"), + }; - ASSERT_TRUE (CanonPath("foo/bar").isAllowed(allowed)); - ASSERT_TRUE (CanonPath("foo/bar/bla").isAllowed(allowed)); - ASSERT_TRUE (CanonPath("foo").isAllowed(allowed)); - ASSERT_FALSE(CanonPath("bar").isAllowed(allowed)); - ASSERT_FALSE(CanonPath("bar/a").isAllowed(allowed)); - ASSERT_TRUE (CanonPath("a").isAllowed(allowed)); - ASSERT_TRUE (CanonPath("a/b").isAllowed(allowed)); - ASSERT_TRUE (CanonPath("a/b/c").isAllowed(allowed)); - ASSERT_TRUE (CanonPath("a/b/c/d").isAllowed(allowed)); - ASSERT_TRUE (CanonPath("a/b/c/d/e").isAllowed(allowed)); - ASSERT_FALSE(CanonPath("a/b/a").isAllowed(allowed)); - ASSERT_FALSE(CanonPath("a/b/d").isAllowed(allowed)); 
- ASSERT_FALSE(CanonPath("aaa").isAllowed(allowed)); - ASSERT_FALSE(CanonPath("zzz").isAllowed(allowed)); - ASSERT_TRUE (CanonPath("/").isAllowed(allowed)); - } + ASSERT_TRUE (CanonPath("foo/bar").isAllowed(allowed)); + ASSERT_TRUE (CanonPath("foo/bar/bla").isAllowed(allowed)); + ASSERT_TRUE (CanonPath("foo").isAllowed(allowed)); + ASSERT_FALSE(CanonPath("bar").isAllowed(allowed)); + ASSERT_FALSE(CanonPath("bar/a").isAllowed(allowed)); + ASSERT_TRUE (CanonPath("a").isAllowed(allowed)); + ASSERT_TRUE (CanonPath("a/b").isAllowed(allowed)); + ASSERT_TRUE (CanonPath("a/b/c").isAllowed(allowed)); + ASSERT_TRUE (CanonPath("a/b/c/d").isAllowed(allowed)); + ASSERT_TRUE (CanonPath("a/b/c/d/e").isAllowed(allowed)); + ASSERT_FALSE(CanonPath("a/b/a").isAllowed(allowed)); + ASSERT_FALSE(CanonPath("a/b/d").isAllowed(allowed)); + ASSERT_FALSE(CanonPath("aaa").isAllowed(allowed)); + ASSERT_FALSE(CanonPath("zzz").isAllowed(allowed)); + ASSERT_TRUE (CanonPath("/").isAllowed(allowed)); + } + + TEST(CanonPath, makeRelative) { + CanonPath d("/foo/bar"); + ASSERT_EQ(d.makeRelative(CanonPath("/foo/bar")), "."); + ASSERT_EQ(d.makeRelative(CanonPath("/foo")), ".."); + ASSERT_EQ(d.makeRelative(CanonPath("/")), "../.."); + ASSERT_EQ(d.makeRelative(CanonPath("/foo/bar/xyzzy")), "xyzzy"); + ASSERT_EQ(d.makeRelative(CanonPath("/foo/bar/xyzzy/bla")), "xyzzy/bla"); + ASSERT_EQ(d.makeRelative(CanonPath("/foo/xyzzy/bla")), "../xyzzy/bla"); + ASSERT_EQ(d.makeRelative(CanonPath("/xyzzy/bla")), "../../xyzzy/bla"); } } diff --git a/src/libutil/tests/config.cc b/src/libutil/tests/config.cc index 8be6730dd..886e70da5 100644 --- a/src/libutil/tests/config.cc +++ b/src/libutil/tests/config.cc @@ -82,6 +82,7 @@ namespace nix { TestSetting() : AbstractSetting("test", "test", {}) {} void set(const std::string & value, bool append) override {} std::string to_string() const override { return {}; } + bool isAppendable() override { return false; } }; Config config; @@ -90,6 +91,7 @@ namespace nix { ASSERT_FALSE(config.set("test", "value")); config.addSetting(&setting); ASSERT_TRUE(config.set("test", "value")); + ASSERT_FALSE(config.set("extra-test", "value")); } TEST(Config, withInitialValue) { @@ -156,12 +158,54 @@ namespace nix { } TEST(Config, toJSONOnNonEmptyConfig) { + using nlohmann::literals::operator "" _json; Config config; - std::map settings; - Setting setting{&config, "", "name-of-the-setting", "description"}; + Setting setting{ + &config, + "", + "name-of-the-setting", + "description", + }; setting.assign("value"); - ASSERT_EQ(config.toJSON().dump(), R"#({"name-of-the-setting":{"aliases":[],"defaultValue":"","description":"description\n","documentDefault":true,"value":"value"}})#"); + ASSERT_EQ(config.toJSON(), + R"#({ + "name-of-the-setting": { + "aliases": [], + "defaultValue": "", + "description": "description\n", + "documentDefault": true, + "value": "value", + "experimentalFeature": null + } + })#"_json); + } + + TEST(Config, toJSONOnNonEmptyConfigWithExperimentalSetting) { + using nlohmann::literals::operator "" _json; + Config config; + Setting setting{ + &config, + "", + "name-of-the-setting", + "description", + {}, + true, + Xp::Flakes, + }; + setting.assign("value"); + + ASSERT_EQ(config.toJSON(), + R"#({ + "name-of-the-setting": { + "aliases": [], + "defaultValue": "", + "description": "description\n", + "documentDefault": true, + "value": "value", + "experimentalFeature": "flakes" + } + })#"_json); } TEST(Config, setSettingAlias) { diff --git a/src/libutil/tests/hash.hh b/src/libutil/tests/hash.hh index 
9e9650e6e..1f9fa59ae 100644 --- a/src/libutil/tests/hash.hh +++ b/src/libutil/tests/hash.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include diff --git a/src/libutil/tests/url.cc b/src/libutil/tests/url.cc index e0c438b4d..a908631e6 100644 --- a/src/libutil/tests/url.cc +++ b/src/libutil/tests/url.cc @@ -302,4 +302,37 @@ namespace nix { ASSERT_EQ(d, s); } + + /* ---------------------------------------------------------------------------- + * percentEncode + * --------------------------------------------------------------------------*/ + + TEST(percentEncode, encodesUrlEncodedString) { + std::string s = percentEncode("==@=="); + std::string d = "%3D%3D%40%3D%3D"; + ASSERT_EQ(d, s); + } + + TEST(percentEncode, keepArgument) { + std::string a = percentEncode("abd / def"); + std::string b = percentEncode("abd / def", "/"); + ASSERT_EQ(a, "abd%20%2F%20def"); + ASSERT_EQ(b, "abd%20/%20def"); + } + + TEST(percentEncode, inverseOfDecode) { + std::string original = "%3D%3D%40%3D%3D"; + std::string once = percentEncode(original); + std::string back = percentDecode(once); + + ASSERT_EQ(back, original); + } + + TEST(percentEncode, trailingPercent) { + std::string s = percentEncode("==@==%"); + std::string d = "%3D%3D%40%3D%3D%25"; + + ASSERT_EQ(d, s); + } + } diff --git a/src/libutil/thread-pool.hh b/src/libutil/thread-pool.hh index b22e0d162..0e09fae97 100644 --- a/src/libutil/thread-pool.hh +++ b/src/libutil/thread-pool.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "sync.hh" #include "util.hh" @@ -13,8 +14,10 @@ namespace nix { MakeError(ThreadPoolShutDown, Error); -/* A simple thread pool that executes a queue of work items - (lambdas). */ +/** + * A simple thread pool that executes a queue of work items + * (lambdas). + */ class ThreadPool { public: @@ -23,19 +26,30 @@ public: ~ThreadPool(); - // FIXME: use std::packaged_task? + /** + * An individual work item. + * + * \todo use std::packaged_task? + */ typedef std::function work_t; - /* Enqueue a function to be executed by the thread pool. */ + /** + * Enqueue a function to be executed by the thread pool. + */ void enqueue(const work_t & t); - /* Execute work items until the queue is empty. Note that work - items are allowed to add new items to the queue; this is - handled correctly. Queue processing stops prematurely if any - work item throws an exception. This exception is propagated to - the calling thread. If multiple work items throw an exception - concurrently, only one item is propagated; the others are - printed on stderr and otherwise ignored. */ + /** + * Execute work items until the queue is empty. + * + * \note Note that work items are allowed to add new items to the + * queue; this is handled correctly. + * + * Queue processing stops prematurely if any work item throws an + * exception. This exception is propagated to the calling thread. If + * multiple work items throw an exception concurrently, only one + * item is propagated; the others are printed on stderr and + * otherwise ignored. + */ void process(); private: @@ -62,9 +76,11 @@ private: void shutdown(); }; -/* Process in parallel a set of items of type T that have a partial - ordering between them. Thus, any item is only processed after all - its dependencies have been processed. */ +/** + * Process in parallel a set of items of type T that have a partial + * ordering between them. Thus, any item is only processed after all + * its dependencies have been processed. 
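The processGraph helper documented above guarantees that an item runs only after all of its dependencies. As a hedged, purely sequential illustration of that ordering — the real function spreads the work over the ThreadPool declared in this header — here is a small Kahn-style sketch with illustrative names:

#include <cstddef>
#include <functional>
#include <iostream>
#include <map>
#include <queue>
#include <set>
#include <string>

// Process every node of a dependency graph, but only after all of the nodes
// it depends on have been processed (Kahn's algorithm, sequential version).
// Every node must appear as a key of `deps`.
static void processInDependencyOrder(
    const std::map<std::string, std::set<std::string>> & deps,
    std::function<void(const std::string &)> work)
{
    std::map<std::string, std::size_t> pending;                  // unprocessed deps per node
    std::map<std::string, std::set<std::string>> dependents;     // reverse edges

    for (auto & [node, nodeDeps] : deps) {
        pending[node] = nodeDeps.size();
        for (auto & d : nodeDeps) dependents[d].insert(node);
    }

    std::queue<std::string> ready;
    for (auto & [node, count] : pending)
        if (count == 0) ready.push(node);

    while (!ready.empty()) {
        auto node = ready.front();
        ready.pop();
        work(node);                                    // all deps are already done
        for (auto & next : dependents[node])
            if (--pending[next] == 0) ready.push(next);
    }
}

int main()
{
    // "app" depends on "lib", which depends on "stdenv".
    std::map<std::string, std::set<std::string>> deps = {
        {"stdenv", {}},
        {"lib", {"stdenv"}},
        {"app", {"lib", "stdenv"}},
    };
    processInDependencyOrder(deps, [](const std::string & n) {
        std::cout << "processing " << n << "\n";
    });
    // prints stdenv, then lib, then app
}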
+ */ template void processGraph( ThreadPool & pool, diff --git a/src/libutil/topo-sort.hh b/src/libutil/topo-sort.hh index 7418be5e0..a52811fbf 100644 --- a/src/libutil/topo-sort.hh +++ b/src/libutil/topo-sort.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "error.hh" diff --git a/src/libutil/types.hh b/src/libutil/types.hh index 6bcbd7e1d..c86f52175 100644 --- a/src/libutil/types.hh +++ b/src/libutil/types.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "ref.hh" @@ -17,7 +18,9 @@ typedef std::set StringSet; typedef std::map StringMap; typedef std::map StringPairs; -/* Paths are just strings. */ +/** + * Paths are just strings. + */ typedef std::string Path; typedef std::string_view PathView; typedef std::list Paths; @@ -25,15 +28,19 @@ typedef std::set PathSet; typedef std::vector> Headers; -/* Helper class to run code at startup. */ +/** + * Helper class to run code at startup. + */ template struct OnStartup { OnStartup(T && t) { t(); } }; -/* Wrap bools to prevent string literals (i.e. 'char *') from being - cast to a bool in Attr. */ +/** + * Wrap bools to prevent string literals (i.e. 'char *') from being + * cast to a bool in Attr. + */ template struct Explicit { T t; @@ -45,21 +52,25 @@ struct Explicit { }; -/* This wants to be a little bit like rust's Cow type. - Some parts of the evaluator benefit greatly from being able to reuse - existing allocations for strings, but have to be able to also use - newly allocated storage for values. - - We do not define implicit conversions, even with ref qualifiers, - since those can easily become ambiguous to the reader and can degrade - into copying behaviour we want to avoid. */ +/** + * This wants to be a little bit like rust's Cow type. + * Some parts of the evaluator benefit greatly from being able to reuse + * existing allocations for strings, but have to be able to also use + * newly allocated storage for values. + * + * We do not define implicit conversions, even with ref qualifiers, + * since those can easily become ambiguous to the reader and can degrade + * into copying behaviour we want to avoid. + */ class BackedStringView { private: std::variant data; - /* Needed to introduce a temporary since operator-> must return - a pointer. Without this we'd need to store the view object - even when we already own a string. */ + /** + * Needed to introduce a temporary since operator-> must return + * a pointer. Without this we'd need to store the view object + * even when we already own a string. + */ class Ptr { private: std::string_view view; @@ -77,8 +88,10 @@ public: BackedStringView(const BackedStringView &) = delete; BackedStringView & operator=(const BackedStringView &) = delete; - /* We only want move operations defined since the sole purpose of - this type is to avoid copies. */ + /** + * We only want move operations defined since the sole purpose of + * this type is to avoid copies. + */ BackedStringView(BackedStringView && other) = default; BackedStringView & operator=(BackedStringView && other) = default; diff --git a/src/libutil/url-parts.hh b/src/libutil/url-parts.hh index d5e6a2736..98162b0f7 100644 --- a/src/libutil/url-parts.hh +++ b/src/libutil/url-parts.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include #include @@ -22,21 +23,22 @@ const static std::string segmentRegex = "(?:" + pcharRegex + "*)"; const static std::string absPathRegex = "(?:(?:/" + segmentRegex + ")*/?)"; const static std::string pathRegex = "(?:" + segmentRegex + "(?:/" + segmentRegex + ")*/?)"; -// A Git ref (i.e. branch or tag name). 
-const static std::string refRegexS = "[a-zA-Z0-9][a-zA-Z0-9_.\\/-]*"; // FIXME: check +/// A Git ref (i.e. branch or tag name). +/// \todo check that this is correct. +const static std::string refRegexS = "[a-zA-Z0-9@][a-zA-Z0-9_.\\/@-]*"; extern std::regex refRegex; -// Instead of defining what a good Git Ref is, we define what a bad Git Ref is -// This is because of the definition of a ref in refs.c in https://github.com/git/git -// See tests/fetchGitRefs.sh for the full definition +/// Instead of defining what a good Git Ref is, we define what a bad Git Ref is +/// This is because of the definition of a ref in refs.c in https://github.com/git/git +/// See tests/fetchGitRefs.sh for the full definition const static std::string badGitRefRegexS = "//|^[./]|/\\.|\\.\\.|[[:cntrl:][:space:]:?^~\[]|\\\\|\\*|\\.lock$|\\.lock/|@\\{|[/.]$|^@$|^$"; extern std::regex badGitRefRegex; -// A Git revision (a SHA-1 commit hash). +/// A Git revision (a SHA-1 commit hash). const static std::string revRegexS = "[0-9a-fA-F]{40}"; extern std::regex revRegex; -// A ref or revision, or a ref followed by a revision. +/// A ref or revision, or a ref followed by a revision. const static std::string refAndOrRevRegex = "(?:(" + revRegexS + ")|(?:(" + refRegexS + ")(?:/(" + revRegexS + "))?))"; const static std::string flakeIdRegexS = "[a-zA-Z][a-zA-Z0-9_-]*"; diff --git a/src/libutil/url.cc b/src/libutil/url.cc index 4e43455e1..9e44241ac 100644 --- a/src/libutil/url.cc +++ b/src/libutil/url.cc @@ -88,17 +88,22 @@ std::map decodeQuery(const std::string & query) return result; } -std::string percentEncode(std::string_view s) +const static std::string allowedInQuery = ":@/?"; +const static std::string allowedInPath = ":@/"; + +std::string percentEncode(std::string_view s, std::string_view keep) { std::string res; for (auto & c : s) + // unreserved + keep if ((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') - || strchr("-._~!$&'()*+,;=:@", c)) + || strchr("-._~", c) + || keep.find(c) != std::string::npos) res += c; else - res += fmt("%%%02x", (unsigned int) c); + res += fmt("%%%02X", (unsigned int) c); return res; } @@ -109,9 +114,9 @@ std::string encodeQuery(const std::map & ss) for (auto & [name, value] : ss) { if (!first) res += '&'; first = false; - res += percentEncode(name); + res += percentEncode(name, allowedInQuery); res += '='; - res += percentEncode(value); + res += percentEncode(value, allowedInQuery); } return res; } @@ -122,7 +127,7 @@ std::string ParsedURL::to_string() const scheme + ":" + (authority ? "//" + *authority : "") - + path + + percentEncode(path, allowedInPath) + (query.empty() ? "" : "?" + encodeQuery(query)) + (fragment.empty() ? 
"" : "#" + percentEncode(fragment)); } diff --git a/src/libutil/url.hh b/src/libutil/url.hh index 2a9fb34c1..d2413ec0e 100644 --- a/src/libutil/url.hh +++ b/src/libutil/url.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "error.hh" @@ -7,7 +8,8 @@ namespace nix { struct ParsedURL { std::string url; - std::string base; // URL without query/fragment + /// URL without query/fragment + std::string base; std::string scheme; std::optional authority; std::string path; @@ -22,12 +24,13 @@ struct ParsedURL MakeError(BadURL, Error); std::string percentDecode(std::string_view in); +std::string percentEncode(std::string_view s, std::string_view keep=""); std::map decodeQuery(const std::string & query); ParsedURL parseURL(const std::string & url); -/* +/** * Although that’s not really standardized anywhere, an number of tools * use a scheme of the form 'x+y' in urls, where y is the “transport layer” * scheme, and x is the “application layer” scheme. diff --git a/src/libutil/util.cc b/src/libutil/util.cc index 885bae69c..21d1c8dcd 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -47,6 +47,9 @@ extern char * * environ __attribute__((weak)); namespace nix { +void initLibUtil() { +} + std::optional getEnv(const std::string & key) { char * value = getenv(key.c_str()); @@ -54,6 +57,11 @@ std::optional getEnv(const std::string & key) return std::string(value); } +std::optional getEnvNonEmpty(const std::string & key) { + auto value = getEnv(key); + if (value == "") return {}; + return value; +} std::map getEnv() { @@ -523,7 +531,7 @@ void deletePath(const Path & path) void deletePath(const Path & path, uint64_t & bytesFreed) { - //Activity act(*logger, lvlDebug, format("recursively deleting path '%1%'") % path); + //Activity act(*logger, lvlDebug, "recursively deleting path '%1%'", path); bytesFreed = 0; _deletePath(path, bytesFreed); } @@ -1065,12 +1073,14 @@ static pid_t doFork(bool allowVfork, std::function fun) } +#if __linux__ static int childEntry(void * arg) { auto main = (std::function *) arg; (*main)(); return 1; } +#endif pid_t startProcess(std::function fun, const ProcessOptions & options) @@ -1394,14 +1404,14 @@ std::string statusToString(int status) { if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) { if (WIFEXITED(status)) - return (format("failed with exit code %1%") % WEXITSTATUS(status)).str(); + return fmt("failed with exit code %1%", WEXITSTATUS(status)); else if (WIFSIGNALED(status)) { int sig = WTERMSIG(status); #if HAVE_STRSIGNAL const char * description = strsignal(sig); - return (format("failed due to signal %1% (%2%)") % sig % description).str(); + return fmt("failed due to signal %1% (%2%)", sig, description); #else - return (format("failed due to signal %1%") % sig).str(); + return fmt("failed due to signal %1%", sig); #endif } else @@ -1470,7 +1480,7 @@ bool shouldANSI() && !getEnv("NO_COLOR").has_value(); } -std::string filterANSIEscapes(const std::string & s, bool filterAll, unsigned int width) +std::string filterANSIEscapes(std::string_view s, bool filterAll, unsigned int width) { std::string t, e; size_t w = 0; @@ -1737,13 +1747,39 @@ void triggerInterrupt() } static sigset_t savedSignalMask; +static bool savedSignalMaskIsSet = false; + +void setChildSignalMask(sigset_t * sigs) +{ + assert(sigs); // C style function, but think of sigs as a reference + +#if _POSIX_C_SOURCE >= 1 || _XOPEN_SOURCE || _POSIX_SOURCE + sigemptyset(&savedSignalMask); + // There's no "assign" or "copy" function, so we rely on (math) idempotence + // of the or operator: a or a = a. 
+ sigorset(&savedSignalMask, sigs, sigs); +#else + // Without sigorset, our best bet is to assume that sigset_t is a type that + // can be assigned directly, such as is the case for a sigset_t defined as + // an integer type. + savedSignalMask = *sigs; +#endif + + savedSignalMaskIsSet = true; +} + +void saveSignalMask() { + if (sigprocmask(SIG_BLOCK, nullptr, &savedSignalMask)) + throw SysError("querying signal mask"); + + savedSignalMaskIsSet = true; +} void startSignalHandlerThread() { updateWindowSize(); - if (sigprocmask(SIG_BLOCK, nullptr, &savedSignalMask)) - throw SysError("querying signal mask"); + saveSignalMask(); sigset_t set; sigemptyset(&set); @@ -1760,6 +1796,20 @@ void startSignalHandlerThread() static void restoreSignals() { + // If startSignalHandlerThread wasn't called, that means we're not running + // in a proper libmain process, but a process that presumably manages its + // own signal handlers. Such a process should call either + // - initNix(), to be a proper libmain process + // - startSignalHandlerThread(), to resemble libmain regarding signal + // handling only + // - saveSignalMask(), for processes that define their own signal handling + // thread + // TODO: Warn about this? Have a default signal mask? The latter depends on + // whether we should generally inherit signal masks from the caller. + // I don't know what the larger unix ecosystem expects from us here. + if (!savedSignalMaskIsSet) + return; + if (sigprocmask(SIG_SETMASK, &savedSignalMask, nullptr)) throw SysError("restoring signals"); } @@ -1961,7 +2011,7 @@ std::string showBytes(uint64_t bytes) // FIXME: move to libstore/build -void commonChildInit(Pipe & logPipe) +void commonChildInit() { logger = makeSimpleLogger(); @@ -1975,10 +2025,6 @@ void commonChildInit(Pipe & logPipe) if (setsid() == -1) throw SysError("creating a new session"); - /* Dup the write side of the logger pipe into stderr. */ - if (dup2(logPipe.writeSide.get(), STDERR_FILENO) == -1) - throw SysError("cannot pipe standard error into log file"); - /* Dup stderr to stdout. */ if (dup2(STDERR_FILENO, STDOUT_FILENO) == -1) throw SysError("cannot dup stderr into stdout"); diff --git a/src/libutil/util.hh b/src/libutil/util.hh index b5625ecef..040fed68f 100644 --- a/src/libutil/util.hh +++ b/src/libutil/util.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "types.hh" #include "error.hh" @@ -31,74 +32,114 @@ namespace nix { struct Sink; struct Source; +void initLibUtil(); -/* The system for which Nix is compiled. */ +/** + * The system for which Nix is compiled. + */ extern const std::string nativeSystem; -/* Return an environment variable. */ +/** + * @return an environment variable. + */ std::optional getEnv(const std::string & key); -/* Get the entire environment. */ +/** + * @return a non empty environment variable. Returns nullopt if the env + * variable is set to "" + */ +std::optional getEnvNonEmpty(const std::string & key); + +/** + * Get the entire environment. + */ std::map getEnv(); -/* Clear the environment. */ +/** + * Clear the environment. + */ void clearEnv(); -/* Return an absolutized path, resolving paths relative to the - specified directory, or the current directory otherwise. The path - is also canonicalised. */ +/** + * @return An absolutized path, resolving paths relative to the + * specified directory, or the current directory otherwise. The path + * is also canonicalised. + */ Path absPath(Path path, std::optional dir = {}, bool resolveSymlinks = false); -/* Canonicalise a path by removing all `.' or `..' 
components and - double or trailing slashes. Optionally resolves all symlink - components such that each component of the resulting path is *not* - a symbolic link. */ +/** + * Canonicalise a path by removing all `.` or `..` components and + * double or trailing slashes. Optionally resolves all symlink + * components such that each component of the resulting path is *not* + * a symbolic link. + */ Path canonPath(PathView path, bool resolveSymlinks = false); -/* Return the directory part of the given canonical path, i.e., - everything before the final `/'. If the path is the root or an - immediate child thereof (e.g., `/foo'), this means `/' - is returned.*/ +/** + * @return The directory part of the given canonical path, i.e., + * everything before the final `/`. If the path is the root or an + * immediate child thereof (e.g., `/foo`), this means `/` + * is returned. + */ Path dirOf(const PathView path); -/* Return the base name of the given canonical path, i.e., everything - following the final `/' (trailing slashes are removed). */ +/** + * @return the base name of the given canonical path, i.e., everything + * following the final `/` (trailing slashes are removed). + */ std::string_view baseNameOf(std::string_view path); -/* Perform tilde expansion on a path. */ +/** + * Perform tilde expansion on a path. + */ std::string expandTilde(std::string_view path); -/* Check whether 'path' is a descendant of 'dir'. Both paths must be - canonicalized. */ +/** + * Check whether 'path' is a descendant of 'dir'. Both paths must be + * canonicalized. + */ bool isInDir(std::string_view path, std::string_view dir); -/* Check whether 'path' is equal to 'dir' or a descendant of - 'dir'. Both paths must be canonicalized. */ +/** + * Check whether 'path' is equal to 'dir' or a descendant of + * 'dir'. Both paths must be canonicalized. + */ bool isDirOrInDir(std::string_view path, std::string_view dir); -/* Get status of `path'. */ +/** + * Get status of `path`. + */ struct stat stat(const Path & path); struct stat lstat(const Path & path); -/* Return true iff the given path exists. */ +/** + * @return true iff the given path exists. + */ bool pathExists(const Path & path); -/* Read the contents (target) of a symbolic link. The result is not - in any way canonicalised. */ +/** + * Read the contents (target) of a symbolic link. The result is not + * in any way canonicalised. + */ Path readLink(const Path & path); bool isLink(const Path & path); -/* Read the contents of a directory. The entries `.' and `..' are - removed. */ +/** + * Read the contents of a directory. The entries `.` and `..` are + * removed. + */ struct DirEntry { std::string name; ino_t ino; - unsigned char type; // one of DT_* + /** + * one of DT_* + */ + unsigned char type; DirEntry(std::string name, ino_t ino, unsigned char type) : name(std::move(name)), ino(ino), type(type) { } }; @@ -109,74 +150,110 @@ DirEntries readDirectory(const Path & path); unsigned char getFileType(const Path & path); -/* Read the contents of a file into a string. */ +/** + * Read the contents of a file into a string. + */ std::string readFile(int fd); std::string readFile(const Path & path); void readFile(const Path & path, Sink & sink); -/* Write a string to a file. */ +/** + * Write a string to a file. 
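The dirOf()/baseNameOf() contracts spelled out a few hunks above (dirOf of "/foo" is "/", baseNameOf drops trailing slashes first) are easy to get subtly wrong, so here is a standalone sketch that mirrors the documented behaviour for canonical paths. It is an illustration only, not the libutil implementation; the "." fallback for slash-less input is an assumption of the sketch.

#include <cassert>
#include <iostream>
#include <string>
#include <string_view>

// Directory part of a canonical path: everything before the final '/'.
// For the root and its immediate children (e.g. "/foo") the result is "/".
static std::string dirPart(std::string_view path)
{
    auto pos = path.rfind('/');
    if (pos == std::string_view::npos) return ".";   // no slash at all (assumption)
    if (pos == 0) return "/";                        // root or "/foo"
    return std::string(path.substr(0, pos));
}

// Base name of a canonical path: everything after the final '/',
// with trailing slashes dropped first.
static std::string basePart(std::string_view path)
{
    while (path.size() > 1 && path.back() == '/') path.remove_suffix(1);
    auto pos = path.rfind('/');
    return std::string(pos == std::string_view::npos ? path : path.substr(pos + 1));
}

int main()
{
    assert(dirPart("/foo/bar") == "/foo");
    assert(dirPart("/foo") == "/");
    assert(basePart("/foo/bar/") == "bar");
    std::cout << "dir=" << dirPart("/nix/store") << " base=" << basePart("/nix/store") << "\n";
    // prints: dir=/nix base=store
}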
+ */ void writeFile(const Path & path, std::string_view s, mode_t mode = 0666, bool sync = false); void writeFile(const Path & path, Source & source, mode_t mode = 0666, bool sync = false); -/* Flush a file's parent directory to disk */ +/** + * Flush a file's parent directory to disk + */ void syncParent(const Path & path); -/* Read a line from a file descriptor. */ +/** + * Read a line from a file descriptor. + */ std::string readLine(int fd); -/* Write a line to a file descriptor. */ +/** + * Write a line to a file descriptor. + */ void writeLine(int fd, std::string s); -/* Delete a path; i.e., in the case of a directory, it is deleted - recursively. It's not an error if the path does not exist. The - second variant returns the number of bytes and blocks freed. */ +/** + * Delete a path; i.e., in the case of a directory, it is deleted + * recursively. It's not an error if the path does not exist. The + * second variant returns the number of bytes and blocks freed. + */ void deletePath(const Path & path); void deletePath(const Path & path, uint64_t & bytesFreed); std::string getUserName(); -/* Return the given user's home directory from /etc/passwd. */ +/** + * @return the given user's home directory from /etc/passwd. + */ Path getHomeOf(uid_t userId); -/* Return $HOME or the user's home directory from /etc/passwd. */ +/** + * @return $HOME or the user's home directory from /etc/passwd. + */ Path getHome(); -/* Return $XDG_CACHE_HOME or $HOME/.cache. */ +/** + * @return $XDG_CACHE_HOME or $HOME/.cache. + */ Path getCacheDir(); -/* Return $XDG_CONFIG_HOME or $HOME/.config. */ +/** + * @return $XDG_CONFIG_HOME or $HOME/.config. + */ Path getConfigDir(); -/* Return the directories to search for user configuration files */ +/** + * @return the directories to search for user configuration files + */ std::vector getConfigDirs(); -/* Return $XDG_DATA_HOME or $HOME/.local/share. */ +/** + * @return $XDG_DATA_HOME or $HOME/.local/share. + */ Path getDataDir(); -/* Return the path of the current executable. */ +/** + * @return the path of the current executable. + */ std::optional getSelfExe(); -/* Return $XDG_STATE_HOME or $HOME/.local/state. */ +/** + * @return $XDG_STATE_HOME or $HOME/.local/state. + */ Path getStateDir(); -/* Create the Nix state directory and return the path to it. */ +/** + * Create the Nix state directory and return the path to it. + */ Path createNixStateDir(); -/* Create a directory and all its parents, if necessary. Returns the - list of created directories, in order of creation. */ +/** + * Create a directory and all its parents, if necessary. Returns the + * list of created directories, in order of creation. + */ Paths createDirs(const Path & path); inline Paths createDirs(PathView path) { return createDirs(Path(path)); } -/* Create a symlink. */ +/** + * Create a symlink. + */ void createSymlink(const Path & target, const Path & link, std::optional mtime = {}); -/* Atomically create or replace a symlink. */ +/** + * Atomically create or replace a symlink. + */ void replaceSymlink(const Path & target, const Path & link, std::optional mtime = {}); @@ -192,24 +269,32 @@ void renameFile(const Path & src, const Path & dst); void moveFile(const Path & src, const Path & dst); -/* Wrappers arount read()/write() that read/write exactly the - requested number of bytes. */ +/** + * Wrappers arount read()/write() that read/write exactly the + * requested number of bytes. 
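The readFull()/writeFull() wrappers documented below exist because POSIX read() and write() may transfer fewer bytes than requested or be interrupted by a signal. A hedged sketch of the read side of that retry loop (not the exact libutil code) looks like this:

#include <cerrno>
#include <cstddef>
#include <cstring>
#include <iostream>
#include <stdexcept>
#include <unistd.h>

// Keep calling read() until exactly `count` bytes are stored in `buf`,
// retrying on EINTR and failing on error or premature end of file.
static void readExactly(int fd, char * buf, std::size_t count)
{
    while (count > 0) {
        ssize_t res = read(fd, buf, count);
        if (res == -1) {
            if (errno == EINTR) continue;               // interrupted: just retry
            throw std::runtime_error(std::strerror(errno));
        }
        if (res == 0)
            throw std::runtime_error("unexpected end of file");
        buf += res;
        count -= res;
    }
}

int main()
{
    int fds[2];
    if (pipe(fds) == -1) return 1;
    const char msg[] = "12345678";
    if (write(fds[1], msg, 8) != 8) return 1;           // small write, succeeds at once

    char buf[8];
    readExactly(fds[0], buf, sizeof buf);               // blocks until all 8 bytes arrived
    std::cout.write(buf, sizeof buf) << "\n";
    close(fds[0]);
    close(fds[1]);
}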
+ */ void readFull(int fd, char * buf, size_t count); void writeFull(int fd, std::string_view s, bool allowInterrupts = true); MakeError(EndOfFile, Error); -/* Read a file descriptor until EOF occurs. */ +/** + * Read a file descriptor until EOF occurs. + */ std::string drainFD(int fd, bool block = true, const size_t reserveSize=0); void drainFD(int fd, Sink & sink, bool block = true); -/* If cgroups are active, attempt to calculate the number of CPUs available. - If cgroups are unavailable or if cpu.max is set to "max", return 0. */ +/** + * If cgroups are active, attempt to calculate the number of CPUs available. + * If cgroups are unavailable or if cpu.max is set to "max", return 0. + */ unsigned int getMaxCPU(); -/* Automatic cleanup of resources. */ +/** + * Automatic cleanup of resources. + */ class AutoDelete @@ -247,11 +332,15 @@ public: }; -/* Create a temporary directory. */ +/** + * Create a temporary directory. + */ Path createTempDir(const Path & tmpRoot = "", const Path & prefix = "nix", bool includePid = true, bool useGlobalCounter = true, mode_t mode = 0755); -/* Create a temporary file, returning a file handle and its path. */ +/** + * Create a temporary file, returning a file handle and its path. + */ std::pair createTempFile(const Path & prefix = "nix"); @@ -294,27 +383,36 @@ public: }; -/* Kill all processes running under the specified uid by sending them - a SIGKILL. */ +/** + * Kill all processes running under the specified uid by sending them + * a SIGKILL. + */ void killUser(uid_t uid); -/* Fork a process that runs the given function, and return the child - pid to the caller. */ +/** + * Fork a process that runs the given function, and return the child + * pid to the caller. + */ struct ProcessOptions { std::string errorPrefix = ""; bool dieWithParent = true; bool runExitHandlers = false; bool allowVfork = false; - int cloneFlags = 0; // use clone() with the specified flags (Linux only) + /** + * use clone() with the specified flags (Linux only) + */ + int cloneFlags = 0; }; pid_t startProcess(std::function fun, const ProcessOptions & options = ProcessOptions()); -/* Run a program and return its stdout in a string (i.e., like the - shell backtick operator). */ +/** + * Run a program and return its stdout in a string (i.e., like the + * shell backtick operator). + */ std::string runProgram(Path program, bool searchPath = false, const Strings & args = Strings(), const std::optional & input = {}); @@ -339,25 +437,37 @@ std::pair runProgram(RunOptions && options); void runProgram2(const RunOptions & options); -/* Change the stack size. */ +/** + * Change the stack size. + */ void setStackSize(size_t stackSize); -/* Restore the original inherited Unix process context (such as signal - masks, stack size). */ +/** + * Restore the original inherited Unix process context (such as signal + * masks, stack size). + + * See startSignalHandlerThread(), saveSignalMask(). + */ void restoreProcessContext(bool restoreMounts = true); -/* Save the current mount namespace. Ignored if called more than - once. */ +/** + * Save the current mount namespace. Ignored if called more than + * once. + */ void saveMountNamespace(); -/* Restore the mount namespace saved by saveMountNamespace(). Ignored - if saveMountNamespace() was never called. */ +/** + * Restore the mount namespace saved by saveMountNamespace(). Ignored + * if saveMountNamespace() was never called. 
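restoreProcessContext() above puts back, among other things, the signal mask captured by saveSignalMask(). As a hedged standalone illustration of the underlying POSIX save/restore pattern (plain sigprocmask calls, not Nix's helpers):

#include <csignal>
#include <iostream>

int main()
{
    // Save the current mask while blocking SIGINT for a critical section.
    sigset_t blockSet, savedMask;
    sigemptyset(&blockSet);
    sigaddset(&blockSet, SIGINT);
    if (sigprocmask(SIG_BLOCK, &blockSet, &savedMask) == -1) return 1;

    std::cout << "SIGINT is blocked here; a child forked now would inherit this mask\n";

    // Restore exactly what was saved, which is what restoreProcessContext()
    // does with the mask recorded by saveSignalMask().
    if (sigprocmask(SIG_SETMASK, &savedMask, nullptr) == -1) return 1;
    std::cout << "original signal mask restored\n";
}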
+ */ void restoreMountNamespace(); -/* Cause this thread to not share any FS attributes with the main - thread, because this causes setns() in restoreMountNamespace() to - fail. */ +/** + * Cause this thread to not share any FS attributes with the main + * thread, because this causes setns() in restoreMountNamespace() to + * fail. + */ void unshareFilesystem(); @@ -372,16 +482,22 @@ public: { } }; -/* Convert a list of strings to a null-terminated vector of char - *'s. The result must not be accessed beyond the lifetime of the - list of strings. */ +/** + * Convert a list of strings to a null-terminated vector of `char + * *`s. The result must not be accessed beyond the lifetime of the + * list of strings. + */ std::vector stringsToCharPtrs(const Strings & ss); -/* Close all file descriptors except those listed in the given set. - Good practice in child processes. */ +/** + * Close all file descriptors except those listed in the given set. + * Good practice in child processes. + */ void closeMostFDs(const std::set & exceptions); -/* Set the close-on-exec flag for the given file descriptor. */ +/** + * Set the close-on-exec flag for the given file descriptor. + */ void closeOnExec(int fd); @@ -407,12 +523,16 @@ MakeError(Interrupted, BaseError); MakeError(FormatError, Error); -/* String tokenizer. */ +/** + * String tokenizer. + */ template C tokenizeString(std::string_view s, std::string_view separators = " \t\n\r"); -/* Concatenate the given strings with a separator between the - elements. */ +/** + * Concatenate the given strings with a separator between the + * elements. + */ template std::string concatStringsSep(const std::string_view sep, const C & ss) { @@ -437,7 +557,9 @@ auto concatStrings(Parts && ... parts) } -/* Add quotes around a collection of strings. */ +/** + * Add quotes around a collection of strings. + */ template Strings quoteStrings(const C & c) { Strings res; @@ -446,17 +568,23 @@ template Strings quoteStrings(const C & c) return res; } - -/* Remove trailing whitespace from a string. FIXME: return - std::string_view. */ +/** + * Remove trailing whitespace from a string. + * + * \todo return std::string_view. + */ std::string chomp(std::string_view s); -/* Remove whitespace from the start and end of a string. */ +/** + * Remove whitespace from the start and end of a string. + */ std::string trim(std::string_view s, std::string_view whitespace = " \n\r\t"); -/* Replace all occurrences of a string inside another string. */ +/** + * Replace all occurrences of a string inside another string. + */ std::string replaceStrings( std::string s, std::string_view from, @@ -466,14 +594,18 @@ std::string replaceStrings( std::string rewriteStrings(std::string s, const StringMap & rewrites); -/* Convert the exit status of a child as returned by wait() into an - error string. */ +/** + * Convert the exit status of a child as returned by wait() into an + * error string. + */ std::string statusToString(int status); bool statusOk(int status); -/* Parse a string into an integer. */ +/** + * Parse a string into an integer. + */ template std::optional string2Int(const std::string_view s) { @@ -486,8 +618,10 @@ std::optional string2Int(const std::string_view s) } } -/* Like string2Int(), but support an optional suffix 'K', 'M', 'G' or - 'T' denoting a binary unit prefix. */ +/** + * Like string2Int(), but support an optional suffix 'K', 'M', 'G' or + * 'T' denoting a binary unit prefix. 
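The suffix handling described above ('K', 'M', 'G', 'T' as binary unit prefixes) is easy to sketch standalone; the function name below is illustrative and the result type is fixed to uint64_t rather than the template parameter N:

#include <cstdint>
#include <iostream>
#include <string>
#include <string_view>

// Parse an integer with an optional binary-unit suffix, e.g. "64K" -> 65536.
static uint64_t parseWithUnitPrefix(std::string_view s)
{
    uint64_t multiplier = 1;
    if (!s.empty()) {
        switch (s.back()) {
            case 'K': multiplier = 1ULL << 10; break;
            case 'M': multiplier = 1ULL << 20; break;
            case 'G': multiplier = 1ULL << 30; break;
            case 'T': multiplier = 1ULL << 40; break;
        }
        if (multiplier != 1) s.remove_suffix(1);           // drop the suffix before parsing
    }
    return std::stoull(std::string(s)) * multiplier;       // throws on malformed input
}

int main()
{
    std::cout << parseWithUnitPrefix("100") << "\n";   // 100
    std::cout << parseWithUnitPrefix("64K") << "\n";   // 65536
    std::cout << parseWithUnitPrefix("2G") << "\n";    // 2147483648
}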
+ */ template N string2IntWithUnitPrefix(std::string_view s) { @@ -508,7 +642,9 @@ N string2IntWithUnitPrefix(std::string_view s) throw UsageError("'%s' is not an integer", s); } -/* Parse a string into a float. */ +/** + * Parse a string into a float. + */ template std::optional string2Float(const std::string_view s) { @@ -520,7 +656,9 @@ std::optional string2Float(const std::string_view s) } -/* Convert a little-endian integer to host order. */ +/** + * Convert a little-endian integer to host order. + */ template T readLittleEndian(unsigned char * p) { @@ -532,66 +670,90 @@ T readLittleEndian(unsigned char * p) } -/* Return true iff `s' starts with `prefix'. */ +/** + * @return true iff `s` starts with `prefix`. + */ bool hasPrefix(std::string_view s, std::string_view prefix); -/* Return true iff `s' ends in `suffix'. */ +/** + * @return true iff `s` ends in `suffix`. + */ bool hasSuffix(std::string_view s, std::string_view suffix); -/* Convert a string to lower case. */ +/** + * Convert a string to lower case. + */ std::string toLower(const std::string & s); -/* Escape a string as a shell word. */ +/** + * Escape a string as a shell word. + */ std::string shellEscape(const std::string_view s); -/* Exception handling in destructors: print an error message, then - ignore the exception. */ +/** + * Exception handling in destructors: print an error message, then + * ignore the exception. + */ void ignoreException(Verbosity lvl = lvlError); -/* Tree formatting. */ +/** + * Tree formatting. + */ constexpr char treeConn[] = "├───"; constexpr char treeLast[] = "└───"; constexpr char treeLine[] = "│ "; constexpr char treeNull[] = " "; -/* Determine whether ANSI escape sequences are appropriate for the - present output. */ +/** + * Determine whether ANSI escape sequences are appropriate for the + * present output. + */ bool shouldANSI(); -/* Truncate a string to 'width' printable characters. If 'filterAll' - is true, all ANSI escape sequences are filtered out. Otherwise, - some escape sequences (such as colour setting) are copied but not - included in the character count. Also, tabs are expanded to - spaces. */ -std::string filterANSIEscapes(const std::string & s, +/** + * Truncate a string to 'width' printable characters. If 'filterAll' + * is true, all ANSI escape sequences are filtered out. Otherwise, + * some escape sequences (such as colour setting) are copied but not + * included in the character count. Also, tabs are expanded to + * spaces. + */ +std::string filterANSIEscapes(std::string_view s, bool filterAll = false, unsigned int width = std::numeric_limits::max()); -/* Base64 encoding/decoding. */ +/** + * Base64 encoding/decoding. + */ std::string base64Encode(std::string_view s); std::string base64Decode(std::string_view s); -/* Remove common leading whitespace from the lines in the string - 's'. For example, if every line is indented by at least 3 spaces, - then we remove 3 spaces from the start of every line. */ +/** + * Remove common leading whitespace from the lines in the string + * 's'. For example, if every line is indented by at least 3 spaces, + * then we remove 3 spaces from the start of every line. + */ std::string stripIndentation(std::string_view s); -/* Get the prefix of 's' up to and excluding the next line break (LF - optionally preceded by CR), and the remainder following the line - break. */ +/** + * Get the prefix of 's' up to and excluding the next line break (LF + * optionally preceded by CR), and the remainder following the line + * break. 
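The getLine() behaviour documented above (split at the next LF, dropping an optional preceding CR) in a standalone, illustrative form:

#include <iostream>
#include <string_view>
#include <utility>

// Split off the first line: everything up to (and excluding) the next LF,
// dropping an optional preceding CR, plus the remainder after the line break.
static std::pair<std::string_view, std::string_view> firstLine(std::string_view s)
{
    auto nl = s.find('\n');
    if (nl == std::string_view::npos) return {s, {}};
    auto line = s.substr(0, nl);
    if (!line.empty() && line.back() == '\r') line.remove_suffix(1);
    return {line, s.substr(nl + 1)};
}

int main()
{
    auto [line, rest] = firstLine("first\r\nsecond\nthird");
    std::cout << "line=" << line << "\n";   // line=first
    std::cout << "rest=" << rest << "\n";   // rest is "second\nthird", printed over two lines
}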
+ */ std::pair getLine(std::string_view s); -/* Get a value for the specified key from an associate container. */ +/** + * Get a value for the specified key from an associate container. + */ template const typename T::mapped_type * get(const T & map, const typename T::key_type & key) { @@ -608,7 +770,9 @@ typename T::mapped_type * get(T & map, const typename T::key_type & key) return &i->second; } -/* Get a value for the specified key from an associate container, or a default value if the key isn't present. */ +/** + * Get a value for the specified key from an associate container, or a default value if the key isn't present. + */ template const typename T::mapped_type & getOr(T & map, const typename T::key_type & key, @@ -619,7 +783,9 @@ const typename T::mapped_type & getOr(T & map, return i->second; } -/* Remove and return the first item from a container. */ +/** + * Remove and return the first item from a container. + */ template std::optional remove_begin(T & c) { @@ -631,7 +797,9 @@ std::optional remove_begin(T & c) } -/* Remove and return the first item from a container. */ +/** + * Remove and return the first item from a container. + */ template std::optional pop(T & c) { @@ -646,25 +814,48 @@ template class Callback; -/* Start a thread that handles various signals. Also block those signals - on the current thread (and thus any threads created by it). */ +/** + * Start a thread that handles various signals. Also block those signals + * on the current thread (and thus any threads created by it). + * Saves the signal mask before changing the mask to block those signals. + * See saveSignalMask(). + */ void startSignalHandlerThread(); +/** + * Saves the signal mask, which is the signal mask that nix will restore + * before creating child processes. + * See setChildSignalMask() to set an arbitrary signal mask instead of the + * current mask. + */ +void saveSignalMask(); + +/** + * Sets the signal mask. Like saveSignalMask() but for a signal set that doesn't + * necessarily match the current thread's mask. + * See saveSignalMask() to set the saved mask to the current mask. + */ +void setChildSignalMask(sigset_t *sigs); + struct InterruptCallback { virtual ~InterruptCallback() { }; }; -/* Register a function that gets called on SIGINT (in a non-signal - context). */ +/** + * Register a function that gets called on SIGINT (in a non-signal + * context). + */ std::unique_ptr createInterruptCallback( std::function callback); void triggerInterrupt(); -/* A RAII class that causes the current thread to receive SIGUSR1 when - the signal handler thread receives SIGINT. That is, this allows - SIGINT to be multiplexed to multiple threads. */ +/** + * A RAII class that causes the current thread to receive SIGUSR1 when + * the signal handler thread receives SIGINT. That is, this allows + * SIGINT to be multiplexed to multiple threads. + */ struct ReceiveInterrupts { pthread_t target; @@ -678,8 +869,10 @@ struct ReceiveInterrupts -/* A RAII helper that increments a counter on construction and - decrements it on destruction. */ +/** + * A RAII helper that increments a counter on construction and + * decrements it on destruction. + */ template struct MaintainCount { @@ -690,33 +883,50 @@ struct MaintainCount }; -/* Return the number of rows and columns of the terminal. */ +/** + * @return the number of rows and columns of the terminal. + */ std::pair getWindowSize(); -/* Used in various places. */ +/** + * Used in various places. 
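The MaintainCount helper documented a few hunks above is a classic RAII counter; a standalone sketch of the same idea, with illustrative names, is:

#include <atomic>
#include <cassert>
#include <iostream>

// RAII helper: bump a counter while an operation is in flight and drop it
// again when the guard goes out of scope, even on early return or exception.
template<typename T>
struct CountGuard
{
    T & counter;
    CountGuard(T & counter) : counter(counter) { counter++; }
    ~CountGuard() { counter--; }
};

std::atomic<int> activeDownloads{0};

void download()
{
    CountGuard<std::atomic<int>> guard(activeDownloads);
    assert(activeDownloads == 1);   // counted while the work runs
    // ... do the actual transfer ...
}

int main()
{
    download();
    std::cout << "active after return: " << activeDownloads << "\n";   // 0
}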
+ */ typedef std::function PathFilter; extern PathFilter defaultPathFilter; -/* Common initialisation performed in child processes. */ -void commonChildInit(Pipe & logPipe); +/** + * Common initialisation performed in child processes. + */ +void commonChildInit(); -/* Create a Unix domain socket. */ +/** + * Create a Unix domain socket. + */ AutoCloseFD createUnixDomainSocket(); -/* Create a Unix domain socket in listen mode. */ +/** + * Create a Unix domain socket in listen mode. + */ AutoCloseFD createUnixDomainSocket(const Path & path, mode_t mode); -/* Bind a Unix domain socket to a path. */ +/** + * Bind a Unix domain socket to a path. + */ void bind(int fd, const std::string & path); -/* Connect to a Unix domain socket. */ +/** + * Connect to a Unix domain socket. + */ void connect(int fd, const std::string & path); -// A Rust/Python-like enumerate() iterator adapter. -// Borrowed from http://reedbeta.com/blog/python-like-enumerate-in-cpp17. +/** + * A Rust/Python-like enumerate() iterator adapter. + * + * Borrowed from http://reedbeta.com/blog/python-like-enumerate-in-cpp17. + */ template ())), typename = decltype(std::end(std::declval()))> @@ -726,23 +936,25 @@ constexpr auto enumerate(T && iterable) { size_t i; TIter iter; - bool operator != (const iterator & other) const { return iter != other.iter; } - void operator ++ () { ++i; ++iter; } - auto operator * () const { return std::tie(i, *iter); } + constexpr bool operator != (const iterator & other) const { return iter != other.iter; } + constexpr void operator ++ () { ++i; ++iter; } + constexpr auto operator * () const { return std::tie(i, *iter); } }; struct iterable_wrapper { T iterable; - auto begin() { return iterator{ 0, std::begin(iterable) }; } - auto end() { return iterator{ 0, std::end(iterable) }; } + constexpr auto begin() { return iterator{ 0, std::begin(iterable) }; } + constexpr auto end() { return iterator{ 0, std::end(iterable) }; } }; return iterable_wrapper{ std::forward(iterable) }; } -// C++17 std::visit boilerplate +/** + * C++17 std::visit boilerplate + */ template struct overloaded : Ts... { using Ts::operator()...; }; template overloaded(Ts...) -> overloaded; @@ -750,8 +962,10 @@ template overloaded(Ts...) -> overloaded; std::string showBytes(uint64_t bytes); -/* Provide an addition operator between strings and string_views - inexplicably omitted from the standard library. */ +/** + * Provide an addition operator between strings and string_views + * inexplicably omitted from the standard library. 
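The "C++17 std::visit boilerplate" declared in the hunk above (the overloaded struct plus its deduction guide) is clearer with a usage example; the sketch below redeclares it locally so it is self-contained:

#include <iostream>
#include <string>
#include <variant>

// The usual C++17 "overloaded" trick: inherit operator() from a pack of
// lambdas so std::visit can dispatch on the active alternative.
template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;

int main()
{
    std::variant<int, std::string> v = std::string("flake.nix");

    std::visit(overloaded {
        [](int n) { std::cout << "integer: " << n << "\n"; },
        [](const std::string & s) { std::cout << "string: " << s << "\n"; },
    }, v);
    // prints: string: flake.nix
}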
+ */ inline std::string operator + (const std::string & s1, std::string_view s2) { auto s = s1; diff --git a/src/libutil/xml-writer.hh b/src/libutil/xml-writer.hh index 4c91adee6..74f53b7ca 100644 --- a/src/libutil/xml-writer.hh +++ b/src/libutil/xml-writer.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include #include diff --git a/src/nix-build/nix-build.cc b/src/nix-build/nix-build.cc index da76c2ace..6510df8f0 100644 --- a/src/nix-build/nix-build.cc +++ b/src/nix-build/nix-build.cc @@ -84,7 +84,6 @@ static void main_nix_build(int argc, char * * argv) auto interactive = isatty(STDIN_FILENO) && isatty(STDERR_FILENO); Strings attrPaths; Strings left; - RepairFlag repair = NoRepair; BuildMode buildMode = bmNormal; bool readStdin = false; @@ -169,11 +168,6 @@ static void main_nix_build(int argc, char * * argv) else if (*arg == "--dry-run") dryRun = true; - else if (*arg == "--repair") { - repair = Repair; - buildMode = bmRepair; - } - else if (*arg == "--run-env") // obsolete runEnv = true; @@ -219,9 +213,9 @@ static void main_nix_build(int argc, char * * argv) // read the shebang to understand which packages to read from. Since // this is handled via nix-shell -p, we wrap our ruby script execution // in ruby -e 'load' which ignores the shebangs. - envCommand = (format("exec %1% %2% -e 'load(ARGV.shift)' -- %3% %4%") % execArgs % interpreter % shellEscape(script) % joined.str()).str(); + envCommand = fmt("exec %1% %2% -e 'load(ARGV.shift)' -- %3% %4%", execArgs, interpreter, shellEscape(script), joined.str()); } else { - envCommand = (format("exec %1% %2% %3% %4%") % execArgs % interpreter % shellEscape(script) % joined.str()).str(); + envCommand = fmt("exec %1% %2% %3% %4%", execArgs, interpreter, shellEscape(script), joined.str()); } } @@ -249,7 +243,8 @@ static void main_nix_build(int argc, char * * argv) auto evalStore = myArgs.evalStoreUrl ? openStore(*myArgs.evalStoreUrl) : store; auto state = std::make_unique(myArgs.searchPath, evalStore, store); - state->repair = repair; + state->repair = myArgs.repair; + if (myArgs.repair) buildMode = bmRepair; auto autoArgs = myArgs.getAutoArgs(*state); @@ -289,7 +284,7 @@ static void main_nix_build(int argc, char * * argv) else for (auto i : left) { if (fromArgs) - exprs.push_back(state->parseExprFromString(std::move(i), absPath("."))); + exprs.push_back(state->parseExprFromString(std::move(i), state->rootPath(CanonPath::fromCwd()))); else { auto absolute = i; try { @@ -385,7 +380,9 @@ static void main_nix_build(int argc, char * * argv) if (!shell) { try { - auto expr = state->parseExprFromString("(import {}).bashInteractive", absPath(".")); + auto expr = state->parseExprFromString( + "(import {}).bashInteractive", + state->rootPath(CanonPath::fromCwd())); Value v; state->eval(expr, v); @@ -440,7 +437,7 @@ static void main_nix_build(int argc, char * * argv) shell = store->printStorePath(shellDrvOutputs.at("out").value()) + "/bin/bash"; } - if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) { + if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) { auto resolvedDrv = drv.tryResolve(*store); assert(resolvedDrv && "Successfully resolved the derivation"); drv = *resolvedDrv; diff --git a/src/nix-channel/nix-channel.cc b/src/nix-channel/nix-channel.cc index 338a7d18e..740737ffe 100755 --- a/src/nix-channel/nix-channel.cc +++ b/src/nix-channel/nix-channel.cc @@ -168,7 +168,8 @@ static int main_nix_channel(int argc, char ** argv) nixDefExpr = settings.useXDGBaseDirectories ? 
createNixStateDir() + "/defexpr" : home + "/.nix-defexpr"; // Figure out the name of the channels profile. - profile = profilesDir() + "/channels"; + profile = profilesDir() + "/channels"; + createDirs(dirOf(profile)); enum { cNone, diff --git a/src/nix-collect-garbage/nix-collect-garbage.cc b/src/nix-collect-garbage/nix-collect-garbage.cc index e413faffe..3cc57af4e 100644 --- a/src/nix-collect-garbage/nix-collect-garbage.cc +++ b/src/nix-collect-garbage/nix-collect-garbage.cc @@ -40,7 +40,7 @@ void removeOldGenerations(std::string dir) throw; } if (link.find("link") != std::string::npos) { - printInfo(format("removing old generations of profile %1%") % path); + printInfo("removing old generations of profile %s", path); if (deleteOlderThan != "") deleteGenerationsOlderThan(path, deleteOlderThan, dryRun); else @@ -77,8 +77,7 @@ static int main_nix_collect_garbage(int argc, char * * argv) return true; }); - auto profilesDir = settings.nixStateDir + "/profiles"; - if (removeOld) removeOldGenerations(profilesDir); + if (removeOld) removeOldGenerations(profilesDir()); // Run the actual garbage collector. if (!dryRun) { diff --git a/src/nix-copy-closure/nix-copy-closure.cc b/src/nix-copy-closure/nix-copy-closure.cc index 841d50fd3..7f2bb93b6 100755 --- a/src/nix-copy-closure/nix-copy-closure.cc +++ b/src/nix-copy-closure/nix-copy-closure.cc @@ -22,7 +22,7 @@ static int main_nix_copy_closure(int argc, char ** argv) printVersion("nix-copy-closure"); else if (*arg == "--gzip" || *arg == "--bzip2" || *arg == "--xz") { if (*arg != "--gzip") - printMsg(lvlError, format("Warning: '%1%' is not implemented, falling back to gzip") % *arg); + warn("'%1%' is not implemented, falling back to gzip", *arg); gzip = true; } else if (*arg == "--from") toMode = false; diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc index 0daf374de..5e94f2d14 100644 --- a/src/nix-env/nix-env.cc +++ b/src/nix-env/nix-env.cc @@ -44,7 +44,7 @@ typedef enum { struct InstallSourceInfo { InstallSourceType type; - Path nixExprPath; /* for srcNixExprDrvs, srcNixExprs */ + std::shared_ptr nixExprPath; /* for srcNixExprDrvs, srcNixExprs */ Path profile; /* for srcProfile */ std::string systemFilter; /* for srcNixExprDrvs */ Bindings * autoArgs; @@ -92,9 +92,11 @@ static bool parseInstallSourceOptions(Globals & globals, } -static bool isNixExpr(const Path & path, struct stat & st) +static bool isNixExpr(const SourcePath & path, struct InputAccessor::Stat & st) { - return S_ISREG(st.st_mode) || (S_ISDIR(st.st_mode) && pathExists(path + "/default.nix")); + return + st.type == InputAccessor::tRegular + || (st.type == InputAccessor::tDirectory && (path + "default.nix").pathExists()); } @@ -102,10 +104,10 @@ static constexpr size_t maxAttrs = 1024; static void getAllExprs(EvalState & state, - const Path & path, StringSet & seen, BindingsBuilder & attrs) + const SourcePath & path, StringSet & seen, BindingsBuilder & attrs) { StringSet namesSorted; - for (auto & i : readDirectory(path)) namesSorted.insert(i.name); + for (auto & [name, _] : path.readDirectory()) namesSorted.insert(name); for (auto & i : namesSorted) { /* Ignore the manifest.nix used by profiles. This is @@ -113,13 +115,16 @@ static void getAllExprs(EvalState & state, are implemented using profiles). 
*/ if (i == "manifest.nix") continue; - Path path2 = path + "/" + i; + SourcePath path2 = path + i; - struct stat st; - if (stat(path2.c_str(), &st) == -1) + InputAccessor::Stat st; + try { + st = path2.resolveSymlinks().lstat(); + } catch (Error &) { continue; // ignore dangling symlinks in ~/.nix-defexpr + } - if (isNixExpr(path2, st) && (!S_ISREG(st.st_mode) || hasSuffix(path2, ".nix"))) { + if (isNixExpr(path2, st) && (st.type != InputAccessor::tRegular || hasSuffix(path2.baseName(), ".nix"))) { /* Strip off the `.nix' filename suffix (if applicable), otherwise the attribute cannot be selected with the `-A' option. Useful if you want to stick a Nix @@ -129,21 +134,20 @@ static void getAllExprs(EvalState & state, attrName = std::string(attrName, 0, attrName.size() - 4); if (!seen.insert(attrName).second) { std::string suggestionMessage = ""; - if (path2.find("channels") != std::string::npos && path.find("channels") != std::string::npos) { + if (path2.path.abs().find("channels") != std::string::npos && path.path.abs().find("channels") != std::string::npos) suggestionMessage = fmt("\nsuggestion: remove '%s' from either the root channels or the user channels", attrName); - } printError("warning: name collision in input Nix expressions, skipping '%1%'" "%2%", path2, suggestionMessage); continue; } /* Load the expression on demand. */ auto vArg = state.allocValue(); - vArg->mkString(path2); + vArg->mkString(path2.path.abs()); if (seen.size() == maxAttrs) throw Error("too many Nix expressions in directory '%1%'", path); attrs.alloc(attrName).mkApp(&state.getBuiltin("import"), vArg); } - else if (S_ISDIR(st.st_mode)) + else if (st.type == InputAccessor::tDirectory) /* `path2' is a directory (with no default.nix in it); recurse into it. */ getAllExprs(state, path2, seen, attrs); @@ -152,11 +156,9 @@ static void getAllExprs(EvalState & state, -static void loadSourceExpr(EvalState & state, const Path & path, Value & v) +static void loadSourceExpr(EvalState & state, const SourcePath & path, Value & v) { - struct stat st; - if (stat(path.c_str(), &st) == -1) - throw SysError("getting information about '%1%'", path); + auto st = path.resolveSymlinks().lstat(); if (isNixExpr(path, st)) state.evalFile(path, v); @@ -167,7 +169,7 @@ static void loadSourceExpr(EvalState & state, const Path & path, Value & v) set flat, not nested, to make it easier for a user to have a ~/.nix-defexpr directory that includes some system-wide directory). */ - else if (S_ISDIR(st.st_mode)) { + else if (st.type == InputAccessor::tDirectory) { auto attrs = state.buildBindings(maxAttrs); attrs.alloc("_combineChannels").mkList(0); StringSet seen; @@ -179,7 +181,7 @@ static void loadSourceExpr(EvalState & state, const Path & path, Value & v) } -static void loadDerivations(EvalState & state, Path nixExprPath, +static void loadDerivations(EvalState & state, const SourcePath & nixExprPath, std::string systemFilter, Bindings & autoArgs, const std::string & pathPrefix, DrvInfos & elems) { @@ -390,7 +392,7 @@ static void queryInstSources(EvalState & state, /* Load the derivations from the (default or specified) Nix expression. 
*/ DrvInfos allElems; - loadDerivations(state, instSource.nixExprPath, + loadDerivations(state, *instSource.nixExprPath, instSource.systemFilter, *instSource.autoArgs, "", allElems); elems = filterBySelector(state, allElems, args, newestOnly); @@ -407,10 +409,10 @@ static void queryInstSources(EvalState & state, case srcNixExprs: { Value vArg; - loadSourceExpr(state, instSource.nixExprPath, vArg); + loadSourceExpr(state, *instSource.nixExprPath, vArg); for (auto & i : args) { - Expr * eFun = state.parseExprFromString(i, absPath(".")); + Expr * eFun = state.parseExprFromString(i, state.rootPath(CanonPath::fromCwd())); Value vFun, vTmp; state.eval(eFun, vFun); vTmp.mkApp(&vFun, &vArg); @@ -462,7 +464,7 @@ static void queryInstSources(EvalState & state, case srcAttrPath: { Value vRoot; - loadSourceExpr(state, instSource.nixExprPath, vRoot); + loadSourceExpr(state, *instSource.nixExprPath, vRoot); for (auto & i : args) { Value & v(*findAlongAttrPath(state, i, *instSource.autoArgs, vRoot).first); getDerivations(state, v, "", *instSource.autoArgs, elems, true); @@ -500,7 +502,7 @@ static bool keep(DrvInfo & drv) static void installDerivations(Globals & globals, const Strings & args, const Path & profile) { - debug(format("installing derivations")); + debug("installing derivations"); /* Get the set of user environment elements to be installed. */ DrvInfos newElems, newElemsTmp; @@ -579,7 +581,7 @@ typedef enum { utLt, utLeq, utEq, utAlways } UpgradeType; static void upgradeDerivations(Globals & globals, const Strings & args, UpgradeType upgradeType) { - debug(format("upgrading derivations")); + debug("upgrading derivations"); /* Upgrade works as follows: we take all currently installed derivations, and for any derivation matching any selector, look @@ -768,7 +770,7 @@ static void opSet(Globals & globals, Strings opFlags, Strings opArgs) if (globals.dryRun) return; globals.state->store->buildPaths(paths, globals.state->repair ? 
bmRepair : bmNormal); - debug(format("switching to new user environment")); + debug("switching to new user environment"); Path generation = createGeneration( ref(store2), globals.profile, @@ -960,7 +962,7 @@ static void queryJSON(Globals & globals, std::vector & elems, bool prin printError("derivation '%s' has invalid meta attribute '%s'", i.queryName(), j); metaObj[j] = nullptr; } else { - PathSet context; + NixStringContext context; metaObj[j] = printValueAsJSON(*globals.state, true, *v, noPos, context); } } @@ -1030,7 +1032,7 @@ static void opQuery(Globals & globals, Strings opFlags, Strings opArgs) installedElems = queryInstalled(*globals.state, globals.profile); if (source == sAvailable || compareVersions) - loadDerivations(*globals.state, globals.instSource.nixExprPath, + loadDerivations(*globals.state, *globals.instSource.nixExprPath, globals.instSource.systemFilter, *globals.instSource.autoArgs, attrPath, availElems); @@ -1093,7 +1095,7 @@ static void opQuery(Globals & globals, Strings opFlags, Strings opArgs) try { if (i.hasFailed()) continue; - //Activity act(*logger, lvlDebug, format("outputting query result '%1%'") % i.attrPath); + //Activity act(*logger, lvlDebug, "outputting query result '%1%'", i.attrPath); if (globals.prebuiltOnly && !validPaths.count(i.queryOutPath()) && @@ -1229,11 +1231,11 @@ static void opQuery(Globals & globals, Strings opFlags, Strings opArgs) xml.writeEmptyElement("meta", attrs2); } else if (v->type() == nInt) { attrs2["type"] = "int"; - attrs2["value"] = (format("%1%") % v->integer).str(); + attrs2["value"] = fmt("%1%", v->integer); xml.writeEmptyElement("meta", attrs2); } else if (v->type() == nFloat) { attrs2["type"] = "float"; - attrs2["value"] = (format("%1%") % v->fpoint).str(); + attrs2["value"] = fmt("%1%", v->fpoint); xml.writeEmptyElement("meta", attrs2); } else if (v->type() == nBool) { attrs2["type"] = "bool"; @@ -1337,11 +1339,11 @@ static void opListGenerations(Globals & globals, Strings opFlags, Strings opArgs for (auto & i : gens) { tm t; if (!localtime_r(&i.creationTime, &t)) throw Error("cannot convert time"); - cout << format("%|4| %|4|-%|02|-%|02| %|02|:%|02|:%|02| %||\n") - % i.number - % (t.tm_year + 1900) % (t.tm_mon + 1) % t.tm_mday - % t.tm_hour % t.tm_min % t.tm_sec - % (i.number == curGen ? "(current)" : ""); + logger->cout("%|4| %|4|-%|02|-%|02| %|02|:%|02|:%|02| %||", + i.number, + t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, + t.tm_hour, t.tm_min, t.tm_sec, + i.number == curGen ? "(current)" : ""); } } @@ -1387,28 +1389,27 @@ static int main_nix_env(int argc, char * * argv) { Strings opFlags, opArgs; Operation op = 0; - RepairFlag repair = NoRepair; + std::string opName; + bool showHelp = false; std::string file; Globals globals; globals.instSource.type = srcUnknown; - { - Path nixExprPath = settings.useXDGBaseDirectories ? createNixStateDir() + "/defexpr" : getHome() + "/.nix-defexpr"; - globals.instSource.nixExprPath = nixExprPath; - } globals.instSource.systemFilter = "*"; - if (!pathExists(globals.instSource.nixExprPath)) { + Path nixExprPath = settings.useXDGBaseDirectories ? 
createNixStateDir() + "/defexpr" : getHome() + "/.nix-defexpr"; + + if (!pathExists(nixExprPath)) { try { - createDirs(globals.instSource.nixExprPath); + createDirs(nixExprPath); replaceSymlink( - fmt("%s/profiles/per-user/%s/channels", settings.nixStateDir, getUserName()), - globals.instSource.nixExprPath + "/channels"); + defaultChannelsDir(), + nixExprPath + "/channels"); if (getuid() != 0) replaceSymlink( - fmt("%s/profiles/per-user/root/channels", settings.nixStateDir), - globals.instSource.nixExprPath + "/channels_root"); + rootChannelsDir(), + nixExprPath + "/channels_root"); } catch (Error &) { } } @@ -1426,37 +1427,59 @@ static int main_nix_env(int argc, char * * argv) Operation oldOp = op; if (*arg == "--help") - showManPage("nix-env"); + showHelp = true; else if (*arg == "--version") op = opVersion; - else if (*arg == "--install" || *arg == "-i") + else if (*arg == "--install" || *arg == "-i") { op = opInstall; + opName = "-install"; + } else if (*arg == "--force-name") // undocumented flag for nix-install-package globals.forceName = getArg(*arg, arg, end); - else if (*arg == "--uninstall" || *arg == "-e") + else if (*arg == "--uninstall" || *arg == "-e") { op = opUninstall; - else if (*arg == "--upgrade" || *arg == "-u") + opName = "-uninstall"; + } + else if (*arg == "--upgrade" || *arg == "-u") { op = opUpgrade; - else if (*arg == "--set-flag") + opName = "-upgrade"; + } + else if (*arg == "--set-flag") { op = opSetFlag; - else if (*arg == "--set") + opName = arg->substr(1); + } + else if (*arg == "--set") { op = opSet; - else if (*arg == "--query" || *arg == "-q") + opName = arg->substr(1); + } + else if (*arg == "--query" || *arg == "-q") { op = opQuery; + opName = "-query"; + } else if (*arg == "--profile" || *arg == "-p") globals.profile = absPath(getArg(*arg, arg, end)); else if (*arg == "--file" || *arg == "-f") file = getArg(*arg, arg, end); - else if (*arg == "--switch-profile" || *arg == "-S") + else if (*arg == "--switch-profile" || *arg == "-S") { op = opSwitchProfile; - else if (*arg == "--switch-generation" || *arg == "-G") + opName = "-switch-profile"; + } + else if (*arg == "--switch-generation" || *arg == "-G") { op = opSwitchGeneration; - else if (*arg == "--rollback") + opName = "-switch-generation"; + } + else if (*arg == "--rollback") { op = opRollback; - else if (*arg == "--list-generations") + opName = arg->substr(1); + } + else if (*arg == "--list-generations") { op = opListGenerations; - else if (*arg == "--delete-generations") + opName = arg->substr(1); + } + else if (*arg == "--delete-generations") { op = opDeleteGenerations; + opName = arg->substr(1); + } else if (*arg == "--dry-run") { printInfo("(dry run; not doing anything)"); globals.dryRun = true; @@ -1465,8 +1488,6 @@ static int main_nix_env(int argc, char * * argv) globals.instSource.systemFilter = getArg(*arg, arg, end); else if (*arg == "--prebuilt-only" || *arg == "-b") globals.prebuiltOnly = true; - else if (*arg == "--repair") - repair = Repair; else if (*arg != "" && arg->at(0) == '-') { opFlags.push_back(*arg); /* FIXME: hacky */ @@ -1485,15 +1506,18 @@ static int main_nix_env(int argc, char * * argv) myArgs.parseCmdline(argvToStrings(argc, argv)); + if (showHelp) showManPage("nix-env" + opName); if (!op) throw UsageError("no operation specified"); auto store = openStore(); globals.state = std::shared_ptr(new EvalState(myArgs.searchPath, store)); - globals.state->repair = repair; + globals.state->repair = myArgs.repair; - if (file != "") - globals.instSource.nixExprPath = 
lookupFileArg(*globals.state, file); + globals.instSource.nixExprPath = std::make_shared( + file != "" + ? lookupFileArg(*globals.state, file) + : globals.state->rootPath(CanonPath(nixExprPath))); globals.instSource.autoArgs = myArgs.getAutoArgs(*globals.state); diff --git a/src/nix-env/user-env.cc b/src/nix-env/user-env.cc index cad7f9c88..9e916abc4 100644 --- a/src/nix-env/user-env.cc +++ b/src/nix-env/user-env.cc @@ -19,10 +19,10 @@ DrvInfos queryInstalled(EvalState & state, const Path & userEnv) DrvInfos elems; if (pathExists(userEnv + "/manifest.json")) throw Error("profile '%s' is incompatible with 'nix-env'; please use 'nix profile' instead", userEnv); - Path manifestFile = userEnv + "/manifest.nix"; + auto manifestFile = userEnv + "/manifest.nix"; if (pathExists(manifestFile)) { Value v; - state.evalFile(manifestFile, v); + state.evalFile(state.rootPath(CanonPath(manifestFile)), v); Bindings & bindings(*state.allocBindings(0)); getDerivations(state, v, "", bindings, elems, false); } @@ -41,7 +41,7 @@ bool createUserEnv(EvalState & state, DrvInfos & elems, if (auto drvPath = i.queryDrvPath()) drvsToBuild.push_back({*drvPath}); - debug(format("building user environment dependencies")); + debug("building user environment dependencies"); state.store->buildPaths( toDerivedPaths(drvsToBuild), state.repair ? bmRepair : bmNormal); @@ -114,14 +114,12 @@ bool createUserEnv(EvalState & state, DrvInfos & elems, Value envBuilder; state.eval(state.parseExprFromString( #include "buildenv.nix.gen.hh" - , "/"), envBuilder); + , state.rootPath(CanonPath::root)), envBuilder); /* Construct a Nix expression that calls the user environment builder with the manifest as argument. */ auto attrs = state.buildBindings(3); - attrs.alloc("manifest").mkString( - state.store->printStorePath(manifestFile), - {state.store->printStorePath(manifestFile)}); + state.mkStorePathString(manifestFile, attrs.alloc("manifest")); attrs.insert(state.symbols.create("derivations"), &manifest); Value args; args.mkAttrs(attrs); @@ -132,7 +130,7 @@ bool createUserEnv(EvalState & state, DrvInfos & elems, /* Evaluate it. 
*/ debug("evaluating user environment builder"); state.forceValue(topLevel, [&]() { return topLevel.determinePos(noPos); }); - PathSet context; + NixStringContext context; Attr & aDrvPath(*topLevel.attrs->find(state.sDrvPath)); auto topLevelDrv = state.coerceToStorePath(aDrvPath.pos, *aDrvPath.value, context, ""); Attr & aOutPath(*topLevel.attrs->find(state.sOutPath)); @@ -159,7 +157,7 @@ bool createUserEnv(EvalState & state, DrvInfos & elems, return false; } - debug(format("switching to new user environment")); + debug("switching to new user environment"); Path generation = createGeneration(ref(store2), profile, topLevelOut); switchLink(profile, generation); } diff --git a/src/nix-env/user-env.hh b/src/nix-env/user-env.hh index 10646f713..af45d2d85 100644 --- a/src/nix-env/user-env.hh +++ b/src/nix-env/user-env.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "get-drvs.hh" diff --git a/src/nix-instantiate/nix-instantiate.cc b/src/nix-instantiate/nix-instantiate.cc index 6b5ba595d..446b27e66 100644 --- a/src/nix-instantiate/nix-instantiate.cc +++ b/src/nix-instantiate/nix-instantiate.cc @@ -43,7 +43,7 @@ void processExpr(EvalState & state, const Strings & attrPaths, Value & v(*findAlongAttrPath(state, i, autoArgs, vRoot).first); state.forceValue(v, [&]() { return v.determinePos(noPos); }); - PathSet context; + NixStringContext context; if (evalOnly) { Value vRes; if (autoArgs.empty()) @@ -102,7 +102,6 @@ static int main_nix_instantiate(int argc, char * * argv) bool strict = false; Strings attrPaths; bool wantsReadWrite = false; - RepairFlag repair = NoRepair; struct MyArgs : LegacyArgs, MixEvalArgs { @@ -140,8 +139,6 @@ static int main_nix_instantiate(int argc, char * * argv) xmlOutputSourceLocation = false; else if (*arg == "--strict") strict = true; - else if (*arg == "--repair") - repair = Repair; else if (*arg == "--dry-run") settings.readOnlyMode = true; else if (*arg != "" && arg->at(0) == '-') @@ -160,7 +157,7 @@ static int main_nix_instantiate(int argc, char * * argv) auto evalStore = myArgs.evalStoreUrl ? openStore(*myArgs.evalStoreUrl) : store; auto state = std::make_unique(myArgs.searchPath, evalStore, store); - state->repair = repair; + state->repair = myArgs.repair; Bindings & autoArgs = *myArgs.getAutoArgs(*state); @@ -168,9 +165,11 @@ static int main_nix_instantiate(int argc, char * * argv) if (findFile) { for (auto & i : files) { - Path p = state->findFile(i); - if (p == "") throw Error("unable to find '%1%'", i); - std::cout << p << std::endl; + auto p = state->findFile(i); + if (auto fn = p.getPhysicalPath()) + std::cout << fn->abs() << std::endl; + else + throw Error("'%s' has no physical path", p); } return 0; } @@ -184,7 +183,7 @@ static int main_nix_instantiate(int argc, char * * argv) for (auto & i : files) { Expr * e = fromArgs - ? state->parseExprFromString(i, absPath(".")) + ? 
state->parseExprFromString(i, state->rootPath(CanonPath::fromCwd())) : state->parseExprFromFile(resolveExprPath(state->checkSourcePath(lookupFileArg(*state, i)))); processExpr(*state, attrPaths, parseOnly, strict, autoArgs, evalOnly, outputKind, xmlOutputSourceLocation, e); diff --git a/src/nix-store/dotgraph.hh b/src/nix-store/dotgraph.hh index 73b8d06b9..4fd944080 100644 --- a/src/nix-store/dotgraph.hh +++ b/src/nix-store/dotgraph.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "store-api.hh" diff --git a/src/nix-store/graphml.cc b/src/nix-store/graphml.cc index 425d61e53..439557658 100644 --- a/src/nix-store/graphml.cc +++ b/src/nix-store/graphml.cc @@ -57,7 +57,7 @@ void printGraphML(ref store, StorePathSet && roots) << "\n" - << "" + << "" << "" << "" << "\n"; diff --git a/src/nix-store/graphml.hh b/src/nix-store/graphml.hh index 78be8a367..bd3a4a37c 100644 --- a/src/nix-store/graphml.hh +++ b/src/nix-store/graphml.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "store-api.hh" diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc index 3bbefedbe..40f30eb63 100644 --- a/src/nix-store/nix-store.cc +++ b/src/nix-store/nix-store.cc @@ -72,11 +72,13 @@ static PathSet realisePath(StorePathWithOutputs path, bool build = true) Derivation drv = store->derivationFromPath(path.path); rootNr++; + /* FIXME: Encode this empty special case explicitly in the type. */ if (path.outputs.empty()) for (auto & i : drv.outputs) path.outputs.insert(i.first); PathSet outputs; for (auto & j : path.outputs) { + /* Match outputs of a store path with outputs of the derivation that produces it. */ DerivationOutputs::iterator i = drv.outputs.find(j); if (i == drv.outputs.end()) throw Error("derivation '%s' does not have an output named '%s'", @@ -141,6 +143,7 @@ static void opRealise(Strings opFlags, Strings opArgs) toDerivedPaths(paths), willBuild, willSubstitute, unknown, downloadSize, narSize); + /* Filter out unknown paths from `paths`. */ if (ignoreUnknown) { std::vector paths2; for (auto & i : paths) @@ -201,10 +204,10 @@ static void opAddFixed(Strings opFlags, Strings opArgs) /* Hack to support caching in `nix-prefetch-url'. 
*/ static void opPrintFixedPath(Strings opFlags, Strings opArgs) { - auto recursive = FileIngestionMethod::Flat; + auto method = FileIngestionMethod::Flat; for (auto i : opFlags) - if (i == "--recursive") recursive = FileIngestionMethod::Recursive; + if (i == "--recursive") method = FileIngestionMethod::Recursive; else throw UsageError("unknown flag '%1%'", i); if (opArgs.size() != 3) @@ -215,7 +218,13 @@ static void opPrintFixedPath(Strings opFlags, Strings opArgs) std::string hash = *i++; std::string name = *i++; - cout << fmt("%s\n", store->printStorePath(store->makeFixedOutputPath(recursive, Hash::parseAny(hash, hashAlgo), name))); + cout << fmt("%s\n", store->printStorePath(store->makeFixedOutputPath(name, FixedOutputInfo { + .hash = { + .method = method, + .hash = Hash::parseAny(hash, hashAlgo), + }, + .references = {}, + }))); } @@ -274,17 +283,17 @@ static void printTree(const StorePath & path, static void opQuery(Strings opFlags, Strings opArgs) { enum QueryType - { qDefault, qOutputs, qRequisites, qReferences, qReferrers + { qOutputs, qRequisites, qReferences, qReferrers , qReferrersClosure, qDeriver, qBinding, qHash, qSize , qTree, qGraph, qGraphML, qResolve, qRoots }; - QueryType query = qDefault; + std::optional query; bool useOutput = false; bool includeOutputs = false; bool forceRealise = false; std::string bindingName; for (auto & i : opFlags) { - QueryType prev = query; + std::optional prev = query; if (i == "--outputs") query = qOutputs; else if (i == "--requisites" || i == "-R") query = qRequisites; else if (i == "--references") query = qReferences; @@ -309,15 +318,15 @@ static void opQuery(Strings opFlags, Strings opArgs) else if (i == "--force-realise" || i == "--force-realize" || i == "-f") forceRealise = true; else if (i == "--include-outputs") includeOutputs = true; else throw UsageError("unknown flag '%1%'", i); - if (prev != qDefault && prev != query) + if (prev && prev != query) throw UsageError("query type '%1%' conflicts with earlier flag", i); } - if (query == qDefault) query = qOutputs; + if (!query) query = qOutputs; RunPager pager; - switch (query) { + switch (*query) { case qOutputs: { for (auto & i : opArgs) { @@ -457,7 +466,7 @@ static void opPrintEnv(Strings opFlags, Strings opArgs) /* Print each environment variable in the derivation in a format * that can be sourced by the shell. */ for (auto & i : drv.env) - cout << format("export %1%; %1%=%2%\n") % i.first % shellEscape(i.second); + logger->cout("export %1%; %1%=%2%\n", i.first, shellEscape(i.second)); /* Also output the arguments. This doesn't preserve whitespace in arguments. 
*/ @@ -932,7 +941,10 @@ static void opServe(Strings opFlags, Strings opArgs) if (GET_PROTOCOL_MINOR(clientVersion) >= 3) out << status.timesBuilt << status.isNonDeterministic << status.startTime << status.stopTime; if (GET_PROTOCOL_MINOR(clientVersion) >= 6) { - worker_proto::write(*store, out, status.builtOutputs); + DrvOutputs builtOutputs; + for (auto & [output, realisation] : status.builtOutputs) + builtOutputs.insert_or_assign(realisation.id, realisation); + worker_proto::write(*store, out, builtOutputs); } break; @@ -961,7 +973,7 @@ static void opServe(Strings opFlags, Strings opArgs) info.references = worker_proto::read(*store, in, Phantom {}); in >> info.registrationTime >> info.narSize >> info.ultimate; info.sigs = readStrings(in); - info.ca = parseContentAddressOpt(readString(in)); + info.ca = ContentAddress::parseOpt(readString(in)); if (info.narSize == 0) throw Error("narInfo is too old and missing the narSize field"); @@ -1020,64 +1032,109 @@ static int main_nix_store(int argc, char * * argv) { Strings opFlags, opArgs; Operation op = 0; + bool readFromStdIn = false; + std::string opName; + bool showHelp = false; parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) { Operation oldOp = op; if (*arg == "--help") - showManPage("nix-store"); + showHelp = true; else if (*arg == "--version") op = opVersion; - else if (*arg == "--realise" || *arg == "--realize" || *arg == "-r") + else if (*arg == "--realise" || *arg == "--realize" || *arg == "-r") { op = opRealise; - else if (*arg == "--add" || *arg == "-A") + opName = "-realise"; + } + else if (*arg == "--add" || *arg == "-A"){ op = opAdd; - else if (*arg == "--add-fixed") + opName = "-add"; + } + else if (*arg == "--add-fixed") { op = opAddFixed; + opName = arg->substr(1); + } else if (*arg == "--print-fixed-path") op = opPrintFixedPath; - else if (*arg == "--delete") + else if (*arg == "--delete") { op = opDelete; - else if (*arg == "--query" || *arg == "-q") + opName = arg->substr(1); + } + else if (*arg == "--query" || *arg == "-q") { op = opQuery; - else if (*arg == "--print-env") + opName = "-query"; + } + else if (*arg == "--print-env") { op = opPrintEnv; - else if (*arg == "--read-log" || *arg == "-l") + opName = arg->substr(1); + } + else if (*arg == "--read-log" || *arg == "-l") { op = opReadLog; - else if (*arg == "--dump-db") + opName = "-read-log"; + } + else if (*arg == "--dump-db") { op = opDumpDB; - else if (*arg == "--load-db") + opName = arg->substr(1); + } + else if (*arg == "--load-db") { op = opLoadDB; + opName = arg->substr(1); + } else if (*arg == "--register-validity") op = opRegisterValidity; else if (*arg == "--check-validity") op = opCheckValidity; - else if (*arg == "--gc") + else if (*arg == "--gc") { op = opGC; - else if (*arg == "--dump") + opName = arg->substr(1); + } + else if (*arg == "--dump") { op = opDump; - else if (*arg == "--restore") + opName = arg->substr(1); + } + else if (*arg == "--restore") { op = opRestore; - else if (*arg == "--export") + opName = arg->substr(1); + } + else if (*arg == "--export") { op = opExport; - else if (*arg == "--import") + opName = arg->substr(1); + } + else if (*arg == "--import") { op = opImport; + opName = arg->substr(1); + } else if (*arg == "--init") op = opInit; - else if (*arg == "--verify") + else if (*arg == "--verify") { op = opVerify; - else if (*arg == "--verify-path") + opName = arg->substr(1); + } + else if (*arg == "--verify-path") { op = opVerifyPath; - else if (*arg == "--repair-path") + opName = arg->substr(1); + 
} + else if (*arg == "--repair-path") { op = opRepairPath; - else if (*arg == "--optimise" || *arg == "--optimize") + opName = arg->substr(1); + } + else if (*arg == "--optimise" || *arg == "--optimize") { op = opOptimise; - else if (*arg == "--serve") + opName = "-optimise"; + } + else if (*arg == "--serve") { op = opServe; - else if (*arg == "--generate-binary-cache-key") + opName = arg->substr(1); + } + else if (*arg == "--generate-binary-cache-key") { op = opGenerateBinaryCacheKey; + opName = arg->substr(1); + } else if (*arg == "--add-root") gcRoot = absPath(getArg(*arg, arg, end)); + else if (*arg == "--stdin" && !isatty(STDIN_FILENO)) + readFromStdIn = true; else if (*arg == "--indirect") ; else if (*arg == "--no-output") @@ -1090,12 +1147,20 @@ static int main_nix_store(int argc, char * * argv) else opArgs.push_back(*arg); + if (readFromStdIn && op != opImport && op != opRestore && op != opServe) { + std::string word; + while (std::cin >> word) { + opArgs.emplace_back(std::move(word)); + }; + } + if (oldOp && oldOp != op) throw UsageError("only one operation may be specified"); return true; }); + if (showHelp) showManPage("nix-store" + opName); if (!op) throw UsageError("no operation specified"); if (op != opDump && op != opRestore) /* !!! hack */ diff --git a/src/nix/add-to-store.cc b/src/nix/add-to-store.cc index 5168413d2..16e48a39b 100644 --- a/src/nix/add-to-store.cc +++ b/src/nix/add-to-store.cc @@ -42,14 +42,18 @@ struct CmdAddToStore : MixDryRun, StoreCommand } ValidPathInfo info { - store->makeFixedOutputPath(ingestionMethod, hash, *namePart), + *store, + std::move(*namePart), + FixedOutputInfo { + .hash = { + .method = std::move(ingestionMethod), + .hash = std::move(hash), + }, + .references = {}, + }, narHash, }; info.narSize = sink.s.size(); - info.ca = std::optional { FixedOutputHash { - .method = ingestionMethod, - .hash = hash, - } }; if (!dryRun) { auto source = StringSource(sink.s); diff --git a/src/nix/app.cc b/src/nix/app.cc index 5cd65136f..fd4569bb4 100644 --- a/src/nix/app.cc +++ b/src/nix/app.cc @@ -1,5 +1,6 @@ #include "installables.hh" #include "installable-derived-path.hh" +#include "installable-value.hh" #include "store-api.hh" #include "eval-inline.hh" #include "eval-cache.hh" @@ -40,7 +41,7 @@ std::string resolveString( return rewriteStrings(toResolve, rewrites); } -UnresolvedApp Installable::toApp(EvalState & state) +UnresolvedApp InstallableValue::toApp(EvalState & state) { auto cursor = getCursor(state); auto attrPath = cursor->getAttrPath(); @@ -119,11 +120,11 @@ App UnresolvedApp::resolve(ref evalStore, ref store) { auto res = unresolved; - std::vector> installableContext; + Installables installableContext; for (auto & ctxElt : unresolved.context) installableContext.push_back( - std::make_shared(store, DerivedPath { ctxElt })); + make_ref(store, DerivedPath { ctxElt })); auto builtContext = Installable::build(evalStore, store, Realise::Outputs, installableContext); res.program = resolveString(*store, unresolved.program, builtContext); diff --git a/src/nix/build.cc b/src/nix/build.cc index 12b22d999..ad1842a4e 100644 --- a/src/nix/build.cc +++ b/src/nix/build.cc @@ -1,4 +1,3 @@ -#include "eval.hh" #include "command.hh" #include "common-args.hh" #include "shared.hh" @@ -28,8 +27,10 @@ nlohmann::json builtPathsWithResultToJSON(const std::vector std::visit([&](const auto & t) { auto j = t.toJSON(store); if (b.result) { - j["startTime"] = b.result->startTime; - j["stopTime"] = b.result->stopTime; + if (b.result->startTime) + j["startTime"] = 
b.result->startTime; + if (b.result->stopTime) + j["stopTime"] = b.result->stopTime; if (b.result->cpuUser) j["cpuUser"] = ((double) b.result->cpuUser->count()) / 1000000; if (b.result->cpuSystem) @@ -41,6 +42,29 @@ nlohmann::json builtPathsWithResultToJSON(const std::vector return res; } +// TODO deduplicate with other code also setting such out links. +static void createOutLinks(const Path& outLink, const std::vector& buildables, LocalFSStore& store2) +{ + for (const auto & [_i, buildable] : enumerate(buildables)) { + auto i = _i; + std::visit(overloaded { + [&](const BuiltPath::Opaque & bo) { + std::string symlink = outLink; + if (i) symlink += fmt("-%d", i); + store2.addPermRoot(bo.path, absPath(symlink)); + }, + [&](const BuiltPath::Built & bfd) { + for (auto & output : bfd.outputs) { + std::string symlink = outLink; + if (i) symlink += fmt("-%d", i); + if (output.first != "out") symlink += fmt("-%s", output.first); + store2.addPermRoot(output.second, absPath(symlink)); + } + }, + }, buildable.path.raw()); + } +} + struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile { Path outLink = "result"; @@ -89,7 +113,7 @@ struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile ; } - void run(ref store) override + void run(ref store, Installables && installables) override { if (dryRun) { std::vector pathsToBuild; @@ -109,41 +133,25 @@ struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile auto buildables = Installable::build( getEvalStore(), store, Realise::Outputs, - installables, buildMode); + installables, + repair ? bmRepair : buildMode); if (json) logger->cout("%s", builtPathsWithResultToJSON(buildables, store).dump()); if (outLink != "") if (auto store2 = store.dynamic_pointer_cast()) - for (const auto & [_i, buildable] : enumerate(buildables)) { - auto i = _i; - std::visit(overloaded { - [&](const BuiltPath::Opaque & bo) { - std::string symlink = outLink; - if (i) symlink += fmt("-%d", i); - store2->addPermRoot(bo.path, absPath(symlink)); - }, - [&](const BuiltPath::Built & bfd) { - for (auto & output : bfd.outputs) { - std::string symlink = outLink; - if (i) symlink += fmt("-%d", i); - if (output.first != "out") symlink += fmt("-%s", output.first); - store2->addPermRoot(output.second, absPath(symlink)); - } - }, - }, buildable.path.raw()); - } + createOutLinks(outLink, buildables, *store2); if (printOutputPaths) { stopProgressBar(); for (auto & buildable : buildables) { std::visit(overloaded { [&](const BuiltPath::Opaque & bo) { - std::cout << store->printStorePath(bo.path) << std::endl; + logger->cout(store->printStorePath(bo.path)); }, [&](const BuiltPath::Built & bfd) { for (auto & output : bfd.outputs) { - std::cout << store->printStorePath(output.second) << std::endl; + logger->cout(store->printStorePath(output.second)); } }, }, buildable.path.raw()); diff --git a/src/nix/build.md b/src/nix/build.md index 6a79f308c..ee414dc86 100644 --- a/src/nix/build.md +++ b/src/nix/build.md @@ -82,7 +82,7 @@ R""( # Description -`nix build` builds the specified *installables*. Installables that +`nix build` builds the specified *installables*. [Installables](./nix.md#installables) that resolve to derivations are built (or substituted if possible). Store path installables are substituted. 
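The `createOutLinks` helper factored out of `CmdBuild::run` above determines how result symlinks are named: the first buildable gets the plain `--out-link` name, later buildables get a numeric suffix, and outputs other than `out` additionally get the output name appended. Below is a minimal, self-contained sketch of that naming rule only; `outLinkName` is a hypothetical stand-in for illustration, while the real helper passes the resulting names to `LocalFSStore::addPermRoot` to register them as GC roots.

```cpp
// Minimal illustration (not part of the diff) of the out-link naming rule
// implemented by the new createOutLinks() helper in src/nix/build.cc:
//   - the first buildable uses the plain --out-link name ("result"),
//   - later buildables get "-<index>" appended,
//   - outputs other than "out" get "-<output>" appended as well.
// outLinkName() is a hypothetical stand-in; the real code feeds these names
// to LocalFSStore::addPermRoot() to register them as GC roots.
#include <cstddef>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

static std::string outLinkName(const std::string & base, std::size_t index, const std::string & output)
{
    std::string name = base;
    if (index) name += "-" + std::to_string(index);
    if (output != "out") name += "-" + output;
    return name;
}

int main()
{
    std::vector<std::pair<std::size_t, std::string>> cases = {
        {0, "out"}, {0, "dev"}, {1, "out"}, {2, "doc"},
    };
    for (auto & [i, output] : cases)
        std::cout << outLinkName("result", i, output) << "\n";
    // Prints: result, result-dev, result-1, result-2-doc
}
```

Under this scheme, building two installables would typically produce `result` and `result-1`, with a `dev` output of the first one landing at `result-dev`.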
diff --git a/src/nix/bundle.cc b/src/nix/bundle.cc index dcf9a6f2d..bcc00d490 100644 --- a/src/nix/bundle.cc +++ b/src/nix/bundle.cc @@ -1,14 +1,15 @@ -#include "command.hh" #include "installable-flake.hh" +#include "command-installable-value.hh" #include "common-args.hh" #include "shared.hh" #include "store-api.hh" #include "local-fs-store.hh" #include "fs-accessor.hh" +#include "eval-inline.hh" using namespace nix; -struct CmdBundle : InstallableCommand +struct CmdBundle : InstallableValueCommand { std::string bundler = "github:NixOS/bundlers"; std::optional outLink; @@ -70,7 +71,7 @@ struct CmdBundle : InstallableCommand return res; } - void run(ref store) override + void run(ref store, ref installable) override { auto evalState = getEvalState(); @@ -97,7 +98,7 @@ struct CmdBundle : InstallableCommand if (!attr1) throw Error("the bundler '%s' does not produce a derivation", bundler.what()); - PathSet context2; + NixStringContext context2; auto drvPath = evalState->coerceToStorePath(attr1->pos, *attr1->value, context2, ""); auto attr2 = vRes->attrs->get(evalState->sOutPath); diff --git a/src/nix/bundle.md b/src/nix/bundle.md index a18161a3c..89458aaaa 100644 --- a/src/nix/bundle.md +++ b/src/nix/bundle.md @@ -29,7 +29,7 @@ R""( # Description -`nix bundle`, by default, packs the closure of the *installable* into a single +`nix bundle`, by default, packs the closure of the [*installable*](./nix.md#installables) into a single self-extracting executable. See the [`bundlers` homepage](https://github.com/NixOS/bundlers) for more details. diff --git a/src/nix/cat.cc b/src/nix/cat.cc index 6420a0f79..60aa66ce0 100644 --- a/src/nix/cat.cc +++ b/src/nix/cat.cc @@ -17,7 +17,7 @@ struct MixCat : virtual Args if (st.type != FSAccessor::Type::tRegular) throw Error("path '%1%' is not a regular file", path); - std::cout << accessor->readFile(path); + writeFull(STDOUT_FILENO, accessor->readFile(path)); } }; diff --git a/src/nix/copy.cc b/src/nix/copy.cc index 8730a9a5c..151d28277 100644 --- a/src/nix/copy.cc +++ b/src/nix/copy.cc @@ -10,8 +10,6 @@ struct CmdCopy : virtual CopyCommand, virtual BuiltPathsCommand SubstituteFlag substitute = NoSubstitute; - using BuiltPathsCommand::run; - CmdCopy() : BuiltPathsCommand(true) { diff --git a/src/nix/daemon.cc b/src/nix/daemon.cc index a22bccba1..c1a91c63d 100644 --- a/src/nix/daemon.cc +++ b/src/nix/daemon.cc @@ -1,3 +1,5 @@ +///@file + #include "command.hh" #include "shared.hh" #include "local-store.hh" @@ -34,6 +36,19 @@ using namespace nix; using namespace nix::daemon; +/** + * Settings related to authenticating clients for the Nix daemon. + * + * For pipes we have little good information about the client side, but + * for Unix domain sockets we do. So currently these options implemented + * mandatory access control based on user names and group names (looked + * up and translated to UID/GIDs in the CLI process that runs the code + * in this file). + * + * No code outside of this file knows about these settings (this is not + * exposed in a header); all authentication and authorization happens in + * `daemon.cc`. + */ struct AuthorizationSettings : Config { Setting trustedUsers{ @@ -54,7 +69,9 @@ struct AuthorizationSettings : Config { > directories that are otherwise inacessible to them. 
)"}; - /* ?Who we trust to use the daemon in safe ways */ + /** + * Who we trust to use the daemon in safe ways + */ Setting allowedUsers{ this, {"*"}, "allowed-users", R"( @@ -112,8 +129,36 @@ static void setSigChldAction(bool autoReap) throw SysError("setting SIGCHLD handler"); } +/** + * @return Is the given user a member of this group? + * + * @param user User specified by username. + * + * @param group Group the user might be a member of. + */ +static bool matchUser(std::string_view user, const struct group & gr) +{ + for (char * * mem = gr.gr_mem; *mem; mem++) + if (user == std::string_view(*mem)) return true; + return false; +} -bool matchUser(const std::string & user, const std::string & group, const Strings & users) + +/** + * Does the given user (specified by user name and primary group name) + * match the given user/group whitelist? + * + * If the list allows all users: Yes. + * + * If the username is in the set: Yes. + * + * If the groupname is in the set: Yes. + * + * If the user is in another group which is in the set: yes. + * + * Otherwise: No. + */ +static bool matchUser(const std::string & user, const std::string & group, const Strings & users) { if (find(users.begin(), users.end(), "*") != users.end()) return true; @@ -126,8 +171,7 @@ bool matchUser(const std::string & user, const std::string & group, const String if (group == i.substr(1)) return true; struct group * gr = getgrnam(i.c_str() + 1); if (!gr) continue; - for (char * * mem = gr->gr_mem; *mem; mem++) - if (user == std::string(*mem)) return true; + if (matchUser(user, *gr)) return true; } return false; @@ -145,7 +189,9 @@ struct PeerInfo }; -// Get the identity of the caller, if possible. +/** + * Get the identity of the caller, if possible. + */ static PeerInfo getPeerInfo(int remote) { PeerInfo peer = { false, 0, false, 0, false, 0 }; @@ -179,6 +225,9 @@ static PeerInfo getPeerInfo(int remote) #define SD_LISTEN_FDS_START 3 +/** + * Open a store without a path info cache. + */ static ref openUncachedStore() { Store::Params params; // FIXME: get params from somewhere @@ -187,8 +236,49 @@ static ref openUncachedStore() return openStore(settings.storeUri, params); } +/** + * Authenticate a potential client + * + * @param peer Information about other end of the connection, the client which + * wants to communicate with us. + * + * @return A pair of a `TrustedFlag`, whether the potential client is trusted, + * and the name of the user (useful for printing messages). + * + * If the potential client is not allowed to talk to us, we throw an `Error`. + */ +static std::pair authPeer(const PeerInfo & peer) +{ + TrustedFlag trusted = NotTrusted; -static void daemonLoop() + struct passwd * pw = peer.uidKnown ? getpwuid(peer.uid) : 0; + std::string user = pw ? pw->pw_name : std::to_string(peer.uid); + + struct group * gr = peer.gidKnown ? getgrgid(peer.gid) : 0; + std::string group = gr ? gr->gr_name : std::to_string(peer.gid); + + const Strings & trustedUsers = authorizationSettings.trustedUsers; + const Strings & allowedUsers = authorizationSettings.allowedUsers; + + if (matchUser(user, group, trustedUsers)) + trusted = Trusted; + + if ((!trusted && !matchUser(user, group, allowedUsers)) || group == settings.buildUsersGroup) + throw Error("user '%1%' is not allowed to connect to the Nix daemon", user); + + return { trusted, std::move(user) }; +} + + +/** + * Run a server. The loop opens a socket and accepts new connections from that + * socket. 
+ * + * @param forceTrustClientOpt If present, force trusting or not trusted + * the client. Otherwise, decide based on the authentication settings + * and user credentials (from the unix domain socket). + */ +static void daemonLoop(std::optional forceTrustClientOpt) { if (chdir("/") == -1) throw SysError("cannot change current directory"); @@ -231,27 +321,22 @@ static void daemonLoop() closeOnExec(remote.get()); - TrustedFlag trusted = NotTrusted; - PeerInfo peer = getPeerInfo(remote.get()); + PeerInfo peer { .pidKnown = false }; + TrustedFlag trusted; + std::string user; - struct passwd * pw = peer.uidKnown ? getpwuid(peer.uid) : 0; - std::string user = pw ? pw->pw_name : std::to_string(peer.uid); + if (forceTrustClientOpt) + trusted = *forceTrustClientOpt; + else { + peer = getPeerInfo(remote.get()); + auto [_trusted, _user] = authPeer(peer); + trusted = _trusted; + user = _user; + }; - struct group * gr = peer.gidKnown ? getgrgid(peer.gid) : 0; - std::string group = gr ? gr->gr_name : std::to_string(peer.gid); - - Strings trustedUsers = authorizationSettings.trustedUsers; - Strings allowedUsers = authorizationSettings.allowedUsers; - - if (matchUser(user, group, trustedUsers)) - trusted = Trusted; - - if ((!trusted && !matchUser(user, group, allowedUsers)) || group == settings.buildUsersGroup) - throw Error("user '%1%' is not allowed to connect to the Nix daemon", user); - - printInfo(format((std::string) "accepted connection from pid %1%, user %2%" + (trusted ? " (trusted)" : "")) - % (peer.pidKnown ? std::to_string(peer.pid) : "") - % (peer.uidKnown ? user : "")); + printInfo((std::string) "accepted connection from pid %1%, user %2%" + (trusted ? " (trusted)" : ""), + peer.pidKnown ? std::to_string(peer.pid) : "", + peer.uidKnown ? user : ""); // Fork a child to handle the connection. ProcessOptions options; @@ -294,53 +379,91 @@ static void daemonLoop() } } -static void runDaemon(bool stdio) +/** + * Forward a standard IO connection to the given remote store. + * + * We just act as a middleman blindly ferry output between the standard + * input/output and the remote store connection, not processing anything. + * + * Loops until standard input disconnects, or an error is encountered. + */ +static void forwardStdioConnection(RemoteStore & store) { + auto conn = store.openConnectionWrapper(); + int from = conn->from.fd; + int to = conn->to.fd; + + auto nfds = std::max(from, STDIN_FILENO) + 1; + while (true) { + fd_set fds; + FD_ZERO(&fds); + FD_SET(from, &fds); + FD_SET(STDIN_FILENO, &fds); + if (select(nfds, &fds, nullptr, nullptr, nullptr) == -1) + throw SysError("waiting for data from client or server"); + if (FD_ISSET(from, &fds)) { + auto res = splice(from, nullptr, STDOUT_FILENO, nullptr, SSIZE_MAX, SPLICE_F_MOVE); + if (res == -1) + throw SysError("splicing data from daemon socket to stdout"); + else if (res == 0) + throw EndOfFile("unexpected EOF from daemon socket"); + } + if (FD_ISSET(STDIN_FILENO, &fds)) { + auto res = splice(STDIN_FILENO, nullptr, to, nullptr, SSIZE_MAX, SPLICE_F_MOVE); + if (res == -1) + throw SysError("splicing data from stdin to daemon socket"); + else if (res == 0) + return; + } + } +} + +/** + * Process a client connecting to us via standard input/output + * + * Unlike `forwardStdioConnection()` we do process commands ourselves in + * this case, not delegating to another daemon. + * + * @param trustClient Whether to trust the client. Forwarded directly to + * `processConnection()`. 
+ */ +static void processStdioConnection(ref store, TrustedFlag trustClient) +{ + FdSource from(STDIN_FILENO); + FdSink to(STDOUT_FILENO); + processConnection(store, from, to, trustClient, NotRecursive); +} + +/** + * Entry point shared between the new CLI `nix daemon` and old CLI + * `nix-daemon`. + * + * @param forceTrustClientOpt See `daemonLoop()` and the parameter with + * the same name over there for details. + */ +static void runDaemon(bool stdio, std::optional forceTrustClientOpt) { if (stdio) { - if (auto store = openUncachedStore().dynamic_pointer_cast()) { - auto conn = store->openConnectionWrapper(); - int from = conn->from.fd; - int to = conn->to.fd; + auto store = openUncachedStore(); - auto nfds = std::max(from, STDIN_FILENO) + 1; - while (true) { - fd_set fds; - FD_ZERO(&fds); - FD_SET(from, &fds); - FD_SET(STDIN_FILENO, &fds); - if (select(nfds, &fds, nullptr, nullptr, nullptr) == -1) - throw SysError("waiting for data from client or server"); - if (FD_ISSET(from, &fds)) { - auto res = splice(from, nullptr, STDOUT_FILENO, nullptr, SSIZE_MAX, SPLICE_F_MOVE); - if (res == -1) - throw SysError("splicing data from daemon socket to stdout"); - else if (res == 0) - throw EndOfFile("unexpected EOF from daemon socket"); - } - if (FD_ISSET(STDIN_FILENO, &fds)) { - auto res = splice(STDIN_FILENO, nullptr, to, nullptr, SSIZE_MAX, SPLICE_F_MOVE); - if (res == -1) - throw SysError("splicing data from stdin to daemon socket"); - else if (res == 0) - return; - } - } - } else { - FdSource from(STDIN_FILENO); - FdSink to(STDOUT_FILENO); - /* Auth hook is empty because in this mode we blindly trust the - standard streams. Limiting access to those is explicitly - not `nix-daemon`'s responsibility. */ - processConnection(openUncachedStore(), from, to, Trusted, NotRecursive); - } + // If --force-untrusted is passed, we cannot forward the connection and + // must process it ourselves (before delegating to the next store) to + // force untrusting the client. + if (auto remoteStore = store.dynamic_pointer_cast(); remoteStore && (!forceTrustClientOpt || *forceTrustClientOpt != NotTrusted)) + forwardStdioConnection(*remoteStore); + else + // `Trusted` is passed in the auto (no override case) because we + // cannot see who is on the other side of a plain pipe. Limiting + // access to those is explicitly not `nix-daemon`'s responsibility. 
+ processStdioConnection(store, forceTrustClientOpt.value_or(Trusted)); } else - daemonLoop(); + daemonLoop(forceTrustClientOpt); } static int main_nix_daemon(int argc, char * * argv) { { auto stdio = false; + std::optional isTrustedOpt = std::nullopt; parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) { if (*arg == "--daemon") @@ -351,11 +474,20 @@ static int main_nix_daemon(int argc, char * * argv) printVersion("nix-daemon"); else if (*arg == "--stdio") stdio = true; - else return false; + else if (*arg == "--force-trusted") { + experimentalFeatureSettings.require(Xp::DaemonTrustOverride); + isTrustedOpt = Trusted; + } else if (*arg == "--force-untrusted") { + experimentalFeatureSettings.require(Xp::DaemonTrustOverride); + isTrustedOpt = NotTrusted; + } else if (*arg == "--default-trust") { + experimentalFeatureSettings.require(Xp::DaemonTrustOverride); + isTrustedOpt = std::nullopt; + } else return false; return true; }); - runDaemon(stdio); + runDaemon(stdio, isTrustedOpt); return 0; } @@ -381,7 +513,7 @@ struct CmdDaemon : StoreCommand void run(ref store) override { - runDaemon(false); + runDaemon(false, std::nullopt); } }; diff --git a/src/nix/derivation-add.cc b/src/nix/derivation-add.cc new file mode 100644 index 000000000..4d91d4538 --- /dev/null +++ b/src/nix/derivation-add.cc @@ -0,0 +1,45 @@ +// FIXME: rename to 'nix plan add' or 'nix derivation add'? + +#include "command.hh" +#include "common-args.hh" +#include "store-api.hh" +#include "archive.hh" +#include "derivations.hh" +#include + +using namespace nix; +using json = nlohmann::json; + +struct CmdAddDerivation : MixDryRun, StoreCommand +{ + std::string description() override + { + return "Add a store derivation"; + } + + std::string doc() override + { + return + #include "derivation-add.md" + ; + } + + Category category() override { return catUtility; } + + void run(ref store) override + { + auto json = nlohmann::json::parse(drainFD(STDIN_FILENO)); + + auto drv = Derivation::fromJSON(*store, json); + + auto drvPath = writeDerivation(*store, drv, NoRepair, /* read only */ dryRun); + + drv.checkInvariants(*store, drvPath); + + writeDerivation(*store, drv, NoRepair, dryRun); + + logger->cout("%s", store->printStorePath(drvPath)); + } +}; + +static auto rCmdAddDerivation = registerCommand2({"derivation", "add"}); diff --git a/src/nix/derivation-add.md b/src/nix/derivation-add.md new file mode 100644 index 000000000..f116681ab --- /dev/null +++ b/src/nix/derivation-add.md @@ -0,0 +1,18 @@ +R""( + +# Description + +This command reads from standard input a JSON representation of a +[store derivation] to which an [*installable*](./nix.md#installables) evaluates. + +Store derivations are used internally by Nix. They are store paths with +extension `.drv` that represent the build-time dependency graph to which +a Nix expression evaluates. + +[store derivation]: ../../glossary.md#gloss-store-derivation + +The JSON format is documented under the [`derivation show`] command. + +[`derivation show`]: ./nix3-derivation-show.md + +)"" diff --git a/src/nix/show-derivation.cc b/src/nix/derivation-show.cc similarity index 80% rename from src/nix/show-derivation.cc rename to src/nix/derivation-show.cc index d1a516cad..bf637246d 100644 --- a/src/nix/show-derivation.cc +++ b/src/nix/derivation-show.cc @@ -1,5 +1,5 @@ // FIXME: integrate this with nix path-info? -// FIXME: rename to 'nix store show-derivation' or 'nix debug show-derivation'? 
+// FIXME: rename to 'nix store derivation show' or 'nix debug derivation show'? #include "command.hh" #include "common-args.hh" @@ -33,13 +33,13 @@ struct CmdShowDerivation : InstallablesCommand std::string doc() override { return - #include "show-derivation.md" + #include "derivation-show.md" ; } Category category() override { return catUtility; } - void run(ref store) override + void run(ref store, Installables && installables) override { auto drvPaths = Installable::toDerivations(store, installables, true); @@ -57,8 +57,8 @@ struct CmdShowDerivation : InstallablesCommand jsonRoot[store->printStorePath(drvPath)] = store->readDerivation(drvPath).toJSON(*store); } - std::cout << jsonRoot.dump(2) << std::endl; + logger->cout(jsonRoot.dump(2)); } }; -static auto rCmdShowDerivation = registerCommand("show-derivation"); +static auto rCmdShowDerivation = registerCommand2({"derivation", "show"}); diff --git a/src/nix/show-derivation.md b/src/nix/derivation-show.md similarity index 84% rename from src/nix/show-derivation.md rename to src/nix/derivation-show.md index 2cd93aa62..1296e2885 100644 --- a/src/nix/show-derivation.md +++ b/src/nix/derivation-show.md @@ -8,7 +8,7 @@ R""( [store derivation]: ../../glossary.md#gloss-store-derivation ```console - # nix show-derivation nixpkgs#hello + # nix derivation show nixpkgs#hello { "/nix/store/s6rn4jz1sin56rf4qj5b5v8jxjm32hlk-hello-2.10.drv": { … @@ -20,14 +20,14 @@ R""( NixOS system: ```console - # nix show-derivation -r /run/current-system + # nix derivation show -r /run/current-system ``` * Print all files fetched using `fetchurl` by Firefox's dependency graph: ```console - # nix show-derivation -r nixpkgs#firefox \ + # nix derivation show -r nixpkgs#firefox \ | jq -r '.[] | select(.outputs.out.hash and .env.urls) | .env.urls' \ | uniq | sort ``` @@ -39,10 +39,11 @@ R""( # Description This command prints on standard output a JSON representation of the -[store derivation]s to which *installables* evaluate. Store derivations -are used internally by Nix. They are store paths with extension `.drv` -that represent the build-time dependency graph to which a Nix -expression evaluates. +[store derivation]s to which [*installables*](./nix.md#installables) evaluate. + +Store derivations are used internally by Nix. They are store paths with +extension `.drv` that represent the build-time dependency graph to which +a Nix expression evaluates. By default, this command only shows top-level derivations, but with `--recursive`, it also shows their dependencies. @@ -51,6 +52,9 @@ The JSON output is a JSON object whose keys are the store paths of the derivations, and whose values are a JSON object with the following fields: +* `name`: The name of the derivation. This is used when calculating the + store paths of the derivation's outputs. + * `outputs`: Information about the output paths of the derivation. 
This is a JSON object with one member per output, where the key is the output name and the value is a JSON object with these diff --git a/src/nix/derivation.cc b/src/nix/derivation.cc new file mode 100644 index 000000000..cd3975a4f --- /dev/null +++ b/src/nix/derivation.cc @@ -0,0 +1,25 @@ +#include "command.hh" + +using namespace nix; + +struct CmdDerivation : virtual NixMultiCommand +{ + CmdDerivation() : MultiCommand(RegisterCommand::getCommandsFor({"derivation"})) + { } + + std::string description() override + { + return "Work with derivations, Nix's notion of a build plan."; + } + + Category category() override { return catUtility; } + + void run() override + { + if (!command) + throw UsageError("'nix derivation' requires a sub-command."); + command->second->run(); + } +}; + +static auto rCmdDerivation = registerCommand("derivation"); diff --git a/src/nix/describe-stores.cc b/src/nix/describe-stores.cc deleted file mode 100644 index 1dd384c0e..000000000 --- a/src/nix/describe-stores.cc +++ /dev/null @@ -1,44 +0,0 @@ -#include "command.hh" -#include "common-args.hh" -#include "shared.hh" -#include "store-api.hh" - -#include - -using namespace nix; - -struct CmdDescribeStores : Command, MixJSON -{ - std::string description() override - { - return "show registered store types and their available options"; - } - - Category category() override { return catUtility; } - - void run() override - { - auto res = nlohmann::json::object(); - for (auto & implem : *Implementations::registered) { - auto storeConfig = implem.getConfig(); - auto storeName = storeConfig->name(); - res[storeName] = storeConfig->toJSON(); - } - if (json) { - std::cout << res; - } else { - for (auto & [storeName, storeConfig] : res.items()) { - std::cout << "## " << storeName << std::endl << std::endl; - for (auto & [optionName, optionDesc] : storeConfig.items()) { - std::cout << "### " << optionName << std::endl << std::endl; - std::cout << optionDesc["description"].get() << std::endl; - std::cout << "default: " << optionDesc["defaultValue"] << std::endl <("describe-stores"); diff --git a/src/nix/develop.cc b/src/nix/develop.cc index 9d07a7a85..9e2dcff61 100644 --- a/src/nix/develop.cc +++ b/src/nix/develop.cc @@ -1,6 +1,6 @@ #include "eval.hh" -#include "command.hh" #include "installable-flake.hh" +#include "command-installable-value.hh" #include "common-args.hh" #include "shared.hh" #include "store-api.hh" @@ -208,7 +208,7 @@ static StorePath getDerivationEnvironment(ref store, ref evalStore drv.name += "-env"; drv.env.emplace("name", drv.name); drv.inputSrcs.insert(std::move(getEnvShPath)); - if (settings.isExperimentalFeatureEnabled(Xp::CaDerivations)) { + if (experimentalFeatureSettings.isEnabled(Xp::CaDerivations)) { for (auto & output : drv.outputs) { output.second = DerivationOutput::Deferred {}, drv.env[output.first] = hashPlaceholder(output.first); @@ -252,7 +252,7 @@ static StorePath getDerivationEnvironment(ref store, ref evalStore throw Error("get-env.sh failed to produce an environment"); } -struct Common : InstallableCommand, MixProfile +struct Common : InstallableValueCommand, MixProfile { std::set ignoreVars{ "BASHOPTS", @@ -313,7 +313,7 @@ struct Common : InstallableCommand, MixProfile buildEnvironment.toBash(out, ignoreVars); for (auto & var : savedVars) - out << fmt("%s=\"$%s:$nix_saved_%s\"\n", var, var, var); + out << fmt("%s=\"$%s${nix_saved_%s:+:$nix_saved_%s}\"\n", var, var, var, var); out << "export NIX_BUILD_TOP=\"$(mktemp -d -t nix-shell.XXXXXX)\"\n"; for (auto & i : {"TMP", "TMPDIR", "TEMP", 
"TEMPDIR"}) @@ -374,7 +374,7 @@ struct Common : InstallableCommand, MixProfile return res; } - StorePath getShellOutPath(ref store) + StorePath getShellOutPath(ref store, ref installable) { auto path = installable->getStorePath(); if (path && hasSuffix(path->to_string(), "-env")) @@ -392,9 +392,10 @@ struct Common : InstallableCommand, MixProfile } } - std::pair getBuildEnvironment(ref store) + std::pair + getBuildEnvironment(ref store, ref installable) { - auto shellOutPath = getShellOutPath(store); + auto shellOutPath = getShellOutPath(store, installable); auto strPath = store->printStorePath(shellOutPath); @@ -480,9 +481,9 @@ struct CmdDevelop : Common, MixEnvironment ; } - void run(ref store) override + void run(ref store, ref installable) override { - auto [buildEnvironment, gcroot] = getBuildEnvironment(store); + auto [buildEnvironment, gcroot] = getBuildEnvironment(store, installable); auto [rcFileFd, rcFilePath] = createTempFile("nix-shell"); @@ -537,7 +538,7 @@ struct CmdDevelop : Common, MixEnvironment nixpkgsLockFlags.inputOverrides = {}; nixpkgsLockFlags.inputUpdates = {}; - auto bashInstallable = std::make_shared( + auto bashInstallable = make_ref( this, state, installable->nixpkgsFlakeRef(), @@ -573,7 +574,7 @@ struct CmdDevelop : Common, MixEnvironment // Need to chdir since phases assume in flake directory if (phase) { // chdir if installable is a flake of type git+file or path - auto installableFlake = std::dynamic_pointer_cast(installable); + auto installableFlake = installable.dynamic_pointer_cast(); if (installableFlake) { auto sourcePath = installableFlake->getLockedFlake()->flake.resolvedRef.input.getSourcePath(); if (sourcePath) { @@ -604,9 +605,9 @@ struct CmdPrintDevEnv : Common, MixJSON Category category() override { return catUtility; } - void run(ref store) override + void run(ref store, ref installable) override { - auto buildEnvironment = getBuildEnvironment(store).first; + auto buildEnvironment = getBuildEnvironment(store, installable).first; stopProgressBar(); diff --git a/src/nix/develop.md b/src/nix/develop.md index 4e8542d1b..c49b39669 100644 --- a/src/nix/develop.md +++ b/src/nix/develop.md @@ -76,7 +76,7 @@ R""( `nix develop` starts a `bash` shell that provides an interactive build environment nearly identical to what Nix would use to build -*installable*. Inside this shell, environment variables and shell +[*installable*](./nix.md#installables). Inside this shell, environment variables and shell functions are set up so that you can interactively and incrementally build your package. diff --git a/src/nix/diff-closures.cc b/src/nix/diff-closures.cc index 3489cc132..c7c37b66f 100644 --- a/src/nix/diff-closures.cc +++ b/src/nix/diff-closures.cc @@ -97,7 +97,7 @@ void printClosureDiff( items.push_back(fmt("%s → %s", showVersions(removed), showVersions(added))); if (showDelta) items.push_back(fmt("%s%+.1f KiB" ANSI_NORMAL, sizeDelta > 0 ? 
ANSI_RED : ANSI_GREEN, sizeDelta / 1024.0)); - std::cout << fmt("%s%s: %s\n", indent, name, concatStringsSep(", ", items)); + logger->cout("%s%s: %s", indent, name, concatStringsSep(", ", items)); } } } diff --git a/src/nix/doctor.cc b/src/nix/doctor.cc index ea87e3d87..1aa6831d3 100644 --- a/src/nix/doctor.cc +++ b/src/nix/doctor.cc @@ -18,7 +18,7 @@ std::string formatProtocol(unsigned int proto) if (proto) { auto major = GET_PROTOCOL_MAJOR(proto) >> 8; auto minor = GET_PROTOCOL_MINOR(proto); - return (format("%1%.%2%") % major % minor).str(); + return fmt("%1%.%2%", major, minor); } return "unknown"; } @@ -33,12 +33,24 @@ bool checkFail(const std::string & msg) { return false; } +void checkInfo(const std::string & msg) { + notice(ANSI_BLUE "[INFO] " ANSI_NORMAL + msg); +} + } struct CmdDoctor : StoreCommand { bool success = true; + /** + * This command is stable before the others + */ + std::optional experimentalFeature() override + { + return std::nullopt; + } + std::string description() override { return "check your system for potential problems and print a PASS or FAIL for each check"; @@ -55,6 +67,7 @@ struct CmdDoctor : StoreCommand success &= checkProfileRoots(store); } success &= checkStoreProtocol(store->getProtocol()); + checkTrustedUser(store); if (!success) throw Exit(2); @@ -130,6 +143,14 @@ struct CmdDoctor : StoreCommand return checkPass("Client protocol matches store protocol."); } + + void checkTrustedUser(ref store) + { + std::string_view trusted = store->isTrustedClient() + ? "trusted" + : "not trusted"; + checkInfo(fmt("You are %s by store uri: %s", trusted, store->getUri())); + } }; static auto rCmdDoctor = registerCommand("doctor"); diff --git a/src/nix/edit.cc b/src/nix/edit.cc index dfe75fbdf..66629fab0 100644 --- a/src/nix/edit.cc +++ b/src/nix/edit.cc @@ -1,4 +1,4 @@ -#include "command.hh" +#include "command-installable-value.hh" #include "shared.hh" #include "eval.hh" #include "attr-path.hh" @@ -9,7 +9,7 @@ using namespace nix; -struct CmdEdit : InstallableCommand +struct CmdEdit : InstallableValueCommand { std::string description() override { @@ -25,7 +25,7 @@ struct CmdEdit : InstallableCommand Category category() override { return catSecondary; } - void run(ref store) override + void run(ref store, ref installable) override { auto state = getEvalState(); diff --git a/src/nix/eval.cc b/src/nix/eval.cc index a579213fd..d880bef0a 100644 --- a/src/nix/eval.cc +++ b/src/nix/eval.cc @@ -1,4 +1,4 @@ -#include "command.hh" +#include "command-installable-value.hh" #include "common-args.hh" #include "shared.hh" #include "store-api.hh" @@ -11,13 +11,13 @@ using namespace nix; -struct CmdEval : MixJSON, InstallableCommand, MixReadOnlyOption +struct CmdEval : MixJSON, InstallableValueCommand, MixReadOnlyOption { bool raw = false; std::optional apply; std::optional writeTo; - CmdEval() : InstallableCommand() + CmdEval() : InstallableValueCommand() { addFlag({ .longName = "raw", @@ -54,7 +54,7 @@ struct CmdEval : MixJSON, InstallableCommand, MixReadOnlyOption Category category() override { return catSecondary; } - void run(ref store) override + void run(ref store, ref installable) override { if (raw && json) throw UsageError("--raw and --json are mutually exclusive"); @@ -62,11 +62,11 @@ struct CmdEval : MixJSON, InstallableCommand, MixReadOnlyOption auto state = getEvalState(); auto [v, pos] = installable->toValue(*state); - PathSet context; + NixStringContext context; if (apply) { auto vApply = state->allocValue(); - state->eval(state->parseExprFromString(*apply, 
absPath(".")), *vApply); + state->eval(state->parseExprFromString(*apply, state->rootPath(CanonPath::fromCwd())), *vApply); auto vRes = state->allocValue(); state->callFunction(*vApply, *v, *vRes, noPos); v = vRes; @@ -112,11 +112,11 @@ struct CmdEval : MixJSON, InstallableCommand, MixReadOnlyOption else if (raw) { stopProgressBar(); - std::cout << *state->coerceToString(noPos, *v, context, "while generating the eval command output"); + writeFull(STDOUT_FILENO, *state->coerceToString(noPos, *v, context, "while generating the eval command output")); } else if (json) { - std::cout << printValueAsJSON(*state, true, *v, pos, context, false).dump() << std::endl; + logger->cout("%s", printValueAsJSON(*state, true, *v, pos, context, false)); } else { diff --git a/src/nix/eval.md b/src/nix/eval.md index 61334cde1..3b510737a 100644 --- a/src/nix/eval.md +++ b/src/nix/eval.md @@ -50,7 +50,7 @@ R""( # Description -This command evaluates the Nix expression *installable* and prints the +This command evaluates the given Nix expression and prints the result on standard output. # Output format diff --git a/src/nix/flake.cc b/src/nix/flake.cc index 053a9c9e1..3db655aeb 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -438,10 +438,10 @@ struct CmdFlakeCheck : FlakeCommand if (auto attr = v.attrs->get(state->symbols.create("path"))) { if (attr->name == state->symbols.create("path")) { - PathSet context; + NixStringContext context; auto path = state->coerceToPath(attr->pos, *attr->value, context, ""); - if (!store->isInStore(path)) - throw Error("template '%s' has a bad 'path' attribute"); + if (!path.pathExists()) + throw Error("template '%s' refers to a non-existent path '%s'", attrPath, path); // TODO: recursively check the flake in 'path'. } } else @@ -952,7 +952,7 @@ struct CmdFlakeArchive : FlakeCommand, MixJSON, MixDryRun {"path", store->printStorePath(flake.flake.sourceInfo->storePath)}, {"inputs", traverse(*flake.lockFile.root)}, }; - std::cout << jsonRoot.dump() << std::endl; + logger->cout("%s", jsonRoot); } else { traverse(*flake.lockFile.root); } @@ -1026,36 +1026,43 @@ struct CmdFlakeShow : FlakeCommand, MixJSON auto visitor2 = visitor.getAttr(attrName); - if ((attrPathS[0] == "apps" - || attrPathS[0] == "checks" - || attrPathS[0] == "devShells" - || attrPathS[0] == "legacyPackages" - || attrPathS[0] == "packages") - && (attrPathS.size() == 1 || attrPathS.size() == 2)) { - for (const auto &subAttr : visitor2->getAttrs()) { - if (hasContent(*visitor2, attrPath2, subAttr)) { - return true; + try { + if ((attrPathS[0] == "apps" + || attrPathS[0] == "checks" + || attrPathS[0] == "devShells" + || attrPathS[0] == "legacyPackages" + || attrPathS[0] == "packages") + && (attrPathS.size() == 1 || attrPathS.size() == 2)) { + for (const auto &subAttr : visitor2->getAttrs()) { + if (hasContent(*visitor2, attrPath2, subAttr)) { + return true; + } } + return false; } - return false; - } - if ((attrPathS.size() == 1) - && (attrPathS[0] == "formatter" - || attrPathS[0] == "nixosConfigurations" - || attrPathS[0] == "nixosModules" - || attrPathS[0] == "overlays" - )) { - for (const auto &subAttr : visitor2->getAttrs()) { - if (hasContent(*visitor2, attrPath2, subAttr)) { - return true; + if ((attrPathS.size() == 1) + && (attrPathS[0] == "formatter" + || attrPathS[0] == "nixosConfigurations" + || attrPathS[0] == "nixosModules" + || attrPathS[0] == "overlays" + )) { + for (const auto &subAttr : visitor2->getAttrs()) { + if (hasContent(*visitor2, attrPath2, subAttr)) { + return true; + } } + return false; } - 
return false; - } - // If we don't recognize it, it's probably content - return true; + // If we don't recognize it, it's probably content + return true; + } catch (EvalError & e) { + // Some attrs may contain errors, eg. legacyPackages of + // nixpkgs. We still want to recurse into it, instead of + // skipping it at all. + return true; + } }; std::functionsecond->prepare(); + experimentalFeatureSettings.require(Xp::Flakes); command->second->run(); } }; diff --git a/src/nix/flake.md b/src/nix/flake.md index 810e9ebea..456fd0ea1 100644 --- a/src/nix/flake.md +++ b/src/nix/flake.md @@ -54,7 +54,7 @@ output attribute). They are also allowed in the `inputs` attribute of a flake, e.g. ```nix -inputs.nixpkgs.url = github:NixOS/nixpkgs; +inputs.nixpkgs.url = "github:NixOS/nixpkgs"; ``` is equivalent to @@ -221,11 +221,46 @@ Currently the `type` attribute can be one of the following: commit hash (`rev`). Note that unlike Git, GitHub allows fetching by commit hash without specifying a branch or tag. + You can also specify `host` as a parameter, to point to a custom GitHub + Enterprise server. + Some examples: * `github:edolstra/dwarffs` * `github:edolstra/dwarffs/unstable` * `github:edolstra/dwarffs/d3f2baba8f425779026c6ec04021b2e927f61e31` + * `github:internal/project?host=company-github.example.org` + +* `gitlab`: Similar to `github`, is a more efficient way to fetch + GitLab repositories. The following attributes are required: + + * `owner`: The owner of the repository. + + * `repo`: The name of the repository. + + Like `github`, these are downloaded as tarball archives. + + The URL syntax for `gitlab` flakes is: + + `gitlab:/(/)?(\?)?` + + `` works the same as `github`. Either a branch or tag name + (`ref`), or a commit hash (`rev`) can be specified. + + Since GitLab allows for self-hosting, you can specify `host` as + a parameter, to point to any instances other than `gitlab.com`. + + Some examples: + + * `gitlab:veloren/veloren` + * `gitlab:veloren/veloren/master` + * `gitlab:veloren/veloren/80a4d7f13492d916e47d6195be23acae8001985a` + * `gitlab:openldap/openldap?host=git.openldap.org` + + When accessing a project in a (nested) subgroup, make sure to URL-encode any + slashes, i.e. replace `/` with `%2F`: + + * `gitlab:veloren%2Fdev/rfcs` * `sourcehut`: Similar to `github`, is a more efficient way to fetch SourceHut repositories. The following attributes are required: @@ -275,14 +310,14 @@ Currently the `type` attribute can be one of the following: # Flake format As an example, here is a simple `flake.nix` that depends on the -Nixpkgs flake and provides a single package (i.e. an installable -derivation): +Nixpkgs flake and provides a single package (i.e. an +[installable](./nix.md#installables) derivation): ```nix { description = "A flake for building Hello World"; - inputs.nixpkgs.url = github:NixOS/nixpkgs/nixos-20.03; + inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-20.03"; outputs = { self, nixpkgs }: { @@ -317,6 +352,8 @@ The following attributes are supported in `flake.nix`: also contains some metadata about the inputs. These are: * `outPath`: The path in the Nix store of the flake's source tree. + This way, the attribute set can be passed to `import` as if it was a path, + as in the example above (`import nixpkgs`). * `rev`: The commit hash of the flake's repository, if applicable. @@ -344,10 +381,12 @@ The following attributes are supported in `flake.nix`: * `nixConfig`: a set of `nix.conf` options to be set when evaluating any part of a flake. 
In the interests of security, only a small set of - whitelisted options (currently `bash-prompt`, `bash-prompt-prefix`, - `bash-prompt-suffix`, and `flake-registry`) are allowed to be set without - confirmation so long as `accept-flake-config` is not set in the global - configuration. + set of options is allowed to be set without confirmation so long as [`accept-flake-config`](@docroot@/command-ref/conf-file.md#conf-accept-flake-config) is not enabled in the global configuration: + - [`bash-prompt`](@docroot@/command-ref/conf-file.md#conf-bash-prompt) + - [`bash-prompt-prefix`](@docroot@/command-ref/conf-file.md#conf-bash-prompt-prefix) + - [`bash-prompt-suffix`](@docroot@/command-ref/conf-file.md#conf-bash-prompt-suffix) + - [`flake-registry`](@docroot@/command-ref/conf-file.md#conf-flake-registry) + - [`commit-lockfile-summary`](@docroot@/command-ref/conf-file.md#conf-commit-lockfile-summary) ## Flake inputs @@ -374,7 +413,7 @@ inputs.nixpkgs = { Alternatively, you can use the URL-like syntax: ```nix -inputs.import-cargo.url = github:edolstra/import-cargo; +inputs.import-cargo.url = "github:edolstra/import-cargo"; inputs.nixpkgs.url = "nixpkgs"; ``` diff --git a/src/nix/fmt.cc b/src/nix/fmt.cc index 6f6a4a632..c85eacded 100644 --- a/src/nix/fmt.cc +++ b/src/nix/fmt.cc @@ -1,4 +1,5 @@ #include "command.hh" +#include "installable-value.hh" #include "run.hh" using namespace nix; @@ -31,8 +32,9 @@ struct CmdFmt : SourceExprCommand { auto evalState = getEvalState(); auto evalStore = getEvalStore(); - auto installable = parseInstallable(store, "."); - auto app = installable->toApp(*evalState).resolve(evalStore, store); + auto installable_ = parseInstallable(store, "."); + auto & installable = InstallableValue::require(*installable_); + auto app = installable.toApp(*evalState).resolve(evalStore, store); Strings programArgs{app.program}; diff --git a/src/nix/hash.cc b/src/nix/hash.cc index 60d9593a7..9feca9345 100644 --- a/src/nix/hash.cc +++ b/src/nix/hash.cc @@ -151,7 +151,6 @@ struct CmdHash : NixMultiCommand { if (!command) throw UsageError("'nix hash' requires a sub-command."); - command->second->prepare(); command->second->run(); } }; @@ -161,11 +160,11 @@ static auto rCmdHash = registerCommand("hash"); /* Legacy nix-hash command. 
*/ static int compatNixHash(int argc, char * * argv) { - HashType ht = htMD5; + std::optional ht; bool flat = false; - bool base32 = false; + Base base = Base16; bool truncate = false; - enum { opHash, opTo32, opTo16 } op = opHash; + enum { opHash, opTo } op = opHash; std::vector ss; parseCmdLine(argc, argv, [&](Strings::iterator & arg, const Strings::iterator & end) { @@ -174,14 +173,31 @@ static int compatNixHash(int argc, char * * argv) else if (*arg == "--version") printVersion("nix-hash"); else if (*arg == "--flat") flat = true; - else if (*arg == "--base32") base32 = true; + else if (*arg == "--base16") base = Base16; + else if (*arg == "--base32") base = Base32; + else if (*arg == "--base64") base = Base64; + else if (*arg == "--sri") base = SRI; else if (*arg == "--truncate") truncate = true; else if (*arg == "--type") { std::string s = getArg(*arg, arg, end); ht = parseHashType(s); } - else if (*arg == "--to-base16") op = opTo16; - else if (*arg == "--to-base32") op = opTo32; + else if (*arg == "--to-base16") { + op = opTo; + base = Base16; + } + else if (*arg == "--to-base32") { + op = opTo; + base = Base32; + } + else if (*arg == "--to-base64") { + op = opTo; + base = Base64; + } + else if (*arg == "--to-sri") { + op = opTo; + base = SRI; + } else if (*arg != "" && arg->at(0) == '-') return false; else @@ -191,17 +207,18 @@ static int compatNixHash(int argc, char * * argv) if (op == opHash) { CmdHashBase cmd(flat ? FileIngestionMethod::Flat : FileIngestionMethod::Recursive); - cmd.ht = ht; - cmd.base = base32 ? Base32 : Base16; + if (!ht.has_value()) ht = htMD5; + cmd.ht = ht.value(); + cmd.base = base; cmd.truncate = truncate; cmd.paths = ss; cmd.run(); } else { - CmdToBase cmd(op == opTo32 ? Base32 : Base16); + CmdToBase cmd(base); cmd.args = ss; - cmd.ht = ht; + if (ht.has_value()) cmd.ht = ht; cmd.run(); } diff --git a/src/nix/help-stores.md b/src/nix/help-stores.md new file mode 100644 index 000000000..47ba9b94d --- /dev/null +++ b/src/nix/help-stores.md @@ -0,0 +1,46 @@ +R"( + +Nix supports different types of stores. These are described below. + +## Store URL format + +Stores are specified using a URL-like syntax. For example, the command + +```console +# nix path-info --store https://cache.nixos.org/ --json \ + /nix/store/a7gvj343m05j2s32xcnwr35v31ynlypr-coreutils-9.1 +``` + +fetches information about a store path in the HTTP binary cache +located at https://cache.nixos.org/, which is a type of store. + +Store URLs can specify **store settings** using URL query strings, +i.e. by appending `?name1=value1&name2=value2&...` to the URL. For +instance, + +``` +--store ssh://machine.example.org?ssh-key=/path/to/my/key +``` + +tells Nix to access the store on a remote machine via the SSH +protocol, using `/path/to/my/key` as the SSH private key. The +supported settings for each store type are documented below. + +The special store URL `auto` causes Nix to automatically select a +store as follows: + +* Use the [local store](#local-store) `/nix/store` if `/nix/var/nix` + is writable by the current user. + +* Otherwise, if `/nix/var/nix/daemon-socket/socket` exists, [connect + to the Nix daemon listening on that socket](#local-daemon-store). + +* Otherwise, on Linux only, use the [local chroot store](#local-store) + `~/.local/share/nix/root`, which will be created automatically if it + does not exist. + +* Otherwise, use the [local store](#local-store) `/nix/store`. 
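Each of the fallbacks above can also be requested explicitly. The following invocations are illustrative equivalents of what `auto` may select:

```console
# The local store under /nix/store
$ nix store ping --store local

# The daemon listening on /nix/var/nix/daemon-socket/socket
$ nix store ping --store daemon

# A local store rooted in the user's home directory
$ nix store ping --store ~/.local/share/nix/root
```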
+ +@stores@ + +)" diff --git a/src/nix/home.cc b/src/nix/home.cc index ab97d255e..c3b847b1a 100644 --- a/src/nix/home.cc +++ b/src/nix/home.cc @@ -47,9 +47,9 @@ struct HomeActivationCommand : ActivationCommand : ActivationCommand(activationType, selfCommandName) { } - void run(nix::ref store) override + void runActivatable(nix::ref store, ref installable) override { - auto out = buildActivatable(store); + auto out = buildActivatable(store, installable); execute(store->printStorePath(out) + "/activate", Strings{}); } }; @@ -114,8 +114,6 @@ struct CmdHome : NixMultiCommand { if (!command) throw UsageError("'nix home' requires a sub-command."); - settings.requireExperimentalFeature(Xp::NixCommand); - command->second->prepare(); command->second->run(); } }; diff --git a/src/nix/log.cc b/src/nix/log.cc index a0598ca13..aaf829764 100644 --- a/src/nix/log.cc +++ b/src/nix/log.cc @@ -23,7 +23,7 @@ struct CmdLog : InstallableCommand Category category() override { return catSecondary; } - void run(ref store) override + void run(ref store, ref installable) override { settings.readOnlyMode = true; @@ -53,7 +53,7 @@ struct CmdLog : InstallableCommand if (!log) continue; stopProgressBar(); printInfo("got build log for '%s' from '%s'", installable->what(), logSub.getUri()); - std::cout << *log; + writeFull(STDOUT_FILENO, *log); return; } diff --git a/src/nix/log.md b/src/nix/log.md index 1c76226a3..01e9801df 100644 --- a/src/nix/log.md +++ b/src/nix/log.md @@ -22,8 +22,7 @@ R""( # Description -This command prints the log of a previous build of the derivation -*installable* on standard output. +This command prints the log of a previous build of the [*installable*](./nix.md#installables) on standard output. Nix looks for build logs in two places: diff --git a/src/nix/ls.cc b/src/nix/ls.cc index e964b01b3..c990a303c 100644 --- a/src/nix/ls.cc +++ b/src/nix/ls.cc @@ -93,7 +93,7 @@ struct MixLs : virtual Args, MixJSON if (json) { if (showDirectory) throw UsageError("'--directory' is useless with '--json'"); - std::cout << listNar(accessor, path, recursive); + logger->cout("%s", listNar(accessor, path, recursive)); } else listText(accessor); } diff --git a/src/nix/main.cc b/src/nix/main.cc index d3d2f5b16..ce0bed2a3 100644 --- a/src/nix/main.cc +++ b/src/nix/main.cc @@ -54,17 +54,17 @@ static bool haveInternet() std::string programPath; -struct HelpRequested { }; - struct NixArgs : virtual MultiCommand, virtual MixCommonArgs { bool useNet = true; bool refresh = false; + bool helpRequested = false; bool showVersion = false; NixArgs() : MultiCommand(RegisterCommand::getCommandsFor({})), MixCommonArgs("nix") { categories.clear(); + categories[catHelp] = "Help commands"; categories[Command::catDefault] = "Main commands"; categories[catSecondary] = "Infrequently used commands"; categories[catUtility] = "Utility/scripting commands"; @@ -74,7 +74,7 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs .longName = "help", .description = "Show usage information.", .category = miscCategory, - .handler = {[&]() { throw HelpRequested(); }}, + .handler = {[this]() { this->helpRequested = true; }}, }); addFlag({ @@ -83,6 +83,7 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs .description = "Print full build logs on standard error.", .category = loggingCategory, .handler = {[&]() { logger->setPrintBuildLogs(true); }}, + .experimentalFeature = Xp::NixCommand, }); addFlag({ @@ -98,6 +99,7 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs .description = "Disable substituters and consider 
all previously downloaded files up-to-date.", .category = miscCategory, .handler = {[&]() { useNet = false; }}, + .experimentalFeature = Xp::NixCommand, }); addFlag({ @@ -105,6 +107,7 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs .description = "Consider all previously downloaded files out-of-date.", .category = miscCategory, .handler = {[&]() { refresh = true; }}, + .experimentalFeature = Xp::NixCommand, }); } @@ -124,6 +127,7 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs {"optimise-store", {"store", "optimise"}}, {"ping-store", {"store", "ping"}}, {"sign-paths", {"store", "sign"}}, + {"show-derivation", {"derivation", "show"}}, {"to-base16", {"hash", "to-base16"}}, {"to-base32", {"hash", "to-base32"}}, {"to-base64", {"hash", "to-base64"}}, @@ -164,11 +168,29 @@ struct NixArgs : virtual MultiCommand, virtual MixCommonArgs { commands = RegisterCommand::getCommandsFor({}); } + + std::string dumpCli() + { + auto res = nlohmann::json::object(); + + res["args"] = toJSON(); + + auto stores = nlohmann::json::object(); + for (auto & implem : *Implementations::registered) { + auto storeConfig = implem.getConfig(); + auto storeName = storeConfig->name(); + stores[storeName]["doc"] = storeConfig->doc(); + stores[storeName]["settings"] = storeConfig->toJSON(); + } + res["stores"] = std::move(stores); + + return res.dump(); + } }; /* Render the help for the specified subcommand to stdout using lowdown. */ -static void showHelp(std::vector subcommand, MultiCommand & toplevel) +static void showHelp(std::vector subcommand, NixArgs & toplevel) { auto mdName = subcommand.empty() ? "nix" : fmt("nix3-%s", concatStringsSep("-", subcommand)); @@ -179,21 +201,21 @@ static void showHelp(std::vector subcommand, MultiCommand & topleve auto vGenerateManpage = state.allocValue(); state.eval(state.parseExprFromString( #include "generate-manpage.nix.gen.hh" - , "/"), *vGenerateManpage); + , CanonPath::root), *vGenerateManpage); auto vUtils = state.allocValue(); state.cacheFile( - "/utils.nix", "/utils.nix", + CanonPath("/utils.nix"), CanonPath("/utils.nix"), state.parseExprFromString( #include "utils.nix.gen.hh" - , "/"), + , CanonPath::root), *vUtils); - auto attrs = state.buildBindings(16); - attrs.alloc("toplevel").mkString(toplevel.toJSON().dump()); + auto vDump = state.allocValue(); + vDump->mkString(toplevel.dumpCli()); auto vRes = state.allocValue(); - state.callFunction(*vGenerateManpage, state.allocValue()->mkAttrs(attrs), *vRes, noPos); + state.callFunction(*vGenerateManpage, *vDump, *vRes, noPos); auto attr = vRes->attrs->get(state.symbols.create(mdName + ".md")); if (!attr) @@ -205,6 +227,14 @@ static void showHelp(std::vector subcommand, MultiCommand & topleve std::cout << renderMarkdownToTerminal(markdown) << "\n"; } +static NixArgs & getNixArgs(Command & cmd) +{ + assert(cmd.parent); + MultiCommand * toplevel = cmd.parent; + while (toplevel->parent) toplevel = toplevel->parent; + return dynamic_cast(*toplevel); +} + struct CmdHelp : Command { std::vector subcommand; @@ -229,17 +259,43 @@ struct CmdHelp : Command ; } + Category category() override { return catHelp; } + void run() override { assert(parent); MultiCommand * toplevel = parent; while (toplevel->parent) toplevel = toplevel->parent; - showHelp(subcommand, *toplevel); + showHelp(subcommand, getNixArgs(*this)); } }; static auto rCmdHelp = registerCommand("help"); +struct CmdHelpStores : Command +{ + std::string description() override + { + return "show help about store types and their settings"; + } + + 
std::string doc() override + { + return + #include "help-stores.md" + ; + } + + Category category() override { return catHelp; } + + void run() override + { + showHelp({"help-stores"}, getNixArgs(*this)); + } +}; + +static auto rCmdHelpStores = registerCommand("help-stores"); + void mainWrapped(int argc, char * * argv) { savedArgv = argv; @@ -291,13 +347,16 @@ void mainWrapped(int argc, char * * argv) NixArgs args; - if (argc == 2 && std::string(argv[1]) == "__dump-args") { - std::cout << args.toJSON().dump() << "\n"; + if (argc == 2 && std::string(argv[1]) == "__dump-cli") { + logger->cout(args.dumpCli()); return; } if (argc == 2 && std::string(argv[1]) == "__dump-builtins") { - settings.experimentalFeatures = {Xp::Flakes, Xp::FetchClosure}; + experimentalFeatureSettings.experimentalFeatures = { + Xp::Flakes, + Xp::FetchClosure, + }; evalSettings.pureEval = false; EvalState state({}, openStore("dummy://")); auto res = nlohmann::json::object(); @@ -312,7 +371,12 @@ void mainWrapped(int argc, char * * argv) b["doc"] = trim(stripIndentation(primOp->doc)); res[state.symbols[builtin.name]] = std::move(b); } - std::cout << res.dump() << "\n"; + logger->cout("%s", res); + return; + } + + if (argc == 2 && std::string(argv[1]) == "__dump-xp-features") { + logger->cout(documentExperimentalFeatures().dump()); return; } @@ -321,20 +385,24 @@ void mainWrapped(int argc, char * * argv) if (completions) { switch (completionType) { case ctNormal: - std::cout << "normal\n"; break; + logger->cout("normal"); break; case ctFilenames: - std::cout << "filenames\n"; break; + logger->cout("filenames"); break; case ctAttrs: - std::cout << "attrs\n"; break; + logger->cout("attrs"); break; } for (auto & s : *completions) - std::cout << s.completion << "\t" << trim(s.description) << "\n"; + logger->cout(s.completion + "\t" + trim(s.description)); } }); try { args.parseCmdline(argvToStrings(argc, argv)); - } catch (HelpRequested &) { + } catch (UsageError &) { + if (!args.helpRequested && !completions) throw; + } + + if (args.helpRequested) { std::vector subcommand; MultiCommand * command = &args; while (command) { @@ -346,8 +414,6 @@ void mainWrapped(int argc, char * * argv) } showHelp(subcommand, args); return; - } catch (UsageError &) { - if (!completions) throw; } if (completions) { @@ -363,10 +429,8 @@ void mainWrapped(int argc, char * * argv) if (!args.command) throw UsageError("no subcommand specified"); - if (args.command->first != "repl" - && args.command->first != "doctor" - && args.command->first != "upgrade-nix") - settings.requireExperimentalFeature(Xp::NixCommand); + experimentalFeatureSettings.require( + args.command->second->experimentalFeature()); if (args.useNet && !haveInternet()) { warn("you don't have Internet access; disabling some network-dependent features"); @@ -394,7 +458,6 @@ void mainWrapped(int argc, char * * argv) if (args.command->second->forceImpureByDefault() && !evalSettings.pureEval.overridden) { evalSettings.pureEval = false; } - args.command->second->prepare(); args.command->second->run(); } diff --git a/src/nix/make-content-addressed.cc b/src/nix/make-content-addressed.cc index d86b90fc7..d9c988a9f 100644 --- a/src/nix/make-content-addressed.cc +++ b/src/nix/make-content-addressed.cc @@ -28,7 +28,6 @@ struct CmdMakeContentAddressed : virtual CopyCommand, virtual StorePathsCommand, ; } - using StorePathsCommand::run; void run(ref srcStore, StorePaths && storePaths) override { auto dstStore = dstUri.empty() ? 
openStore() : openStore(dstUri); @@ -45,7 +44,7 @@ struct CmdMakeContentAddressed : virtual CopyCommand, virtual StorePathsCommand, } auto json = json::object(); json["rewrites"] = jsonRewrites; - std::cout << json.dump(); + logger->cout("%s", json); } else { for (auto & path : storePaths) { auto i = remappings.find(path); diff --git a/src/nix/make-content-addressed.md b/src/nix/make-content-addressed.md index 32eecc880..b1f7da525 100644 --- a/src/nix/make-content-addressed.md +++ b/src/nix/make-content-addressed.md @@ -35,7 +35,9 @@ R""( # Description This command converts the closure of the store paths specified by -*installables* to content-addressed form. Nix store paths are usually +[*installables*](./nix.md#installables) to content-addressed form. + +Nix store paths are usually *input-addressed*, meaning that the hash part of the store path is computed from the contents of the derivation (i.e., the build-time dependency graph). Input-addressed paths need to be signed by a diff --git a/src/nix/nar.cc b/src/nix/nar.cc index dbb043d9b..9815410cf 100644 --- a/src/nix/nar.cc +++ b/src/nix/nar.cc @@ -25,7 +25,6 @@ struct CmdNar : NixMultiCommand { if (!command) throw UsageError("'nix nar' requires a sub-command."); - command->second->prepare(); command->second->run(); } }; diff --git a/src/nix/nix.md b/src/nix/nix.md index db60c59ff..1ef6c7fcd 100644 --- a/src/nix/nix.md +++ b/src/nix/nix.md @@ -48,102 +48,124 @@ manual](https://nixos.org/manual/nix/stable/). # Installables -Many `nix` subcommands operate on one or more *installables*. These are -command line arguments that represent something that can be built in -the Nix store. Here are the recognised types of installables: +> **Warning** \ +> Installables are part of the unstable +> [`nix-command` experimental feature](@docroot@/contributing/experimental-features.md#xp-feature-nix-command), +> and subject to change without notice. -* **Flake output attributes**: `nixpkgs#hello` +Many `nix` subcommands operate on one or more *installables*. +These are command line arguments that represent something that can be realised in the Nix store. - These have the form *flakeref*[`#`*attrpath*], where *flakeref* is a - flake reference and *attrpath* is an optional attribute path. For - more information on flakes, see [the `nix flake` manual - page](./nix3-flake.md). Flake references are most commonly a flake - identifier in the flake registry (e.g. `nixpkgs`), or a raw path - (e.g. `/path/to/my-flake` or `.` or `../foo`), or a full URL - (e.g. `github:nixos/nixpkgs` or `path:.`) +The following types of installable are supported by most commands: - When the flake reference is a raw path (a path without any URL - scheme), it is interpreted as a `path:` or `git+file:` url in the following - way: - - - If the path is within a Git repository, then the url will be of the form - `git+file://[GIT_REPO_ROOT]?dir=[RELATIVE_FLAKE_DIR_PATH]` - where `GIT_REPO_ROOT` is the path to the root of the git repository, - and `RELATIVE_FLAKE_DIR_PATH` is the path (relative to the directory - root) of the closest parent of the given path that contains a `flake.nix` within - the git repository. - If no such directory exists, then Nix will error-out. - - Note that the search will only include files indexed by git. In particular, files - which are matched by `.gitignore` or have never been `git add`-ed will not be - available in the flake. 
If this is undesirable, specify `path:` explicitly; - - For example, if `/foo/bar` is a git repository with the following structure: - ``` - . - └── baz - ├── blah - │  └── file.txt - └── flake.nix - ``` +- [Flake output attribute](#flake-output-attribute) (experimental) +- [Store path](#store-path) +- [Nix file](#nix-file), optionally qualified by an attribute path +- [Nix expression](#nix-expression), optionally qualified by an attribute path + +For most commands, if no installable is specified, `.` as assumed. +That is, Nix will operate on the default flake output attribute of the flake in the current directory. + +### Flake output attribute + +> **Warning** \ +> Flake output attribute installables depend on both the +> [`flakes`](@docroot@/contributing/experimental-features.md#xp-feature-flakes) +> and +> [`nix-command`](@docroot@/contributing/experimental-features.md#xp-feature-nix-command) +> experimental features, and subject to change without notice. + +Example: `nixpkgs#hello` + +These have the form *flakeref*[`#`*attrpath*], where *flakeref* is a +[flake reference](./nix3-flake.md#flake-references) and *attrpath* is an optional attribute path. For +more information on flakes, see [the `nix flake` manual +page](./nix3-flake.md). Flake references are most commonly a flake +identifier in the flake registry (e.g. `nixpkgs`), or a raw path +(e.g. `/path/to/my-flake` or `.` or `../foo`), or a full URL +(e.g. `github:nixos/nixpkgs` or `path:.`) + +When the flake reference is a raw path (a path without any URL +scheme), it is interpreted as a `path:` or `git+file:` url in the following +way: + +- If the path is within a Git repository, then the url will be of the form + `git+file://[GIT_REPO_ROOT]?dir=[RELATIVE_FLAKE_DIR_PATH]` + where `GIT_REPO_ROOT` is the path to the root of the git repository, + and `RELATIVE_FLAKE_DIR_PATH` is the path (relative to the directory + root) of the closest parent of the given path that contains a `flake.nix` within + the git repository. + If no such directory exists, then Nix will error-out. + + Note that the search will only include files indexed by git. In particular, files + which are matched by `.gitignore` or have never been `git add`-ed will not be + available in the flake. If this is undesirable, specify `path:` explicitly; + + For example, if `/foo/bar` is a git repository with the following structure: + ``` + . + └── baz + ├── blah + │  └── file.txt + └── flake.nix + ``` Then `/foo/bar/baz/blah` will resolve to `git+file:///foo/bar?dir=baz` - - If the supplied path is not a git repository, then the url will have the form - `path:FLAKE_DIR_PATH` where `FLAKE_DIR_PATH` is the closest parent - of the supplied path that contains a `flake.nix` file (within the same file-system). - If no such directory exists, then Nix will error-out. - - For example, if `/foo/bar/flake.nix` exists, then `/foo/bar/baz/` will resolve to - `path:/foo/bar` +- If the supplied path is not a git repository, then the url will have the form + `path:FLAKE_DIR_PATH` where `FLAKE_DIR_PATH` is the closest parent + of the supplied path that contains a `flake.nix` file (within the same file-system). + If no such directory exists, then Nix will error-out. - If *attrpath* is omitted, Nix tries some default values; for most - subcommands, the default is `packages.`*system*`.default` - (e.g. `packages.x86_64-linux.default`), but some subcommands have - other defaults. 
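For example, inside a flake checkout on `x86_64-linux` the following two commands are equivalent, and the prefix resolution described next gives `nixpkgs#hello` the same treatment:

```console
$ nix build .
$ nix build .#packages.x86_64-linux.default
```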
If *attrpath* *is* specified, *attrpath* is - interpreted as relative to one or more prefixes; for most - subcommands, these are `packages.`*system*, - `legacyPackages.*system*` and the empty prefix. Thus, on - `x86_64-linux` `nix build nixpkgs#hello` will try to build the - attributes `packages.x86_64-linux.hello`, - `legacyPackages.x86_64-linux.hello` and `hello`. + For example, if `/foo/bar/flake.nix` exists, then `/foo/bar/baz/` will resolve to + `path:/foo/bar` -* **Store paths**: `/nix/store/v5sv61sszx301i0x6xysaqzla09nksnd-hello-2.10` +If *attrpath* is omitted, Nix tries some default values; for most +subcommands, the default is `packages.`*system*`.default` +(e.g. `packages.x86_64-linux.default`), but some subcommands have +other defaults. If *attrpath* *is* specified, *attrpath* is +interpreted as relative to one or more prefixes; for most +subcommands, these are `packages.`*system*, +`legacyPackages.*system*` and the empty prefix. Thus, on +`x86_64-linux` `nix build nixpkgs#hello` will try to build the +attributes `packages.x86_64-linux.hello`, +`legacyPackages.x86_64-linux.hello` and `hello`. - These are paths inside the Nix store, or symlinks that resolve to a - path in the Nix store. +### Store path -* **Store derivations**: `/nix/store/p7gp6lxdg32h4ka1q398wd9r2zkbbz2v-hello-2.10.drv` +Example: `/nix/store/v5sv61sszx301i0x6xysaqzla09nksnd-hello-2.10` - By default, if you pass a [store derivation] path to a `nix` subcommand, the command will operate on the [output path]s of the derivation. +These are paths inside the Nix store, or symlinks that resolve to a path in the Nix store. - [output path]: ../../glossary.md#gloss-output-path +A [store derivation] is also addressed by store path. - For example, `nix path-info` prints information about the output paths: +Example: `/nix/store/p7gp6lxdg32h4ka1q398wd9r2zkbbz2v-hello-2.10.drv` - ```console - # nix path-info --json /nix/store/p7gp6lxdg32h4ka1q398wd9r2zkbbz2v-hello-2.10.drv - [{"path":"/nix/store/v5sv61sszx301i0x6xysaqzla09nksnd-hello-2.10",…}] - ``` +If you want to refer to an output path of that store derivation, add the output name preceded by a caret (`^`). - If you want to operate on the store derivation itself, pass the - `--derivation` flag. +Example: `/nix/store/p7gp6lxdg32h4ka1q398wd9r2zkbbz2v-hello-2.10.drv^out` -* **Nix attributes**: `--file /path/to/nixpkgs hello` +All outputs can be referred to at once with the special syntax `^*`. - When the `-f` / `--file` *path* option is given, installables are - interpreted as attribute paths referencing a value returned by - evaluating the Nix file *path*. +Example: `/nix/store/p7gp6lxdg32h4ka1q398wd9r2zkbbz2v-hello-2.10.drv^*` -* **Nix expressions**: `--expr '(import {}).hello.overrideDerivation (prev: { name = "my-hello"; })'`. +### Nix file - When the `--expr` option is given, all installables are interpreted - as Nix expressions. You may need to specify `--impure` if the - expression references impure inputs (such as ``). +Example: `--file /path/to/nixpkgs hello` -For most commands, if no installable is specified, the default is `.`, -i.e. Nix will operate on the default flake output attribute of the -flake in the current directory. +When the option `-f` / `--file` *path* \[*attrpath*...\] is given, installables are interpreted as the value of the expression in the Nix file at *path*. +If attribute paths are provided, commands will operate on the corresponding values accessible at these paths. 
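For instance, given a file `release.nix` (the name is illustrative) whose top-level attribute set contains derivations:

```console
# Build the `hello` and `figlet` attributes of the expression in ./release.nix
$ nix build --file ./release.nix hello figlet
```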
+The Nix expression in that file, or any selected attribute, must evaluate to a derivation. + +### Nix expression + +Example: `--expr 'import {}' hello` + +When the option `--expr` *expression* \[*attrpath*...\] is given, installables are interpreted as the value of the of the Nix expression. +If attribute paths are provided, commands will operate on the corresponding values accessible at these paths. +The Nix expression, or any selected attribute, must evaluate to a derivation. + +You may need to specify `--impure` if the expression references impure inputs (such as ``). ## Derivation output selection @@ -210,8 +232,7 @@ operate are determined as follows: # Nix stores -Most `nix` subcommands operate on a *Nix store*. - -TODO: list store types, options +Most `nix` subcommands operate on a *Nix store*. These are documented +in [`nix help-stores`](./nix3-help-stores.md). )"" diff --git a/src/nix/path-info.md b/src/nix/path-info.md index b30898ac0..6ad23a02e 100644 --- a/src/nix/path-info.md +++ b/src/nix/path-info.md @@ -80,7 +80,7 @@ R""( # Description This command shows information about the store paths produced by -*installables*, or about all paths in the store if you pass `--all`. +[*installables*](./nix.md#installables), or about all paths in the store if you pass `--all`. By default, this command only prints the store paths. You can get additional information by passing flags such as `--closure-size`, diff --git a/src/nix/ping-store.cc b/src/nix/ping-store.cc index 5c44510ab..ec450e8e0 100644 --- a/src/nix/ping-store.cc +++ b/src/nix/ping-store.cc @@ -28,15 +28,20 @@ struct CmdPingStore : StoreCommand, MixJSON store->connect(); if (auto version = store->getVersion()) notice("Version: %s", *version); + if (auto trusted = store->isTrustedClient()) + notice("Trusted: %s", *trusted); } else { nlohmann::json res; Finally printRes([&]() { logger->cout("%s", res); }); + res["url"] = store->getUri(); store->connect(); if (auto version = store->getVersion()) res["version"] = *version; + if (auto trusted = store->isTrustedClient()) + res["trusted"] = *trusted; } } }; diff --git a/src/nix/prefetch.cc b/src/nix/prefetch.cc index fc3823406..3b2e225f6 100644 --- a/src/nix/prefetch.cc +++ b/src/nix/prefetch.cc @@ -27,7 +27,10 @@ std::string resolveMirrorUrl(EvalState & state, const std::string & url) Value vMirrors; // FIXME: use nixpkgs flake - state.eval(state.parseExprFromString("import ", "."), vMirrors); + state.eval(state.parseExprFromString( + "import ", + state.rootPath(CanonPath::root)), + vMirrors); state.forceAttrs(vMirrors, noPos, "while evaluating the set of all mirrors"); auto mirrorList = vMirrors.attrs->find(state.symbols.create(mirrorName)); @@ -67,7 +70,13 @@ std::tuple prefetchFile( the store. 
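The `nix store ping` changes above also surface trust information in JSON mode. An illustrative session against the daemon, with values elided since they depend on the store and daemon version:

```console
$ nix store ping --store daemon --json
{"trusted":…,"url":"daemon","version":"…"}
```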
*/ if (expectedHash) { hashType = expectedHash->type; - storePath = store->makeFixedOutputPath(ingestionMethod, *expectedHash, *name); + storePath = store->makeFixedOutputPath(*name, FixedOutputInfo { + .hash = { + .method = ingestionMethod, + .hash = *expectedHash, + }, + .references = {}, + }); if (store->isValidPath(*storePath)) hash = expectedHash; else @@ -118,7 +127,7 @@ std::tuple prefetchFile( auto info = store->addToStoreSlow(*name, tmpFile, ingestionMethod, hashType, expectedHash); storePath = info.path; assert(info.ca); - hash = getContentAddressHash(*info.ca); + hash = info.ca->getHash(); } return {storePath.value(), hash.value()}; @@ -192,9 +201,11 @@ static int main_nix_prefetch_url(int argc, char * * argv) throw UsageError("you must specify a URL"); url = args[0]; } else { - Path path = resolveExprPath(lookupFileArg(*state, args.empty() ? "." : args[0])); Value vRoot; - state->evalFile(path, vRoot); + state->evalFile( + resolveExprPath( + lookupFileArg(*state, args.empty() ? "." : args[0])), + vRoot); Value & v(*findAlongAttrPath(*state, attrPath, autoArgs, vRoot).first); state->forceAttrs(v, noPos, "while evaluating the source attribute to prefetch"); @@ -234,9 +245,9 @@ static int main_nix_prefetch_url(int argc, char * * argv) if (!printPath) printInfo("path is '%s'", store->printStorePath(storePath)); - std::cout << printHash16or32(hash) << std::endl; + logger->cout(printHash16or32(hash)); if (printPath) - std::cout << store->printStorePath(storePath) << std::endl; + logger->cout(store->printStorePath(storePath)); return 0; } diff --git a/src/nix/print-dev-env.md b/src/nix/print-dev-env.md index 2aad491de..a8ce9d36a 100644 --- a/src/nix/print-dev-env.md +++ b/src/nix/print-dev-env.md @@ -40,7 +40,7 @@ R""( This command prints a shell script that can be sourced by `bash` and that sets the variables and shell functions defined by the build -process of *installable*. This allows you to get a similar build +process of [*installable*](./nix.md#installables). This allows you to get a similar build environment in your current shell rather than in a subshell (as with `nix develop`). diff --git a/src/nix/profile-install.md b/src/nix/profile-install.md index aed414963..4c0f82c09 100644 --- a/src/nix/profile-install.md +++ b/src/nix/profile-install.md @@ -29,6 +29,6 @@ R""( # Description -This command adds *installables* to a Nix profile. +This command adds [*installables*](./nix.md#installables) to a Nix profile. 
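The conflict handling added to `src/nix/profile.cc` further down in this patch surfaces here: when two profile entries provide the same file, installation fails and suggests either removing the existing package or assigning priorities. A sketch, where `nixpkgs#my-hello` is a hypothetical package that also ships `bin/hello` and the default priority is assumed to be 5:

```console
$ nix profile install nixpkgs#hello
$ nix profile install nixpkgs#my-hello
error: An existing package already provides the following file:
       …
# Prefer the new package instead (a lower priority number wins)
$ nix profile install nixpkgs#my-hello --priority 4
```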
)"" diff --git a/src/nix/profile.cc b/src/nix/profile.cc index f1783d98c..158a6fec9 100644 --- a/src/nix/profile.cc +++ b/src/nix/profile.cc @@ -200,12 +200,22 @@ struct ProfileManifest auto narHash = hashString(htSHA256, sink.s); ValidPathInfo info { - store->makeFixedOutputPath(FileIngestionMethod::Recursive, narHash, "profile", references), + *store, + "profile", + FixedOutputInfo { + .hash = { + .method = FileIngestionMethod::Recursive, + .hash = narHash, + }, + .references = { + .others = std::move(references), + // profiles never refer to themselves + .self = false, + }, + }, narHash, }; - info.references = std::move(references); info.narSize = sink.s.size(); - info.ca = FixedOutputHash { .method = FileIngestionMethod::Recursive, .hash = info.narHash }; StringSource source(sink.s); store->addToStore(info, source); @@ -228,12 +238,12 @@ struct ProfileManifest while (i != prevElems.end() || j != curElems.end()) { if (j != curElems.end() && (i == prevElems.end() || i->describe() > j->describe())) { - std::cout << fmt("%s%s: ∅ -> %s\n", indent, j->describe(), j->versions()); + logger->cout("%s%s: ∅ -> %s", indent, j->describe(), j->versions()); changes = true; ++j; } else if (i != prevElems.end() && (j == curElems.end() || i->describe() < j->describe())) { - std::cout << fmt("%s%s: %s -> ∅\n", indent, i->describe(), i->versions()); + logger->cout("%s%s: %s -> ∅", indent, i->describe(), i->versions()); changes = true; ++i; } @@ -241,7 +251,7 @@ struct ProfileManifest auto v1 = i->versions(); auto v2 = j->versions(); if (v1 != v2) { - std::cout << fmt("%s%s: %s -> %s\n", indent, i->describe(), v1, v2); + logger->cout("%s%s: %s -> %s", indent, i->describe(), v1, v2); changes = true; } ++i; @@ -250,17 +260,23 @@ struct ProfileManifest } if (!changes) - std::cout << fmt("%sNo changes.\n", indent); + logger->cout("%sNo changes.", indent); } }; -static std::map> +static std::map>> builtPathsPerInstallable( - const std::vector, BuiltPathWithResult>> & builtPaths) + const std::vector, BuiltPathWithResult>> & builtPaths) { - std::map> res; + std::map>> res; for (auto & [installable, builtPath] : builtPaths) { - auto & r = res[installable.get()]; + auto & r = res.insert({ + &*installable, + { + {}, + make_ref(), + } + }).first->second; /* Note that there could be conflicting info (e.g. meta.priority fields) if the installable returned multiple derivations. So pick one arbitrarily. 
FIXME: @@ -296,7 +312,7 @@ struct CmdProfileInstall : InstallablesCommand, MixDefaultProfile ; } - void run(ref store) override + void run(ref store, Installables && installables) override { ProfileManifest manifest(*getEvalState(), *profile); @@ -307,14 +323,16 @@ struct CmdProfileInstall : InstallablesCommand, MixDefaultProfile for (auto & installable : installables) { ProfileElement element; - auto & [res, info] = builtPaths[installable.get()]; + auto iter = builtPaths.find(&*installable); + if (iter == builtPaths.end()) continue; + auto & [res, info] = iter->second; - if (info.originalRef && info.resolvedRef && info.attrPath && info.extendedOutputsSpec) { + if (auto * info2 = dynamic_cast(&*info)) { element.source = ProfileElementSource { - .originalRef = *info.originalRef, - .resolvedRef = *info.resolvedRef, - .attrPath = *info.attrPath, - .outputs = *info.extendedOutputsSpec, + .originalRef = info2->flake.originalRef, + .resolvedRef = info2->flake.resolvedRef, + .attrPath = info2->value.attrPath, + .outputs = info2->value.extendedOutputsSpec, }; } @@ -323,14 +341,75 @@ struct CmdProfileInstall : InstallablesCommand, MixDefaultProfile element.priority = priority ? *priority - : info.priority.value_or(defaultPriority); + : ({ + auto * info2 = dynamic_cast(&*info); + info2 + ? info2->value.priority.value_or(defaultPriority) + : defaultPriority; + }); element.updateStorePaths(getEvalStore(), store, res); manifest.elements.push_back(std::move(element)); } - updateProfile(manifest.build(store)); + try { + updateProfile(manifest.build(store)); + } catch (BuildEnvFileConflictError & conflictError) { + // FIXME use C++20 std::ranges once macOS has it + // See https://github.com/NixOS/nix/compare/3efa476c5439f8f6c1968a6ba20a31d1239c2f04..1fe5d172ece51a619e879c4b86f603d9495cc102 + auto findRefByFilePath = [&](Iterator begin, Iterator end) { + for (auto it = begin; it != end; it++) { + auto profileElement = *it; + for (auto & storePath : profileElement.storePaths) { + if (conflictError.fileA.starts_with(store->printStorePath(storePath))) { + return std::pair(conflictError.fileA, profileElement.source->originalRef); + } + if (conflictError.fileB.starts_with(store->printStorePath(storePath))) { + return std::pair(conflictError.fileB, profileElement.source->originalRef); + } + } + } + throw conflictError; + }; + // There are 2 conflicting files. We need to find out which one is from the already installed package and + // which one is the package that is the new package that is being installed. + // The first matching package is the one that was already installed (original). + auto [originalConflictingFilePath, originalConflictingRef] = findRefByFilePath(manifest.elements.begin(), manifest.elements.end()); + // The last matching package is the one that was going to be installed (new). 
+ auto [newConflictingFilePath, newConflictingRef] = findRefByFilePath(manifest.elements.rbegin(), manifest.elements.rend()); + + throw Error( + "An existing package already provides the following file:\n" + "\n" + " %1%\n" + "\n" + "This is the conflicting file from the new package:\n" + "\n" + " %2%\n" + "\n" + "To remove the existing package:\n" + "\n" + " nix profile remove %3%\n" + "\n" + "The new package can also be installed next to the existing one by assigning a different priority.\n" + "The conflicting packages have a priority of %5%.\n" + "To prioritise the new package:\n" + "\n" + " nix profile install %4% --priority %6%\n" + "\n" + "To prioritise the existing package:\n" + "\n" + " nix profile install %4% --priority %7%\n", + originalConflictingFilePath, + newConflictingFilePath, + originalConflictingRef.to_string(), + newConflictingRef.to_string(), + conflictError.priority, + conflictError.priority - 1, + conflictError.priority + 1 + ); + } } }; @@ -457,7 +536,7 @@ struct CmdProfileUpgrade : virtual SourceExprCommand, MixDefaultProfile, MixProf auto matchers = getMatchers(store); - std::vector> installables; + Installables installables; std::vector indices; auto upgradedCount = 0; @@ -473,7 +552,7 @@ struct CmdProfileUpgrade : virtual SourceExprCommand, MixDefaultProfile, MixProf Activity act(*logger, lvlChatty, actUnknown, fmt("checking '%s' for updates", element.source->attrPath)); - auto installable = std::make_shared( + auto installable = make_ref( this, getEvalState(), FlakeRef(element.source->originalRef), @@ -485,19 +564,20 @@ struct CmdProfileUpgrade : virtual SourceExprCommand, MixDefaultProfile, MixProf auto derivedPaths = installable->toDerivedPaths(); if (derivedPaths.empty()) continue; - auto & info = derivedPaths[0].info; + auto * infop = dynamic_cast(&*derivedPaths[0].info); + // `InstallableFlake` should use `ExtraPathInfoFlake`. 
+ assert(infop); + auto & info = *infop; - assert(info.resolvedRef && info.attrPath); - - if (element.source->resolvedRef == info.resolvedRef) continue; + if (element.source->resolvedRef == info.flake.resolvedRef) continue; printInfo("upgrading '%s' from flake '%s' to '%s'", - element.source->attrPath, element.source->resolvedRef, *info.resolvedRef); + element.source->attrPath, element.source->resolvedRef, info.flake.resolvedRef); element.source = ProfileElementSource { .originalRef = installable->flakeRef, - .resolvedRef = *info.resolvedRef, - .attrPath = *info.attrPath, + .resolvedRef = info.flake.resolvedRef, + .attrPath = info.value.attrPath, .outputs = installable->extendedOutputsSpec, }; @@ -526,7 +606,10 @@ struct CmdProfileUpgrade : virtual SourceExprCommand, MixDefaultProfile, MixProf for (size_t i = 0; i < installables.size(); ++i) { auto & installable = installables.at(i); auto & element = manifest.elements[indices.at(i)]; - element.updateStorePaths(getEvalStore(), store, builtPaths[installable.get()].first); + element.updateStorePaths( + getEvalStore(), + store, + builtPaths.find(&*installable)->second.first); } updateProfile(manifest.build(store)); @@ -587,9 +670,9 @@ struct CmdProfileDiffClosures : virtual StoreCommand, MixDefaultProfile for (auto & gen : gens) { if (prevGen) { - if (!first) std::cout << "\n"; + if (!first) logger->cout(""); first = false; - std::cout << fmt("Version %d -> %d:\n", prevGen->number, gen.number); + logger->cout("Version %d -> %d:", prevGen->number, gen.number); printClosureDiff(store, store->followLinksToStorePath(prevGen->path), store->followLinksToStorePath(gen.path), @@ -625,10 +708,10 @@ struct CmdProfileHistory : virtual StoreCommand, EvalCommand, MixDefaultProfile for (auto & gen : gens) { ProfileManifest manifest(*getEvalState(), gen.path); - if (!first) std::cout << "\n"; + if (!first) logger->cout(""); first = false; - std::cout << fmt("Version %s%d" ANSI_NORMAL " (%s)%s:\n", + logger->cout("Version %s%d" ANSI_NORMAL " (%s)%s:", gen.number == curGen ? ANSI_GREEN : ANSI_BOLD, gen.number, std::put_time(std::gmtime(&gen.creationTime), "%Y-%m-%d"), @@ -745,7 +828,6 @@ struct CmdProfile : NixMultiCommand { if (!command) throw UsageError("'nix profile' requires a sub-command."); - command->second->prepare(); command->second->run(); } }; diff --git a/src/nix/profile.md b/src/nix/profile.md index 273e02280..bf61ef4b9 100644 --- a/src/nix/profile.md +++ b/src/nix/profile.md @@ -12,7 +12,7 @@ them to be rolled back easily. The default profile used by `nix profile` is `$HOME/.nix-profile`, which, if it does not exist, is created as a symlink to `/nix/var/nix/profiles/default` if Nix is invoked by the -`root` user, or `/nix/var/nix/profiles/per-user/`*username* otherwise. +`root` user, or `${XDG_STATE_HOME-$HOME/.local/state}/nix/profiles/profile` otherwise. You can specify another profile location using `--profile` *path*. @@ -24,11 +24,11 @@ the profile. In turn, *path*`-`*N* is a symlink to a path in the Nix store. 
For example: ```console -$ ls -l /nix/var/nix/profiles/per-user/alice/profile* -lrwxrwxrwx 1 alice users 14 Nov 25 14:35 /nix/var/nix/profiles/per-user/alice/profile -> profile-7-link -lrwxrwxrwx 1 alice users 51 Oct 28 16:18 /nix/var/nix/profiles/per-user/alice/profile-5-link -> /nix/store/q69xad13ghpf7ir87h0b2gd28lafjj1j-profile -lrwxrwxrwx 1 alice users 51 Oct 29 13:20 /nix/var/nix/profiles/per-user/alice/profile-6-link -> /nix/store/6bvhpysd7vwz7k3b0pndn7ifi5xr32dg-profile -lrwxrwxrwx 1 alice users 51 Nov 25 14:35 /nix/var/nix/profiles/per-user/alice/profile-7-link -> /nix/store/mp0x6xnsg0b8qhswy6riqvimai4gm677-profile +$ ls -l ~alice/.local/state/nix/profiles/profile* +lrwxrwxrwx 1 alice users 14 Nov 25 14:35 /home/alice/.local/state/nix/profiles/profile -> profile-7-link +lrwxrwxrwx 1 alice users 51 Oct 28 16:18 /home/alice/.local/state/nix/profiles/profile-5-link -> /nix/store/q69xad13ghpf7ir87h0b2gd28lafjj1j-profile +lrwxrwxrwx 1 alice users 51 Oct 29 13:20 /home/alice/.local/state/nix/profiles/profile-6-link -> /nix/store/6bvhpysd7vwz7k3b0pndn7ifi5xr32dg-profile +lrwxrwxrwx 1 alice users 51 Nov 25 14:35 /home/alice/.local/state/nix/profiles/profile-7-link -> /nix/store/mp0x6xnsg0b8qhswy6riqvimai4gm677-profile ``` Each of these symlinks is a root for the Nix garbage collector. @@ -38,20 +38,20 @@ profile is a tree of symlinks to the files of the installed packages, e.g. ```console -$ ll -R /nix/var/nix/profiles/per-user/eelco/profile-7-link/ -/nix/var/nix/profiles/per-user/eelco/profile-7-link/: +$ ll -R ~eelco/.local/state/nix/profiles/profile-7-link/ +/home/eelco/.local/state/nix/profiles/profile-7-link/: total 20 dr-xr-xr-x 2 root root 4096 Jan 1 1970 bin -r--r--r-- 2 root root 1402 Jan 1 1970 manifest.json dr-xr-xr-x 4 root root 4096 Jan 1 1970 share -/nix/var/nix/profiles/per-user/eelco/profile-7-link/bin: +/home/eelco/.local/state/nix/profiles/profile-7-link/bin: total 20 lrwxrwxrwx 5 root root 79 Jan 1 1970 chromium -> /nix/store/ijm5k0zqisvkdwjkc77mb9qzb35xfi4m-chromium-86.0.4240.111/bin/chromium lrwxrwxrwx 7 root root 87 Jan 1 1970 spotify -> /nix/store/w9182874m1bl56smps3m5zjj36jhp3rn-spotify-1.1.26.501.gbe11e53b-15/bin/spotify lrwxrwxrwx 3 root root 79 Jan 1 1970 zoom-us -> /nix/store/wbhg2ga8f3h87s9h5k0slxk0m81m4cxl-zoom-us-5.3.469451.0927/bin/zoom-us -/nix/var/nix/profiles/per-user/eelco/profile-7-link/share/applications: +/home/eelco/.local/state/nix/profiles/profile-7-link/share/applications: total 12 lrwxrwxrwx 4 root root 120 Jan 1 1970 chromium-browser.desktop -> /nix/store/4cf803y4vzfm3gyk3vzhzb2327v0kl8a-chromium-unwrapped-86.0.4240.111/share/applications/chromium-browser.desktop lrwxrwxrwx 7 root root 110 Jan 1 1970 spotify.desktop -> /nix/store/w9182874m1bl56smps3m5zjj36jhp3rn-spotify-1.1.26.501.gbe11e53b-15/share/applications/spotify.desktop diff --git a/src/nix/realisation.cc b/src/nix/realisation.cc index c9a7157cd..e19e93219 100644 --- a/src/nix/realisation.cc +++ b/src/nix/realisation.cc @@ -21,7 +21,6 @@ struct CmdRealisation : virtual NixMultiCommand { if (!command) throw UsageError("'nix realisation' requires a sub-command."); - command->second->prepare(); command->second->run(); } }; @@ -46,7 +45,7 @@ struct CmdRealisationInfo : BuiltPathsCommand, MixJSON void run(ref store, BuiltPaths && paths) override { - settings.requireExperimentalFeature(Xp::CaDerivations); + experimentalFeatureSettings.require(Xp::CaDerivations); RealisedPath::Set realisations; for (auto & builtPath : paths) { @@ -65,18 +64,16 @@ struct CmdRealisationInfo : BuiltPathsCommand, 
MixJSON res.push_back(currentPath); } - std::cout << res.dump(); + logger->cout("%s", res); } else { for (auto & path : realisations) { if (auto realisation = std::get_if(&path.raw)) { - std::cout << - realisation->id.to_string() << " " << - store->printStorePath(realisation->outPath); + logger->cout("%s %s", + realisation->id.to_string(), + store->printStorePath(realisation->outPath)); } else - std::cout << store->printStorePath(path.path()); - - std::cout << std::endl; + logger->cout("%s", store->printStorePath(path.path())); } } } diff --git a/src/nix/registry.cc b/src/nix/registry.cc index b5bdfba95..cb94bbd31 100644 --- a/src/nix/registry.cc +++ b/src/nix/registry.cc @@ -224,10 +224,9 @@ struct CmdRegistry : virtual NixMultiCommand void run() override { - settings.requireExperimentalFeature(Xp::Flakes); + experimentalFeatureSettings.require(Xp::Flakes); if (!command) throw UsageError("'nix registry' requires a sub-command."); - command->second->prepare(); command->second->run(); } }; diff --git a/src/nix/repl.cc b/src/nix/repl.cc index 679bdea77..bb14f3f99 100644 --- a/src/nix/repl.cc +++ b/src/nix/repl.cc @@ -1,28 +1,23 @@ #include "eval.hh" #include "globals.hh" #include "command.hh" +#include "installable-value.hh" #include "repl.hh" namespace nix { -struct CmdRepl : InstallablesCommand +struct CmdRepl : RawInstallablesCommand { CmdRepl() { evalSettings.pureEval = false; } - void prepare() override + /** + * This command is stable before the others + */ + std::optional experimentalFeature() override { - if (!settings.isExperimentalFeatureEnabled(Xp::ReplFlake) && !(file) && this->_installables.size() >= 1) { - warn("future versions of Nix will require using `--file` to load a file"); - if (this->_installables.size() > 1) - warn("more than one input file is not currently supported"); - auto filePath = this->_installables[0].data(); - file = std::optional(filePath); - _installables.front() = _installables.back(); - _installables.pop_back(); - } - installables = InstallablesCommand::load(); + return std::nullopt; } std::vector files; @@ -32,11 +27,6 @@ struct CmdRepl : InstallablesCommand return {""}; } - bool useDefaultInstallables() override - { - return file.has_value() or expr.has_value(); - } - bool forceImpureByDefault() override { return true; @@ -54,17 +44,34 @@ struct CmdRepl : InstallablesCommand ; } - void run(ref store) override + void applyDefaultInstallables(std::vector & rawInstallables) override + { + if (!experimentalFeatureSettings.isEnabled(Xp::ReplFlake) && !(file) && rawInstallables.size() >= 1) { + warn("future versions of Nix will require using `--file` to load a file"); + if (rawInstallables.size() > 1) + warn("more than one input file is not currently supported"); + auto filePath = rawInstallables[0].data(); + file = std::optional(filePath); + rawInstallables.front() = rawInstallables.back(); + rawInstallables.pop_back(); + } + if (rawInstallables.empty() && (file.has_value() || expr.has_value())) { + rawInstallables.push_back("."); + } + } + + void run(ref store, std::vector && rawInstallables) override { auto state = getEvalState(); auto getValues = [&]()->AbstractNixRepl::AnnotatedValues{ - auto installables = load(); + auto installables = parseInstallables(store, rawInstallables); AbstractNixRepl::AnnotatedValues values; - for (auto & installable: installables){ - auto what = installable->what(); + for (auto & installable_: installables){ + auto & installable = InstallableValue::require(*installable_); + auto what = installable.what(); if (file){ - auto 
[val, pos] = installable->toValue(*state); - auto what = installable->what(); + auto [val, pos] = installable.toValue(*state); + auto what = installable.what(); state->forceValue(*val, pos); auto autoArgs = getAutoArgs(*state); auto valPost = state->allocValue(); @@ -72,7 +79,7 @@ struct CmdRepl : InstallablesCommand state->forceValue(*valPost, pos); values.push_back( {valPost, what }); } else { - auto [val, pos] = installable->toValue(*state); + auto [val, pos] = installable.toValue(*state); values.push_back( {val, what} ); } } diff --git a/src/nix/run.cc b/src/nix/run.cc index de11a8de6..8d0df0528 100644 --- a/src/nix/run.cc +++ b/src/nix/run.cc @@ -1,5 +1,5 @@ #include "run.hh" -#include "command.hh" +#include "command-installable-value.hh" #include "common-args.hh" #include "shared.hh" #include "store-api.hh" @@ -97,7 +97,7 @@ struct CmdShell : InstallablesCommand, MixEnvironment ; } - void run(ref store) override + void run(ref store, Installables && installables) override { auto outPaths = Installable::toStorePaths(getEvalStore(), store, Realise::Outputs, OperateOn::Output, installables); @@ -179,7 +179,7 @@ struct CmdShell : InstallablesCommand, MixEnvironment static auto rCmdShell = registerCommand("shell"); static auto rCmdNixinate = registerCommand("inate"); -struct CmdRun : InstallableCommand +struct CmdRun : InstallableValueCommand { using InstallableCommand::run; @@ -225,7 +225,7 @@ struct CmdRun : InstallableCommand return res; } - void run(ref store) override + void run(ref store, ref installable) override { auto state = getEvalState(); diff --git a/src/nix/run.hh b/src/nix/run.hh index fed360158..97ddef19b 100644 --- a/src/nix/run.hh +++ b/src/nix/run.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "store-api.hh" diff --git a/src/nix/run.md b/src/nix/run.md index a0f362076..250ea65aa 100644 --- a/src/nix/run.md +++ b/src/nix/run.md @@ -35,7 +35,7 @@ R""( # Description -`nix run` builds and runs *installable*, which must evaluate to an +`nix run` builds and runs [*installable*](./nix.md#installables), which must evaluate to an *app* or a regular Nix derivation. If *installable* evaluates to an *app* (see below), it executes the diff --git a/src/nix/search.cc b/src/nix/search.cc index 40d5d6167..bf68e6657 100644 --- a/src/nix/search.cc +++ b/src/nix/search.cc @@ -1,4 +1,4 @@ -#include "command.hh" +#include "command-installable-value.hh" #include "globals.hh" #include "eval.hh" #include "eval-inline.hh" @@ -85,7 +85,7 @@ struct CmdSearch : SourceExprCommand, MixJSON settings.readOnlyMode = true; evalSettings.enableImportFromDerivation.setDefault(false); - auto installable = parseInstallable(store, (file || expr) ? "" : _installable); + auto installable = InstallableValue::require(parseInstallable(store, (file || expr) ? "" : _installable)); // Empty search string should match all packages // Use "^" here instead of ".*" due to differences in resulting highlighting @@ -217,9 +217,8 @@ struct CmdSearch : SourceExprCommand, MixJSON for (auto & cursor : installable->getCursors(*state)) visit(*cursor, cursor->getAttrPath(), true); - if (json) { - std::cout << jsonOut->dump() << std::endl; - } + if (json) + logger->cout("%s", *jsonOut); if (!json && !results) throw Error("no results for the given search term(s)!"); diff --git a/src/nix/search.md b/src/nix/search.md index 5a5b5ae05..4caa90654 100644 --- a/src/nix/search.md +++ b/src/nix/search.md @@ -62,10 +62,10 @@ R""( # Description -`nix search` searches *installable* (which must be evaluatable, e.g. 
a -flake) for packages whose name or description matches all of the +`nix search` searches [*installable*](./nix.md#installables) (which can be evaluated, that is, a +flake or Nix expression, but not a store path or store derivation path) for packages whose name or description matches all of the regular expressions *regex*. For each matching package, It prints the -full attribute name (from the root of the installable), the version +full attribute name (from the root of the [installable](./nix.md#installables)), the version and the `meta.description` field, highlighting the substrings that were matched by the regular expressions. If no regular expressions are specified, all packages are shown. diff --git a/src/nix/shell.md b/src/nix/shell.md index 9fa1031f5..13a389103 100644 --- a/src/nix/shell.md +++ b/src/nix/shell.md @@ -48,7 +48,7 @@ R""( # Description `nix shell` runs a command in an environment in which the `$PATH` variable -provides the specified *installables*. If no command is specified, it starts the +provides the specified [*installables*](./nix.md#installable). If no command is specified, it starts the default shell of your user account specified by `$SHELL`. )"" diff --git a/src/nix/sigs.cc b/src/nix/sigs.cc index 3d659d6d2..45cd2e1a6 100644 --- a/src/nix/sigs.cc +++ b/src/nix/sigs.cc @@ -45,7 +45,7 @@ struct CmdCopySigs : StorePathsCommand //logger->setExpected(doneLabel, storePaths.size()); auto doPath = [&](const Path & storePathS) { - //Activity act(*logger, lvlInfo, format("getting signatures for '%s'") % storePath); + //Activity act(*logger, lvlInfo, "getting signatures for '%s'", storePath); checkInterrupt(); @@ -173,7 +173,7 @@ struct CmdKeyGenerateSecret : Command if (!keyName) throw UsageError("required argument '--key-name' is missing"); - std::cout << SecretKey::generate(*keyName).to_string(); + writeFull(STDOUT_FILENO, SecretKey::generate(*keyName).to_string()); } }; @@ -194,7 +194,7 @@ struct CmdKeyConvertSecretToPublic : Command void run() override { SecretKey secretKey(drainFD(STDIN_FILENO)); - std::cout << secretKey.toPublicKey().to_string(); + writeFull(STDOUT_FILENO, secretKey.toPublicKey().to_string()); } }; @@ -219,7 +219,6 @@ struct CmdKey : NixMultiCommand { if (!command) throw UsageError("'nix key' requires a sub-command."); - command->second->prepare(); command->second->run(); } }; diff --git a/src/nix/store-copy-log.cc b/src/nix/store-copy-log.cc index d5fab5f2f..a6e8aeff7 100644 --- a/src/nix/store-copy-log.cc +++ b/src/nix/store-copy-log.cc @@ -24,9 +24,7 @@ struct CmdCopyLog : virtual CopyCommand, virtual InstallablesCommand ; } - Category category() override { return catUtility; } - - void run(ref srcStore) override + void run(ref srcStore, Installables && installables) override { auto & srcLogStore = require(*srcStore); diff --git a/src/nix/store-delete.cc b/src/nix/store-delete.cc index ca43f1530..6719227df 100644 --- a/src/nix/store-delete.cc +++ b/src/nix/store-delete.cc @@ -32,7 +32,7 @@ struct CmdStoreDelete : StorePathsCommand ; } - void run(ref store, std::vector && storePaths) override + void run(ref store, StorePaths && storePaths) override { auto & gcStore = require(*store); diff --git a/src/nix/store-delete.md b/src/nix/store-delete.md index db535f87c..431bc5f5e 100644 --- a/src/nix/store-delete.md +++ b/src/nix/store-delete.md @@ -10,7 +10,7 @@ R""( # Description -This command deletes the store paths specified by *installables*. 
, +This command deletes the store paths specified by [*installables*](./nix.md#installables), but only if it is safe to do so; that is, when the path is not reachable from a root of the garbage collector. This means that you can only delete paths that would also be deleted by `nix store diff --git a/src/nix/store-dump-path.md b/src/nix/store-dump-path.md index 4ef563526..56e2174b6 100644 --- a/src/nix/store-dump-path.md +++ b/src/nix/store-dump-path.md @@ -18,6 +18,6 @@ R""( # Description This command generates a NAR file containing the serialisation of the -store path *installable*. The NAR is written to standard output. +store path [*installable*](./nix.md#installables). The NAR is written to standard output. )"" diff --git a/src/nix/store-repair.cc b/src/nix/store-repair.cc index 8fcb3639a..895e39685 100644 --- a/src/nix/store-repair.cc +++ b/src/nix/store-repair.cc @@ -17,7 +17,7 @@ struct CmdStoreRepair : StorePathsCommand ; } - void run(ref store, std::vector && storePaths) override + void run(ref store, StorePaths && storePaths) override { for (auto & path : storePaths) store->repairPath(path); diff --git a/src/nix/store-repair.md b/src/nix/store-repair.md index 92d2205a9..180c577ac 100644 --- a/src/nix/store-repair.md +++ b/src/nix/store-repair.md @@ -17,7 +17,7 @@ R""( # Description This command attempts to "repair" the store paths specified by -*installables* by redownloading them using the available +[*installables*](./nix.md#installables) by redownloading them using the available substituters. If no substitutes are available, then repair is not possible. diff --git a/src/nix/store.cc b/src/nix/store.cc index 44e53c7c7..2879e03b3 100644 --- a/src/nix/store.cc +++ b/src/nix/store.cc @@ -18,7 +18,6 @@ struct CmdStore : virtual NixMultiCommand { if (!command) throw UsageError("'nix store' requires a sub-command."); - command->second->prepare(); command->second->run(); } }; diff --git a/src/nix/system.cc b/src/nix/system.cc index 421cc9071..7bbaff8fa 100644 --- a/src/nix/system.cc +++ b/src/nix/system.cc @@ -58,9 +58,9 @@ struct SystemActivationCommand : ActivationCommand, MixProfile : ActivationCommand(activationType, selfCommandName) { } - void run(nix::ref store) override + void runActivatable(ref store, ref installable) override { - auto out = buildActivatable(store); + auto out = buildActivatable(store, installable); if (getuid() != 0) { Strings args { @@ -120,9 +120,9 @@ struct CmdSystemActivate : SystemCommand, MixDryRun #include "system-activate.md" ; } - void run(nix::ref store) override + void runActivatable(ref store, ref installable) override { - auto out = buildActivatable(store); + auto out = buildActivatable(store, installable); executePrivileged(store->printStorePath(out) + "/bin/switch-to-configuration", Strings{dryRun ? 
"dry-activate" : "test"}); } @@ -194,8 +194,6 @@ struct CmdSystem : NixMultiCommand { if (!command) throw UsageError("'nix system' requires a sub-command."); - settings.requireExperimentalFeature(Xp::NixCommand); - command->second->prepare(); command->second->run(); } }; diff --git a/src/nix/upgrade-nix.cc b/src/nix/upgrade-nix.cc index 17796d6b8..3997c98bf 100644 --- a/src/nix/upgrade-nix.cc +++ b/src/nix/upgrade-nix.cc @@ -32,6 +32,14 @@ struct CmdUpgradeNix : MixDryRun, StoreCommand }); } + /** + * This command is stable before the others + */ + std::optional experimentalFeature() override + { + return std::nullopt; + } + std::string description() override { return "upgrade Nix to the stable version declared in Nixpkgs"; @@ -140,7 +148,7 @@ struct CmdUpgradeNix : MixDryRun, StoreCommand auto state = std::make_unique(Strings(), store); auto v = state->allocValue(); - state->eval(state->parseExprFromString(res.data, "/no-such-path"), *v); + state->eval(state->parseExprFromString(res.data, state->rootPath(CanonPath("/no-such-path"))), *v); Bindings & bindings(*state->allocBindings(0)); auto v2 = findAlongAttrPath(*state, settings.thisSystem, bindings, *v).first; diff --git a/src/nix/upgrade-nix.md b/src/nix/upgrade-nix.md index 084c80ba2..08757aebd 100644 --- a/src/nix/upgrade-nix.md +++ b/src/nix/upgrade-nix.md @@ -11,7 +11,7 @@ R""( * Upgrade Nix in a specific profile: ```console - # nix upgrade-nix -p /nix/var/nix/profiles/per-user/alice/profile + # nix upgrade-nix -p ~alice/.local/state/nix/profiles/profile ``` # Description diff --git a/src/nix/verify.md b/src/nix/verify.md index 1c43792e7..cc1122c02 100644 --- a/src/nix/verify.md +++ b/src/nix/verify.md @@ -24,7 +24,7 @@ R""( # Description -This command verifies the integrity of the store paths *installables*, +This command verifies the integrity of the store paths [*installables*](./nix.md#installables), or, if `--all` is given, the entire Nix store. For each path, it checks that diff --git a/src/resolve-system-dependencies/resolve-system-dependencies.cc b/src/resolve-system-dependencies/resolve-system-dependencies.cc index c6023eb03..4ea268d24 100644 --- a/src/resolve-system-dependencies/resolve-system-dependencies.cc +++ b/src/resolve-system-dependencies/resolve-system-dependencies.cc @@ -157,13 +157,9 @@ int main(int argc, char ** argv) uname(&_uname); - auto cacheParentDir = (format("%1%/dependency-maps") % settings.nixStateDir).str(); + auto cacheParentDir = fmt("%1%/dependency-maps", settings.nixStateDir); - cacheDir = (format("%1%/%2%-%3%-%4%") - % cacheParentDir - % _uname.machine - % _uname.sysname - % _uname.release).str(); + cacheDir = fmt("%1%/%2%-%3%-%4%", cacheParentDir, _uname.machine, _uname.sysname, _uname.release); mkdir(cacheParentDir.c_str(), 0755); mkdir(cacheDir.c_str(), 0755); diff --git a/tests/binary-cache.sh b/tests/binary-cache.sh index 0361ac6a8..7c64a115c 100644 --- a/tests/binary-cache.sh +++ b/tests/binary-cache.sh @@ -15,15 +15,15 @@ outPath=$(nix-build dependencies.nix --no-out-link) nix copy --to file://$cacheDir $outPath # Test copying build logs to the binary cache. 
-nix log --store file://$cacheDir $outPath 2>&1 | grep 'is not available' +expect 1 nix log --store file://$cacheDir $outPath 2>&1 | grep 'is not available' nix store copy-log --to file://$cacheDir $outPath nix log --store file://$cacheDir $outPath | grep FOO rm -rf $TEST_ROOT/var/log/nix -nix log $outPath 2>&1 | grep 'is not available' +expect 1 nix log $outPath 2>&1 | grep 'is not available' nix log --substituters file://$cacheDir $outPath | grep FOO # Test copying build logs from the binary cache. -nix store copy-log --from file://$cacheDir $(nix-store -qd $outPath) +nix store copy-log --from file://$cacheDir $(nix-store -qd $outPath)^'*' nix log $outPath | grep FOO basicDownloadTests() { @@ -78,8 +78,8 @@ mv $nar $nar.good mkdir -p $TEST_ROOT/empty nix-store --dump $TEST_ROOT/empty | xz > $nar -nix-build --substituters "file://$cacheDir" --no-require-sigs dependencies.nix -o $TEST_ROOT/result 2>&1 | tee $TEST_ROOT/log -grep -q "hash mismatch" $TEST_ROOT/log +expect 1 nix-build --substituters "file://$cacheDir" --no-require-sigs dependencies.nix -o $TEST_ROOT/result 2>&1 | tee $TEST_ROOT/log +grepQuiet "hash mismatch" $TEST_ROOT/log mv $nar.good $nar @@ -126,9 +126,9 @@ clearStore rm -v $(grep -l "StorePath:.*dependencies-input-2" $cacheDir/*.narinfo) nix-build --substituters "file://$cacheDir" --no-require-sigs dependencies.nix -o $TEST_ROOT/result 2>&1 | tee $TEST_ROOT/log -grep -q "copying path.*input-0" $TEST_ROOT/log -grep -q "copying path.*input-2" $TEST_ROOT/log -grep -q "copying path.*top" $TEST_ROOT/log +grepQuiet "copying path.*input-0" $TEST_ROOT/log +grepQuiet "copying path.*input-2" $TEST_ROOT/log +grepQuiet "copying path.*top" $TEST_ROOT/log # Idem, but without cached .narinfo. @@ -136,11 +136,11 @@ clearStore clearCacheCache nix-build --substituters "file://$cacheDir" --no-require-sigs dependencies.nix -o $TEST_ROOT/result 2>&1 | tee $TEST_ROOT/log -grep -q "don't know how to build" $TEST_ROOT/log -grep -q "building.*input-1" $TEST_ROOT/log -grep -q "building.*input-2" $TEST_ROOT/log -grep -q "copying path.*input-0" $TEST_ROOT/log -grep -q "copying path.*top" $TEST_ROOT/log +grepQuiet "don't know how to build" $TEST_ROOT/log +grepQuiet "building.*input-1" $TEST_ROOT/log +grepQuiet "building.*input-2" $TEST_ROOT/log +grepQuiet "copying path.*input-0" $TEST_ROOT/log +grepQuiet "copying path.*top" $TEST_ROOT/log # Create a signed binary cache. 
diff --git a/tests/build-delete.sh b/tests/build-delete.sh index 636681f64..9c56b00e8 100644 --- a/tests/build-delete.sh +++ b/tests/build-delete.sh @@ -2,8 +2,6 @@ source common.sh clearStore -set -o pipefail - # https://github.com/NixOS/nix/issues/6572 issue_6572_independent_outputs() { nix build -f multiple-outputs.nix --json independent --no-link > $TEST_ROOT/independent.json diff --git a/tests/build-dry.sh b/tests/build-dry.sh index 5f29239dc..6d1754af5 100644 --- a/tests/build-dry.sh +++ b/tests/build-dry.sh @@ -54,7 +54,7 @@ clearCache RES=$(nix build -f dependencies.nix --dry-run --json) -if [[ -z "$NIX_TESTS_CA_BY_DEFAULT" ]]; then +if [[ -z "${NIX_TESTS_CA_BY_DEFAULT-}" ]]; then echo "$RES" | jq '.[0] | [ (.drvPath | test("'$NIX_STORE_DIR'.*\\.drv")), (.outputs.out | test("'$NIX_STORE_DIR'")) diff --git a/tests/build-remote-trustless-after.sh b/tests/build-remote-trustless-after.sh new file mode 100644 index 000000000..19f59e6ae --- /dev/null +++ b/tests/build-remote-trustless-after.sh @@ -0,0 +1,2 @@ +outPath=$(readlink -f $TEST_ROOT/result) +grep 'FOO BAR BAZ' ${remoteDir}/${outPath} diff --git a/tests/build-remote-trustless-should-fail-0.sh b/tests/build-remote-trustless-should-fail-0.sh new file mode 100644 index 000000000..5e3d5ae07 --- /dev/null +++ b/tests/build-remote-trustless-should-fail-0.sh @@ -0,0 +1,29 @@ +source common.sh + +enableFeatures "daemon-trust-override" + +restartDaemon + +[[ $busybox =~ busybox ]] || skipTest "no busybox" + +unset NIX_STORE_DIR +unset NIX_STATE_DIR + +# We first build a dependency of the derivation we eventually want to +# build. +nix-build build-hook.nix -A passthru.input2 \ + -o "$TEST_ROOT/input2" \ + --arg busybox "$busybox" \ + --store "$TEST_ROOT/local" \ + --option system-features bar + +# Now when we go to build that downstream derivation, Nix will fail +# because we cannot trustlessly build input-addressed derivations with +# `inputDrv` dependencies. + +file=build-hook.nix +prog=$(readlink -e ./nix-daemon-untrusting.sh) +proto=ssh-ng + +expectStderr 1 source build-remote-trustless.sh \ + | grepQuiet "you are not privileged to build input-addressed derivations" diff --git a/tests/build-remote-trustless-should-pass-0.sh b/tests/build-remote-trustless-should-pass-0.sh new file mode 100644 index 000000000..2a7ebd8c6 --- /dev/null +++ b/tests/build-remote-trustless-should-pass-0.sh @@ -0,0 +1,9 @@ +source common.sh + +# Remote trusts us +file=build-hook.nix +prog=nix-store +proto=ssh + +source build-remote-trustless.sh +source build-remote-trustless-after.sh diff --git a/tests/build-remote-trustless-should-pass-1.sh b/tests/build-remote-trustless-should-pass-1.sh new file mode 100644 index 000000000..516bdf092 --- /dev/null +++ b/tests/build-remote-trustless-should-pass-1.sh @@ -0,0 +1,9 @@ +source common.sh + +# Remote trusts us +file=build-hook.nix +prog=nix-daemon +proto=ssh-ng + +source build-remote-trustless.sh +source build-remote-trustless-after.sh diff --git a/tests/build-remote-trustless-should-pass-3.sh b/tests/build-remote-trustless-should-pass-3.sh new file mode 100644 index 000000000..40f81da5a --- /dev/null +++ b/tests/build-remote-trustless-should-pass-3.sh @@ -0,0 +1,14 @@ +source common.sh + +enableFeatures "daemon-trust-override" + +restartDaemon + +# Remote doesn't trusts us, but this is fine because we are only +# building (fixed) CA derivations. 
+file=build-hook-ca-fixed.nix +prog=$(readlink -e ./nix-daemon-untrusting.sh) +proto=ssh-ng + +source build-remote-trustless.sh +source build-remote-trustless-after.sh diff --git a/tests/build-remote-trustless.sh b/tests/build-remote-trustless.sh new file mode 100644 index 000000000..9df44e0c5 --- /dev/null +++ b/tests/build-remote-trustless.sh @@ -0,0 +1,14 @@ +requireSandboxSupport +[[ $busybox =~ busybox ]] || skipTest "no busybox" + +unset NIX_STORE_DIR +unset NIX_STATE_DIR + +remoteDir=$TEST_ROOT/remote + +# Note: ssh{-ng}://localhost bypasses ssh. See tests/build-remote.sh for +# more details. +nix-build $file -o $TEST_ROOT/result --max-jobs 0 \ + --arg busybox $busybox \ + --store $TEST_ROOT/local \ + --builders "$proto://localhost?remote-program=$prog&remote-store=${remoteDir}%3Fsystem-features=foo%20bar%20baz - - 1 1 foo,bar,baz" diff --git a/tests/build-remote.sh b/tests/build-remote.sh index 25a482003..78e12b477 100644 --- a/tests/build-remote.sh +++ b/tests/build-remote.sh @@ -1,5 +1,5 @@ -if ! canUseSandbox; then exit 99; fi -if ! [[ $busybox =~ busybox ]]; then exit 99; fi +requireSandboxSupport +[[ $busybox =~ busybox ]] || skipTest "no busybox" unset NIX_STORE_DIR unset NIX_STATE_DIR @@ -7,7 +7,7 @@ unset NIX_STATE_DIR function join_by { local d=$1; shift; echo -n "$1"; shift; printf "%s" "${@/#/$d}"; } EXTRA_SYSTEM_FEATURES=() -if [[ -n "$CONTENT_ADDRESSED" ]]; then +if [[ -n "${CONTENT_ADDRESSED-}" ]]; then EXTRA_SYSTEM_FEATURES=("ca-derivations") fi @@ -42,25 +42,26 @@ testPrintOutPath=$(nix build -L -v -f $file --no-link --print-out-paths --max-jo [[ $testPrintOutPath =~ store.*build-remote ]] -set -o pipefail - # Ensure that input1 was built on store1 due to the required feature. -nix path-info --store $TEST_ROOT/machine1 --all \ - | grep builder-build-remote-input-1.sh \ - | grep -v builder-build-remote-input-2.sh \ - | grep -v builder-build-remote-input-3.sh +output=$(nix path-info --store $TEST_ROOT/machine1 --all) +echo "$output" | grepQuiet builder-build-remote-input-1.sh +echo "$output" | grepQuietInverse builder-build-remote-input-2.sh +echo "$output" | grepQuietInverse builder-build-remote-input-3.sh +unset output # Ensure that input2 was built on store2 due to the required feature. -nix path-info --store $TEST_ROOT/machine2 --all \ - | grep -v builder-build-remote-input-1.sh \ - | grep builder-build-remote-input-2.sh \ - | grep -v builder-build-remote-input-3.sh +output=$(nix path-info --store $TEST_ROOT/machine2 --all) +echo "$output" | grepQuietInverse builder-build-remote-input-1.sh +echo "$output" | grepQuiet builder-build-remote-input-2.sh +echo "$output" | grepQuietInverse builder-build-remote-input-3.sh +unset output # Ensure that input3 was built on store3 due to the required feature. -nix path-info --store $TEST_ROOT/machine3 --all \ - | grep -v builder-build-remote-input-1.sh \ - | grep -v builder-build-remote-input-2.sh \ - | grep builder-build-remote-input-3.sh +output=$(nix path-info --store $TEST_ROOT/machine3 --all) +echo "$output" | grepQuietInverse builder-build-remote-input-1.sh +echo "$output" | grepQuietInverse builder-build-remote-input-2.sh +echo "$output" | grepQuiet builder-build-remote-input-3.sh +unset output for i in input1 input3; do diff --git a/tests/build.sh b/tests/build.sh index 2dfd43b65..b579fc374 100644 --- a/tests/build.sh +++ b/tests/build.sh @@ -2,8 +2,6 @@ source common.sh clearStore -set -o pipefail - # Make sure that 'nix build' returns all outputs by default. 
nix build -f multiple-outputs.nix --json a b --no-link | jq --exit-status ' (.[0] | diff --git a/tests/ca/build.sh b/tests/ca/build.sh index cc225c6c8..7754ad276 100644 --- a/tests/ca/build.sh +++ b/tests/ca/build.sh @@ -2,14 +2,14 @@ source common.sh -drv=$(nix-instantiate --experimental-features ca-derivations ./content-addressed.nix -A rootCA --arg seed 1) -nix --experimental-features 'nix-command ca-derivations' show-derivation "$drv" --arg seed 1 +drv=$(nix-instantiate ./content-addressed.nix -A rootCA --arg seed 1) +nix derivation show "$drv" --arg seed 1 buildAttr () { local derivationPath=$1 local seedValue=$2 shift; shift - local args=("--experimental-features" "ca-derivations" "./content-addressed.nix" "-A" "$derivationPath" --arg seed "$seedValue" "--no-out-link") + local args=("./content-addressed.nix" "-A" "$derivationPath" --arg seed "$seedValue" "--no-out-link") args+=("$@") nix-build "${args[@]}" } @@ -19,7 +19,7 @@ testRemoteCache () { local outPath=$(buildAttr dependentNonCA 1) nix copy --to file://$cacheDir $outPath clearStore - buildAttr dependentNonCA 1 --option substituters file://$cacheDir --no-require-sigs |& (! grep "building dependent-non-ca") + buildAttr dependentNonCA 1 --option substituters file://$cacheDir --no-require-sigs |& grepQuietInverse "building dependent-non-ca" } testDeterministicCA () { @@ -46,17 +46,17 @@ testCutoff () { } testGC () { - nix-instantiate --experimental-features ca-derivations ./content-addressed.nix -A rootCA --arg seed 5 - nix-collect-garbage --experimental-features ca-derivations --option keep-derivations true + nix-instantiate ./content-addressed.nix -A rootCA --arg seed 5 + nix-collect-garbage --option keep-derivations true clearStore buildAttr rootCA 1 --out-link $TEST_ROOT/rootCA - nix-collect-garbage --experimental-features ca-derivations + nix-collect-garbage buildAttr rootCA 1 -j0 } testNixCommand () { clearStore - nix build --experimental-features 'nix-command ca-derivations' --file ./content-addressed.nix --no-link + nix build --file ./content-addressed.nix --no-link } # Regression test for https://github.com/NixOS/nix/issues/4775 diff --git a/tests/ca/derivation-json.sh b/tests/ca/derivation-json.sh new file mode 100644 index 000000000..c1480fd17 --- /dev/null +++ b/tests/ca/derivation-json.sh @@ -0,0 +1,29 @@ +source common.sh + +export NIX_TESTS_CA_BY_DEFAULT=1 + +drvPath=$(nix-instantiate ../simple.nix) + +nix derivation show $drvPath | jq .[] > $TEST_HOME/simple.json + +drvPath2=$(nix derivation add < $TEST_HOME/simple.json) + +[[ "$drvPath" = "$drvPath2" ]] + +# Content-addressed derivations can be renamed. +jq '.name = "foo"' < $TEST_HOME/simple.json > $TEST_HOME/foo.json +drvPath3=$(nix derivation add --dry-run < $TEST_HOME/foo.json) +# With --dry-run nothing is actually written +[[ ! 
-e "$drvPath3" ]] + +# But the JSON is rejected without the experimental feature +expectStderr 1 nix derivation add < $TEST_HOME/foo.json --experimental-features nix-command | grepQuiet "experimental Nix feature 'ca-derivations' is disabled" + +# Without --dry-run it is actually written +drvPath4=$(nix derivation add < $TEST_HOME/foo.json) +[[ "$drvPath4" = "$drvPath3" ]] +[[ -e "$drvPath3" ]] + +# The modified derivation read back as JSON matches +nix derivation show $drvPath3 | jq .[] > $TEST_HOME/foo-read.json +diff $TEST_HOME/foo.json $TEST_HOME/foo-read.json diff --git a/tests/ca/substitute.sh b/tests/ca/substitute.sh index 819f3fd85..ea981adc4 100644 --- a/tests/ca/substitute.sh +++ b/tests/ca/substitute.sh @@ -28,6 +28,12 @@ nix realisation info --file ./content-addressed.nix transitivelyDependentCA nix realisation info --file ./content-addressed.nix dependentCA # nix realisation info --file ./content-addressed.nix rootCA --outputs out +if isDaemonNewer "2.13"; then + pushToStore="../push-to-store.sh" +else + pushToStore="../push-to-store-old.sh" +fi + # Same thing, but # 1. With non-ca derivations # 2. Erasing the realisations on the remote store @@ -37,7 +43,7 @@ nix realisation info --file ./content-addressed.nix dependentCA # # Regression test for #4725 clearStore -nix build --file ../simple.nix -L --no-link --post-build-hook ../push-to-store.sh +nix build --file ../simple.nix -L --no-link --post-build-hook "$pushToStore" clearStore rm -r "$REMOTE_STORE_DIR/realisations" nix build --file ../simple.nix -L --no-link --substitute --substituters "$REMOTE_STORE" --no-require-sigs -j0 @@ -52,7 +58,7 @@ if [[ -z "$(ls "$REMOTE_STORE_DIR/realisations")" ]]; then fi # Test the local realisation disk cache -buildDrvs --post-build-hook ../push-to-store.sh +buildDrvs --post-build-hook "$pushToStore" clearStore # Add the realisations of rootCA to the cachecache clearCacheCache diff --git a/tests/check-refs.sh b/tests/check-refs.sh index 65a72552a..2778e491d 100644 --- a/tests/check-refs.sh +++ b/tests/check-refs.sh @@ -8,14 +8,14 @@ dep=$(nix-build -o $RESULT check-refs.nix -A dep) # test1 references dep, not itself. test1=$(nix-build -o $RESULT check-refs.nix -A test1) -(! nix-store -q --references $test1 | grep -q $test1) -nix-store -q --references $test1 | grep -q $dep +nix-store -q --references $test1 | grepQuietInverse $test1 +nix-store -q --references $test1 | grepQuiet $dep # test2 references src, not itself nor dep. test2=$(nix-build -o $RESULT check-refs.nix -A test2) -(! nix-store -q --references $test2 | grep -q $test2) -(! nix-store -q --references $test2 | grep -q $dep) -nix-store -q --references $test2 | grep -q aux-ref +nix-store -q --references $test2 | grepQuietInverse $test2 +nix-store -q --references $test2 | grepQuietInverse $dep +nix-store -q --references $test2 | grepQuiet aux-ref # test3 should fail (unallowed ref). (! nix-build -o $RESULT check-refs.nix -A test3) diff --git a/tests/check-reqs.sh b/tests/check-reqs.sh index e9f65fc2a..856c94cec 100644 --- a/tests/check-reqs.sh +++ b/tests/check-reqs.sh @@ -8,8 +8,8 @@ nix-build -o $RESULT check-reqs.nix -A test1 (! nix-build -o $RESULT check-reqs.nix -A test2) (! nix-build -o $RESULT check-reqs.nix -A test3) -(! nix-build -o $RESULT check-reqs.nix -A test4) 2>&1 | grep -q 'check-reqs-dep1' -(! nix-build -o $RESULT check-reqs.nix -A test4) 2>&1 | grep -q 'check-reqs-dep2' +(! nix-build -o $RESULT check-reqs.nix -A test4) 2>&1 | grepQuiet 'check-reqs-dep1' +(! 
nix-build -o $RESULT check-reqs.nix -A test4) 2>&1 | grepQuiet 'check-reqs-dep2' (! nix-build -o $RESULT check-reqs.nix -A test5) (! nix-build -o $RESULT check-reqs.nix -A test6) diff --git a/tests/check.sh b/tests/check.sh index e77c0405d..645b90222 100644 --- a/tests/check.sh +++ b/tests/check.sh @@ -37,7 +37,7 @@ checkBuildTempDirRemoved $TEST_ROOT/log nix-build check.nix -A deterministic --argstr checkBuildId $checkBuildId \ --no-out-link --check --keep-failed 2> $TEST_ROOT/log -if grep -q 'may not be deterministic' $TEST_ROOT/log; then false; fi +if grepQuiet 'may not be deterministic' $TEST_ROOT/log; then false; fi checkBuildTempDirRemoved $TEST_ROOT/log nix-build check.nix -A nondeterministic --argstr checkBuildId $checkBuildId \ diff --git a/tests/common.sh b/tests/common.sh index 68b90a85f..8941671d6 100644 --- a/tests/common.sh +++ b/tests/common.sh @@ -1,4 +1,4 @@ -set -e +set -eu -o pipefail if [[ -z "${COMMON_SH_SOURCED-}" ]]; then diff --git a/tests/common/vars-and-functions.sh.in b/tests/common/vars-and-functions.sh.in index 0deef4c1c..a9e6c802f 100644 --- a/tests/common/vars-and-functions.sh.in +++ b/tests/common/vars-and-functions.sh.in @@ -1,4 +1,4 @@ -set -e +set -eu -o pipefail if [[ -z "${COMMON_VARS_AND_FUNCTIONS_SH_SOURCED-}" ]]; then @@ -152,31 +152,64 @@ isDaemonNewer () { [[ $(nix eval --expr "builtins.compareVersions ''$daemonVersion'' ''$requiredVersion''") -ge 0 ]] } +skipTest () { + echo "$1, skipping this test..." >&2 + exit 99 +} + requireDaemonNewerThan () { - isDaemonNewer "$1" || exit 99 + isDaemonNewer "$1" || skipTest "Daemon is too old" } canUseSandbox() { - if [[ ! $_canUseSandbox ]]; then - echo "Sandboxing not supported, skipping this test..." - return 1 - fi + [[ ${_canUseSandbox-} ]] +} - return 0 +requireSandboxSupport () { + canUseSandbox || skipTest "Sandboxing not supported" +} + +requireGit() { + [[ $(type -p git) ]] || skipTest "Git not installed" } fail() { - echo "$1" + echo "$1" >&2 exit 1 } +# Run a command failing if it didn't exit with the expected exit code. +# +# Has two advantages over the built-in `!`: +# +# 1. `!` conflates all non-0 codes. `expect` allows testing for an exact +# code. +# +# 2. `!` unexpectedly negates `set -e`, and cannot be used on individual +# pipeline stages with `set -o pipefail`. It only works on the entire +# pipeline, which is useless if we want, say, `nix ...` invocation to +# *fail*, but a grep on the error message it outputs to *succeed*. expect() { local expected res expected="$1" shift - "$@" || res="$?" + "$@" && res=0 || res="$?" if [[ $res -ne $expected ]]; then - echo "Expected '$expected' but got '$res' while running '$*'" + echo "Expected '$expected' but got '$res' while running '${*@Q}'" >&2 + return 1 + fi + return 0 +} + +# Better than just doing `expect ... >&2` because the "Expected..." +# message below will *not* be redirected. +expectStderr() { + local expected res + expected="$1" + shift + "$@" 2>&1 && res=0 || res="$?" + if [[ $res -ne $expected ]]; then + echo "Expected '$expected' but got '$res' while running '${*@Q}'" >&2 return 1 fi return 0 @@ -184,14 +217,13 @@ expect() { needLocalStore() { if [[ "$NIX_REMOTE" == "daemon" ]]; then - echo "Can’t run through the daemon ($1), skipping this test..." 
- return 99 + skipTest "Can’t run through the daemon ($1)" fi } # Just to make it easy to find which tests should be fixed buggyNeedLocalStore() { - needLocalStore + needLocalStore "$1" } enableFeatures() { @@ -210,6 +242,35 @@ onError() { done } +# `grep -v` doesn't work well for exit codes. We want `!(exist line l. l +# matches)`. It gives us `exist line l. !(l matches)`. +# +# `!` normally doesn't work well with `set -e`, but when we wrap in a +# function it *does*. +grepInverse() { + ! grep "$@" +} + +# A shorthand, `> /dev/null` is a bit noisy. +# +# `grep -q` would seem to do this, no function necessary, but it is a +# bad fit with pipes and `set -o pipefail`: `-q` will exit after the +# first match, and then subsequent writes will result in broken pipes. +# +# Note that reproducing the above is a bit tricky as it depends on +# non-deterministic properties such as the timing between the match and +# the closing of the pipe, the buffering of the pipe, and the speed of +# the producer into the pipe. But rest assured we've seen it happen in +# CI reliably. +grepQuiet() { + grep "$@" > /dev/null +} + +# The previous two, combined +grepQuietInverse() { + ! grep "$@" > /dev/null +} + trap onError ERR fi # COMMON_VARS_AND_FUNCTIONS_SH_SOURCED diff --git a/tests/compute-levels.sh b/tests/compute-levels.sh index e4322dfa1..de3da2ebd 100644 --- a/tests/compute-levels.sh +++ b/tests/compute-levels.sh @@ -3,5 +3,5 @@ source common.sh if [[ $(uname -ms) = "Linux x86_64" ]]; then # x86_64 CPUs must always support the baseline # microarchitecture level. - nix -vv --version | grep -q "x86_64-v1-linux" + nix -vv --version | grepQuiet "x86_64-v1-linux" fi diff --git a/tests/db-migration.sh b/tests/db-migration.sh index 92dd4f3ba..44cd16bc0 100644 --- a/tests/db-migration.sh +++ b/tests/db-migration.sh @@ -1,18 +1,18 @@ # Test that we can successfully migrate from an older db schema +source common.sh + # Only run this if we have an older Nix available # XXX: This assumes that the `daemon` package is older than the `client` one -if [[ -z "$NIX_DAEMON_PACKAGE" ]]; then - exit 99 +if [[ -z "${NIX_DAEMON_PACKAGE-}" ]]; then + skipTest "not using the Nix daemon" fi -source common.sh - killDaemon # Fill the db using the older Nix PATH_WITH_NEW_NIX="$PATH" -export PATH="$NIX_DAEMON_PACKAGE/bin:$PATH" +export PATH="${NIX_DAEMON_PACKAGE}/bin:$PATH" clearStore nix-build simple.nix --no-out-link nix-store --generate-binary-cache-key cache1.example.org $TEST_ROOT/sk1 $TEST_ROOT/pk1 diff --git a/tests/dependencies.sh b/tests/dependencies.sh index 092950aa7..f9da0c6bc 100644 --- a/tests/dependencies.sh +++ b/tests/dependencies.sh @@ -36,10 +36,10 @@ deps=$(nix-store -quR "$drvPath") echo "output closure contains $deps" # The output path should be in the closure. -echo "$deps" | grep -q "$outPath" +echo "$deps" | grepQuiet "$outPath" # Input-1 is not retained. -if echo "$deps" | grep -q "dependencies-input-1"; then exit 1; fi +if echo "$deps" | grepQuiet "dependencies-input-1"; then exit 1; fi # Input-2 is retained. input2OutPath=$(echo "$deps" | grep "dependencies-input-2") @@ -49,4 +49,4 @@ nix-store -q --referrers-closure "$input2OutPath" | grep "$outPath" # Check that the derivers are set properly. 
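Since the `expect`, `expectStderr`, `grepQuiet` and `grepQuietInverse` helpers added in `tests/common/vars-and-functions.sh.in` above are easy to get wrong under `set -eu -o pipefail`, here is a standalone sketch of the idiom they enable. The helpers are re-stated in simplified form (without the diagnostic output of the real ones) purely for illustration; it is not part of the test suite.

```bash
#!/usr/bin/env bash
# Simplified re-statement of the test helpers, for illustration only.
set -eu -o pipefail

expect() {                       # succeed only if "$@" exits with exactly the expected code
    local expected=$1; shift
    local res=0
    "$@" && res=0 || res=$?
    [[ "$res" -eq "$expected" ]]
}

grepQuiet() {                    # like `grep -q`, but drains its input so the writer never gets SIGPIPE
    grep "$@" > /dev/null
}

# `!` would conflate every non-zero exit code and is not caught by `set -e`;
# `grep -q` can close the pipe early and break the producer under `pipefail`.
expect 1 grep foo /dev/null                                     # exact exit code: grep returns 1 on no match
echo "hello world" | grepQuiet hello                            # quiet match without an early pipe close
expect 1 sh -c 'echo oops >&2; exit 1' 2>&1 | grepQuiet oops    # assert failure and error text in one pipeline
```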
test $(nix-store -q --deriver "$outPath") = "$drvPath" -nix-store -q --deriver "$input2OutPath" | grep -q -- "-input-2.drv" +nix-store -q --deriver "$input2OutPath" | grepQuiet -- "-input-2.drv" diff --git a/tests/derivation-json.sh b/tests/derivation-json.sh new file mode 100644 index 000000000..b6be5d977 --- /dev/null +++ b/tests/derivation-json.sh @@ -0,0 +1,12 @@ +source common.sh + +drvPath=$(nix-instantiate simple.nix) + +nix derivation show $drvPath | jq .[] > $TEST_HOME/simple.json + +drvPath2=$(nix derivation add < $TEST_HOME/simple.json) + +[[ "$drvPath" = "$drvPath2" ]] + +# Input addressed derivations cannot be renamed. +jq '.name = "foo"' < $TEST_HOME/simple.json | expectStderr 1 nix derivation add | grepQuiet "has incorrect output" diff --git a/tests/describe-stores.sh b/tests/describe-stores.sh deleted file mode 100644 index 3fea61483..000000000 --- a/tests/describe-stores.sh +++ /dev/null @@ -1,8 +0,0 @@ -source common.sh - -# Query an arbitrary value in `nix describe-stores --json`'s output just to -# check that it has the right structure -[[ $(nix --experimental-features 'nix-command flakes' describe-stores --json | jq '.["SSH Store"]["compress"]["defaultValue"]') == false ]] - -# Ensure that the output of `nix describe-stores` isn't empty -[[ -n $(nix --experimental-features 'nix-command flakes' describe-stores) ]] diff --git a/tests/eval-store.sh b/tests/eval-store.sh index 679da5741..8fc859730 100644 --- a/tests/eval-store.sh +++ b/tests/eval-store.sh @@ -2,7 +2,7 @@ source common.sh # Using `--eval-store` with the daemon will eventually copy everything # to the build store, invalidating most of the tests here -needLocalStore +needLocalStore "“--eval-store” doesn't achieve much with the daemon" eval_store=$TEST_ROOT/eval-store diff --git a/tests/experimental-features.sh b/tests/experimental-features.sh new file mode 100644 index 000000000..607bf0a8e --- /dev/null +++ b/tests/experimental-features.sh @@ -0,0 +1,86 @@ +source common.sh + +# Skipping these two for now, because we actually *do* want flags and +# config settings to always show up in the manual, just be marked +# experimental. Will reenable once the manual generation takes advantage +# of the JSON metadata on this. +# +# # Without flakes, flake options should not show up +# # With flakes, flake options should show up +# +# function grep_both_ways { +# nix --experimental-features 'nix-command' "$@" | grepQuietInverse flake +# nix --experimental-features 'nix-command flakes' "$@" | grepQuiet flake +# +# # Also, the order should not matter +# nix "$@" --experimental-features 'nix-command' | grepQuietInverse flake +# nix "$@" --experimental-features 'nix-command flakes' | grepQuiet flake +# } +# +# # Simple case, the configuration effects the running command +# grep_both_ways show-config +# +# # Medium case, the configuration effects --help +# grep_both_ways store gc --help + +# Test settings that are gated on experimental features; the setting is ignored +# with a warning if the experimental feature is not enabled. The order of the +# `setting = value` lines in the configuration should not matter. 
+ +# 'flakes' experimental-feature is disabled before, ignore and warn +NIX_CONFIG=' + experimental-features = nix-command + accept-flake-config = true +' nix show-config accept-flake-config 1>$TEST_ROOT/stdout 2>$TEST_ROOT/stderr +grepQuiet "false" $TEST_ROOT/stdout +grepQuiet "Ignoring setting 'accept-flake-config' because experimental feature 'flakes' is not enabled" $TEST_ROOT/stderr + +# 'flakes' experimental-feature is disabled after, ignore and warn +NIX_CONFIG=' + accept-flake-config = true + experimental-features = nix-command +' nix show-config accept-flake-config 1>$TEST_ROOT/stdout 2>$TEST_ROOT/stderr +grepQuiet "false" $TEST_ROOT/stdout +grepQuiet "Ignoring setting 'accept-flake-config' because experimental feature 'flakes' is not enabled" $TEST_ROOT/stderr + +# 'flakes' experimental-feature is enabled before, process +NIX_CONFIG=' + experimental-features = nix-command flakes + accept-flake-config = true +' nix show-config accept-flake-config 1>$TEST_ROOT/stdout 2>$TEST_ROOT/stderr +grepQuiet "true" $TEST_ROOT/stdout +grepQuietInverse "Ignoring setting 'accept-flake-config'" $TEST_ROOT/stderr + +# 'flakes' experimental-feature is enabled after, process +NIX_CONFIG=' + accept-flake-config = true + experimental-features = nix-command flakes +' nix show-config accept-flake-config 1>$TEST_ROOT/stdout 2>$TEST_ROOT/stderr +grepQuiet "true" $TEST_ROOT/stdout +grepQuietInverse "Ignoring setting 'accept-flake-config'" $TEST_ROOT/stderr + +function exit_code_both_ways { + expect 1 nix --experimental-features 'nix-command' "$@" 1>/dev/null + nix --experimental-features 'nix-command flakes' "$@" 1>/dev/null + + # Also, the order should not matter + expect 1 nix "$@" --experimental-features 'nix-command' 1>/dev/null + nix "$@" --experimental-features 'nix-command flakes' 1>/dev/null +} + +exit_code_both_ways show-config --flake-registry 'https://no' + +# Double check these are stable +nix --experimental-features '' --help 1>/dev/null +nix --experimental-features '' doctor --help 1>/dev/null +nix --experimental-features '' repl --help 1>/dev/null +nix --experimental-features '' upgrade-nix --help 1>/dev/null + +# These 3 arguments are currently given to all commands, which is wrong (as not +# all care). To deal with fixing later, we simply make them require the +# nix-command experimental features --- it so happens that the commands we wish +# stabilizing to do not need them anyways. +for arg in '--print-build-logs' '--offline' '--refresh'; do + nix --experimental-features 'nix-command' "$arg" --help 1>/dev/null + expect 1 nix --experimental-features '' "$arg" --help 1>/dev/null +done diff --git a/tests/export-graph.sh b/tests/export-graph.sh index 4954a6cbc..1f6232a40 100644 --- a/tests/export-graph.sh +++ b/tests/export-graph.sh @@ -4,7 +4,7 @@ clearStore clearProfiles checkRef() { - nix-store -q --references $TEST_ROOT/result | grep -q "$1"'$' || fail "missing reference $1" + nix-store -q --references $TEST_ROOT/result | grepQuiet "$1"'$' || fail "missing reference $1" } # Test the export of the runtime dependency graph. diff --git a/tests/fetchClosure.sh b/tests/fetchClosure.sh index d88c55c3c..a207f647c 100644 --- a/tests/fetchClosure.sh +++ b/tests/fetchClosure.sh @@ -42,13 +42,13 @@ if [[ "$NIX_REMOTE" != "daemon" ]]; then fi # 'toPath' set to empty string should fail but print the expected path. 
-nix eval -v --json --expr " +expectStderr 1 nix eval -v --json --expr " builtins.fetchClosure { fromStore = \"file://$cacheDir\"; fromPath = $nonCaPath; toPath = \"\"; } -" 2>&1 | grep "error: rewriting.*$nonCaPath.*yielded.*$caPath" +" | grep "error: rewriting.*$nonCaPath.*yielded.*$caPath" # If fromPath is CA, then toPath isn't needed. nix copy --to file://$cacheDir $caPath diff --git a/tests/fetchGit.sh b/tests/fetchGit.sh index da09c3f37..e2ccb0e97 100644 --- a/tests/fetchGit.sh +++ b/tests/fetchGit.sh @@ -1,9 +1,6 @@ source common.sh -if [[ -z $(type -p git) ]]; then - echo "Git not installed; skipping Git tests" - exit 99 -fi +requireGit clearStore @@ -237,3 +234,17 @@ rm -rf $repo/.git # should succeed for a repo without commits git init $repo path10=$(nix eval --impure --raw --expr "(builtins.fetchGit \"file://$repo\").outPath") + +# should succeed for a path with a space +# regression test for #7707 +repo="$TEST_ROOT/a b" +git init "$repo" +git -C "$repo" config user.email "foobar@example.com" +git -C "$repo" config user.name "Foobar" + +echo utrecht > "$repo/hello" +touch "$repo/.gitignore" +git -C "$repo" add hello .gitignore +git -C "$repo" commit -m 'Bla1' +cd "$repo" +path11=$(nix eval --impure --raw --expr "(builtins.fetchGit ./.).outPath") diff --git a/tests/fetchGitRefs.sh b/tests/fetchGitRefs.sh index 52926040b..d643fea04 100644 --- a/tests/fetchGitRefs.sh +++ b/tests/fetchGitRefs.sh @@ -1,9 +1,6 @@ source common.sh -if [[ -z $(type -p git) ]]; then - echo "Git not installed; skipping Git tests" - exit 99 -fi +requireGit clearStore @@ -56,7 +53,7 @@ invalid_ref() { else (! git check-ref-format --branch "$1" >/dev/null 2>&1) fi - nix --debug eval --raw --impure --expr "(builtins.fetchGit { url = $repo; ref = ''$1''; }).outPath" 2>&1 | grep 'invalid Git branch/tag name' >/dev/null + expect 1 nix --debug eval --raw --impure --expr "(builtins.fetchGit { url = $repo; ref = ''$1''; }).outPath" 2>&1 | grep 'invalid Git branch/tag name' >/dev/null } diff --git a/tests/fetchGitSubmodules.sh b/tests/fetchGitSubmodules.sh index 08ccaa3cd..df81232e5 100644 --- a/tests/fetchGitSubmodules.sh +++ b/tests/fetchGitSubmodules.sh @@ -2,10 +2,7 @@ source common.sh set -u -if [[ -z $(type -p git) ]]; then - echo "Git not installed; skipping Git submodule tests" - exit 99 -fi +requireGit clearStore diff --git a/tests/fetchMercurial.sh b/tests/fetchMercurial.sh index 5c64ffd26..e6f8525c6 100644 --- a/tests/fetchMercurial.sh +++ b/tests/fetchMercurial.sh @@ -1,9 +1,6 @@ source common.sh -if [[ -z $(type -p hg) ]]; then - echo "Mercurial not installed; skipping Mercurial tests" - exit 99 -fi +[[ $(type -p hq) ]] || skipTest "Mercurial not installed" clearStore diff --git a/tests/fetchTree-file.sh b/tests/fetchTree-file.sh index f0c530466..fe569cfb8 100644 --- a/tests/fetchTree-file.sh +++ b/tests/fetchTree-file.sh @@ -79,7 +79,7 @@ EOF EOF popd - [[ -z "${NIX_DAEMON_PACKAGE}" ]] && return 0 + [[ -z "${NIX_DAEMON_PACKAGE-}" ]] && return 0 # Ensure that a lockfile generated by the current Nix for tarball inputs # can still be read by an older Nix @@ -91,7 +91,7 @@ EOF flake = false; }; outputs = { self, tarball }: { - foo = builtins.readFile "${tarball}/test_input_file"; + foo = builtins.readFile "\${tarball}/test_input_file"; }; } nix flake update diff --git a/tests/fetchurl.sh b/tests/fetchurl.sh index b41d8c4b7..8cd40c09f 100644 --- a/tests/fetchurl.sh +++ b/tests/fetchurl.sh @@ -62,7 +62,7 @@ hash=$(nix-hash --flat --type sha256 $nar) outPath=$(nix-build -vvvvv --expr 'import ' --argstr url 
file://$nar --argstr sha256 $hash \ --arg unpack true --argstr name xyzzy --no-out-link) -echo $outPath | grep -q 'xyzzy' +echo $outPath | grepQuiet 'xyzzy' test -x $outPath/fetchurl.sh test -L $outPath/symlink diff --git a/tests/flakes/build-paths.sh b/tests/flakes/build-paths.sh index 08b4d1763..b399a066e 100644 --- a/tests/flakes/build-paths.sh +++ b/tests/flakes/build-paths.sh @@ -35,7 +35,7 @@ cat > $flake1Dir/flake.nix < $flakeDir/flake.nix <&1 && fail "nix flake check should have failed" || true) -echo "$checkRes" | grep -q "packages.system-1.default" -echo "$checkRes" | grep -q "packages.system-2.default" +echo "$checkRes" | grepQuiet "packages.system-1.default" +echo "$checkRes" | grepQuiet "packages.system-2.default" diff --git a/tests/flakes/common.sh b/tests/flakes/common.sh index 9d79080cd..427abcdde 100644 --- a/tests/flakes/common.sh +++ b/tests/flakes/common.sh @@ -2,13 +2,6 @@ source ../common.sh registry=$TEST_ROOT/registry.json -requireGit() { - if [[ -z $(type -p git) ]]; then - echo "Git not installed; skipping flake tests" - exit 99 - fi -} - writeSimpleFlake() { local flakeDir="$1" cat > $flakeDir/flake.nix < $subRepo/flake.nix < $subRepo/sub.nix +git -C $subRepo add flake.nix sub.nix +git -C $subRepo commit -m Initial + +createGitRepo $rootRepo + +git -C $rootRepo submodule init +git -C $rootRepo submodule add $subRepo submodule +echo '"expression in root repo"' > $rootRepo/root.nix +git -C $rootRepo add root.nix +git -C $rootRepo commit -m "Add root.nix" + +# Flake can live inside a submodule and can be accessed via ?dir=submodule +[[ $(nix eval --json git+file://$rootRepo\?submodules=1\&dir=submodule#sub ) = '"expression in submodule"' ]] +# The flake can access content outside of the submodule +[[ $(nix eval --json git+file://$rootRepo\?submodules=1\&dir=submodule#root ) = '"expression in root repo"' ]] diff --git a/tests/flakes/flakes.sh b/tests/flakes/flakes.sh index ff9a9f820..086f502dd 100644 --- a/tests/flakes/flakes.sh +++ b/tests/flakes/flakes.sh @@ -76,17 +76,17 @@ nix registry add --registry $registry nixpkgs flake1 # Test 'nix registry list'. [[ $(nix registry list | wc -l) == 5 ]] -nix registry list | grep -q '^global' -nix registry list | grep -q -v '^user' # nothing in user registry +nix registry list | grep '^global' +nix registry list | grepInverse '^user' # nothing in user registry # Test 'nix flake metadata'. nix flake metadata flake1 -nix flake metadata flake1 | grep -q 'Locked URL:.*flake1.*' +nix flake metadata flake1 | grepQuiet 'Locked URL:.*flake1.*' # Test 'nix flake metadata' on a local flake. -(cd $flake1Dir && nix flake metadata) | grep -q 'URL:.*flake1.*' -(cd $flake1Dir && nix flake metadata .) | grep -q 'URL:.*flake1.*' -nix flake metadata $flake1Dir | grep -q 'URL:.*flake1.*' +(cd $flake1Dir && nix flake metadata) | grepQuiet 'URL:.*flake1.*' +(cd $flake1Dir && nix flake metadata .) | grepQuiet 'URL:.*flake1.*' +nix flake metadata $flake1Dir | grepQuiet 'URL:.*flake1.*' # Test 'nix flake metadata --json'. json=$(nix flake metadata flake1 --json | jq .) @@ -96,7 +96,9 @@ json=$(nix flake metadata flake1 --json | jq .) 
hash1=$(echo "$json" | jq -r .revision) echo -n '# foo' >> $flake1Dir/flake.nix +flake1OriginalCommit=$(git -C $flake1Dir rev-parse HEAD) git -C $flake1Dir commit -a -m 'Foo' +flake1NewCommit=$(git -C $flake1Dir rev-parse HEAD) hash2=$(nix flake metadata flake1 --json --refresh | jq -r .revision) [[ $hash1 != $hash2 ]] @@ -134,11 +136,11 @@ nix build -o $TEST_ROOT/result flake2#bar --impure --no-write-lock-file nix eval --expr "builtins.getFlake \"$flake2Dir\"" --impure # Building a local flake with an unlocked dependency should fail with --no-update-lock-file. -nix build -o $TEST_ROOT/result $flake2Dir#bar --no-update-lock-file 2>&1 | grep 'requires lock file changes' +expect 1 nix build -o $TEST_ROOT/result $flake2Dir#bar --no-update-lock-file 2>&1 | grep 'requires lock file changes' # But it should succeed without that flag. nix build -o $TEST_ROOT/result $flake2Dir#bar --no-write-lock-file -nix build -o $TEST_ROOT/result $flake2Dir#bar --no-update-lock-file 2>&1 | grep 'requires lock file changes' +expect 1 nix build -o $TEST_ROOT/result $flake2Dir#bar --no-update-lock-file 2>&1 | grep 'requires lock file changes' nix build -o $TEST_ROOT/result $flake2Dir#bar --commit-lock-file [[ -e $flake2Dir/flake.lock ]] [[ -z $(git -C $flake2Dir diff main || echo failed) ]] @@ -196,10 +198,10 @@ git -C $flake3Dir add flake.lock git -C $flake3Dir commit -m 'Add lockfile' # Test whether registry caching works. -nix registry list --flake-registry file://$registry | grep -q flake3 +nix registry list --flake-registry file://$registry | grepQuiet flake3 mv $registry $registry.tmp nix store gc -nix registry list --flake-registry file://$registry --refresh | grep -q flake3 +nix registry list --flake-registry file://$registry --refresh | grepQuiet flake3 mv $registry.tmp $registry # Test whether flakes are registered as GC roots for offline use. @@ -346,8 +348,8 @@ nix registry remove flake1 nix registry add user-flake1 git+file://$flake1Dir nix registry add user-flake2 git+file://$flake2Dir [[ $(nix --flake-registry "" registry list | wc -l) == 2 ]] -nix --flake-registry "" registry list | grep -q -v '^global' # nothing in global registry -nix --flake-registry "" registry list | grep -q '^user' +nix --flake-registry "" registry list | grepQuietInverse '^global' # nothing in global registry +nix --flake-registry "" registry list | grepQuiet '^user' nix registry remove user-flake1 nix registry remove user-flake2 [[ $(nix registry list | wc -l) == 5 ]] @@ -454,7 +456,7 @@ url=$(nix flake metadata --json file://$TEST_ROOT/flake.tar.gz | jq -r .url) nix build -o $TEST_ROOT/result $url # Building with an incorrect SRI hash should fail. -nix build -o $TEST_ROOT/result "file://$TEST_ROOT/flake.tar.gz?narHash=sha256-qQ2Zz4DNHViCUrp6gTS7EE4+RMqFQtUfWF2UNUtJKS0=" 2>&1 | grep 'NAR hash mismatch' +expectStderr 102 nix build -o $TEST_ROOT/result "file://$TEST_ROOT/flake.tar.gz?narHash=sha256-qQ2Zz4DNHViCUrp6gTS7EE4+RMqFQtUfWF2UNUtJKS0=" | grep 'NAR hash mismatch' # Test --override-input. git -C $flake3Dir reset --hard @@ -491,3 +493,14 @@ nix store delete $(nix store add-path $badFlakeDir) [[ $(nix-instantiate --eval flake:git+file://$flake3Dir -A x) = 123 ]] [[ $(nix-instantiate -I flake3=flake:flake3 --eval '' -A x) = 123 ]] [[ $(NIX_PATH=flake3=flake:flake3 nix-instantiate --eval '' -A x) = 123 ]] + +# Test alternate lockfile paths. 
+nix flake lock $flake2Dir --output-lock-file $TEST_ROOT/flake2.lock +cmp $flake2Dir/flake.lock $TEST_ROOT/flake2.lock >/dev/null # lockfiles should be identical, since we're referencing flake2's original one + +nix flake lock $flake2Dir --output-lock-file $TEST_ROOT/flake2-overridden.lock --override-input flake1 git+file://$flake1Dir?rev=$flake1OriginalCommit +expectStderr 1 cmp $flake2Dir/flake.lock $TEST_ROOT/flake2-overridden.lock +nix flake metadata $flake2Dir --reference-lock-file $TEST_ROOT/flake2-overridden.lock | grepQuiet $flake1OriginalCommit + +# reference-lock-file can only be used if allow-dirty is set. +expectStderr 1 nix flake metadata $flake2Dir --no-allow-dirty --reference-lock-file $TEST_ROOT/flake2-overridden.lock diff --git a/tests/flakes/follow-paths.sh b/tests/flakes/follow-paths.sh index 19cc1bafa..fe9b51c65 100644 --- a/tests/flakes/follow-paths.sh +++ b/tests/flakes/follow-paths.sh @@ -128,7 +128,7 @@ EOF git -C $flakeFollowsA add flake.nix -nix flake lock $flakeFollowsA 2>&1 | grep 'points outside' +expect 1 nix flake lock $flakeFollowsA 2>&1 | grep 'points outside' # Non-existant follows should print a warning. cat >$flakeFollowsA/flake.nix < $flakeDir/message + cat > $flakeDir/flake.nix < $flakeDir/message + cat > $flakeDir/flake.nix < $clientDir/flake.nix <flake.nix < show-output.json +nix eval --impure --expr ' +let show_output = builtins.fromJSON (builtins.readFile ./show-output.json); +in +assert show_output.legacyPackages.${builtins.currentSystem}.AAAAAASomeThingsFailToEvaluate == { }; +assert show_output.legacyPackages.${builtins.currentSystem}.simple.name == "simple"; +true +' diff --git a/tests/fmt.sh b/tests/fmt.sh index 254681ca2..3c1bd9989 100644 --- a/tests/fmt.sh +++ b/tests/fmt.sh @@ -1,7 +1,5 @@ source common.sh -set -o pipefail - clearStore rm -rf $TEST_HOME/.cache $TEST_HOME/.config $TEST_HOME/.local diff --git a/tests/function-trace.sh b/tests/function-trace.sh index b0d6c9d59..bd804bf18 100755 --- a/tests/function-trace.sh +++ b/tests/function-trace.sh @@ -10,17 +10,15 @@ expect_trace() { --trace-function-calls \ --expr "$expr" 2>&1 \ | grep "function-trace" \ - | sed -e 's/ [0-9]*$//' + | sed -e 's/ [0-9]*$//' \ + || true ) echo -n "Tracing expression '$expr'" - set +e msg=$(diff -swB \ <(echo "$expect") \ <(echo "$actual") - ); - result=$? - set -e + ) && result=0 || result=$? if [ $result -eq 0 ]; then echo " ok." else @@ -67,5 +65,3 @@ expect_trace '1 2' " function-trace entered «string»:1:1 at function-trace exited «string»:1:1 at " - -set -e diff --git a/tests/gc-runtime.sh b/tests/gc-runtime.sh index 6094959cb..dc1826a55 100644 --- a/tests/gc-runtime.sh +++ b/tests/gc-runtime.sh @@ -4,7 +4,7 @@ case $system in *linux*) ;; *) - exit 99; + skipTest "Not running Linux"; esac set -m # enable job control, needed for kill diff --git a/tests/gc.sh b/tests/gc.sh index ad09a8b39..98d6cb032 100644 --- a/tests/gc.sh +++ b/tests/gc.sh @@ -50,3 +50,20 @@ if test -e $outPath/foobar; then false; fi # Check that the store is empty. rmdir $NIX_STORE_DIR/.links rmdir $NIX_STORE_DIR + +## Test `nix-collect-garbage -d` +# `nix-env` doesn't work with CA derivations, so let's ignore that bit if we're +# using them +if [[ -z "${NIX_TESTS_CA_BY_DEFAULT:-}" ]]; then + clearProfiles + # Run two `nix-env` commands, should create two generations of + # the profile + nix-env -f ./user-envs.nix -i foo-1.0 + nix-env -f ./user-envs.nix -i foo-2.0pre1 + [[ $(nix-env --list-generations | wc -l) -eq 2 ]] + + # Clear the profile history. 
There should be only one generation + # left + nix-collect-garbage -d + [[ $(nix-env --list-generations | wc -l) -eq 1 ]] +fi diff --git a/tests/hash.sh b/tests/hash.sh index e5f75e2cf..34c1bb38a 100644 --- a/tests/hash.sh +++ b/tests/hash.sh @@ -2,13 +2,19 @@ source common.sh try () { printf "%s" "$2" > $TEST_ROOT/vector - hash=$(nix hash file --base16 $EXTRA --type "$1" $TEST_ROOT/vector) - if test "$hash" != "$3"; then - echo "hash $1, expected $3, got $hash" + hash="$(nix-hash --flat ${FORMAT_FLAG-} --type "$1" "$TEST_ROOT/vector")" + if ! (( "${NO_TEST_CLASSIC-}" )) && test "$hash" != "$3"; then + echo "try nix-hash: hash $1, expected $3, got $hash" + exit 1 + fi + hash="$(nix hash file ${FORMAT_FLAG-} --type "$1" "$TEST_ROOT/vector")" + if ! (( "${NO_TEST_NIX_COMMAND-}" )) && test "$hash" != "$3"; then + echo "try nix hash: hash $1, expected $3, got $hash" exit 1 fi } +FORMAT_FLAG=--base16 try md5 "" "d41d8cd98f00b204e9800998ecf8427e" try md5 "a" "0cc175b9c0f1b6a831c399e269772661" try md5 "abc" "900150983cd24fb0d6963f7d28e17f72" @@ -28,16 +34,24 @@ try sha256 "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" "248d6a61d try sha512 "" "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e" try sha512 "abc" "ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f" try sha512 "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" "204a8fc6dda82f0a0ced7beb8e08a41657c16ef468b228a8279be331a703c33596fd15c13b1b07f9aa1d3bea57789ca031ad85c7a71dd70354ec631238ca3445" +unset FORMAT_FLAG -EXTRA=--base32 +FORMAT_FLAG=--base32 try sha256 "abc" "1b8m03r63zqhnjf7l5wnldhh7c134ap5vpj0850ymkq1iyzicy5s" -EXTRA= +unset FORMAT_FLAG -EXTRA=--sri +FORMAT_FLAG=--sri try sha512 "" "sha512-z4PhNX7vuL3xVChQ1m2AB9Yg5AULVxXcg/SpIdNs6c5H0NE8XYXysP+DGNKHfuwvY7kxvUdBeoGlODJ6+SfaPg==" try sha512 "abc" "sha512-3a81oZNherrMQXNJriBBMRLm+k6JqX6iCp7u5ktV05ohkpkqJ0/BqDa6PCOj/uu9RU1EI2Q86A4qmslPpUyknw==" try sha512 "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" "sha512-IEqPxt2oLwoM7XvrjgikFlfBbvRosiioJ5vjMacDwzWW/RXBOxsH+aodO+pXeJygMa2Fx6cd1wNU7GMSOMo0RQ==" try sha256 "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" "sha256-JI1qYdIGOLjlwCaTDD5gOaM85Flk/yFn9uzt1BnbBsE=" +unset FORMAT_FLAG + +# nix-hash [--flat] defaults to the Base16 format +NO_TEST_NIX_COMMAND=1 try sha512 "abc" "ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f" + +# nix hash [file|path] defaults to the SRI format +NO_TEST_CLASSIC=1 try sha512 "abc" "sha512-3a81oZNherrMQXNJriBBMRLm+k6JqX6iCp7u5ktV05ohkpkqJ0/BqDa6PCOj/uu9RU1EI2Q86A4qmslPpUyknw==" try2 () { hash=$(nix-hash --type "$1" $TEST_ROOT/hash-path) @@ -69,12 +83,18 @@ try2 md5 "f78b733a68f5edbdf9413899339eaa4a" # Conversion. 
try3() { + h64=$(nix-hash --type "$1" --to-base64 "$2") + [ "$h64" = "$4" ] h64=$(nix hash to-base64 --type "$1" "$2") [ "$h64" = "$4" ] + sri=$(nix-hash --type "$1" --to-sri "$2") + [ "$sri" = "$1-$4" ] sri=$(nix hash to-sri --type "$1" "$2") [ "$sri" = "$1-$4" ] h32=$(nix-hash --type "$1" --to-base32 "$2") [ "$h32" = "$3" ] + h32=$(nix hash to-base32 --type "$1" "$2") + [ "$h32" = "$3" ] h16=$(nix-hash --type "$1" --to-base16 "$h32") [ "$h16" = "$2" ] h16=$(nix hash to-base16 --type "$1" "$h64") diff --git a/tests/impure-derivations.sh b/tests/impure-derivations.sh index 23a193833..39d053a04 100644 --- a/tests/impure-derivations.sh +++ b/tests/impure-derivations.sh @@ -5,13 +5,20 @@ requireDaemonNewerThan "2.8pre20220311" enableFeatures "ca-derivations impure-derivations" restartDaemon -set -o pipefail - clearStore # Basic test of impure derivations: building one a second time should not use the previous result. printf 0 > $TEST_ROOT/counter +# `nix derivation add` with impure derivations work +drvPath=$(nix-instantiate ./impure-derivations.nix -A impure) +nix derivation show $drvPath | jq .[] > $TEST_HOME/impure-drv.json +drvPath2=$(nix derivation add < $TEST_HOME/impure-drv.json) +[[ "$drvPath" = "$drvPath2" ]] + +# But only with the experimental feature! +expectStderr 1 nix derivation add < $TEST_HOME/impure-drv.json --experimental-features nix-command | grepQuiet "experimental Nix feature 'impure-derivations' is disabled" + nix build --dry-run --json --file ./impure-derivations.nix impure.all json=$(nix build -L --no-link --json --file ./impure-derivations.nix impure.all) path1=$(echo $json | jq -r .[].outputs.out) @@ -39,8 +46,8 @@ path4=$(nix build -L --no-link --json --file ./impure-derivations.nix impureOnIm (! nix build -L --no-link --json --file ./impure-derivations.nix inputAddressed 2>&1) | grep 'depends on impure derivation' drvPath=$(nix eval --json --file ./impure-derivations.nix impure.drvPath | jq -r .) -[[ $(nix show-derivation $drvPath | jq ".[\"$drvPath\"].outputs.out.impure") = true ]] -[[ $(nix show-derivation $drvPath | jq ".[\"$drvPath\"].outputs.stuff.impure") = true ]] +[[ $(nix derivation show $drvPath | jq ".[\"$drvPath\"].outputs.out.impure") = true ]] +[[ $(nix derivation show $drvPath | jq ".[\"$drvPath\"].outputs.stuff.impure") = true ]] # Fixed-output derivations *can* depend on impure derivations. 
path5=$(nix build -L --no-link --json --file ./impure-derivations.nix contentAddressed | jq -r .[].outputs.out) diff --git a/tests/init.sh b/tests/init.sh index fea659516..c420e8c9f 100755 --- a/tests/init.sh +++ b/tests/init.sh @@ -1,5 +1,3 @@ -set -eu -o pipefail - # Don't start the daemon source common/vars-and-functions.sh diff --git a/tests/install-darwin.sh b/tests/install-darwin.sh index 7e44e54c4..ea2b75323 100755 --- a/tests/install-darwin.sh +++ b/tests/install-darwin.sh @@ -4,7 +4,7 @@ set -eux cleanup() { PLIST="/Library/LaunchDaemons/org.nixos.nix-daemon.plist" - if sudo launchctl list | grep -q nix-daemon; then + if sudo launchctl list | grepQuiet nix-daemon; then sudo launchctl unload "$PLIST" fi diff --git a/tests/installer/default.nix b/tests/installer/default.nix index 31d83699d..49cfd2bcc 100644 --- a/tests/installer/default.nix +++ b/tests/installer/default.nix @@ -17,7 +17,7 @@ let script = '' tar -xf ./nix.tar.xz mv ./nix-* nix - ./nix/install --no-daemon + ./nix/install --no-daemon --no-channel-add ''; }; @@ -30,6 +30,14 @@ let }; }; + mockChannel = pkgs: + pkgs.runCommandNoCC "mock-channel" {} '' + mkdir nixexprs + mkdir -p $out/channel + echo -n 'someContent' > nixexprs/someFile + tar cvf - nixexprs | bzip2 > $out/channel/nixexprs.tar.bz2 + ''; + disableSELinux = "sudo setenforce 0"; images = { @@ -189,6 +197,11 @@ let echo "Running installer..." $ssh "set -eux; $installScript" + echo "Copying the mock channel" + # `scp -r` doesn't seem to work properly on some rhel instances, so let's + # use a plain tarpipe instead + tar -C ${mockChannel pkgs} -c channel | ssh -p 20022 $ssh_opts vagrant@localhost tar x -f- + echo "Testing Nix installation..." $ssh < \$out"]; }') [[ \$(cat \$out) = foobar ]] + + if pgrep nix-daemon; then + MAYBESUDO="sudo" + else + MAYBESUDO="" + fi + + + $MAYBESUDO \$(which nix-channel) --add file://\$HOME/channel myChannel + $MAYBESUDO \$(which nix-channel) --update + [[ \$(nix-instantiate --eval --expr 'builtins.readFile ') = '"someContent"' ]] EOF echo "Done!" diff --git a/tests/lang.sh b/tests/lang.sh index 95e795e2e..8170cb39d 100644 --- a/tests/lang.sh +++ b/tests/lang.sh @@ -4,12 +4,19 @@ export TEST_VAR=foo # for eval-okay-getenv.nix export NIX_REMOTE=dummy:// export NIX_STORE_DIR=/nix/store -nix-instantiate --eval -E 'builtins.trace "Hello" 123' 2>&1 | grep -q Hello +nix-instantiate --eval -E 'builtins.trace "Hello" 123' 2>&1 | grepQuiet Hello +nix-instantiate --eval -E 'builtins.trace "Hello" 123' 2>/dev/null | grepQuiet 123 nix-instantiate --eval -E 'builtins.addErrorContext "Hello" 123' 2>&1 -nix-instantiate --trace-verbose --eval -E 'builtins.traceVerbose "Hello" 123' 2>&1 | grep -q Hello -(! nix-instantiate --eval -E 'builtins.traceVerbose "Hello" 123' 2>&1 | grep -q Hello) -(! 
nix-instantiate --show-trace --eval -E 'builtins.addErrorContext "Hello" 123' 2>&1 | grep -q Hello) -nix-instantiate --show-trace --eval -E 'builtins.addErrorContext "Hello" (throw "Foo")' 2>&1 | grep -q Hello +nix-instantiate --trace-verbose --eval -E 'builtins.traceVerbose "Hello" 123' 2>&1 | grepQuiet Hello +nix-instantiate --eval -E 'builtins.traceVerbose "Hello" 123' 2>&1 | grepQuietInverse Hello +nix-instantiate --show-trace --eval -E 'builtins.addErrorContext "Hello" 123' 2>&1 | grepQuietInverse Hello +expectStderr 1 nix-instantiate --show-trace --eval -E 'builtins.addErrorContext "Hello" (throw "Foo")' | grepQuiet Hello + +nix-instantiate --eval -E 'let x = builtins.trace { x = x; } true; in x' \ + 2>&1 | grepQuiet -E 'trace: { x = «potential infinite recursion»; }' + +nix-instantiate --eval -E 'let x = { repeating = x; tracing = builtins.trace x true; }; in x.tracing'\ + 2>&1 | grepQuiet -F 'trace: { repeating = «repeated»; tracing = «potential infinite recursion»; }' set +x diff --git a/tests/legacy-ssh-store.sh b/tests/legacy-ssh-store.sh new file mode 100644 index 000000000..71b716b84 --- /dev/null +++ b/tests/legacy-ssh-store.sh @@ -0,0 +1,4 @@ +source common.sh + +# Check that store ping trusted doesn't yet work with ssh:// +nix --store ssh://localhost?remote-store=$TEST_ROOT/other-store store ping --json | jq -e 'has("trusted") | not' diff --git a/tests/linux-sandbox.sh b/tests/linux-sandbox.sh index e62039567..5a2cf7abd 100644 --- a/tests/linux-sandbox.sh +++ b/tests/linux-sandbox.sh @@ -4,13 +4,13 @@ needLocalStore "the sandbox only runs on the builder side, so it makes no sense clearStore -if ! canUseSandbox; then exit 99; fi +requireSandboxSupport # Note: we need to bind-mount $SHELL into the chroot. Currently we # only support the case where $SHELL is in the Nix store, because # otherwise things get complicated (e.g. if it's in /bin, do we need # /lib as well?). -if [[ ! $SHELL =~ /nix/store ]]; then exit 99; fi +if [[ ! $SHELL =~ /nix/store ]]; then skipTest "Shell is not from Nix store"; fi chmod -R u+w $TEST_ROOT/store0 || true rm -rf $TEST_ROOT/store0 @@ -35,8 +35,8 @@ nix-build dependencies.nix --no-out-link --check --sandbox-paths /nix/store nix-build check.nix -A nondeterministic --sandbox-paths /nix/store --no-out-link (! nix-build check.nix -A nondeterministic --sandbox-paths /nix/store --no-out-link --check -K 2> $TEST_ROOT/log) -if grep -q 'error: renaming' $TEST_ROOT/log; then false; fi -grep -q 'may not be deterministic' $TEST_ROOT/log +if grepQuiet 'error: renaming' $TEST_ROOT/log; then false; fi +grepQuiet 'may not be deterministic' $TEST_ROOT/log # Test that sandboxed builds cannot write to /etc easily (! 
nix-build -E 'with import ./config.nix; mkDerivation { name = "etc-write"; buildCommand = "echo > /etc/test"; }' --no-out-link --sandbox-paths /nix/store) diff --git a/tests/local-store.sh b/tests/local-store.sh index 0247346f1..89502f864 100644 --- a/tests/local-store.sh +++ b/tests/local-store.sh @@ -17,3 +17,6 @@ PATH2=$(nix path-info --store "$PWD/x" $CORRECT_PATH) PATH3=$(nix path-info --store "local?root=$PWD/x" $CORRECT_PATH) [ $CORRECT_PATH == $PATH3 ] + +# Ensure store ping trusted works with local store +nix --store ./x store ping --json | jq -e '.trusted' diff --git a/tests/local.mk b/tests/local.mk index d94f1f8b9..7c3b42599 100644 --- a/tests/local.mk +++ b/tests/local.mk @@ -1,20 +1,25 @@ nix_tests = \ + test-infra.sh \ init.sh \ flakes/flakes.sh \ flakes/run.sh \ flakes/mercurial.sh \ flakes/circular.sh \ flakes/init.sh \ + flakes/inputs.sh \ flakes/follow-paths.sh \ flakes/bundle.sh \ flakes/check.sh \ flakes/unlocked-override.sh \ flakes/absolute-paths.sh \ flakes/build-paths.sh \ + flakes/flake-in-submodule.sh \ ca/gc.sh \ gc.sh \ remote-store.sh \ + legacy-ssh-store.sh \ lang.sh \ + experimental-features.sh \ fetchMercurial.sh \ gc-auto.sh \ user-envs.sh \ @@ -65,6 +70,10 @@ nix_tests = \ check-reqs.sh \ build-remote-content-addressed-fixed.sh \ build-remote-content-addressed-floating.sh \ + build-remote-trustless-should-pass-0.sh \ + build-remote-trustless-should-pass-1.sh \ + build-remote-trustless-should-pass-3.sh \ + build-remote-trustless-should-fail-0.sh \ nar-access.sh \ pure-eval.sh \ eval.sh \ @@ -97,6 +106,8 @@ nix_tests = \ eval-store.sh \ why-depends.sh \ ca/why-depends.sh \ + derivation-json.sh \ + ca/derivation-json.sh \ import-derivation.sh \ ca/import-derivation.sh \ nix_path.sh \ @@ -112,8 +123,8 @@ nix_tests = \ db-migration.sh \ bash-profile.sh \ pass-as-file.sh \ - describe-stores.sh \ - flake-bundler.sh \ + nix-profile.sh \ + suggestions.sh \ store-ping.sh \ fetchClosure.sh \ completions.sh \ @@ -128,9 +139,9 @@ endif install-tests += $(foreach x, $(nix_tests), tests/$(x)) -clean-files += $(d)/tests/common/vars-and-functions.sh $(d)/config.nix $(d)/ca/config.nix +clean-files += $(d)/common/vars-and-functions.sh $(d)/config.nix $(d)/ca/config.nix -test-deps += tests/common/vars-and-functions.sh tests/config.nix tests/ca/config.nix tests/plugins/libplugintest.$(SO_EXT) +test-deps += tests/common/vars-and-functions.sh tests/config.nix tests/ca/config.nix ifeq ($(BUILD_SHARED_LIBS), 1) test-deps += tests/plugins/libplugintest.$(SO_EXT) diff --git a/tests/misc.sh b/tests/misc.sh index 2830856ae..60d58310e 100644 --- a/tests/misc.sh +++ b/tests/misc.sh @@ -3,17 +3,17 @@ source common.sh # Tests miscellaneous commands. # Do all commands have help? -#nix-env --help | grep -q install -#nix-store --help | grep -q realise -#nix-instantiate --help | grep -q eval -#nix-hash --help | grep -q base32 +#nix-env --help | grepQuiet install +#nix-store --help | grepQuiet realise +#nix-instantiate --help | grepQuiet eval +#nix-hash --help | grepQuiet base32 # Can we ask for the version number? nix-env --version | grep "$version" # Usage errors. -nix-env --foo 2>&1 | grep "no operation" -nix-env -q --foo 2>&1 | grep "unknown flag" +expect 1 nix-env --foo 2>&1 | grep "no operation" +expect 1 nix-env -q --foo 2>&1 | grep "unknown flag" # Eval Errors. 
eval_arg_res=$(nix-instantiate --eval -E 'let a = {} // a; in a.foo' 2>&1 || true) diff --git a/tests/multiple-outputs.sh b/tests/multiple-outputs.sh index 66be6fa64..330600d08 100644 --- a/tests/multiple-outputs.sh +++ b/tests/multiple-outputs.sh @@ -19,8 +19,8 @@ echo "evaluating c..." # outputs. drvPath=$(nix-instantiate multiple-outputs.nix -A c) #[ "$drvPath" = "$drvPath2" ] -grep -q 'multiple-outputs-a.drv",\["first","second"\]' $drvPath -grep -q 'multiple-outputs-b.drv",\["out"\]' $drvPath +grepQuiet 'multiple-outputs-a.drv",\["first","second"\]' $drvPath +grepQuiet 'multiple-outputs-b.drv",\["out"\]' $drvPath # While we're at it, test the ‘unsafeDiscardOutputDependency’ primop. outPath=$(nix-build multiple-outputs.nix -A d --no-out-link) @@ -84,5 +84,5 @@ nix-store --gc --print-roots rm -rf $NIX_STORE_DIR/.links rmdir $NIX_STORE_DIR -nix build -f multiple-outputs.nix invalid-output-name-1 2>&1 | grep 'contains illegal character' -nix build -f multiple-outputs.nix invalid-output-name-2 2>&1 | grep 'contains illegal character' +expect 1 nix build -f multiple-outputs.nix invalid-output-name-1 2>&1 | grep 'contains illegal character' +expect 1 nix build -f multiple-outputs.nix invalid-output-name-2 2>&1 | grep 'contains illegal character' diff --git a/tests/nar-access.sh b/tests/nar-access.sh index dcc2e8a36..d487d58d2 100644 --- a/tests/nar-access.sh +++ b/tests/nar-access.sh @@ -46,8 +46,8 @@ diff -u \ <(echo '{"type":"regular","size":0}' | jq -S) # Test missing files. -nix store ls --json -R $storePath/xyzzy 2>&1 | grep 'does not exist in NAR' -nix store ls $storePath/xyzzy 2>&1 | grep 'does not exist' +expect 1 nix store ls --json -R $storePath/xyzzy 2>&1 | grep 'does not exist in NAR' +expect 1 nix store ls $storePath/xyzzy 2>&1 | grep 'does not exist' # Test failure to dump. if nix-store --dump $storePath >/dev/full ; then diff --git a/tests/nix-channel.sh b/tests/nix-channel.sh index b64283f48..dbb3114f1 100644 --- a/tests/nix-channel.sh +++ b/tests/nix-channel.sh @@ -6,7 +6,7 @@ rm -f $TEST_HOME/.nix-channels $TEST_HOME/.nix-profile # Test add/list/remove. nix-channel --add http://foo/bar xyzzy -nix-channel --list | grep -q http://foo/bar +nix-channel --list | grepQuiet http://foo/bar nix-channel --remove xyzzy [ -e $TEST_HOME/.nix-channels ] @@ -17,7 +17,7 @@ nix-channel --remove xyzzy export NIX_CONFIG="use-xdg-base-directories = true" nix-channel --add http://foo/bar xyzzy -nix-channel --list | grep -q http://foo/bar +nix-channel --list | grepQuiet http://foo/bar nix-channel --remove xyzzy unset NIX_CONFIG @@ -41,8 +41,8 @@ nix-channel --update # Do a query. nix-env -qa \* --meta --xml --out-path > $TEST_ROOT/meta.xml -grep -q 'meta.*description.*Random test package' $TEST_ROOT/meta.xml -grep -q 'item.*attrPath="foo".*name="dependencies-top"' $TEST_ROOT/meta.xml +grepQuiet 'meta.*description.*Random test package' $TEST_ROOT/meta.xml +grepQuiet 'item.*attrPath="foo".*name="dependencies-top"' $TEST_ROOT/meta.xml # Do an install. nix-env -i dependencies-top @@ -54,9 +54,9 @@ nix-channel --update # Do a query. 
nix-env -qa \* --meta --xml --out-path > $TEST_ROOT/meta.xml -grep -q 'meta.*description.*Random test package' $TEST_ROOT/meta.xml -grep -q 'item.*attrPath="bar".*name="dependencies-top"' $TEST_ROOT/meta.xml -grep -q 'item.*attrPath="foo".*name="dependencies-top"' $TEST_ROOT/meta.xml +grepQuiet 'meta.*description.*Random test package' $TEST_ROOT/meta.xml +grepQuiet 'item.*attrPath="bar".*name="dependencies-top"' $TEST_ROOT/meta.xml +grepQuiet 'item.*attrPath="foo".*name="dependencies-top"' $TEST_ROOT/meta.xml # Do an install. nix-env -i dependencies-top diff --git a/tests/nix-daemon-untrusting.sh b/tests/nix-daemon-untrusting.sh new file mode 100755 index 000000000..bcdb70989 --- /dev/null +++ b/tests/nix-daemon-untrusting.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +exec nix-daemon --force-untrusted "$@" diff --git a/tests/nix-profile.sh b/tests/nix-profile.sh index 0e042f407..abf9eddc0 100644 --- a/tests/nix-profile.sh +++ b/tests/nix-profile.sh @@ -140,6 +140,37 @@ printf World2 > $flake2Dir/who nix profile install $flake1Dir [[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello World" ]] +expect 1 nix profile install $flake2Dir +diff -u <( + nix --offline profile install $flake2Dir 2>&1 1> /dev/null \ + | grep -vE "^warning: " \ + | grep -vE "^error \(ignored\): " \ + || true +) <(cat << EOF +error: An existing package already provides the following file: + + $(nix build --no-link --print-out-paths ${flake1Dir}"#default.out")/bin/hello + + This is the conflicting file from the new package: + + $(nix build --no-link --print-out-paths ${flake2Dir}"#default.out")/bin/hello + + To remove the existing package: + + nix profile remove path:${flake1Dir} + + The new package can also be installed next to the existing one by assigning a different priority. + The conflicting packages have a priority of 5. + To prioritise the new package: + + nix profile install path:${flake2Dir} --priority 4 + + To prioritise the existing package: + + nix profile install path:${flake2Dir} --priority 6 +EOF +) +[[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello World" ]] nix profile install $flake2Dir --priority 100 [[ $($TEST_HOME/.nix-profile/bin/hello) = "Hello World" ]] nix profile install $flake2Dir --priority 0 diff --git a/tests/nix-shell.sh b/tests/nix-shell.sh index f291c6f79..044b96d54 100644 --- a/tests/nix-shell.sh +++ b/tests/nix-shell.sh @@ -88,20 +88,43 @@ output=$($TEST_ROOT/spaced\ \\\'\"shell.shebang.rb abc ruby) nix develop -f "$shellDotNix" shellDrv -c bash -c '[[ -n $stdenv ]]' # Ensure `nix develop -c` preserves stdin -echo foo | nix develop -f "$shellDotNix" shellDrv -c cat | grep -q foo +echo foo | nix develop -f "$shellDotNix" shellDrv -c cat | grepQuiet foo # Ensure `nix develop -c` actually executes the command if stdout isn't a terminal -nix develop -f "$shellDotNix" shellDrv -c echo foo |& grep -q foo +nix develop -f "$shellDotNix" shellDrv -c echo foo |& grepQuiet foo # Test 'nix print-dev-env'. -[[ $(nix print-dev-env -f "$shellDotNix" shellDrv --json | jq -r .variables.arr1.value[2]) = '3 4' ]] -source <(nix print-dev-env -f "$shellDotNix" shellDrv) -[[ -n $stdenv ]] -[[ ${arr1[2]} = "3 4" ]] -[[ ${arr2[1]} = $'\n' ]] -[[ ${arr2[2]} = $'x\ny' ]] -[[ $(fun) = blabla ]] +nix print-dev-env -f "$shellDotNix" shellDrv > $TEST_ROOT/dev-env.sh +nix print-dev-env -f "$shellDotNix" shellDrv --json > $TEST_ROOT/dev-env.json + +# Ensure `nix print-dev-env --json` contains variable assignments. 
+[[ $(jq -r .variables.arr1.value[2] $TEST_ROOT/dev-env.json) = '3 4' ]] + +# Run tests involving `source <(nix print-dev-inv)` in subshells to avoid modifying the current +# environment. + +set +u # FIXME: Make print-dev-env `set -u` compliant (issue #7951) + +# Ensure `source <(nix print-dev-env)` modifies the environment. +( + path=$PATH + source $TEST_ROOT/dev-env.sh + [[ -n $stdenv ]] + [[ ${arr1[2]} = "3 4" ]] + [[ ${arr2[1]} = $'\n' ]] + [[ ${arr2[2]} = $'x\ny' ]] + [[ $(fun) = blabla ]] + [[ $PATH = $(jq -r .variables.PATH.value $TEST_ROOT/dev-env.json):$path ]] +) + +# Ensure `source <(nix print-dev-env)` handles the case when PATH is empty. +( + path=$PATH + PATH= + source $TEST_ROOT/dev-env.sh + [[ $PATH = $(PATH=$path jq -r .variables.PATH.value $TEST_ROOT/dev-env.json) ]] +) # Test nix-shell with ellipsis and no `inNixShell` argument (for backwards compat with old nixpkgs) cat >$TEST_ROOT/shell-ellipsis.nix <' --restrict-eval [[ $(nix-instantiate --find-file by-absolute-path/simple.nix) = $PWD/simple.nix ]] [[ $(nix-instantiate --find-file by-relative-path/simple.nix) = $PWD/simple.nix ]] - -unset NIX_PATH - -[[ $(nix-instantiate --option nix-path by-relative-path=. --find-file by-relative-path/simple.nix) = "$PWD/simple.nix" ]] -[[ $(NIX_PATH= nix-instantiate --option nix-path by-relative-path=. --find-file by-relative-path/simple.nix) = "$PWD/simple.nix" ]] diff --git a/tests/nixos/nix-copy.nix b/tests/nixos/nix-copy.nix new file mode 100644 index 000000000..ee8b77100 --- /dev/null +++ b/tests/nixos/nix-copy.nix @@ -0,0 +1,85 @@ +# Test that ‘nix copy’ works over ssh. + +{ lib, config, nixpkgs, hostPkgs, ... }: + +let + pkgs = config.nodes.client.nixpkgs.pkgs; + + pkgA = pkgs.cowsay; + pkgB = pkgs.wget; + pkgC = pkgs.hello; + pkgD = pkgs.tmux; + +in { + name = "nix-copy"; + + enableOCR = true; + + nodes = + { client = + { config, lib, pkgs, ... }: + { virtualisation.writableStore = true; + virtualisation.additionalPaths = [ pkgA pkgD.drvPath ]; + nix.settings.substituters = lib.mkForce [ ]; + nix.settings.experimental-features = [ "nix-command" ]; + services.getty.autologinUser = "root"; + }; + + server = + { config, pkgs, ... }: + { services.openssh.enable = true; + services.openssh.permitRootLogin = "yes"; + users.users.root.password = "foobar"; + virtualisation.writableStore = true; + virtualisation.additionalPaths = [ pkgB pkgC ]; + }; + }; + + testScript = { nodes }: '' + # fmt: off + import subprocess + + # Create an SSH key on the client. + subprocess.run([ + "${pkgs.openssh}/bin/ssh-keygen", "-t", "ed25519", "-f", "key", "-N", "" + ], capture_output=True, check=True) + + start_all() + + server.wait_for_unit("sshd") + client.wait_for_unit("network.target") + client.wait_for_unit("getty@tty1.service") + client.wait_for_text("]#") + + # Copy the closure of package A from the client to the server using password authentication, + # and check that all prompts are visible + server.fail("nix-store --check-validity ${pkgA}") + client.send_chars("nix copy --to ssh://server ${pkgA} >&2; echo done\n") + client.wait_for_text("continue connecting") + client.send_chars("yes\n") + client.wait_for_text("Password:") + client.send_chars("foobar\n") + client.wait_for_text("done") + server.succeed("nix-store --check-validity ${pkgA}") + + client.copy_from_host("key", "/root/.ssh/id_ed25519") + client.succeed("chmod 600 /root/.ssh/id_ed25519") + + # Install the SSH key on the server. 
+ server.copy_from_host("key.pub", "/root/.ssh/authorized_keys") + server.succeed("systemctl restart sshd") + client.succeed(f"ssh -o StrictHostKeyChecking=no {server.name} 'echo hello world'") + + # Copy the closure of package B from the server to the client, using ssh-ng. + client.fail("nix-store --check-validity ${pkgB}") + # Shouldn't download untrusted paths by default + client.fail("nix copy --from ssh-ng://server ${pkgB} >&2") + client.succeed("nix copy --no-check-sigs --from ssh-ng://server ${pkgB} >&2") + client.succeed("nix-store --check-validity ${pkgB}") + + # Copy the derivation of package D's derivation from the client to the server. + server.fail("nix-store --check-validity ${pkgD.drvPath}") + client.succeed("nix copy --derivation --to ssh://server ${pkgD.drvPath} >&2") + server.succeed("nix-store --check-validity ${pkgD.drvPath}") + ''; +} diff --git a/tests/plugins.sh b/tests/plugins.sh index 6e278ad9d..baf71a362 100644 --- a/tests/plugins.sh +++ b/tests/plugins.sh @@ -1,10 +1,7 @@ source common.sh -set -o pipefail - if [[ $BUILD_SHARED_LIBS != 1 ]]; then - echo "plugins are not supported" - exit 99 + skipTest "Plugins are not supported" fi res=$(nix --option setting-set true --option plugin-files $PWD/plugins/libplugintest* eval --expr builtins.anotherNull) diff --git a/tests/plugins/local.mk b/tests/plugins/local.mk index 8182a6a83..40350aa96 100644 --- a/tests/plugins/local.mk +++ b/tests/plugins/local.mk @@ -8,4 +8,4 @@ libplugintest_ALLOW_UNDEFINED := 1 libplugintest_EXCLUDE_FROM_LIBRARY_LIST := 1 -libplugintest_CXXFLAGS := -I src/libutil -I src/libstore -I src/libexpr +libplugintest_CXXFLAGS := -I src/libutil -I src/libstore -I src/libexpr -I src/libfetchers diff --git a/tests/post-hook.sh b/tests/post-hook.sh index 4eff5f511..0266eb15d 100644 --- a/tests/post-hook.sh +++ b/tests/post-hook.sh @@ -9,8 +9,14 @@ echo 'require-sigs = false' >> $NIX_CONF_DIR/nix.conf restartDaemon +if isDaemonNewer "2.13"; then + pushToStore="$PWD/push-to-store.sh" +else + pushToStore="$PWD/push-to-store-old.sh" +fi + # Build the dependencies and push them to the remote store. -nix-build -o $TEST_ROOT/result dependencies.nix --post-build-hook $PWD/push-to-store.sh +nix-build -o $TEST_ROOT/result dependencies.nix --post-build-hook "$pushToStore" clearStore diff --git a/tests/pure-eval.sh b/tests/pure-eval.sh index b83ab8afe..5334bf28e 100644 --- a/tests/pure-eval.sh +++ b/tests/pure-eval.sh @@ -8,7 +8,7 @@ nix eval --expr 'assert 1 + 2 == 3; true' missingImpureErrorMsg=$(! 
nix eval --expr 'builtins.readFile ./pure-eval.sh' 2>&1) -echo "$missingImpureErrorMsg" | grep -q -- --impure || \ +echo "$missingImpureErrorMsg" | grepQuiet -- --impure || \ fail "The error message should mention the “--impure” flag to unblock users" [[ $(nix eval --expr 'builtins.pathExists ./pure-eval.sh') == false ]] || \ diff --git a/tests/push-to-store-old.sh b/tests/push-to-store-old.sh new file mode 100755 index 000000000..b1495c9e2 --- /dev/null +++ b/tests/push-to-store-old.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +set -x +set -e + +[ -n "$OUT_PATHS" ] +[ -n "$DRV_PATH" ] + +echo Pushing "$OUT_PATHS" to "$REMOTE_STORE" +printf "%s" "$DRV_PATH" | xargs nix copy --to "$REMOTE_STORE" --no-require-sigs diff --git a/tests/push-to-store.sh b/tests/push-to-store.sh index b1495c9e2..0b090e1b3 100755 --- a/tests/push-to-store.sh +++ b/tests/push-to-store.sh @@ -7,4 +7,4 @@ set -e [ -n "$DRV_PATH" ] echo Pushing "$OUT_PATHS" to "$REMOTE_STORE" -printf "%s" "$DRV_PATH" | xargs nix copy --to "$REMOTE_STORE" --no-require-sigs +printf "%s" "$DRV_PATH"^'*' | xargs nix copy --to "$REMOTE_STORE" --no-require-sigs diff --git a/tests/recursive.nix b/tests/recursive.nix new file mode 100644 index 000000000..fa8cc04db --- /dev/null +++ b/tests/recursive.nix @@ -0,0 +1,56 @@ +with import ./config.nix; + +mkDerivation rec { + name = "recursive"; + dummy = builtins.toFile "dummy" "bla bla"; + SHELL = shell; + + # Note: this is a string without context. + unreachable = builtins.getEnv "unreachable"; + + NIX_TESTS_CA_BY_DEFAULT = builtins.getEnv "NIX_TESTS_CA_BY_DEFAULT"; + + requiredSystemFeatures = [ "recursive-nix" ]; + + buildCommand = '' + mkdir $out + opts="--experimental-features nix-command ${if (NIX_TESTS_CA_BY_DEFAULT == "1") then "--extra-experimental-features ca-derivations" else ""}" + + PATH=${builtins.getEnv "NIX_BIN_DIR"}:$PATH + + # Check that we can query/build paths in our input closure. + nix $opts path-info $dummy + nix $opts build $dummy + + # Make sure we cannot query/build paths not in out input closure. + [[ -e $unreachable ]] + (! nix $opts path-info $unreachable) + (! nix $opts build $unreachable) + + # Add something to the store. + echo foobar > foobar + foobar=$(nix $opts store add-path ./foobar) + + nix $opts path-info $foobar + nix $opts build $foobar + + # Add it to our closure. + ln -s $foobar $out/foobar + + [[ $(nix $opts path-info --all | wc -l) -eq 4 ]] + + # Build a derivation. 
+ nix $opts build -L --impure --expr ' + with import ${./config.nix}; + mkDerivation { + name = "inner1"; + buildCommand = "echo $fnord blaat > $out"; + fnord = builtins.toFile "fnord" "fnord"; + } + ' + + [[ $(nix $opts path-info --json ./result) =~ fnord ]] + + ln -s $(nix $opts path-info ./result) $out/inner1 + ''; +} diff --git a/tests/recursive.sh b/tests/recursive.sh index 91518d67d..638f06f85 100644 --- a/tests/recursive.sh +++ b/tests/recursive.sh @@ -4,7 +4,7 @@ sed -i 's/experimental-features .*/& recursive-nix/' "$NIX_CONF_DIR"/nix.conf restartDaemon # FIXME -if [[ $(uname) != Linux ]]; then exit 99; fi +if [[ $(uname) != Linux ]]; then skipTest "Not running Linux"; fi clearStore @@ -12,63 +12,7 @@ rm -f $TEST_ROOT/result export unreachable=$(nix store add-path ./recursive.sh) -NIX_BIN_DIR=$(dirname $(type -p nix)) nix --extra-experimental-features 'nix-command recursive-nix' build -o $TEST_ROOT/result -L --impure --expr ' - with import ./config.nix; - mkDerivation rec { - name = "recursive"; - dummy = builtins.toFile "dummy" "bla bla"; - SHELL = shell; - - # Note: this is a string without context. - unreachable = builtins.getEnv "unreachable"; - - NIX_TESTS_CA_BY_DEFAULT = builtins.getEnv "NIX_TESTS_CA_BY_DEFAULT"; - - requiredSystemFeatures = [ "recursive-nix" ]; - - buildCommand = '\'\'' - mkdir $out - opts="--experimental-features nix-command ${if (NIX_TESTS_CA_BY_DEFAULT == "1") then "--extra-experimental-features ca-derivations" else ""}" - - PATH=${builtins.getEnv "NIX_BIN_DIR"}:$PATH - - # Check that we can query/build paths in our input closure. - nix $opts path-info $dummy - nix $opts build $dummy - - # Make sure we cannot query/build paths not in out input closure. - [[ -e $unreachable ]] - (! nix $opts path-info $unreachable) - (! nix $opts build $unreachable) - - # Add something to the store. - echo foobar > foobar - foobar=$(nix $opts store add-path ./foobar) - - nix $opts path-info $foobar - nix $opts build $foobar - - # Add it to our closure. - ln -s $foobar $out/foobar - - [[ $(nix $opts path-info --all | wc -l) -eq 4 ]] - - # Build a derivation. - nix $opts build -L --impure --expr '\'' - with import ${./config.nix}; - mkDerivation { - name = "inner1"; - buildCommand = "echo $fnord blaat > $out"; - fnord = builtins.toFile "fnord" "fnord"; - } - '\'' - - [[ $(nix $opts path-info --json ./result) =~ fnord ]] - - ln -s $(nix $opts path-info ./result) $out/inner1 - '\'\''; - } -' +NIX_BIN_DIR=$(dirname $(type -p nix)) nix --extra-experimental-features 'nix-command recursive-nix' build -o $TEST_ROOT/result -L --impure --file ./recursive.nix [[ $(cat $TEST_ROOT/result/inner1) =~ blaat ]] diff --git a/tests/remote-store.sh b/tests/remote-store.sh index 1ae126794..ea32a20d3 100644 --- a/tests/remote-store.sh +++ b/tests/remote-store.sh @@ -5,8 +5,19 @@ clearStore # Ensure "fake ssh" remote store works just as legacy fake ssh would. nix --store ssh-ng://localhost?remote-store=$TEST_ROOT/other-store doctor +# Ensure that store ping trusted works with ssh-ng:// +nix --store ssh-ng://localhost?remote-store=$TEST_ROOT/other-store store ping --json | jq -e '.trusted' + startDaemon +if isDaemonNewer "2.15pre0"; then + # Ensure that ping works trusted with new daemon + nix store ping --json | jq -e '.trusted' +else + # And the the field is absent with the old daemon + nix store ping --json | jq -e 'has("trusted") | not' +fi + # Test import-from-derivation through the daemon. 
[[ $(nix eval --impure --raw --expr ' with import ./config.nix; diff --git a/tests/repl.sh b/tests/repl.sh index c555560cc..2b3789521 100644 --- a/tests/repl.sh +++ b/tests/repl.sh @@ -33,14 +33,14 @@ testRepl () { nix repl "${nixArgs[@]}" <<< "$replCmds" || fail "nix repl does not work twice with the same inputs" # simple.nix prints a PATH during build - echo "$replOutput" | grep -qs 'PATH=' || fail "nix repl :log doesn't output logs" + echo "$replOutput" | grepQuiet -s 'PATH=' || fail "nix repl :log doesn't output logs" local replOutput="$(nix repl "${nixArgs[@]}" <<< "$replFailingCmds" 2>&1)" echo "$replOutput" - echo "$replOutput" | grep -qs 'This should fail' \ + echo "$replOutput" | grepQuiet -s 'This should fail' \ || fail "nix repl :log doesn't output logs for a failed derivation" local replOutput="$(nix repl --show-trace "${nixArgs[@]}" <<< "$replUndefinedVariable" 2>&1)" echo "$replOutput" - echo "$replOutput" | grep -qs "while evaluating the file" \ + echo "$replOutput" | grepQuiet -s "while evaluating the file" \ || fail "nix repl --show-trace doesn't show the trace" nix repl "${nixArgs[@]}" --option pure-eval true 2>&1 <<< "builtins.currentSystem" \ @@ -58,7 +58,7 @@ testReplResponse () { local commands="$1"; shift local expectedResponse="$1"; shift local response="$(nix repl "$@" <<< "$commands")" - echo "$response" | grep -qs "$expectedResponse" \ + echo "$response" | grepQuiet -s "$expectedResponse" \ || fail "repl command set: $commands @@ -79,6 +79,14 @@ testReplResponse ' "result: ${a}" ' "result: 2" +# check dollar escaping https://github.com/NixOS/nix/issues/4909 +# note the escaped \, +# \\ +# because the second argument is a regex +testReplResponse ' +"$" + "{hi}" +' '"\\${hi}"' + testReplResponse ' drvPath ' '".*-simple.drv"' \ @@ -121,5 +129,5 @@ sed -i 's/beforeChange/afterChange/' flake/flake.nix echo ":reload" echo "changingThing" ) | nix repl ./flake --experimental-features 'flakes repl-flake') -echo "$replResult" | grep -qs beforeChange -echo "$replResult" | grep -qs afterChange +echo "$replResult" | grepQuiet -s beforeChange +echo "$replResult" | grepQuiet -s afterChange diff --git a/tests/restricted.sh b/tests/restricted.sh index 3b6ee2af1..776893a56 100644 --- a/tests/restricted.sh +++ b/tests/restricted.sh @@ -17,9 +17,6 @@ nix-instantiate --restrict-eval --eval -E 'builtins.readDir ../src/nix-channel' (! nix-instantiate --restrict-eval --eval -E 'let __nixPath = [ { prefix = "foo"; path = ./.; } ]; in ') nix-instantiate --restrict-eval --eval -E 'let __nixPath = [ { prefix = "foo"; path = ./.; } ]; in ' -I src=. -# no default NIX_PATH -(unset NIX_PATH; ! nix-instantiate --restrict-eval --find-file .) - p=$(nix eval --raw --expr "builtins.fetchurl file://$(pwd)/restricted.sh" --impure --restrict-eval --allowed-uris "file://$(pwd)") cmp $p restricted.sh @@ -51,4 +48,4 @@ output="$(nix eval --raw --restrict-eval -I "$traverseDir" \ --expr "builtins.readFile \"$traverseDir/$goUp$(pwd)/restricted-innocent\"" \ 2>&1 || :)" echo "$output" | grep "is forbidden" -! 
echo "$output" | grep -F restricted-secret +echo "$output" | grepInverse -F restricted-secret diff --git a/tests/search.sh b/tests/search.sh index ab3d7ff8d..841cad800 100644 --- a/tests/search.sh +++ b/tests/search.sh @@ -20,9 +20,9 @@ clearCache ## Search expressions # Check that empty search string matches all -nix search -f search.nix |grep -q foo -nix search -f search.nix |grep -q bar -nix search -f search.nix |grep -q hello +nix search -f search.nix |grepQuiet foo +nix search -f search.nix |grepQuiet bar +nix search -f search.nix |grepQuiet hello ## Tests for multiple regex/match highlighting diff --git a/tests/shell.sh b/tests/shell.sh index 6a80e8385..d2f7cf14e 100644 --- a/tests/shell.sh +++ b/tests/shell.sh @@ -10,7 +10,7 @@ nix shell -f shell-hello.nix hello -c hello NixOS | grep 'Hello NixOS' nix shell -f shell-hello.nix hello^dev -c hello2 | grep 'Hello2' nix shell -f shell-hello.nix 'hello^*' -c hello2 | grep 'Hello2' -if ! canUseSandbox; then exit 99; fi +requireSandboxSupport chmod -R u+w $TEST_ROOT/store0 || true rm -rf $TEST_ROOT/store0 diff --git a/tests/tarball.sh b/tests/tarball.sh index d5cab879c..5f39658c9 100644 --- a/tests/tarball.sh +++ b/tests/tarball.sh @@ -19,7 +19,7 @@ test_tarball() { tarball=$TEST_ROOT/tarball.tar$ext (cd $TEST_ROOT && tar cf - tarball) | $compressor > $tarball - nix-env -f file://$tarball -qa --out-path | grep -q dependencies + nix-env -f file://$tarball -qa --out-path | grepQuiet dependencies nix-build -o $TEST_ROOT/result file://$tarball @@ -34,7 +34,7 @@ test_tarball() { nix-build -o $TEST_ROOT/result -E "import (fetchTree { type = \"tarball\"; url = file://$tarball; narHash = \"$hash\"; })" # Do not re-fetch paths already present nix-build -o $TEST_ROOT/result -E "import (fetchTree { type = \"tarball\"; url = file:///does-not-exist/must-remain-unused/$tarball; narHash = \"$hash\"; })" - nix-build -o $TEST_ROOT/result -E "import (fetchTree { type = \"tarball\"; url = file://$tarball; narHash = \"sha256-xdKv2pq/IiwLSnBBJXW8hNowI4MrdZfW+SYqDQs7Tzc=\"; })" 2>&1 | grep 'NAR hash mismatch in input' + expectStderr 102 nix-build -o $TEST_ROOT/result -E "import (fetchTree { type = \"tarball\"; url = file://$tarball; narHash = \"sha256-xdKv2pq/IiwLSnBBJXW8hNowI4MrdZfW+SYqDQs7Tzc=\"; })" | grep 'NAR hash mismatch in input' nix-instantiate --strict --eval -E "!((import (fetchTree { type = \"tarball\"; url = file://$tarball; narHash = \"$hash\"; })) ? submodules)" >&2 nix-instantiate --strict --eval -E "!((import (fetchTree { type = \"tarball\"; url = file://$tarball; narHash = \"$hash\"; })) ? submodules)" 2>&1 | grep 'true' diff --git a/tests/test-infra.sh b/tests/test-infra.sh new file mode 100644 index 000000000..54ae120e7 --- /dev/null +++ b/tests/test-infra.sh @@ -0,0 +1,85 @@ +# Test the functions for testing themselves! +# Also test some assumptions on how bash works that they rely on. +source common.sh + +# `true` should exit with 0 +expect 0 true + +# `false` should exit with 1 +expect 1 false + +# `expect` will fail when we get it wrong +expect 1 expect 0 false + +noisyTrue () { + echo YAY! >&2 + true +} + +noisyFalse () { + echo NAY! 
>&2
+    false
+}
+
+# These should redirect standard error to standard output
+expectStderr 0 noisyTrue | grepQuiet YAY
+expectStderr 1 noisyFalse | grepQuiet NAY
+
+# `set -o pipefail` is enabled
+
+pipefailure () {
+    # shellcheck disable=SC2216
+    true | false | true
+}
+expect 1 pipefailure
+unset pipefailure
+
+pipefailure () {
+    # shellcheck disable=SC2216
+    false | true | true
+}
+expect 1 pipefailure
+unset pipefailure
+
+commandSubstitutionPipeFailure () {
+    # shellcheck disable=SC2216
+    res=$(set -eu -o pipefail; false | true | echo 0)
+}
+expect 1 commandSubstitutionPipeFailure
+
+# `set -u` is enabled
+
+# note the (...), which makes the function use a subshell, as unbound variable errors
+# in the outer shell are *rightly* not recoverable.
+useUnbound () (
+    set -eu
+    # shellcheck disable=SC2154
+    echo "$thisVariableIsNotBound"
+)
+expect 1 useUnbound
+
+# ! alone unfortunately negates `set -e`, but it works in functions:
+# shellcheck disable=SC2251
+! true
+funBang () {
+    ! true
+}
+expect 1 funBang
+unset funBang
+
+# `grep -v -q` is not what we want for exit codes, but `grepInverse` is
+# Avoid `grep -v -q`. The following line proves the point, and if it fails,
+# we'll know that `grep` had a breaking change or `-v -q` may not be portable.
+{ echo foo; echo bar; } | grep -v -q foo
+{ echo foo; echo bar; } | expect 1 grepInverse foo
+
+# `grepQuiet` is quiet
+res=$(set -eu -o pipefail; echo foo | grepQuiet foo | wc -c)
+(( res == 0 ))
+unset res
+
+# `grepQuietInverse` is both
+{ echo foo; echo bar; } | expect 1 grepQuietInverse foo
+res=$(set -eu -o pipefail; echo foo | expect 1 grepQuietInverse foo | wc -c)
+(( res == 0 ))
+unset res
diff --git a/tests/timeout.sh b/tests/timeout.sh
index e3fb3ebcc..b179b79a2 100644
--- a/tests/timeout.sh
+++ b/tests/timeout.sh
@@ -5,17 +5,14 @@ source common.sh
 # XXX: This shouldn’t be, but #4813 cause this test to fail
 needLocalStore "see #4813"

-set +e
-messages=$(nix-build -Q timeout.nix -A infiniteLoop --timeout 2 2>&1)
-status=$?
-set -e
+messages=$(nix-build -Q timeout.nix -A infiniteLoop --timeout 2 2>&1) && status=0 || status=$?

 if [ $status -ne 101 ]; then
     echo "error: 'nix-store' exited with '$status'; should have exited 101"
     exit 1
 fi

-if ! echo "$messages" | grep -q "timed out"; then
+if echo "$messages" | grepQuietInverse "timed out"; then
     echo "error: build may have failed for reasons other than timeout; output:"
     echo "$messages" >&2
     exit 1
diff --git a/tests/user-envs-migration.sh b/tests/user-envs-migration.sh
index 467c28fbb..187372b16 100644
--- a/tests/user-envs-migration.sh
+++ b/tests/user-envs-migration.sh
@@ -4,7 +4,7 @@ source common.sh

 if isDaemonNewer "2.4pre20211005"; then
-    exit 99
+    skipTest "Daemon is too new"
 fi
diff --git a/tests/user-envs.sh b/tests/user-envs.sh
index d63fe780a..d1260ba04 100644
--- a/tests/user-envs.sh
+++ b/tests/user-envs.sh
@@ -1,6 +1,6 @@
 source common.sh

-if [ -z "$storeCleared" ]; then
+if [ -z "${storeCleared-}" ]; then
     clearStore
 fi

@@ -28,13 +28,13 @@ nix-env -f ./user-envs.nix -qa --json --out-path | jq -e '.[] | select(.name ==
 ] | all'

 # Query descriptions.
-nix-env -f ./user-envs.nix -qa '*' --description | grep -q silly
+nix-env -f ./user-envs.nix -qa '*' --description | grepQuiet silly
 rm -rf $HOME/.nix-defexpr
 ln -s $(pwd)/user-envs.nix $HOME/.nix-defexpr
-nix-env -qa '*' --description | grep -q silly
+nix-env -qa '*' --description | grepQuiet silly

 # Query the system.
-nix-env -qa '*' --system | grep -q $system
+nix-env -qa '*' --system | grepQuiet $system

 # Install "foo-1.0".
nix-env -i foo-1.0 @@ -42,19 +42,19 @@ nix-env -i foo-1.0 # Query installed: should contain foo-1.0 now (which should be # executable). test "$(nix-env -q '*' | wc -l)" -eq 1 -nix-env -q '*' | grep -q foo-1.0 +nix-env -q '*' | grepQuiet foo-1.0 test "$($profiles/test/bin/foo)" = "foo-1.0" # Test nix-env -qc to compare installed against available packages, and vice versa. -nix-env -qc '*' | grep -q '< 2.0' -nix-env -qac '*' | grep -q '> 1.0' +nix-env -qc '*' | grepQuiet '< 2.0' +nix-env -qac '*' | grepQuiet '> 1.0' # Test the -b flag to filter out source-only packages. [ "$(nix-env -qab | wc -l)" -eq 1 ] # Test the -s flag to get package status. -nix-env -qas | grep -q 'IP- foo-1.0' -nix-env -qas | grep -q -- '--- bar-0.1' +nix-env -qas | grepQuiet 'IP- foo-1.0' +nix-env -qas | grepQuiet -- '--- bar-0.1' # Disable foo. nix-env --set-flag active false foo @@ -74,15 +74,15 @@ nix-env -i foo-2.0pre1 # Query installed: should contain foo-2.0pre1 now. test "$(nix-env -q '*' | wc -l)" -eq 1 -nix-env -q '*' | grep -q foo-2.0pre1 +nix-env -q '*' | grepQuiet foo-2.0pre1 test "$($profiles/test/bin/foo)" = "foo-2.0pre1" # Upgrade "foo": should install foo-2.0. -NIX_PATH=nixpkgs=./user-envs.nix:$NIX_PATH nix-env -f '' -u foo +NIX_PATH=nixpkgs=./user-envs.nix:${NIX_PATH-} nix-env -f '' -u foo # Query installed: should contain foo-2.0 now. test "$(nix-env -q '*' | wc -l)" -eq 1 -nix-env -q '*' | grep -q foo-2.0 +nix-env -q '*' | grepQuiet foo-2.0 test "$($profiles/test/bin/foo)" = "foo-2.0" # Store the path of foo-2.0. @@ -94,20 +94,20 @@ nix-env -i bar-0.1 nix-env -e foo # Query installed: should only contain bar-0.1 now. -if nix-env -q '*' | grep -q foo; then false; fi -nix-env -q '*' | grep -q bar +if nix-env -q '*' | grepQuiet foo; then false; fi +nix-env -q '*' | grepQuiet bar # Rollback: should bring "foo" back. oldGen="$(nix-store -q --resolve $profiles/test)" nix-env --rollback [ "$(nix-store -q --resolve $profiles/test)" != "$oldGen" ] -nix-env -q '*' | grep -q foo-2.0 -nix-env -q '*' | grep -q bar +nix-env -q '*' | grepQuiet foo-2.0 +nix-env -q '*' | grepQuiet bar # Rollback again: should remove "bar". nix-env --rollback -nix-env -q '*' | grep -q foo-2.0 -if nix-env -q '*' | grep -q bar; then false; fi +nix-env -q '*' | grepQuiet foo-2.0 +if nix-env -q '*' | grepQuiet bar; then false; fi # Count generations. nix-env --list-generations @@ -129,7 +129,7 @@ nix-env --switch-generation 7 # Install foo-1.0, now using its store path. nix-env -i "$outPath10" -nix-env -q '*' | grep -q foo-1.0 +nix-env -q '*' | grepQuiet foo-1.0 nix-store -qR $profiles/test | grep "$outPath10" nix-store -q --referrers-closure $profiles/test | grep "$(nix-store -q --resolve $profiles/test)" [ "$(nix-store -q --deriver "$outPath10")" = $drvPath10 ] @@ -137,12 +137,12 @@ nix-store -q --referrers-closure $profiles/test | grep "$(nix-store -q --resolve # Uninstall foo-1.0, using a symlink to its store path. ln -sfn $outPath10/bin/foo $TEST_ROOT/symlink nix-env -e $TEST_ROOT/symlink -if nix-env -q '*' | grep -q foo; then false; fi -(! nix-store -qR $profiles/test | grep "$outPath10") +if nix-env -q '*' | grepQuiet foo; then false; fi +nix-store -qR $profiles/test | grepInverse "$outPath10" # Install foo-1.0, now using a symlink to its store path. nix-env -i $TEST_ROOT/symlink -nix-env -q '*' | grep -q foo +nix-env -q '*' | grepQuiet foo # Delete all old generations. nix-env --delete-generations old @@ -160,7 +160,7 @@ test "$(nix-env -q '*' | wc -l)" -eq 0 # Installing "foo" should only install the newest foo. 
nix-env -i foo test "$(nix-env -q '*' | grep foo- | wc -l)" -eq 1 -nix-env -q '*' | grep -q foo-2.0 +nix-env -q '*' | grepQuiet foo-2.0 # On the other hand, this should install both (and should fail due to # a collision). @@ -171,8 +171,8 @@ nix-env -e '*' nix-env -e '*' nix-env -i '*' test "$(nix-env -q '*' | wc -l)" -eq 2 -nix-env -q '*' | grep -q foo-2.0 -nix-env -q '*' | grep -q bar-0.1.1 +nix-env -q '*' | grepQuiet foo-2.0 +nix-env -q '*' | grepQuiet bar-0.1.1 # Test priorities: foo-0.1 has a lower priority than foo-1.0, so it # should be possible to install both without a collision. Also test diff --git a/tests/why-depends.sh b/tests/why-depends.sh index a04d529b5..b35a0d1cf 100644 --- a/tests/why-depends.sh +++ b/tests/why-depends.sh @@ -16,9 +16,9 @@ FAST_WHY_DEPENDS_OUTPUT=$(nix why-depends ./toplevel ./dep) PRECISE_WHY_DEPENDS_OUTPUT=$(nix why-depends ./toplevel ./dep --precise) # Both outputs should show that `input-2` is in the dependency chain -echo "$FAST_WHY_DEPENDS_OUTPUT" | grep -q input-2 -echo "$PRECISE_WHY_DEPENDS_OUTPUT" | grep -q input-2 +echo "$FAST_WHY_DEPENDS_OUTPUT" | grepQuiet input-2 +echo "$PRECISE_WHY_DEPENDS_OUTPUT" | grepQuiet input-2 -# But only the “precise” one should refere to `reference-to-input-2` -echo "$FAST_WHY_DEPENDS_OUTPUT" | (! grep -q reference-to-input-2) -echo "$PRECISE_WHY_DEPENDS_OUTPUT" | grep -q reference-to-input-2 +# But only the “precise” one should refer to `reference-to-input-2` +echo "$FAST_WHY_DEPENDS_OUTPUT" | grepQuietInverse reference-to-input-2 +echo "$PRECISE_WHY_DEPENDS_OUTPUT" | grepQuiet reference-to-input-2
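
A note on the helpers that the hunks above switch to (`expect`, `expectStderr`, `grepQuiet`, `grepInverse`, `grepQuietInverse`, `skipTest`): their real definitions live in `tests/common/vars-and-functions.sh`, which is not shown in this diff. The sketch below is only an illustration of the behaviour that `tests/test-infra.sh` exercises; every function body here is an assumption, not the actual implementation. The motivation is that the suite now runs under `set -eu -o pipefail`, where a bare `grep -q` can kill the command feeding the pipe with SIGPIPE, and a top-level `! grep -q` silently bypasses `set -e`.

# Sketch only: the names come from the tests above, the bodies are assumed.

# Run a command and require a specific exit code, without tripping `set -e`.
expect() {
    local expected="$1" res
    shift
    "$@" && res=0 || res=$?
    if [[ "$res" -ne "$expected" ]]; then
        echo "Expected exit code $expected but got $res from: $*" >&2
        return 1
    fi
}

# Same, but with stderr folded into stdout so the caller can grep it.
expectStderr() {
    local expected="$1"
    shift
    expect "$expected" "$@" 2>&1
}

# Like `grep -q`, but reads all of its input. `grep -q` exits on the first
# match, which can send SIGPIPE to the producing command and, under
# `set -o pipefail`, fail the pipeline even though the match was found.
grepQuiet() {
    grep "$@" > /dev/null
}

# Succeed only if the pattern does not match at all. `grep -v -q` is not
# equivalent: it succeeds whenever some line fails to match.
grepInverse() {
    ! grep "$@"
}

# Quiet and inverted.
grepQuietInverse() {
    ! grep "$@" > /dev/null
}

# Skip the current test; 99 is the exit code the harness treats as "skipped".
skipTest() {
    echo "$1, skipping this test..." >&2
    exit 99
}

Read with these assumed definitions in mind, a line such as `expect 1 nix-env --foo 2>&1 | grep "no operation"` from `misc.sh` above asks the command to fail with exit code 1 while still feeding its output to `grep`, the combination that the old `grep -q` and `set +e`/`set -e` pairs handled less reliably.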