Mirror of https://github.com/privatevoid-net/nix-super.git (synced 2024-11-22 14:06:16 +02:00)
Commit c3a929349f: Merge remote-tracking branch 'origin/master' into cross-jobs
204 changed files with 25777 additions and 2334 deletions
.github/STALE-BOT.md (vendored, 2 lines changed)
@@ -3,7 +3,7 @@
 - Thanks for your contribution!
 - To remove the stale label, just leave a new comment.
 - _How to find the right people to ping?_ → [`git blame`](https://git-scm.com/docs/git-blame) to the rescue! (or GitHub's history and blame buttons.)
-- You can always ask for help on [our Discourse Forum](https://discourse.nixos.org/) or on the [#nixos IRC channel](https://webchat.freenode.net/#nixos).
+- You can always ask for help on [our Discourse Forum](https://discourse.nixos.org/) or on [Matrix - #nix:nixos.org](https://matrix.to/#/#nix:nixos.org).

 ## Suggestions for PRs

.github/workflows/test.yml (vendored, 44 lines changed)
@@ -8,52 +8,62 @@ jobs:
       matrix:
         os: [ubuntu-latest, macos-latest]
     runs-on: ${{ matrix.os }}
-    env:
-      CACHIX_NAME: nix-ci
     steps:
     - uses: actions/checkout@v2.3.4
       with:
         fetch-depth: 0
-    - uses: cachix/install-nix-action@v12
-    - uses: cachix/cachix-action@v8
+    - uses: cachix/install-nix-action@v13
+    - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
+    - uses: cachix/cachix-action@v10
       with:
         name: '${{ env.CACHIX_NAME }}'
         signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}'
+        authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
     #- run: nix flake check
     - run: nix-build -A checks.$(if [[ `uname` = Linux ]]; then echo x86_64-linux; else echo x86_64-darwin; fi)
-  installer:
-    if: github.event_name == 'push'
-    needs: tests
+  check_cachix:
+    name: Cachix secret present for installer tests
     runs-on: ubuntu-latest
+    outputs:
+      secret: ${{ steps.secret.outputs.secret }}
+    steps:
+      - name: Check for Cachix secret
+        id: secret
         env:
-      CACHIX_NAME: nix-ci
+          _CACHIX_SECRETS: ${{ secrets.CACHIX_SIGNING_KEY }}${{ secrets.CACHIX_AUTH_TOKEN }}
+        run: echo "::set-output name=secret::${{ env._CACHIX_SECRETS != '' }}"
+  installer:
+    needs: [tests, check_cachix]
+    if: github.event_name == 'push' && needs.check_cachix.outputs.secret == 'true'
+    runs-on: ubuntu-latest
     outputs:
       installerURL: ${{ steps.prepare-installer.outputs.installerURL }}
     steps:
     - uses: actions/checkout@v2.3.4
       with:
         fetch-depth: 0
-    - uses: cachix/install-nix-action@v12
-    - uses: cachix/cachix-action@v8
+    - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
+    - uses: cachix/install-nix-action@v13
+    - uses: cachix/cachix-action@v10
       with:
         name: '${{ env.CACHIX_NAME }}'
         signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}'
+        authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
     - id: prepare-installer
       run: scripts/prepare-installer-for-github-actions
   installer_test:
-    if: github.event_name == 'push'
-    needs: installer
+    needs: [installer, check_cachix]
+    if: github.event_name == 'push' && needs.check_cachix.outputs.secret == 'true'
     strategy:
       matrix:
         os: [ubuntu-latest, macos-latest]
     runs-on: ${{ matrix.os }}
-    env:
-      CACHIX_NAME: nix-ci
     steps:
     - uses: actions/checkout@v2.3.4
-    - uses: cachix/install-nix-action@master
+    - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
+    - uses: cachix/install-nix-action@v13
       with:
         install_url: '${{needs.installer.outputs.installerURL}}'
-        install_options: '--tarball-url-prefix https://${{ env.CACHIX_NAME }}.cachix.org/serve'
+        install_options: "--tarball-url-prefix https://${{ env.CACHIX_NAME }}.cachix.org/serve"
     - run: nix-instantiate -E 'builtins.currentTime' --eval

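The notable change above is that the Cachix cache name is no longer the hard-coded `nix-ci` but is derived from the repository slug, so forks automatically get their own cache name, and the new `check_cachix` job lets the installer jobs skip themselves when the Cachix secrets are absent. As a rough illustration of what that `tr` pipeline produces (the repository value here is only an example):

    $ GITHUB_REPOSITORY=NixOS/nix
    $ echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-"
    nixos-nix-install-tests
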
.gitignore (vendored, 1 line changed)
@@ -82,6 +82,7 @@ perl/Makefile.config
 /tests/shell
 /tests/shell.drv
 /tests/config.nix
+/tests/ca/config.nix

 # /tests/lang/
 /tests/lang/*.out

Makefile (1 line changed)
@@ -12,6 +12,7 @@ makefiles = \
   src/resolve-system-dependencies/local.mk \
   scripts/local.mk \
   misc/bash/local.mk \
+  misc/zsh/local.mk \
   misc/systemd/local.mk \
   misc/launchd/local.mk \
   misc/upstart/local.mk \

@@ -15,7 +15,6 @@ LDFLAGS = @LDFLAGS@
 LIBARCHIVE_LIBS = @LIBARCHIVE_LIBS@
 LIBBROTLI_LIBS = @LIBBROTLI_LIBS@
 LIBCURL_LIBS = @LIBCURL_LIBS@
-LIBLZMA_LIBS = @LIBLZMA_LIBS@
 OPENSSL_LIBS = @OPENSSL_LIBS@
 LIBSECCOMP_LIBS = @LIBSECCOMP_LIBS@
 PACKAGE_NAME = @PACKAGE_NAME@

@@ -28,7 +28,8 @@ build nix from source with nix-build or how to get a development environment.
 - [Nix manual](https://nixos.org/nix/manual)
 - [Nix jobsets on hydra.nixos.org](https://hydra.nixos.org/project/nix)
 - [NixOS Discourse](https://discourse.nixos.org/)
-- [IRC - #nixos on freenode.net](irc://irc.freenode.net/#nixos)
+- [Matrix - #nix:nixos.org](https://matrix.to/#/#nix:nixos.org)
+- [IRC - #nixos on libera.chat](irc://irc.libera.chat/#nixos)

 ## License

config/config.guess (vendored, 20 lines changed)
@@ -1,8 +1,8 @@
 #! /bin/sh
 # Attempt to guess a canonical system name.
-# Copyright 1992-2020 Free Software Foundation, Inc.
+# Copyright 1992-2021 Free Software Foundation, Inc.

-timestamp='2020-11-19'
+timestamp='2021-01-25'

 # This file is free software; you can redistribute it and/or modify it
 # under the terms of the GNU General Public License as published by
@@ -50,7 +50,7 @@ version="\
 GNU config.guess ($timestamp)

 Originally written by Per Bothner.
-Copyright 1992-2020 Free Software Foundation, Inc.
+Copyright 1992-2021 Free Software Foundation, Inc.

 This is free software; see the source for copying conditions. There is NO
 warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -188,10 +188,9 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in
         #
         # Note: NetBSD doesn't particularly care about the vendor
         # portion of the name. We always set it to "unknown".
-        sysctl="sysctl -n hw.machine_arch"
         UNAME_MACHINE_ARCH=$( (uname -p 2>/dev/null || \
-            "/sbin/$sysctl" 2>/dev/null || \
-            "/usr/sbin/$sysctl" 2>/dev/null || \
+            /sbin/sysctl -n hw.machine_arch 2>/dev/null || \
+            /usr/sbin/sysctl -n hw.machine_arch 2>/dev/null || \
             echo unknown))
         case "$UNAME_MACHINE_ARCH" in
             aarch64eb) machine=aarch64_be-unknown ;;
@@ -996,6 +995,9 @@ EOF
     k1om:Linux:*:*)
         echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
         exit ;;
+    loongarch32:Linux:*:* | loongarch64:Linux:*:* | loongarchx32:Linux:*:*)
+        echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+        exit ;;
     m32r*:Linux:*:*)
         echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
         exit ;;
@@ -1084,7 +1086,7 @@ EOF
     ppcle:Linux:*:*)
         echo powerpcle-unknown-linux-"$LIBC"
         exit ;;
-    riscv32:Linux:*:* | riscv64:Linux:*:*)
+    riscv32:Linux:*:* | riscv32be:Linux:*:* | riscv64:Linux:*:* | riscv64be:Linux:*:*)
         echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
         exit ;;
     s390:Linux:*:* | s390x:Linux:*:*)
@@ -1480,8 +1482,8 @@ EOF
     i*86:rdos:*:*)
         echo "$UNAME_MACHINE"-pc-rdos
         exit ;;
-    i*86:AROS:*:*)
-        echo "$UNAME_MACHINE"-pc-aros
+    *:AROS:*:*)
+        echo "$UNAME_MACHINE"-unknown-aros
         exit ;;
     x86_64:VMkernel:*:*)
         echo "$UNAME_MACHINE"-unknown-esx

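config.guess is the helper that `configure` runs to detect the build platform; this refresh mainly pulls in newer upstream detection (NetBSD sysctl handling, LoongArch, big-endian RISC-V, AROS). It can also be run by hand to see what it reports; the triplet below is only an illustration, the real output depends on the machine:

    $ sh config/config.guess
    x86_64-pc-linux-gnu
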
config/config.sub (vendored, 20 lines changed)
@@ -1,8 +1,8 @@
 #! /bin/sh
 # Configuration validation subroutine script.
-# Copyright 1992-2020 Free Software Foundation, Inc.
+# Copyright 1992-2021 Free Software Foundation, Inc.

-timestamp='2020-12-02'
+timestamp='2021-01-08'

 # This file is free software; you can redistribute it and/or modify it
 # under the terms of the GNU General Public License as published by
@@ -67,7 +67,7 @@ Report bugs and patches to <config-patches@gnu.org>."
 version="\
 GNU config.sub ($timestamp)

-Copyright 1992-2020 Free Software Foundation, Inc.
+Copyright 1992-2021 Free Software Foundation, Inc.

 This is free software; see the source for copying conditions. There is NO
 warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -1185,6 +1185,7 @@ case $cpu-$vendor in
             | k1om \
             | le32 | le64 \
             | lm32 \
+            | loongarch32 | loongarch64 | loongarchx32 \
             | m32c | m32r | m32rle \
             | m5200 | m68000 | m680[012346]0 | m68360 | m683?2 | m68k \
             | m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x \
@@ -1229,7 +1230,7 @@ case $cpu-$vendor in
             | powerpc | powerpc64 | powerpc64le | powerpcle | powerpcspe \
             | pru \
             | pyramid \
-            | riscv | riscv32 | riscv64 \
+            | riscv | riscv32 | riscv32be | riscv64 | riscv64be \
             | rl78 | romp | rs6000 | rx \
             | s390 | s390x \
             | score \
@@ -1682,11 +1683,14 @@ fi

 # Now, validate our (potentially fixed-up) OS.
 case $os in
-    # Sometimes we do "kernel-abi", so those need to count as OSes.
+    # Sometimes we do "kernel-libc", so those need to count as OSes.
     musl* | newlib* | uclibc*)
         ;;
-    # Likewise for "kernel-libc"
-    eabi | eabihf | gnueabi | gnueabihf)
+    # Likewise for "kernel-abi"
+    eabi* | gnueabi*)
+        ;;
+    # VxWorks passes extra cpu info in the 4th filed.
+    simlinux | simwindows | spe)
         ;;
     # Now accept the basic system types.
     # The portable systems comes first.
@@ -1750,6 +1754,8 @@ case $kernel-$os in
         ;;
     kfreebsd*-gnu* | kopensolaris*-gnu*)
         ;;
+    vxworks-simlinux | vxworks-simwindows | vxworks-spe)
+        ;;
     nto-qnx*)
         ;;
     os2-emx)

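config.sub is the companion script that canonicalises user-supplied system triplets, so the added entries mean the newly supported CPU names are accepted in `--host`/`--build` style arguments rather than rejected. A hedged sketch of how it behaves; the exact output depends on the script version:

    $ sh config/config.sub loongarch64-linux
    loongarch64-unknown-linux-gnu
    $ sh config/config.sub riscv64be-linux
    riscv64be-unknown-linux-gnu
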
configure.ac (61 lines changed)
@@ -1,4 +1,4 @@
-AC_INIT(nix, m4_esyscmd([bash -c "echo -n $(cat ./.version)$VERSION_SUFFIX"]))
+AC_INIT([nix],[m4_esyscmd(bash -c "echo -n $(cat ./.version)$VERSION_SUFFIX")])
 AC_CONFIG_MACRO_DIRS([m4])
 AC_CONFIG_SRCDIR(README.md)
 AC_CONFIG_AUX_DIR(config)
@@ -9,8 +9,7 @@ AC_PROG_SED
 AC_CANONICAL_HOST
 AC_MSG_CHECKING([for the canonical Nix system name])

-AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM],
-  [Platform identifier (e.g., `i686-linux').]),
+AC_ARG_WITH(system, AS_HELP_STRING([--with-system=SYSTEM],[Platform identifier (e.g., `i686-linux').]),
   [system=$withval],
   [case "$host_cpu" in
      i*86)
@@ -66,7 +65,7 @@ AC_SYS_LARGEFILE
 AC_STRUCT_DIRENT_D_TYPE
 if test "$sys_name" = sunos; then
     # Solaris requires -lsocket -lnsl for network functions
-    LIBS="-lsocket -lnsl $LIBS"
+    LDFLAGS="-lsocket -lnsl $LDFLAGS"
 fi


@@ -127,8 +126,7 @@ NEED_PROG(jq, jq)
 AC_SUBST(coreutils, [$(dirname $(type -p cat))])


-AC_ARG_WITH(store-dir, AC_HELP_STRING([--with-store-dir=PATH],
-  [path of the Nix store (defaults to /nix/store)]),
+AC_ARG_WITH(store-dir, AS_HELP_STRING([--with-store-dir=PATH],[path of the Nix store (defaults to /nix/store)]),
   storedir=$withval, storedir='/nix/store')
 AC_SUBST(storedir)

@@ -152,13 +150,12 @@ int main() {
 }]])], GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC=no, GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC=yes)
 AC_MSG_RESULT($GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC)
 if test "x$GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC" = xyes; then
-    LDFLAGS="$LDFLAGS -latomic"
+    LDFLAGS="-latomic $LDFLAGS"
 fi

 PKG_PROG_PKG_CONFIG

-AC_ARG_ENABLE(shared, AC_HELP_STRING([--enable-shared],
-  [Build shared libraries for Nix [default=yes]]),
+AC_ARG_ENABLE(shared, AS_HELP_STRING([--enable-shared],[Build shared libraries for Nix [default=yes]]),
   shared=$enableval, shared=yes)
 if test "$shared" = yes; then
   AC_SUBST(BUILD_SHARED_LIBS, 1, [Whether to build shared libraries.])
@@ -172,11 +169,6 @@ fi
 PKG_CHECK_MODULES([OPENSSL], [libcrypto], [CXXFLAGS="$OPENSSL_CFLAGS $CXXFLAGS"])


-# Look for libbz2, a required dependency.
-AC_CHECK_LIB([bz2], [BZ2_bzWriteOpen], [true],
-  [AC_MSG_ERROR([Nix requires libbz2, which is part of bzip2. See https://sourceware.org/bzip2/.])])
-AC_CHECK_HEADERS([bzlib.h], [true],
-  [AC_MSG_ERROR([Nix requires libbz2, which is part of bzip2. See https://sourceware.org/bzip2/.])])
 # Checks for libarchive
 PKG_CHECK_MODULES([LIBARCHIVE], [libarchive >= 3.1.2], [CXXFLAGS="$LIBARCHIVE_CFLAGS $CXXFLAGS"])
 # Workaround until https://github.com/libarchive/libarchive/issues/1446 is fixed
@@ -205,16 +197,6 @@ PKG_CHECK_MODULES([EDITLINE], [libeditline], [CXXFLAGS="$EDITLINE_CFLAGS $CXXFLA
 # Look for libsodium, an optional dependency.
 PKG_CHECK_MODULES([SODIUM], [libsodium], [CXXFLAGS="$SODIUM_CFLAGS $CXXFLAGS"])

-# Look for liblzma, a required dependency.
-PKG_CHECK_MODULES([LIBLZMA], [liblzma], [CXXFLAGS="$LIBLZMA_CFLAGS $CXXFLAGS"])
-AC_CHECK_LIB([lzma], [lzma_stream_encoder_mt],
-  [AC_DEFINE([HAVE_LZMA_MT], [1], [xz multithreaded compression support])])
-
-# Look for zlib, a required dependency.
-PKG_CHECK_MODULES([ZLIB], [zlib], [CXXFLAGS="$ZLIB_CFLAGS $CXXFLAGS"])
-AC_CHECK_HEADER([zlib.h],[:],[AC_MSG_ERROR([could not find the zlib.h header])])
-LDFLAGS="-lz $LDFLAGS"
-
 # Look for libbrotli{enc,dec}.
 PKG_CHECK_MODULES([LIBBROTLI], [libbrotlienc libbrotlidec], [CXXFLAGS="$LIBBROTLI_CFLAGS $CXXFLAGS"])

@@ -230,9 +212,8 @@ AC_SUBST(HAVE_LIBCPUID, [$have_libcpuid])
 # Look for libseccomp, required for Linux sandboxing.
 if test "$sys_name" = linux; then
   AC_ARG_ENABLE([seccomp-sandboxing],
-    AC_HELP_STRING([--disable-seccomp-sandboxing],
-      [Don't build support for seccomp sandboxing (only recommended if your arch doesn't support libseccomp yet!)]
-    ))
+    AS_HELP_STRING([--disable-seccomp-sandboxing],[Don't build support for seccomp sandboxing (only recommended if your arch doesn't support libseccomp yet!)
+    ]))
   if test "x$enable_seccomp_sandboxing" != "xno"; then
     PKG_CHECK_MODULES([LIBSECCOMP], [libseccomp],
       [CXXFLAGS="$LIBSECCOMP_CFLAGS $CXXFLAGS"])
@@ -250,8 +231,8 @@ AC_SUBST(HAVE_SECCOMP, [$have_seccomp])
 # Look for aws-cpp-sdk-s3.
 AC_LANG_PUSH(C++)
 AC_CHECK_HEADERS([aws/s3/S3Client.h],
-  [AC_DEFINE([ENABLE_S3], [1], [Whether to enable S3 support via aws-sdk-cpp.])
-  enable_s3=1], [enable_s3=])
+  [AC_DEFINE([ENABLE_S3], [1], [Whether to enable S3 support via aws-sdk-cpp.]) enable_s3=1],
+  [AC_DEFINE([ENABLE_S3], [0], [Whether to enable S3 support via aws-sdk-cpp.]) enable_s3=])
 AC_SUBST(ENABLE_S3, [$enable_s3])
 AC_LANG_POP(C++)

@@ -264,8 +245,7 @@ fi


 # Whether to use the Boehm garbage collector.
-AC_ARG_ENABLE(gc, AC_HELP_STRING([--enable-gc],
-  [enable garbage collection in the Nix expression evaluator (requires Boehm GC) [default=yes]]),
+AC_ARG_ENABLE(gc, AS_HELP_STRING([--enable-gc],[enable garbage collection in the Nix expression evaluator (requires Boehm GC) [default=yes]]),
   gc=$enableval, gc=yes)
 if test "$gc" = yes; then
   PKG_CHECK_MODULES([BDW_GC], [bdw-gc])
@@ -279,8 +259,7 @@ PKG_CHECK_MODULES([GTEST], [gtest_main])


 # documentation generation switch
-AC_ARG_ENABLE(doc-gen, AC_HELP_STRING([--disable-doc-gen],
-  [disable documentation generation]),
+AC_ARG_ENABLE(doc-gen, AS_HELP_STRING([--disable-doc-gen],[disable documentation generation]),
   doc_generate=$enableval, doc_generate=yes)
 AC_SUBST(doc_generate)

@@ -300,19 +279,7 @@ if test "$(uname)" = "Darwin"; then
 fi


-# Do we have GNU tar?
-AC_MSG_CHECKING([if you have a recent GNU tar])
-if $tar --version 2> /dev/null | grep -q GNU && tar cvf /dev/null --warning=no-timestamp ./config.log > /dev/null; then
-    AC_MSG_RESULT(yes)
-    tarFlags="--warning=no-timestamp"
-else
-    AC_MSG_RESULT(no)
-fi
-AC_SUBST(tarFlags)
-
-
-AC_ARG_WITH(sandbox-shell, AC_HELP_STRING([--with-sandbox-shell=PATH],
-  [path of a statically-linked shell to use as /bin/sh in sandboxes]),
+AC_ARG_WITH(sandbox-shell, AS_HELP_STRING([--with-sandbox-shell=PATH],[path of a statically-linked shell to use as /bin/sh in sandboxes]),
   sandbox_shell=$withval)
 AC_SUBST(sandbox_shell)

@@ -327,6 +294,6 @@ done

 rm -f Makefile.config

-AC_CONFIG_HEADER([config.h])
+AC_CONFIG_HEADERS([config.h])
 AC_CONFIG_FILES([])
 AC_OUTPUT

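Most of the configure.ac churn is mechanical: the obsolete `AC_HELP_STRING` macro is replaced by `AS_HELP_STRING`, the separate bzip2/liblzma/zlib and GNU tar checks are dropped (presumably because those code paths now go through libarchive), and `AC_CONFIG_HEADER` becomes `AC_CONFIG_HEADERS`. The user-facing option names are unchanged; a usage sketch with placeholder paths:

    # the sandbox-shell path below is a placeholder
    $ ./configure --with-store-dir=/nix/store \
                  --with-sandbox-shell=/path/to/static-sh \
                  --disable-doc-gen
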
@@ -25,19 +25,19 @@ nix-eval = $(dummy-env) $(bindir)/nix eval --experimental-features nix-command -
 $(d)/%.1: $(d)/src/command-ref/%.md
     @printf "Title: %s\n\n" "$$(basename $@ .1)" > $^.tmp
     @cat $^ >> $^.tmp
-    $(trace-gen) lowdown -sT man $^.tmp -o $@
+    $(trace-gen) lowdown -sT man -M section=1 $^.tmp -o $@
     @rm $^.tmp

 $(d)/%.8: $(d)/src/command-ref/%.md
     @printf "Title: %s\n\n" "$$(basename $@ .8)" > $^.tmp
     @cat $^ >> $^.tmp
-    $(trace-gen) lowdown -sT man $^.tmp -o $@
+    $(trace-gen) lowdown -sT man -M section=8 $^.tmp -o $@
     @rm $^.tmp

 $(d)/nix.conf.5: $(d)/src/command-ref/conf-file.md
     @printf "Title: %s\n\n" "$$(basename $@ .5)" > $^.tmp
     @cat $^ >> $^.tmp
-    $(trace-gen) lowdown -sT man $^.tmp -o $@
+    $(trace-gen) lowdown -sT man -M section=5 $^.tmp -o $@
     @rm $^.tmp

 $(d)/src/SUMMARY.md: $(d)/src/SUMMARY.md.in $(d)/src/command-ref/new-cli
@@ -80,7 +80,7 @@ install: $(d)/src/command-ref/new-cli
       if [[ $$name = SUMMARY ]]; then continue; fi; \
       printf "Title: %s\n\n" "$$name" > $$i.tmp; \
       cat $$i >> $$i.tmp; \
-      lowdown -sT man $$i.tmp -o $(mandir)/man1/$$name.1; \
+      lowdown -sT man -M section=1 $$i.tmp -o $(mandir)/man1/$$name.1; \
     done

 $(docdir)/manual/index.html: $(MANUAL_SRCS) $(d)/book.toml $(d)/custom.css $(d)/src/SUMMARY.md $(d)/src/command-ref/new-cli $(d)/src/command-ref/conf-file.md $(d)/src/expressions/builtins.md

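The only change in these manual build rules is the added `-M section=N` metadata flag, which tells lowdown which man section number to stamp into the generated pages. Expanded for a single page, one of those recipes amounts to roughly the following (file names are illustrative):

    $ printf "Title: %s\n\n" "nix-env" > nix-env.md.tmp
    $ cat doc/manual/src/command-ref/nix-env.md >> nix-env.md.tmp
    $ lowdown -sT man -M section=1 nix-env.md.tmp -o nix-env.1
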
@@ -4,13 +4,13 @@ Nix has two relevant settings with regards to how your CPU cores will
 be utilized: `cores` and `max-jobs`. This chapter will talk about what
 they are, how they interact, and their configuration trade-offs.

-  - `max-jobs`
+  - `max-jobs`\
     Dictates how many separate derivations will be built at the same
     time. If you set this to zero, the local machine will do no
     builds. Nix will still substitute from binary caches, and build
     remotely if remote builders are configured.

-  - `cores`
+  - `cores`\
     Suggests how many cores each derivation should use. Similar to
     `make -j`.

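Both settings can also be given on the command line of the classic tools. A small sketch of the trade-off described above, with arbitrary values and a placeholder expression file: up to four derivations build in parallel, and each builder is hinted to use two cores.

    $ nix-build release.nix -A build --max-jobs 4 --cores 2
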
@@ -2,11 +2,11 @@

 Most Nix commands interpret the following environment variables:

-  - `IN_NIX_SHELL`
+  - `IN_NIX_SHELL`\
     Indicator that tells if the current environment was set up by
     `nix-shell`. Since Nix 2.0 the values are `"pure"` and `"impure"`

-  - `NIX_PATH`
+  - `NIX_PATH`\
     A colon-separated list of directories used to look up Nix
     expressions enclosed in angle brackets (i.e., `<path>`). For
     instance, the value
@@ -28,19 +28,23 @@ Most Nix commands interpret the following environment variables:
     consist of a single top-level directory. For example, setting
     `NIX_PATH` to

-        nixpkgs=https://github.com/NixOS/nixpkgs/archive/nixos-15.09.tar.gz
+        nixpkgs=https://github.com/NixOS/nixpkgs/archive/master.tar.gz

-    tells Nix to download the latest revision in the Nixpkgs/NixOS 15.09
-    channel.
+    tells Nix to download and use the current contents of the
+    `master` branch in the `nixpkgs` repository.

-    A following shorthand can be used to refer to the official channels:
+    The URLs of the tarballs from the official nixos.org channels (see
+    [the manual for `nix-channel`](nix-channel.md)) can be abbreviated
+    as `channel:<channel-name>`. For instance, the following two
+    values of `NIX_PATH` are equivalent:

-        nixpkgs=channel:nixos-15.09
+        nixpkgs=channel:nixos-21.05
+        nixpkgs=https://nixos.org/channels/nixos-21.05/nixexprs.tar.xz

-    The search path can be extended using the `-I` option, which takes
-    precedence over `NIX_PATH`.
+    The Nix search path can also be extended using the `-I` option to
+    many Nix commands, which takes precedence over `NIX_PATH`.

-  - `NIX_IGNORE_SYMLINK_STORE`
+  - `NIX_IGNORE_SYMLINK_STORE`\
     Normally, the Nix store directory (typically `/nix/store`) is not
     allowed to contain any symlink components. This is to prevent
     “impure” builds. Builders sometimes “canonicalise” paths by
@@ -62,41 +66,41 @@ Most Nix commands interpret the following environment variables:

     Consult the mount 8 manual page for details.

-  - `NIX_STORE_DIR`
+  - `NIX_STORE_DIR`\
     Overrides the location of the Nix store (default `prefix/store`).

-  - `NIX_DATA_DIR`
+  - `NIX_DATA_DIR`\
     Overrides the location of the Nix static data directory (default
     `prefix/share`).

-  - `NIX_LOG_DIR`
+  - `NIX_LOG_DIR`\
     Overrides the location of the Nix log directory (default
     `prefix/var/log/nix`).

-  - `NIX_STATE_DIR`
+  - `NIX_STATE_DIR`\
     Overrides the location of the Nix state directory (default
     `prefix/var/nix`).

-  - `NIX_CONF_DIR`
+  - `NIX_CONF_DIR`\
     Overrides the location of the system Nix configuration directory
     (default `prefix/etc/nix`).

-  - `NIX_CONFIG`
+  - `NIX_CONFIG`\
     Applies settings from Nix configuration from the environment.
     The content is treated as if it was read from a Nix configuration file.
     Settings are separated by the newline character.

-  - `NIX_USER_CONF_FILES`
+  - `NIX_USER_CONF_FILES`\
     Overrides the location of the user Nix configuration files to load
     from (defaults to the XDG spec locations). The variable is treated
     as a list separated by the `:` token.

-  - `TMPDIR`
+  - `TMPDIR`\
     Use the specified directory to store temporary files. In particular,
     this includes temporary build directories; these can take up
     substantial amounts of disk space. The default is `/tmp`.

-  - `NIX_REMOTE`
+  - `NIX_REMOTE`\
     This variable should be set to `daemon` if you want to use the Nix
     daemon to execute Nix operations. This is necessary in [multi-user
     Nix installations](../installation/multi-user.md). If the Nix
@@ -104,16 +108,16 @@ Most Nix commands interpret the following environment variables:
     should be set to `unix://path/to/socket`. Otherwise, it should be
     left unset.

-  - `NIX_SHOW_STATS`
+  - `NIX_SHOW_STATS`\
     If set to `1`, Nix will print some evaluation statistics, such as
     the number of values allocated.

-  - `NIX_COUNT_CALLS`
+  - `NIX_COUNT_CALLS`\
     If set to `1`, Nix will print how often functions were called during
     Nix expression evaluation. This is useful for profiling your Nix
     expressions.

-  - `GC_INITIAL_HEAP_SIZE`
+  - `GC_INITIAL_HEAP_SIZE`\
     If Nix has been configured to use the Boehm garbage collector, this
     variable sets the initial size of the heap in bytes. It defaults to
     384 MiB. Setting it to a low value reduces memory consumption, but

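The `channel:<channel-name>` shorthand and the full `nixexprs.tar.xz` URL shown above can be checked against each other with the same `nix-instantiate` invocation the manual uses elsewhere; the evaluated expression is only an example, and both commands should print the same Nixpkgs version string:

    $ NIX_PATH=nixpkgs=channel:nixos-21.05 \
        nix-instantiate --eval -E '(import <nixpkgs> {}).lib.version'
    $ NIX_PATH=nixpkgs=https://nixos.org/channels/nixos-21.05/nixexprs.tar.xz \
        nix-instantiate --eval -E '(import <nixpkgs> {}).lib.version'
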
@@ -47,16 +47,16 @@ All options not listed here are passed to `nix-store
 --realise`, except for `--arg` and `--attr` / `-A` which are passed to
 `nix-instantiate`.

-  - `--no-out-link`
+  - `--no-out-link`\
     Do not create a symlink to the output path. Note that as a result
     the output does not become a root of the garbage collector, and so
     might be deleted by `nix-store
     --gc`.

-  - `--dry-run`
+  - `--dry-run`\
     Show what store paths would be built or downloaded.

-  - `--out-link` / `-o` *outlink*
+  - `--out-link` / `-o` *outlink*\
     Change the name of the symlink to the output path created from
     `result` to *outlink*.

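As a quick illustration of the `--out-link` option documented above (the attribute is an example; any buildable attribute works):

    $ nix-build '<nixpkgs>' -A hello --out-link ./hello
    $ ./hello/bin/hello
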
@@ -17,26 +17,26 @@ To see the list of official NixOS channels, visit

 This command has the following operations:

-  - `--add` *url* \[*name*\]
+  - `--add` *url* \[*name*\]\
     Adds a channel named *name* with URL *url* to the list of subscribed
     channels. If *name* is omitted, it defaults to the last component of
     *url*, with the suffixes `-stable` or `-unstable` removed.

-  - `--remove` *name*
+  - `--remove` *name*\
     Removes the channel named *name* from the list of subscribed
     channels.

-  - `--list`
+  - `--list`\
     Prints the names and URLs of all subscribed channels on standard
     output.

-  - `--update` \[*names*…\]
+  - `--update` \[*names*…\]\
     Downloads the Nix expressions of all subscribed channels (or only
     those included in *names* if specified) and makes them the default
     for `nix-env` operations (by symlinking them from the directory
     `~/.nix-defexpr`).

-  - `--rollback` \[*generation*\]
+  - `--rollback` \[*generation*\]\
     Reverts the previous call to `nix-channel
     --update`. Optionally, you can specify a specific channel generation
     number to restore.
@@ -70,14 +70,14 @@ $ nix-instantiate --eval -E '(import <nixpkgs> {}).lib.version'

 # Files

-  - `/nix/var/nix/profiles/per-user/username/channels`
+  - `/nix/var/nix/profiles/per-user/username/channels`\
     `nix-channel` uses a `nix-env` profile to keep track of previous
     versions of the subscribed channels. Every time you run `nix-channel
     --update`, a new channel generation (that is, a symlink to the
     channel Nix expressions in the Nix store) is created. This enables
     `nix-channel --rollback` to revert to previous versions.

-  - `~/.nix-defexpr/channels`
+  - `~/.nix-defexpr/channels`\
     This is a symlink to
     `/nix/var/nix/profiles/per-user/username/channels`. It ensures that
     `nix-env` can find your channels. In a multi-user installation, you
@@ -89,7 +89,7 @@ $ nix-instantiate --eval -E '(import <nixpkgs> {}).lib.version'
 A channel URL should point to a directory containing the following
 files:

-  - `nixexprs.tar.xz`
+  - `nixexprs.tar.xz`\
     A tarball containing Nix expressions and files referenced by them
     (such as build scripts and patches). At the top level, the tarball
     should contain a single directory. That directory must contain a

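A minimal end-to-end use of the operations listed above, with the 21.05 channel that the updated examples refer to:

    $ nix-channel --add https://nixos.org/channels/nixos-21.05 nixpkgs
    $ nix-channel --update nixpkgs
    $ nix-channel --list
    nixpkgs https://nixos.org/channels/nixos-21.05
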
@@ -35,21 +35,21 @@ and second to send the dump of those paths. If this bothers you, use

 # Options

-  - `--to`
+  - `--to`\
     Copy the closure of _paths_ from the local Nix store to the Nix
     store on _machine_. This is the default.

-  - `--from`
+  - `--from`\
     Copy the closure of _paths_ from the Nix store on _machine_ to the
     local Nix store.

-  - `--gzip`
+  - `--gzip`\
     Enable compression of the SSH connection.

-  - `--include-outputs`
+  - `--include-outputs`\
     Also copy the outputs of store derivations included in the closure.

-  - `--use-substitutes` / `-s`
+  - `--use-substitutes` / `-s`\
     Attempt to download missing paths on the target machine using Nix’s
     substitute mechanism. Any paths that cannot be substituted on the
     target are still copied normally from the source. This is useful,
@@ -58,12 +58,12 @@ and second to send the dump of those paths. If this bothers you, use
     `nixos.org` (the default binary cache server) is
     fast.

-  - `-v`
+  - `-v`\
     Show verbose output.

 # Environment variables

-  - `NIX_SSHOPTS`
+  - `NIX_SSHOPTS`\
     Additional options to be passed to `ssh` on the command
     line.

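Putting the options and the `NIX_SSHOPTS` variable together, a typical invocation looks roughly like this (the host and the built attribute are placeholders):

    $ NIX_SSHOPTS="-p 2222" nix-copy-closure --to --use-substitutes \
        alice@example.org $(nix-build '<nixpkgs>' -A hello)
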
@@ -36,27 +36,27 @@ case-sensitive. The regular expression can optionally be followed by a
 dash and a version number; if omitted, any version of the package will
 match. Here are some examples:

-  - `firefox`
+  - `firefox`\
     Matches the package name `firefox` and any version.

-  - `firefox-32.0`
+  - `firefox-32.0`\
     Matches the package name `firefox` and version `32.0`.

-  - `gtk\\+`
+  - `gtk\\+`\
     Matches the package name `gtk+`. The `+` character must be escaped
     using a backslash to prevent it from being interpreted as a
     quantifier, and the backslash must be escaped in turn with another
     backslash to ensure that the shell passes it on.

-  - `.\*`
+  - `.\*`\
     Matches any package name. This is the default for most commands.

-  - `'.*zip.*'`
+  - `'.*zip.*'`\
     Matches any package name containing the string `zip`. Note the dots:
     `'*zip*'` does not work, because in a regular expression, the
     character `*` is interpreted as a quantifier.

-  - `'.*(firefox|chromium).*'`
+  - `'.*(firefox|chromium).*'`\
     Matches any package name containing the strings `firefox` or
     `chromium`.

@@ -66,7 +66,7 @@ This section lists the options that are common to all operations. These
 options are allowed for every subcommand, though they may not always
 have an effect.

-  - `--file` / `-f` *path*
+  - `--file` / `-f` *path*\
     Specifies the Nix expression (designated below as the *active Nix
     expression*) used by the `--install`, `--upgrade`, and `--query
     --available` operations to obtain derivations. The default is
@@ -77,13 +77,13 @@ have an effect.
     unpacked to a temporary location. The tarball must include a single
     top-level directory containing at least a file named `default.nix`.

-  - `--profile` / `-p` *path*
+  - `--profile` / `-p` *path*\
     Specifies the profile to be used by those operations that operate on
     a profile (designated below as the *active profile*). A profile is a
     sequence of user environments called *generations*, one of which is
     the *current generation*.

-  - `--dry-run`
+  - `--dry-run`\
     For the `--install`, `--upgrade`, `--uninstall`,
     `--switch-generation`, `--delete-generations` and `--rollback`
     operations, this flag will cause `nix-env` to print what *would* be
@@ -93,7 +93,7 @@ have an effect.
     [substituted](../glossary.md) (i.e., downloaded) and which paths
     will be built from source (because no substitute is available).

-  - `--system-filter` *system*
+  - `--system-filter` *system*\
     By default, operations such as `--query
     --available` show derivations matching any platform. This option
     allows you to use derivations for the specified platform *system*.
@@ -102,7 +102,7 @@ have an effect.

 # Files

-  - `~/.nix-defexpr`
+  - `~/.nix-defexpr`\
     The source for the default Nix expressions used by the
     `--install`, `--upgrade`, and `--query --available` operations to
     obtain derivations. The `--file` option may be used to override
@@ -140,7 +140,7 @@ have an effect.
     The command `nix-channel` places symlinks to the downloaded Nix
     expressions from each subscribed channel in this directory.

-  - `~/.nix-profile`
+  - `~/.nix-profile`\
     A symbolic link to the user's current profile. By default, this
     symlink points to `prefix/var/nix/profiles/default`. The `PATH`
     environment variable should include `~/.nix-profile/bin` for the

@@ -217,13 +217,13 @@ a number of possible ways:

 ## Flags

-  - `--prebuilt-only` / `-b`
+  - `--prebuilt-only` / `-b`\
     Use only derivations for which a substitute is registered, i.e.,
     there is a pre-built binary available that can be downloaded in lieu
     of building the derivation. Thus, no packages will be built from
     source.

-  - `--preserve-installed`; `-P`
+  - `--preserve-installed`; `-P`\
     Do not remove derivations with a name matching one of the
     derivations being installed. Usually, trying to have two versions of
     the same package installed in the same generation of a profile will
@@ -231,7 +231,7 @@ a number of possible ways:
     clashes between the two versions. However, this is not the case for
     all packages.

-  - `--remove-all`; `-r`
+  - `--remove-all`; `-r`\
     Remove all previously installed packages first. This is equivalent
     to running `nix-env -e '.*'` first, except that everything happens
     in a single transaction.

@@ -346,24 +346,24 @@ version is installed.

 ## Flags

-  - `--lt`
+  - `--lt`\
     Only upgrade a derivation to newer versions. This is the default.

-  - `--leq`
+  - `--leq`\
     In addition to upgrading to newer versions, also “upgrade” to
     derivations that have the same version. Version are not a unique
     identification of a derivation, so there may be many derivations
     that have the same version. This flag may be useful to force
     “synchronisation” between the installed and available derivations.

-  - `--eq`
+  - `--eq`\
     *Only* “upgrade” to derivations that have the same version. This may
     not seem very useful, but it actually is, e.g., when there is a new
     release of Nixpkgs and you want to replace installed applications
     with the same versions built against newer dependencies (to reduce
     the number of dependencies floating around on your system).

-  - `--always`
+  - `--always`\
     In addition to upgrading to newer versions, also “upgrade” to
     derivations that have the same or a lower version. I.e., derivations
     may actually be downgraded depending on what is available in the

@@ -578,11 +578,11 @@ The derivations are sorted by their `name` attributes.
 The following flags specify the set of things on which the query
 operates.

-  - `--installed`
+  - `--installed`\
     The query operates on the store paths that are installed in the
     current generation of the active profile. This is the default.

-  - `--available`; `-a`
+  - `--available`; `-a`\
     The query operates on the derivations that are available in the
     active Nix expression.

@@ -593,24 +593,24 @@ selected derivations. Multiple flags may be specified, in which case the
 information is shown in the order given here. Note that the name of the
 derivation is shown unless `--no-name` is specified.

-  - `--xml`
+  - `--xml`\
     Print the result in an XML representation suitable for automatic
     processing by other tools. The root element is called `items`, which
     contains a `item` element for each available or installed
     derivation. The fields discussed below are all stored in attributes
     of the `item` elements.

-  - `--json`
+  - `--json`\
     Print the result in a JSON representation suitable for automatic
     processing by other tools.

-  - `--prebuilt-only` / `-b`
+  - `--prebuilt-only` / `-b`\
     Show only derivations for which a substitute is registered, i.e.,
     there is a pre-built binary available that can be downloaded in lieu
     of building the derivation. Thus, this shows all packages that
     probably can be installed quickly.

-  - `--status`; `-s`
+  - `--status`; `-s`\
     Print the *status* of the derivation. The status consists of three
     characters. The first is `I` or `-`, indicating whether the
     derivation is currently installed in the current generation of the

@@ -621,49 +621,49 @@ derivation is shown unless `--no-name` is specified.
    derivation to be built. The third is `S` or `-`, indicating whether
    a substitute is available for the derivation.

-  - `--attr-path`; `-P`
+  - `--attr-path`; `-P`\
    Print the *attribute path* of the derivation, which can be used to
    unambiguously select it using the `--attr` option available in
    commands that install derivations like `nix-env --install`. This
    option only works together with `--available`

-  - `--no-name`
+  - `--no-name`\
    Suppress printing of the `name` attribute of each derivation.

-  - `--compare-versions` / `-c`
+  - `--compare-versions` / `-c`\
    Compare installed versions to available versions, or vice versa (if
    `--available` is given). This is useful for quickly seeing whether
    upgrades for installed packages are available in a Nix expression. A
    column is added with the following meaning:

-      - `<` *version*
+      - `<` *version*\
        A newer version of the package is available or installed.

-      - `=` *version*
+      - `=` *version*\
        At most the same version of the package is available or
        installed.

-      - `>` *version*
+      - `>` *version*\
        Only older versions of the package are available or installed.

-      - `- ?`
+      - `- ?`\
        No version of the package is available or installed.

-  - `--system`
+  - `--system`\
    Print the `system` attribute of the derivation.

-  - `--drv-path`
+  - `--drv-path`\
    Print the path of the store derivation.

-  - `--out-path`
+  - `--out-path`\
    Print the output path of the derivation.

-  - `--description`
+  - `--description`\
    Print a short (one-line) description of the derivation, if
    available. The description is taken from the `meta.description`
    attribute of the derivation.

-  - `--meta`
+  - `--meta`\
    Print all of the meta-attributes of the derivation. This option is
    only available with `--xml` or `--json`.
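For a concrete sense of the comparison column described under `--compare-versions`, a query might print something like the following (the package names and version numbers are invented purely for illustration):

```console
$ nix-env --query --compare-versions
firefox-89.0      < 90.0
hello-2.10        = 2.10
libxml2-2.9.12    > 2.9.10
acroread-7.0      - ?
```

Each line pairs an installed package with what is available in the active Nix expression, using the `<`, `=`, `>` and `- ?` markers explained above.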
@@ -874,7 +874,7 @@ error: no generation older than the current (91) exists

# Environment variables

-  - `NIX_PROFILE`
+  - `NIX_PROFILE`\
    Location of the Nix profile. Defaults to the target of the symlink
    `~/.nix-profile`, if it exists, or `/nix/var/nix/profiles/default`
    otherwise.
@@ -29,29 +29,29 @@ md5sum`.

# Options

-  - `--flat`
+  - `--flat`\
    Print the cryptographic hash of the contents of each regular file
    *path*. That is, do not compute the hash over the dump of *path*.
    The result is identical to that produced by the GNU commands
    `md5sum` and `sha1sum`.

-  - `--base32`
+  - `--base32`\
    Print the hash in a base-32 representation rather than hexadecimal.
    This base-32 representation is more compact and can be used in Nix
    expressions (such as in calls to `fetchurl`).

-  - `--truncate`
+  - `--truncate`\
    Truncate hashes longer than 160 bits (such as SHA-256) to 160 bits.

-  - `--type` *hashAlgo*
+  - `--type` *hashAlgo*\
    Use the specified cryptographic hash algorithm, which can be one of
    `md5`, `sha1`, `sha256`, and `sha512`.

-  - `--to-base16`
+  - `--to-base16`\
    Don’t hash anything, but convert the base-32 hash representation
    *hash* to hexadecimal.

-  - `--to-base32`
+  - `--to-base32`\
    Don’t hash anything, but convert the hexadecimal hash representation
    *hash* to base-32.
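A short session ties these flags together; it hashes an empty file the way `sha256sum` would and then converts the digest into the base-32 form (the hex digest shown is the well-known SHA-256 of empty input, so the first result is easy to verify):

```console
$ touch empty
$ nix-hash --type sha256 --flat empty
e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855

$ nix-hash --type sha256 --to-base32 e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
```

The second command prints the same digest in the shorter base-32 alphabet used in Nix expressions; `--to-base16` converts it back.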
@@ -29,26 +29,26 @@ standard input.

# Options

-  - `--add-root` *path*
+  - `--add-root` *path*\
    See the [corresponding option](nix-store.md) in `nix-store`.

-  - `--parse`
+  - `--parse`\
    Just parse the input files, and print their abstract syntax trees on
    standard output in ATerm format.

-  - `--eval`
+  - `--eval`\
    Just parse and evaluate the input files, and print the resulting
    values on standard output. No instantiation of store derivations
    takes place.

-  - `--find-file`
+  - `--find-file`\
    Look up the given files in Nix’s search path (as specified by the
    `NIX_PATH` environment variable). If found, print the corresponding
    absolute paths on standard output. For instance, if `NIX_PATH` is
    `nixpkgs=/home/alice/nixpkgs`, then `nix-instantiate --find-file
    nixpkgs/default.nix` will print `/home/alice/nixpkgs/default.nix`.

-  - `--strict`
+  - `--strict`\
    When used with `--eval`, recursively evaluate list elements and
    attributes. Normally, such sub-expressions are left unevaluated
    (since the Nix expression language is lazy).
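A quick way to get a feel for `--eval` and `--strict` is to evaluate a throw-away expression given with `-E` (a hypothetical attribute set used only for illustration):

```console
$ nix-instantiate --eval --strict -E '{ greeting = "hello"; numbers = [ 1 2 3 ]; }'
{ greeting = "hello"; numbers = [ 1 2 3 ]; }
```

Without `--strict`, the attribute values would be left unevaluated because of lazy evaluation.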
@@ -58,17 +58,17 @@ standard input.
    > This option can cause non-termination, because lazy data
    > structures can be infinitely large.

-  - `--json`
+  - `--json`\
    When used with `--eval`, print the resulting value as a JSON
    representation of the abstract syntax tree rather than as an ATerm.

-  - `--xml`
+  - `--xml`\
    When used with `--eval`, print the resulting value as an XML
    representation of the abstract syntax tree rather than as an ATerm.
    The schema is the same as that used by the [`toXML`
    built-in](../expressions/builtins.md).

-  - `--read-write-mode`
+  - `--read-write-mode`\
    When used with `--eval`, perform evaluation in read/write mode so
    nix language features that require it will still work (at the cost
    of needing to do instantiation of every evaluated derivation). If
@@ -37,22 +37,22 @@ Nix store is also printed.

# Options

-  - `--type` *hashAlgo*
+  - `--type` *hashAlgo*\
    Use the specified cryptographic hash algorithm, which can be one of
    `md5`, `sha1`, `sha256`, and `sha512`.

-  - `--print-path`
+  - `--print-path`\
    Print the store path of the downloaded file on standard output.

-  - `--unpack`
+  - `--unpack`\
    Unpack the archive (which must be a tarball or zip file) and add the
    result to the Nix store. The resulting hash can be used with
    functions such as Nixpkgs’s `fetchzip` or `fetchFromGitHub`.

-  - `--executable`
+  - `--executable`\
    Set the executable bit on the downloaded file.
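A typical invocation looks like this (the hash and store path shown are only indicative; the real values are whatever Nix reports for the file actually downloaded):

```console
$ nix-prefetch-url --print-path https://ftp.gnu.org/gnu/hello/hello-2.10.tar.gz
0ssi1wpaf7plaswqqjwigppsg5fyh99vdlb9kzl7c9lng89ndq1i
/nix/store/3x7dwzq014bblazs7kq20p9hyzz0qh8g-hello-2.10.tar.gz
```

The first line is the hash to paste into a `fetchurl` call; the second line is printed only because of `--print-path`.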

-  - `--name` *name*
+  - `--name` *name*\
    Override the name of the file in the Nix store. By default, this is
    `hash-basename`, where *basename* is the last component of *url*.
    Overriding the name is necessary when *basename* contains characters
@@ -54,7 +54,7 @@ All options not listed here are passed to `nix-store
--realise`, except for `--arg` and `--attr` / `-A` which are passed to
`nix-instantiate`.

-  - `--command` *cmd*
+  - `--command` *cmd*\
    In the environment of the derivation, run the shell command *cmd*.
    This command is executed in an interactive shell. (Use `--run` to
    use a non-interactive shell instead.) However, a call to `exit` is
@@ -64,36 +64,34 @@ All options not listed here are passed to `nix-store
    drop you into the interactive shell. This can be useful for doing
    any additional initialisation.

-  - `--run` *cmd*
+  - `--run` *cmd*\
    Like `--command`, but executes the command in a non-interactive
    shell. This means (among other things) that if you hit Ctrl-C while
    the command is running, the shell exits.

-  - `--exclude` *regexp*
+  - `--exclude` *regexp*\
    Do not build any dependencies whose store path matches the regular
    expression *regexp*. This option may be specified multiple times.

-  - `--pure`
+  - `--pure`\
    If this flag is specified, the environment is almost entirely
    cleared before the interactive shell is started, so you get an
    environment that more closely corresponds to the “real” Nix build. A
    few variables, in particular `HOME`, `USER` and `DISPLAY`, are
-    retained. Note that (depending on your Bash
-    installation) `/etc/bashrc` is still sourced, so any variables set
-    there will affect the interactive shell.
+    retained.

-  - `--packages` / `-p` *packages*…
+  - `--packages` / `-p` *packages*…\
    Set up an environment in which the specified packages are present.
    The command line arguments are interpreted as attribute names inside
    the Nix Packages collection. Thus, `nix-shell -p libjpeg openjdk`
    will start a shell in which the packages denoted by the attribute
    names `libjpeg` and `openjdk` are present.

-  - `-i` *interpreter*
+  - `-i` *interpreter*\
    The chained script interpreter to be invoked by `nix-shell`. Only
    applicable in `#!`-scripts (described below).

-  - `--keep` *name*
+  - `--keep` *name*\
    When a `--pure` shell is started, keep the listed environment
    variables.
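The flags above compose naturally; for example, a throw-away build environment can be set up and used non-interactively like this (the package names are only an illustration):

```console
$ nix-shell --pure --keep SSH_AUTH_SOCK -p gnumake gcc --run 'make check'
```

Here `--pure` clears the environment, `--keep` preserves one variable from the caller, `-p` pulls `gnumake` and `gcc` from the Nix Packages collection, and `--run` executes the command in a non-interactive shell.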
@@ -101,7 +99,7 @@ The following common options are supported:

# Environment variables

-  - `NIX_BUILD_SHELL`
+  - `NIX_BUILD_SHELL`\
    Shell used to start the interactive environment. Defaults to the
    `bash` found in `PATH`.
@@ -22,7 +22,7 @@ This section lists the options that are common to all operations. These
options are allowed for every subcommand, though they may not always
have an effect.

-  - `--add-root` *path*
+  - `--add-root` *path*\
    Causes the result of a realisation (`--realise` and
    `--force-realise`) to be registered as a root of the garbage
    collector. *path* will be created as a symlink to the resulting
@@ -79,22 +79,22 @@ paths. Realisation is a somewhat overloaded term:
    system). If the path is already valid, we are done immediately.
    Otherwise, the path and any missing paths in its closure may be
    produced through substitutes. If there are no (successful)
-    subsitutes, realisation fails.
+    substitutes, realisation fails.

The output path of each derivation is printed on standard output. (For
non-derivation arguments, the argument itself is printed.)

The following flags are available:

-  - `--dry-run`
+  - `--dry-run`\
    Print on standard error a description of what packages would be
    built or downloaded, without actually performing the operation.

-  - `--ignore-unknown`
+  - `--ignore-unknown`\
    If a non-derivation path does not have a substitute, then silently
    ignore it.

-  - `--check`
+  - `--check`\
    This option allows you to check whether a derivation is
    deterministic. It rebuilds the specified derivation and checks
    whether the result is bitwise-identical with the existing outputs,
@@ -110,20 +110,20 @@ The following flags are available:

Special exit codes:

-  - `100`
+  - `100`\
    Generic build failure, the builder process returned with a non-zero
    exit code.

-  - `101`
+  - `101`\
    Build timeout, the build was aborted because it did not complete
    within the specified `timeout`.

-  - `102`
+  - `102`\
    Hash mismatch, the build output was rejected because it does not
    match the [`outputHash` attribute of the
    derivation](../expressions/advanced-attributes.md).

-  - `104`
+  - `104`\
    Not deterministic, the build succeeded in check mode but the
    resulting output is not binary reproducible.
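The special exit codes are meant to be inspected by scripts; a minimal sketch (assuming `$1` is a store derivation path, e.g. one produced by `nix-instantiate`) might look like:

```bash
#!/usr/bin/env bash
# Rebuild a derivation in check mode and interpret the documented exit codes.
drv=$1
nix-store --realise --check "$drv"
case $? in
  0)   echo "output was reproduced bit-for-bit" ;;
  102) echo "hash mismatch in fixed-output derivation" ;;
  104) echo "build succeeded but is not deterministic" ;;
  *)   echo "build failed" ;;
esac
```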
@@ -170,7 +170,7 @@ access to a restricted ssh user.

The following flags are available:

-  - `--write`
+  - `--write`\
    Allow the connected client to request the realization of
    derivations. In effect, this can be used to make the host act as a
    remote builder.
@@ -200,18 +200,18 @@ reachable via file system references from a set of “roots”, are deleted.

The following suboperations may be specified:

-  - `--print-roots`
+  - `--print-roots`\
    This operation prints on standard output the set of roots used by
    the garbage collector.

-  - `--print-live`
+  - `--print-live`\
    This operation prints on standard output the set of “live” store
    paths, which are all the store paths reachable from the roots. Live
    paths should never be deleted, since that would break consistency —
    it would become possible that applications are installed that
    reference things that are no longer present in the store.

-  - `--print-dead`
+  - `--print-dead`\
    This operation prints out on standard output the set of “dead” store
    paths, which is just the opposite of the set of live paths: any path
    in the store that is not live (with respect to the roots) is dead.
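A cautious collection often starts by looking at what the collector sees before letting it delete anything (both listing operations can produce a lot of output on a big store):

```console
$ nix-store --gc --print-roots
$ nix-store --gc --print-dead
$ nix-store --gc
```

The first two commands only print the roots and the dead paths; the plain `--gc` at the end is what actually deletes.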
@@ -219,7 +219,7 @@ The following suboperations may be specified:
By default, all unreachable paths are deleted. The following options
control what gets deleted and in what order:

-  - `--max-freed` *bytes*
+  - `--max-freed` *bytes*\
    Keep deleting paths until at least *bytes* bytes have been deleted,
    then stop. The argument *bytes* can be followed by the
    multiplicative suffix `K`, `M`, `G` or `T`, denoting KiB, MiB, GiB
@@ -300,22 +300,22 @@ symlink.

## Common query options

-  - `--use-output`; `-u`
+  - `--use-output`; `-u`\
    For each argument to the query that is a store derivation, apply the
    query to the output path of the derivation instead.

-  - `--force-realise`; `-f`
+  - `--force-realise`; `-f`\
    Realise each argument to the query first (see [`nix-store
    --realise`](#operation---realise)).

## Queries

-  - `--outputs`
+  - `--outputs`\
    Prints out the [output paths](../glossary.md) of the store
    derivations *paths*. These are the paths that will be produced when
    the derivation is built.

-  - `--requisites`; `-R`
+  - `--requisites`; `-R`\
    Prints out the [closure](../glossary.md) of the store path *paths*.

    This query has one option:
@@ -332,31 +332,31 @@ symlink.
    dependencies) is obtained by distributing the closure of a store
    derivation and specifying the option `--include-outputs`.

-  - `--references`
+  - `--references`\
    Prints the set of [references](../glossary.md) of the store paths
    *paths*, that is, their immediate dependencies. (For *all*
    dependencies, use `--requisites`.)

-  - `--referrers`
+  - `--referrers`\
    Prints the set of *referrers* of the store paths *paths*, that is,
    the store paths currently existing in the Nix store that refer to
    one of *paths*. Note that contrary to the references, the set of
    referrers is not constant; it can change as store paths are added or
    removed.

-  - `--referrers-closure`
+  - `--referrers-closure`\
    Prints the closure of the set of store paths *paths* under the
    referrers relation; that is, all store paths that directly or
    indirectly refer to one of *paths*. These are all the paths currently
    in the Nix store that are dependent on *paths*.

-  - `--deriver`; `-d`
+  - `--deriver`; `-d`\
    Prints the [deriver](../glossary.md) of the store paths *paths*. If
    the path has no deriver (e.g., if it is a source file), or if the
    deriver is not known (e.g., in the case of a binary-only
    deployment), the string `unknown-deriver` is printed.

-  - `--graph`
+  - `--graph`\
    Prints the references graph of the store paths *paths* in the format
    of the `dot` tool of AT\&T's [Graphviz
    package](http://www.graphviz.org/). This can be used to visualise
@@ -364,39 +364,39 @@ symlink.
    this to a store derivation. To obtain a runtime dependency graph,
    apply it to an output path.

-  - `--tree`
+  - `--tree`\
    Prints the references graph of the store paths *paths* as a nested
    ASCII tree. References are ordered by descending closure size; this
    tends to flatten the tree, making it more readable. The query only
    recurses into a store path when it is first encountered; this
    prevents a blowup of the tree representation of the graph.

-  - `--graphml`
+  - `--graphml`\
    Prints the references graph of the store paths *paths* in the
    [GraphML](http://graphml.graphdrawing.org/) file format. This can be
    used to visualise dependency graphs. To obtain a build-time
    dependency graph, apply this to a store derivation. To obtain a
    runtime dependency graph, apply it to an output path.

-  - `--binding` *name*; `-b` *name*
+  - `--binding` *name*; `-b` *name*\
    Prints the value of the attribute *name* (i.e., environment
    variable) of the store derivations *paths*. It is an error for a
    derivation to not have the specified attribute.

-  - `--hash`
+  - `--hash`\
    Prints the SHA-256 hash of the contents of the store paths *paths*
    (that is, the hash of the output of `nix-store --dump` on the given
    paths). Since the hash is stored in the Nix database, this is a fast
    operation.

-  - `--size`
+  - `--size`\
    Prints the size in bytes of the contents of the store paths *paths*
    — to be precise, the size of the output of `nix-store --dump` on
    the given paths. Note that the actual disk space required by the
    store paths may be higher, especially on filesystems with large
    cluster sizes.

-  - `--roots`
+  - `--roots`\
    Prints the garbage collector roots that point, directly or
    indirectly, at the store paths *paths*.
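A few of these queries chained together give a quick picture of a package's dependencies. The following sketch assumes a `nixpkgs` channel is available, that the first command leaves a `./result` symlink behind, and (for the last command) that Graphviz is installed:

```console
$ nix-build '<nixpkgs>' -A hello
$ nix-store --query --references ./result
$ nix-store --query --roots ./result
$ nix-store --query --graph ./result | dot -Tsvg > deps.svg
```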
@@ -513,7 +513,7 @@ public url or broke since the download expression was written.

This operation has the following options:

-  - `--recursive`
+  - `--recursive`\
    Use recursive instead of flat hashing mode, used when adding
    directories to the store.
@@ -540,14 +540,14 @@ being modified by non-Nix tools, or of bugs in Nix itself.

This operation has the following options:

-  - `--check-contents`
+  - `--check-contents`\
    Checks that the contents of every valid store path has not been
    altered by computing a SHA-256 hash of the contents and comparing it
    with the hash stored in the Nix database at build time. Paths that
    have been modified are printed out. For large stores,
    `--check-contents` is obviously quite slow.
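Because it re-hashes every path, `--check-contents` is usually run out of band, for example from a maintenance script; on a multi-user installation it needs to run as a user that can read the whole store (hence the `sudo` in this sketch):

```console
$ sudo nix-store --verify --check-contents
```

Paths whose hash no longer matches the database are printed and can then be fixed with `--repair`, described next.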

-  - `--repair`
+  - `--repair`\
    If any valid path is missing from the store, or (if
    `--check-contents` is given) the contents of a valid path has been
    modified, then try to repair the path by redownloading it. See
@@ -2,13 +2,13 @@

Most Nix commands accept the following command-line options:

-  - `--help`
+  - `--help`\
    Prints out a summary of the command syntax and exits.

-  - `--version`
+  - `--version`\
    Prints out the Nix version number on standard output and exits.

-  - `--verbose` / `-v`
+  - `--verbose` / `-v`\
    Increases the level of verbosity of diagnostic messages printed on
    standard error. For each Nix operation, the information printed on
    standard output is well-defined; any diagnostic information is
@@ -17,41 +17,41 @@ Most Nix commands accept the following command-line options:
    This option may be specified repeatedly. Currently, the following
    verbosity levels exist:

-      - 0
+      - 0\
        “Errors only”: only print messages explaining why the Nix
        invocation failed.

-      - 1
+      - 1\
        “Informational”: print *useful* messages about what Nix is
        doing. This is the default.

-      - 2
+      - 2\
        “Talkative”: print more informational messages.

-      - 3
+      - 3\
        “Chatty”: print even more informational messages.

-      - 4
+      - 4\
        “Debug”: print debug information.

-      - 5
+      - 5\
        “Vomit”: print vast amounts of debug information.

-  - `--quiet`
+  - `--quiet`\
    Decreases the level of verbosity of diagnostic messages printed on
    standard error. This is the inverse option to `-v` / `--verbose`.

    This option may be specified repeatedly. See the previous verbosity
    levels list.

-  - `--log-format` *format*
+  - `--log-format` *format*\
    This option can be used to change the output of the log format, with
    *format* being one of:

-      - raw
+      - raw\
        This is the raw format, as outputted by nix-build.

-      - internal-json
+      - internal-json\
        Outputs the logs in a structured manner.

        > **Warning**
@@ -60,20 +60,20 @@ Most Nix commands accept the following command-line options:
        > the error-messages (namely of the `msg`-field) can change
        > between releases.

-      - bar
+      - bar\
        Only display a progress bar during the builds.

-      - bar-with-logs
+      - bar-with-logs\
        Display the raw logs, with the progress bar at the bottom.

-  - `--no-build-output` / `-Q`
+  - `--no-build-output` / `-Q`\
    By default, output written by builders to standard output and
    standard error is echoed to the Nix command's standard error. This
    option suppresses this behaviour. Note that the builder's standard
    output and error are always written to a log file in
    `prefix/nix/var/log/nix`.

-  - `--max-jobs` / `-j` *number*
+  - `--max-jobs` / `-j` *number*\
    Sets the maximum number of build jobs that Nix will perform in
    parallel to the specified number. Specify `auto` to use the number
    of CPUs in the system. The default is specified by the `max-jobs`
@@ -83,7 +83,7 @@ Most Nix commands accept the following command-line options:
    Setting it to `0` disallows building on the local machine, which is
    useful when you want builds to happen only on remote builders.

-  - `--cores`
+  - `--cores`\
    Sets the value of the `NIX_BUILD_CORES` environment variable in
    the invocation of builders. Builders can use this variable at
    their discretion to control the maximum amount of parallelism. For
@@ -94,18 +94,18 @@ Most Nix commands accept the following command-line options:
    means that the builder should use all available CPU cores in the
    system.

-  - `--max-silent-time`
+  - `--max-silent-time`\
    Sets the maximum number of seconds that a builder can go without
    producing any data on standard output or standard error. The
    default is specified by the `max-silent-time` configuration
    setting. `0` means no time-out.

-  - `--timeout`
+  - `--timeout`\
    Sets the maximum number of seconds that a builder can run. The
    default is specified by the `timeout` configuration setting. `0`
    means no timeout.

-  - `--keep-going` / `-k`
+  - `--keep-going` / `-k`\
    Keep going in case of failed builds, to the greatest extent
    possible. That is, if building an input of some derivation fails,
    Nix will still build the other inputs, but not the derivation
@@ -113,13 +113,13 @@ Most Nix commands accept the following command-line options:
    for builds of substitutes), possibly killing builds in progress (in
    case of parallel or distributed builds).

-  - `--keep-failed` / `-K`
+  - `--keep-failed` / `-K`\
    Specifies that in case of a build failure, the temporary directory
    (usually in `/tmp`) in which the build takes place should not be
    deleted. The path of the build directory is printed as an
    informational message.

-  - `--fallback`
+  - `--fallback`\
    Whenever Nix attempts to build a derivation for which substitutes
    are known for each output path, but realising the output paths
    through the substitutes fails, fall back on building the derivation.
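Several of the options above are routinely combined on one command line; a sketch (the attribute `hello` is only an example):

```console
$ nix-build -v -Q -j auto -k --fallback '<nixpkgs>' -A hello
```

This raises verbosity one level, hides build output, uses one build job per CPU, keeps going past individual failures, and falls back to building locally when substitutes cannot be fetched.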
@@ -134,12 +134,12 @@ Most Nix commands accept the following command-line options:
    failure in obtaining the substitutes to lead to a full build from
    source (with the related consumption of resources).

-  - `--readonly-mode`
+  - `--readonly-mode`\
    When this option is used, no attempt is made to open the Nix
    database. Most Nix operations do need database access, so those
    operations will fail.

-  - `--arg` *name* *value*
+  - `--arg` *name* *value*\
    This option is accepted by `nix-env`, `nix-instantiate`,
    `nix-shell` and `nix-build`. When evaluating Nix expressions, the
    expression evaluator will automatically try to call functions that
@@ -170,13 +170,13 @@ Most Nix commands accept the following command-line options:
    since the argument is a Nix string literal, you have to escape the
    quotes.)

-  - `--argstr` *name* *value*
+  - `--argstr` *name* *value*\
    This option is like `--arg`, only the value is not a Nix
    expression but a string. So instead of `--arg system
    \"i686-linux\"` (the outer quotes are to keep the shell happy) you
    can say `--argstr system i686-linux`.

-  - `--attr` / `-A` *attrPath*
+  - `--attr` / `-A` *attrPath*\
    Select an attribute from the top-level Nix expression being
    evaluated. (`nix-env`, `nix-instantiate`, `nix-build` and
    `nix-shell` only.) The *attribute path* *attrPath* is a sequence
@@ -191,7 +191,7 @@ Most Nix commands accept the following command-line options:
    attribute of the fourth element of the array in the `foo` attribute
    of the top-level expression.

-  - `--expr` / `-E`
+  - `--expr` / `-E`\
    Interpret the command line arguments as a list of Nix expressions to
    be parsed and evaluated, rather than as a list of file names of Nix
    expressions. (`nix-instantiate`, `nix-build` and `nix-shell` only.)
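Assuming a hypothetical `release.nix` whose top-level function accepts `enableFoo` and `system` arguments, these options combine as follows; the second command shows `--argstr` auto-calling a function given with `-E`:

```console
$ nix-build release.nix -A foo.bar --arg enableFoo true --argstr system i686-linux

$ nix-instantiate --eval -E '{ greeting ? "hello" }: greeting' --argstr greeting hi
"hi"
```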
@@ -202,17 +202,17 @@ Most Nix commands accept the following command-line options:
    use, give your expression to the `nix-shell -p` convenience flag
    instead.

-  - `-I` *path*
+  - `-I` *path*\
    Add a path to the Nix expression search path. This option may be
    given multiple times. See the `NIX_PATH` environment variable for
    information on the semantics of the Nix search path. Paths added
    through `-I` take precedence over `NIX_PATH`.

-  - `--option` *name* *value*
+  - `--option` *name* *value*\
    Set the Nix configuration option *name* to *value*. This overrides
    settings in the Nix configuration file (see nix.conf5).

-  - `--repair`
+  - `--repair`\
    Fix corrupted or missing store paths by redownloading or rebuilding
    them. Note that this is slow because it requires computing a
    cryptographic hash of the contents of every path in the closure of
@@ -2,7 +2,7 @@

Derivations can declare some infrequently used optional attributes.

-  - `allowedReferences`
+  - `allowedReferences`\
    The optional attribute `allowedReferences` specifies a list of legal
    references (dependencies) of the output of the builder. For example,
@@ -17,7 +17,7 @@ Derivations can declare some infrequently used optional attributes.
    booting Linux don’t have accidental dependencies on other paths in
    the Nix store.

-  - `allowedRequisites`
+  - `allowedRequisites`\
    This attribute is similar to `allowedReferences`, but it specifies
    the legal requisites of the whole closure, so all the dependencies
    recursively. For example,
@@ -30,7 +30,7 @@ Derivations can declare some infrequently used optional attributes.
    runtime dependency than `foobar`, and in addition it enforces that
    `foobar` itself doesn't introduce any other dependency itself.

-  - `disallowedReferences`
+  - `disallowedReferences`\
    The optional attribute `disallowedReferences` specifies a list of
    illegal references (dependencies) of the output of the builder. For
    example,
@@ -42,7 +42,7 @@ Derivations can declare some infrequently used optional attributes.
    enforces that the output of a derivation cannot have a direct
    runtime dependency on the derivation `foo`.

-  - `disallowedRequisites`
+  - `disallowedRequisites`\
    This attribute is similar to `disallowedReferences`, but it
    specifies illegal requisites for the whole closure, so all the
    dependencies recursively. For example,
@@ -55,7 +55,7 @@ Derivations can declare some infrequently used optional attributes.
    dependency on `foobar` or any other derivation depending recursively
    on `foobar`.

-  - `exportReferencesGraph`
+  - `exportReferencesGraph`\
    This attribute allows builders access to the references graph of
    their inputs. The attribute is a list of inputs in the Nix store
    whose references graph the builder needs to know. The value of
@@ -84,7 +84,7 @@ Derivations can declare some infrequently used optional attributes.
    with a Nix store containing the closure of a bootable NixOS
    configuration).

-  - `impureEnvVars`
+  - `impureEnvVars`\
    This attribute allows you to specify a list of environment variables
    that should be passed from the environment of the calling user to
    the builder. Usually, the environment is cleared completely when the
@@ -112,7 +112,7 @@ Derivations can declare some infrequently used optional attributes.
    > environmental variables come from the environment of the
    > `nix-build`.

-  - `outputHash`; `outputHashAlgo`; `outputHashMode`
+  - `outputHash`; `outputHashAlgo`; `outputHashMode`\
    These attributes declare that the derivation is a so-called
    *fixed-output derivation*, which means that a cryptographic hash of
    the output is already known in advance. When the build of a
@@ -188,7 +188,7 @@ Derivations can declare some infrequently used optional attributes.
    The `outputHashMode` attribute determines how the hash is computed.
    It must be one of the following two values:

-      - `"flat"`
+      - `"flat"`\
        The output must be a non-executable regular file. If it isn’t,
        the build fails. The hash is simply computed over the contents
        of that file (so it’s equal to what Unix commands like
@@ -196,7 +196,7 @@ Derivations can declare some infrequently used optional attributes.

        This is the default.

-      - `"recursive"`
+      - `"recursive"`\
        The hash is computed over the NAR archive dump of the output
        (i.e., the result of [`nix-store
        --dump`](../command-ref/nix-store.md#operation---dump)). In
@@ -208,7 +208,15 @@ Derivations can declare some infrequently used optional attributes.
        [`nix-hash` command](../command-ref/nix-hash.md) for information
        about converting to and from base-32 notation.)

-  - `passAsFile`
+  - `__contentAddressed`
+    If this **experimental** attribute is set to true, then the derivation
+    outputs will be stored in a content-addressed location rather than the
+    traditional input-addressed one.
+    This only has an effect if the `ca-derivation` experimental feature is enabled.
+
+    Setting this attribute also requires setting `outputHashMode` and `outputHashAlgo` like for *fixed-output derivations* (see above).
+
+  - `passAsFile`\
    A list of names of attributes that should be passed via files rather
    than environment variables. For example, if you have
@@ -226,7 +234,7 @@ Derivations can declare some infrequently used optional attributes.
    builder, since most operating systems impose a limit on the size
    of the environment (typically, a few hundred kilobyte).

-  - `preferLocalBuild`
+  - `preferLocalBuild`\
    If this attribute is set to `true` and [distributed building is
    enabled](../advanced-topics/distributed-builds.md), then, if
    possible, the derivation will be built locally instead of forwarded
@@ -234,7 +242,7 @@ Derivations can declare some infrequently used optional attributes.
    where the cost of doing a download or remote build would exceed
    the cost of building locally.

-  - `allowSubstitutes`
+  - `allowSubstitutes`\
    If this attribute is set to `false`, then Nix will always build this
    derivation; it will not try to substitute its outputs. This is
    useful for very trivial derivations (such as `writeText` in Nixpkgs)
@@ -2,7 +2,7 @@

Here are the constants built into the Nix expression evaluator:

-  - `builtins`
+  - `builtins`\
    The set `builtins` contains all the built-in functions and values.
    You can use `builtins` to test for the availability of features in
    the Nix installation, e.g.,
@ -14,7 +14,7 @@ Here are the constants built into the Nix expression evaluator:
|
||||||
This allows a Nix expression to fall back gracefully on older Nix
|
This allows a Nix expression to fall back gracefully on older Nix
|
||||||
installations that don’t have the desired built-in function.
|
installations that don’t have the desired built-in function.
|
||||||
|
|
||||||
- `builtins.currentSystem`
|
- `builtins.currentSystem`\
|
||||||
The built-in value `currentSystem` evaluates to the Nix platform
|
The built-in value `currentSystem` evaluates to the Nix platform
|
||||||
identifier for the Nix installation on which the expression is being
|
identifier for the Nix installation on which the expression is being
|
||||||
evaluated, such as `"i686-linux"` or `"x86_64-darwin"`.
|
evaluated, such as `"i686-linux"` or `"x86_64-darwin"`.
|
||||||
|
|
|
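A small sketch of the feature-testing pattern described above; the tested attribute and fallback string are illustrative:

```nix
# Illustrative only: probe `builtins` with the `?` operator before using
# a value that may not exist on older Nix installations.
if builtins ? currentSystem
then "running on ${builtins.currentSystem}"
else "old Nix without builtins.currentSystem"
```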
@@ -9,7 +9,7 @@ scope. Instead, you can access them through the `builtins` built-in

 value, which is a set that contains all built-in functions and values.
 For instance, `derivation` is also available as `builtins.derivation`.

-  - `derivation` *attrs*; `builtins.derivation` *attrs*
+  - `derivation` *attrs*; `builtins.derivation` *attrs*\

     `derivation` is described in [its own section](derivations.md).
@@ -1,48 +1,48 @@

 # Glossary

-  - derivation
+  - derivation\
     A description of a build action. The result of a derivation is a
     store object. Derivations are typically specified in Nix expressions
     using the [`derivation` primitive](expressions/derivations.md). These are
     translated into low-level *store derivations* (implicitly by
     `nix-env` and `nix-build`, or explicitly by `nix-instantiate`).

-  - store
+  - store\
     The location in the file system where store objects live. Typically
     `/nix/store`.

-  - store path
+  - store path\
     The location in the file system of a store object, i.e., an
     immediate child of the Nix store directory.

-  - store object
+  - store object\
     A file that is an immediate child of the Nix store directory. These
     can be regular files, but also entire directory trees. Store objects
     can be sources (objects copied from outside of the store),
     derivation outputs (objects produced by running a build action), or
     derivations (files describing a build action).

-  - substitute
+  - substitute\
     A substitute is a command invocation stored in the Nix database that
     describes how to build a store object, bypassing the normal build
     mechanism (i.e., derivations). Typically, the substitute builds the
     store object by downloading a pre-built version of the store object
     from some server.

-  - purity
+  - purity\
     The assumption that equal Nix derivations when run always produce
     the same output. This cannot be guaranteed in general (e.g., a
     builder can rely on external inputs such as the network or the
     system time) but the Nix model assumes it.

-  - Nix expression
+  - Nix expression\
     A high-level description of software packages and compositions
     thereof. Deploying software using Nix entails writing Nix
     expressions for your packages. Nix expressions are translated to
     derivations that are stored in the Nix store. These derivations can
     then be built.

-  - reference
+  - reference\
     A store path `P` is said to have a reference to a store path `Q` if
     the store object at `P` contains the path `Q` somewhere. The
     *references* of a store path are the set of store paths to which it

@@ -52,11 +52,11 @@

     output paths), whereas an output path only references other output
     paths.

-  - reachable
+  - reachable\
     A store path `Q` is reachable from another store path `P` if `Q`
     is in the *closure* of the *references* relation.

-  - closure
+  - closure\
     The closure of a store path is the set of store paths that are
     directly or indirectly “reachable” from that store path; that is,
     it’s the closure of the path under the *references* relation. For

@@ -71,29 +71,29 @@

     to path `Q`, then `Q` is in the closure of `P`. Further, if `Q`
     references `R` then `R` is also in the closure of `P`.

-  - output path
+  - output path\
     A store path produced by a derivation.

-  - deriver
+  - deriver\
     The deriver of an *output path* is the store
     derivation that built it.

-  - validity
+  - validity\
     A store path is considered *valid* if it exists in the file system,
     is listed in the Nix database as being valid, and if all paths in
     its closure are also valid.

-  - user environment
+  - user environment\
     An automatically generated store object that consists of a set of
     symlinks to “active” applications, i.e., other store paths. These
     are generated automatically by
     [`nix-env`](command-ref/nix-env.md). See *profiles*.

-  - profile
+  - profile\
     A symlink to the current *user environment* of a user, e.g.,
     `/nix/var/nix/profiles/default`.

-  - NAR
+  - NAR\
     A *N*ix *AR*chive. This is a serialisation of a path in the Nix
     store. It can contain regular files, directories and symbolic
     links. NARs are generated and unpacked using `nix-store --dump`
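As a concrete sketch of the *reference* and *closure* relations defined in the glossary above, assuming Nixpkgs and its `writeShellScriptBin` helper; all names are illustrative:

```nix
# The wrapper's output contains the store path of pkgs.hello, so
# pkgs.hello is a *reference* of the wrapper, and the wrapper's
# *closure* includes pkgs.hello plus everything it references.
{ pkgs ? import <nixpkgs> { } }:
pkgs.writeShellScriptBin "hello-wrapper" ''
  exec ${pkgs.hello}/bin/hello "$@"
''
```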
@@ -1,18 +1,26 @@

 # Installing a Binary Distribution

-If you are using Linux or macOS versions up to 10.14 (Mojave), the
-easiest way to install Nix is to run the following command:
+The easiest way to install Nix is to run the following command:

 ```console
 $ sh <(curl -L https://nixos.org/nix/install)
 ```

-If you're using macOS 10.15 (Catalina) or newer, consult [the macOS
-installation instructions](#macos-installation) before installing.
-
-As of Nix 2.1.0, the Nix installer will always default to creating a
-single-user installation, however opting in to the multi-user
-installation is highly recommended.
+This will run the installer interactively (causing it to explain what
+it is doing more explicitly), and perform the default "type" of install
+for your platform:
+- single-user on Linux
+- multi-user on macOS
+
+> **Notes on read-only filesystem root in macOS 10.15 Catalina +**
+>
+> - It took some time to support this cleanly. You may see posts,
+>   examples, and tutorials using obsolete workarounds.
+> - Supporting it cleanly made macOS installs too complex to qualify
+>   as single-user, so this type is no longer supported on macOS.
+
+We recommend the multi-user install if it supports your platform and
+you can authenticate with `sudo`.

 # Single User Installation

@@ -50,9 +58,9 @@ $ rm -rf /nix

 The multi-user Nix installation creates system users, and a system
 service for the Nix daemon.

-  - Linux running systemd, with SELinux disabled
+**Supported Systems**
+- Linux running systemd, with SELinux disabled
 - macOS

 You can instruct the installer to perform a multi-user installation on
 your system:

@@ -96,165 +104,28 @@ sudo rm /Library/LaunchDaemons/org.nixos.nix-daemon.plist

 There may also be references to Nix in `/etc/profile`, `/etc/bashrc`,
 and `/etc/zshrc` which you may remove.

-# macOS Installation
+# macOS Installation <a name="sect-macos-installation-change-store-prefix"></a><a name="sect-macos-installation-encrypted-volume"></a><a name="sect-macos-installation-symlink"></a><a name="sect-macos-installation-recommended-notes"></a>
+<!-- Note: anchors above to catch permalinks to old explanations -->

-Starting with macOS 10.15 (Catalina), the root filesystem is read-only.
-This means `/nix` can no longer live on your system volume, and that
-you'll need a workaround to install Nix.
+We believe we have ironed out how to cleanly support the read-only root
+on modern macOS. New installs will do this automatically, and you can
+also re-run a new installer to convert your existing setup.

-The recommended approach, which creates an unencrypted APFS volume for
-your Nix store and a "synthetic" empty directory to mount it over at
-`/nix`, is least likely to impair Nix or your system.
+This section previously detailed the situation, options, and trade-offs,
+but it now only outlines what the installer does. You don't need to know
+this to run the installer, but it may help if you run into trouble:

-> **Note**
->
-> With all separate-volume approaches, it's possible something on your
-> system (particularly daemons/services and restored apps) may need
-> access to your Nix store before the volume is mounted. Adding
-> additional encryption makes this more likely.
-
-If you're using a recent Mac with a [T2
-chip](https://www.apple.com/euro/mac/shared/docs/Apple_T2_Security_Chip_Overview.pdf),
-your drive will still be encrypted at rest (in which case "unencrypted"
-is a bit of a misnomer). To use this approach, just install Nix with:
-
-```console
-$ sh <(curl -L https://nixos.org/nix/install) --darwin-use-unencrypted-nix-store-volume
-```
-
-If you don't like the sound of this, you'll want to weigh the other
-approaches and tradeoffs detailed in this section.
-
-> **Note**
->
-> All of the known workarounds have drawbacks, but we hope better
-> solutions will be available in the future. Some that we have our eye
-> on are:
->
-> 1. A true firmlink would enable the Nix store to live on the primary
->    data volume without the build problems caused by the symlink
->    approach. End users cannot currently create true firmlinks.
->
-> 2. If the Nix store volume shared FileVault encryption with the
->    primary data volume (probably by using the same volume group and
->    role), FileVault encryption could be easily supported by the
->    installer without requiring manual setup by each user.
-
-## Change the Nix store path prefix
-
-Changing the default prefix for the Nix store is a simple approach which
-enables you to leave it on your root volume, where it can take full
-advantage of FileVault encryption if enabled. Unfortunately, this
-approach also opts your device out of some benefits that are enabled by
-using the same prefix across systems:
-
-- Your system won't be able to take advantage of the binary cache
-  (unless someone is able to stand up and support duplicate caching
-  infrastructure), which means you'll spend more time waiting for
-  builds.
-
-- It's harder to build and deploy packages to Linux systems.
-
-It would also possible (and often requested) to just apply this change
-ecosystem-wide, but it's an intrusive process that has side effects we
-want to avoid for now.
-
-## Use a separate encrypted volume
-
-If you like, you can also add encryption to the recommended approach
-taken by the installer. You can do this by pre-creating an encrypted
-volume before you run the installer--or you can run the installer and
-encrypt the volume it creates later.
-
-In either case, adding encryption to a second volume isn't quite as
-simple as enabling FileVault for your boot volume. Before you dive in,
-there are a few things to weigh:
-
-1. The additional volume won't be encrypted with your existing
-   FileVault key, so you'll need another mechanism to decrypt the
-   volume.
-
-2. You can store the password in Keychain to automatically decrypt the
-   volume on boot--but it'll have to wait on Keychain and may not mount
-   before your GUI apps restore. If any of your launchd agents or apps
-   depend on Nix-installed software (for example, if you use a
-   Nix-installed login shell), the restore may fail or break.
-
-   On a case-by-case basis, you may be able to work around this problem
-   by using `wait4path` to block execution until your executable is
-   available.
-
-   It's also possible to decrypt and mount the volume earlier with a
-   login hook--but this mechanism appears to be deprecated and its
-   future is unclear.
-
-3. You can hard-code the password in the clear, so that your store
-   volume can be decrypted before Keychain is available.
-
-If you are comfortable navigating these tradeoffs, you can encrypt the
-volume with something along the lines of:
-
-```console
-$ diskutil apfs enableFileVault /nix -user disk
-```
-
-## Symlink the Nix store to a custom location
-
-Another simple approach is using `/etc/synthetic.conf` to symlink the
-Nix store to the data volume. This option also enables your store to
-share any configured FileVault encryption. Unfortunately, builds that
-resolve the symlink may leak the canonical path or even fail.
-
-Because of these downsides, we can't recommend this approach.
-
-## Notes on the recommended approach
-
-This section goes into a little more detail on the recommended approach.
-You don't need to understand it to run the installer, but it can serve
-as a helpful reference if you run into trouble.
-
-1. In order to compose user-writable locations into the new read-only
-   system root, Apple introduced a new concept called `firmlinks`,
-   which it describes as a "bi-directional wormhole" between two
-   filesystems. You can see the current firmlinks in
-   `/usr/share/firmlinks`. Unfortunately, firmlinks aren't (currently?)
-   user-configurable.
-
-   For special cases like NFS mount points or package manager roots,
-   [synthetic.conf(5)](https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man5/synthetic.conf.5.html)
-   supports limited user-controlled file-creation (of symlinks, and
-   synthetic empty directories) at `/`. To create a synthetic empty
-   directory for mounting at `/nix`, add the following line to
-   `/etc/synthetic.conf` (create it if necessary):
-
-       nix
-
-2. This configuration is applied at boot time, but you can use
-   `apfs.util` to trigger creation (not deletion) of new entries
-   without a reboot:
-
-   ```console
-   $ /System/Library/Filesystems/apfs.fs/Contents/Resources/apfs.util -B
-   ```
-
-3. Create the new APFS volume with diskutil:
-
-   ```console
-   $ sudo diskutil apfs addVolume diskX APFS 'Nix Store' -mountpoint /nix
-   ```
-
-4. Using `vifs`, add the new mount to `/etc/fstab`. If it doesn't
-   already have other entries, it should look something like:
-
-       #
-       # Warning - this file should only be modified with vifs(8)
-       #
-       # Failure to do so is unsupported and may be destructive.
-       #
-       LABEL=Nix\040Store /nix apfs rw,nobrowse
-
-   The nobrowse setting will keep Spotlight from indexing this volume,
-   and keep it from showing up on your desktop.
+- create a new APFS volume for your Nix store
+- update `/etc/synthetic.conf` to direct macOS to create a "synthetic"
+  empty root directory to mount your volume
+- specify mount options for the volume in `/etc/fstab`
+- if you have FileVault enabled
+    - generate an encryption password
+    - put it in your system Keychain
+    - use it to encrypt the volume
+- create a system LaunchDaemon to mount this volume early enough in the
+  boot process to avoid problems loading or restoring any programs that
+  need access to your Nix store

 # Installing a pinned Nix version from a URL
@@ -7,17 +7,17 @@ cache mechanism that Nix usually uses to fetch prebuilt binaries from

 The following options can be specified as URL parameters to the S3 URL:

-  - `profile`
+  - `profile`\
     The name of the AWS configuration profile to use. By default Nix
     will use the `default` profile.

-  - `region`
+  - `region`\
     The region of the S3 bucket. `us-east-1` by default.

     If your bucket is not in `us-east-1`, you should always explicitly
     specify the region parameter.

-  - `endpoint`
+  - `endpoint`\
     The URL to your S3-compatible service, for when not using Amazon S3.
     Do not specify this value if you're using Amazon S3.

@@ -26,7 +26,7 @@ The following options can be specified as URL parameters to the S3 URL:

 > This endpoint must support HTTPS and will use path-based
 > addressing instead of virtual host based addressing.

-  - `scheme`
+  - `scheme`\
     The scheme used for S3 requests, `https` (default) or `http`. This
     option allows you to disable HTTPS for binary caches which don't
     support it.
26 flake.lock

@@ -1,22 +1,40 @@

 {
   "nodes": {
+    "lowdown-src": {
+      "flake": false,
+      "locked": {
+        "lastModified": 1617481909,
+        "narHash": "sha256-SqnfOFuLuVRRNeVJr1yeEPJue/qWoCp5N6o5Kr///p4=",
+        "owner": "kristapsdz",
+        "repo": "lowdown",
+        "rev": "148f9b2f586c41b7e36e73009db43ea68c7a1a4d",
+        "type": "github"
+      },
+      "original": {
+        "owner": "kristapsdz",
+        "ref": "VERSION_0_8_4",
+        "repo": "lowdown",
+        "type": "github"
+      }
+    },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1614309161,
-        "narHash": "sha256-93kRxDPyEW9QIpxU71kCaV1r+hgOgP6/aVgC7vvO8IU=",
+        "lastModified": 1622593737,
+        "narHash": "sha256-9loxFJg85AbzJrSkU4pE/divZ1+zOxDy2FSjlrufCB8=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "0e499fde7af3c28d63e9b13636716b86c3162b93",
+        "rev": "bb8a5e54845012ed1375ffd5f317d2fdf434b20e",
         "type": "github"
       },
       "original": {
         "id": "nixpkgs",
-        "ref": "nixos-20.09-small",
+        "ref": "nixos-21.05-small",
         "type": "indirect"
       }
     },
     "root": {
       "inputs": {
+        "lowdown-src": "lowdown-src",
         "nixpkgs": "nixpkgs"
       }
     }
77 flake.nix

@@ -1,10 +1,10 @@

 {
   description = "The purely functional package manager";

-  inputs.nixpkgs.url = "nixpkgs/nixos-20.09-small";
-  #inputs.lowdown-src = { url = "github:kristapsdz/lowdown"; flake = false; };
+  inputs.nixpkgs.url = "nixpkgs/nixos-21.05-small";
+  inputs.lowdown-src = { url = "github:kristapsdz/lowdown/VERSION_0_8_4"; flake = false; };

-  outputs = { self, nixpkgs }:
+  outputs = { self, nixpkgs, lowdown-src }:

   let

@@ -18,7 +18,7 @@

     linux64BitSystems = [ "x86_64-linux" "aarch64-linux" ];
     linuxSystems = linux64BitSystems ++ [ "i686-linux" ];
-    systems = linuxSystems ++ [ "x86_64-darwin" ];
+    systems = linuxSystems ++ [ "x86_64-darwin" "aarch64-darwin" ];

     crossSystems = [ "armv6l-linux" "armv7l-linux" ];

@@ -80,11 +80,12 @@

           buildPackages.git
           buildPackages.mercurial
           buildPackages.jq
-        ] ++ lib.optional stdenv.hostPlatform.isLinux buildPackages.utillinuxMinimal;
+        ]
+        ++ lib.optionals stdenv.hostPlatform.isLinux [(buildPackages.util-linuxMinimal or buildPackages.utillinuxMinimal)];

       buildDeps =
         [ curl
-          bzip2 xz brotli zlib editline
+          bzip2 xz brotli editline
           openssl sqlite
           libarchive
           boost

@@ -92,7 +93,7 @@

           lowdown
           gmock
         ]
-        ++ lib.optional stdenv.isLinux libseccomp
+        ++ lib.optionals stdenv.isLinux [libseccomp]
         ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium
         ++ lib.optional stdenv.hostPlatform.isx86_64 libcpuid;
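The hunks above replace `lib.optional` with `lib.optionals` in a few places. A small sketch of the difference between the two Nixpkgs helpers, with illustrative values:

```nix
# lib.optional wraps a single element in a list when the condition holds;
# lib.optionals passes a whole list through (or drops it).
let
  lib = (import <nixpkgs> { }).lib;
in {
  one  = lib.optional true "libseccomp";    # [ "libseccomp" ]
  many = lib.optionals true [ "a" "b" ];    # [ "a" "b" ]
  none = lib.optionals false [ "a" "b" ];   # [ ]
}
```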
|
@ -146,12 +147,46 @@
|
||||||
echo "file installer $out/install" >> $out/nix-support/hydra-build-products
|
echo "file installer $out/install" >> $out/nix-support/hydra-build-products
|
||||||
'';
|
'';
|
||||||
|
|
||||||
|
testNixVersions = pkgs: client: daemon: with commonDeps pkgs; pkgs.stdenv.mkDerivation {
|
||||||
|
NIX_DAEMON_PACKAGE = daemon;
|
||||||
|
NIX_CLIENT_PACKAGE = client;
|
||||||
|
# Must keep this name short as OSX has a rather strict limit on the
|
||||||
|
# socket path length, and this name appears in the path of the
|
||||||
|
# nix-daemon socket used in the tests
|
||||||
|
name = "nix-tests";
|
||||||
|
inherit version;
|
||||||
|
|
||||||
|
src = self;
|
||||||
|
|
||||||
|
VERSION_SUFFIX = versionSuffix;
|
||||||
|
|
||||||
|
nativeBuildInputs = nativeBuildDeps;
|
||||||
|
buildInputs = buildDeps ++ awsDeps;
|
||||||
|
propagatedBuildInputs = propagatedDeps;
|
||||||
|
|
||||||
|
enableParallelBuilding = true;
|
||||||
|
|
||||||
|
dontBuild = true;
|
||||||
|
doInstallCheck = true;
|
||||||
|
|
||||||
|
installPhase = ''
|
||||||
|
mkdir -p $out
|
||||||
|
'';
|
||||||
|
installCheckPhase = "make installcheck";
|
||||||
|
|
||||||
|
};
|
||||||
|
|
||||||
in {
|
in {
|
||||||
|
|
||||||
# A Nixpkgs overlay that overrides the 'nix' and
|
# A Nixpkgs overlay that overrides the 'nix' and
|
||||||
# 'nix.perl-bindings' packages.
|
# 'nix.perl-bindings' packages.
|
||||||
overlay = final: prev: {
|
overlay = final: prev: {
|
||||||
|
|
||||||
|
# An older version of Nix to test against when using the daemon.
|
||||||
|
# Currently using `nixUnstable` as the stable one doesn't respect
|
||||||
|
# `NIX_DAEMON_SOCKET_PATH` which is needed for the tests.
|
||||||
|
nixStable = prev.nix;
|
||||||
|
|
||||||
nix = with final; with commonDeps pkgs; stdenv.mkDerivation {
|
nix = with final; with commonDeps pkgs; stdenv.mkDerivation {
|
||||||
name = "nix-${version}";
|
name = "nix-${version}";
|
||||||
inherit version;
|
inherit version;
|
||||||
|
@ -201,6 +236,8 @@
|
||||||
|
|
||||||
separateDebugInfo = true;
|
separateDebugInfo = true;
|
||||||
|
|
||||||
|
strictDeps = true;
|
||||||
|
|
||||||
passthru.perl-bindings = with final; stdenv.mkDerivation {
|
passthru.perl-bindings = with final; stdenv.mkDerivation {
|
||||||
name = "nix-perl-${version}";
|
name = "nix-perl-${version}";
|
||||||
|
|
||||||
|
@ -221,7 +258,8 @@
|
||||||
boost
|
boost
|
||||||
nlohmann_json
|
nlohmann_json
|
||||||
]
|
]
|
||||||
++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium;
|
++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium
|
||||||
|
++ lib.optional stdenv.isDarwin darwin.apple_sdk.frameworks.Security;
|
||||||
|
|
||||||
configureFlags = ''
|
configureFlags = ''
|
||||||
--with-dbi=${perlPackages.DBI}/${pkgs.perl.libPrefix}
|
--with-dbi=${perlPackages.DBI}/${pkgs.perl.libPrefix}
|
||||||
|
@ -236,21 +274,23 @@
|
||||||
};
|
};
|
||||||
|
|
||||||
lowdown = with final; stdenv.mkDerivation rec {
|
lowdown = with final; stdenv.mkDerivation rec {
|
||||||
name = "lowdown-0.8.0";
|
name = "lowdown-0.8.4";
|
||||||
|
|
||||||
|
/*
|
||||||
src = fetchurl {
|
src = fetchurl {
|
||||||
url = "https://kristaps.bsd.lv/lowdown/snapshots/${name}.tar.gz";
|
url = "https://kristaps.bsd.lv/lowdown/snapshots/${name}.tar.gz";
|
||||||
hash = "sha512-U9WeGoInT9vrawwa57t6u9dEdRge4/P+0wLxmQyOL9nhzOEUU2FRz2Be9H0dCjYE7p2v3vCXIYk40M+jjULATw==";
|
hash = "sha512-U9WeGoInT9vrawwa57t6u9dEdRge4/P+0wLxmQyOL9nhzOEUU2FRz2Be9H0dCjYE7p2v3vCXIYk40M+jjULATw==";
|
||||||
};
|
};
|
||||||
|
*/
|
||||||
|
|
||||||
#src = lowdown-src;
|
src = lowdown-src;
|
||||||
|
|
||||||
outputs = [ "out" "bin" "dev" ];
|
outputs = [ "out" "bin" "dev" ];
|
||||||
|
|
||||||
nativeBuildInputs = [ buildPackages.which ];
|
nativeBuildInputs = [ buildPackages.which ];
|
||||||
|
|
||||||
configurePhase =
|
configurePhase = ''
|
||||||
''
|
${if (stdenv.isDarwin && stdenv.isAarch64) then "echo \"HAVE_SANDBOX_INIT=false\" > configure.local" else ""}
|
||||||
./configure \
|
./configure \
|
||||||
PREFIX=${placeholder "dev"} \
|
PREFIX=${placeholder "dev"} \
|
||||||
BINDIR=${placeholder "bin"}/bin
|
BINDIR=${placeholder "bin"}/bin
|
||||||
|
@ -353,7 +393,7 @@
|
||||||
# to https://nixos.org/nix/install. It downloads the binary
|
# to https://nixos.org/nix/install. It downloads the binary
|
||||||
# tarball for the user's system and calls the second half of the
|
# tarball for the user's system and calls the second half of the
|
||||||
# installation script.
|
# installation script.
|
||||||
installerScript = installScriptFor [ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" ];
|
installerScript = installScriptFor [ "x86_64-linux" "i686-linux" "aarch64-linux" "x86_64-darwin" "aarch64-darwin" ];
|
||||||
installerScriptForGHA = installScriptFor [ "x86_64-linux" "x86_64-darwin" ];
|
installerScriptForGHA = installScriptFor [ "x86_64-linux" "x86_64-darwin" ];
|
||||||
|
|
||||||
# Line coverage analysis.
|
# Line coverage analysis.
|
||||||
|
@ -439,6 +479,15 @@
|
||||||
checks = forAllSystems (system: {
|
checks = forAllSystems (system: {
|
||||||
binaryTarball = self.hydraJobs.binaryTarball.${system};
|
binaryTarball = self.hydraJobs.binaryTarball.${system};
|
||||||
perlBindings = self.hydraJobs.perlBindings.${system};
|
perlBindings = self.hydraJobs.perlBindings.${system};
|
||||||
|
installTests =
|
||||||
|
let pkgs = nixpkgsFor.${system}; in
|
||||||
|
pkgs.runCommand "install-tests" {
|
||||||
|
againstSelf = testNixVersions pkgs pkgs.nix pkgs.pkgs.nix;
|
||||||
|
againstCurrentUnstable = testNixVersions pkgs pkgs.nix pkgs.nixUnstable;
|
||||||
|
# Disabled because the latest stable version doesn't handle
|
||||||
|
# `NIX_DAEMON_SOCKET_PATH` which is required for the tests to work
|
||||||
|
# againstLatestStable = testNixVersions pkgs pkgs.nix pkgs.nixStable;
|
||||||
|
} "touch $out";
|
||||||
});
|
});
|
||||||
|
|
||||||
packages = forAllSystems (system: {
|
packages = forAllSystems (system: {
|
||||||
|
@ -479,6 +528,8 @@
|
||||||
installCheckFlags = "sysconfdir=$(out)/etc";
|
installCheckFlags = "sysconfdir=$(out)/etc";
|
||||||
|
|
||||||
stripAllList = ["bin"];
|
stripAllList = ["bin"];
|
||||||
|
|
||||||
|
strictDeps = true;
|
||||||
};
|
};
|
||||||
} // builtins.listToAttrs (map (crossSystem: {
|
} // builtins.listToAttrs (map (crossSystem: {
|
||||||
name = "nix-${crossSystem}";
|
name = "nix-${crossSystem}";
|
||||||
|
|
|
@@ -133,20 +133,8 @@ for my $fn (glob "$tmpDir/*") {

 exit if $version =~ /pre/;

-# Update Nixpkgs in a very hacky way.
+# Update nix-fallback-paths.nix.
 system("cd $nixpkgsDir && git pull") == 0 or die;
-my $oldName = `nix-instantiate --eval $nixpkgsDir -A nix.name`; chomp $oldName;
-my $oldHash = `nix-instantiate --eval $nixpkgsDir -A nix.src.outputHash`; chomp $oldHash;
-print STDERR "old stable version in Nixpkgs = $oldName / $oldHash\n";
-
-my $fn = "$nixpkgsDir/pkgs/tools/package-management/nix/default.nix";
-my $oldFile = read_file($fn);
-$oldFile =~ s/$oldName/"$releaseName"/g;
-$oldFile =~ s/$oldHash/"$tarballHash"/g;
-write_file($fn, $oldFile);
-
-$oldName =~ s/nix-//g;
-$oldName =~ s/"//g;

 sub getStorePath {
     my ($jobName) = @_;

@@ -167,7 +155,7 @@ write_file("$nixpkgsDir/nixos/modules/installer/tools/nix-fallback-paths.nix",

     " x86_64-darwin = \"" . getStorePath("build.x86_64-darwin") . "\";\n" .
     "}\n");

-system("cd $nixpkgsDir && git commit -a -m 'nix: $oldName -> $version'") == 0 or die;
+system("cd $nixpkgsDir && git commit -a -m 'nix-fallback-paths.nix: Update to $version'") == 0 or die;

 # Update the "latest" symlink.
 $channelsBucket->add_key(
@@ -19,7 +19,7 @@

   <array>
     <string>/bin/sh</string>
     <string>-c</string>
-    <string>/bin/wait4path /nix/var/nix/profiles/default/bin/nix-daemon && /nix/var/nix/profiles/default/bin/nix-daemon</string>
+    <string>/bin/wait4path /nix/var/nix/profiles/default/bin/nix-daemon && exec /nix/var/nix/profiles/default/bin/nix-daemon</string>
   </array>
   <key>StandardErrorPath</key>
   <string>/var/log/nix-daemon.log</string>
@@ -1,3 +1,5 @@

+#compdef nix
+
 function _nix() {
   local ifs_bk="$IFS"
   local input=("${(Q)words[@]}")

@@ -18,4 +20,4 @@ function _nix() {

   _describe 'nix' suggestions
 }

-compdef _nix nix
+_nix "$@"

1 misc/zsh/local.mk (new file)

@@ -0,0 +1 @@

+$(eval $(call install-file-as, $(d)/completion.zsh, $(datarootdir)/zsh/site-functions/_nix, 0644))
@@ -8,8 +8,13 @@ endif

 libnixrust_PATH := $(d)/target/$(RUST_DIR)/libnixrust.$(SO_EXT)
 libnixrust_INSTALL_PATH := $(libdir)/libnixrust.$(SO_EXT)
-libnixrust_LDFLAGS_USE := -L$(d)/target/$(RUST_DIR) -lnixrust -ldl
-libnixrust_LDFLAGS_USE_INSTALLED := -L$(libdir) -lnixrust -ldl
+libnixrust_LDFLAGS_USE := -L$(d)/target/$(RUST_DIR) -lnixrust
+libnixrust_LDFLAGS_USE_INSTALLED := -L$(libdir) -lnixrust
+
+ifeq ($(OS), Linux)
+  libnixrust_LDFLAGS_USE += -ldl
+  libnixrust_LDFLAGS_USE_INSTALLED += -ldl
+endif

 ifeq ($(OS), Darwin)
   libnixrust_BUILD_FLAGS = NIX_LDFLAGS="-undefined dynamic_lookup"
46 scripts/bigsur-nixbld-user-migration.sh (new executable file)

@@ -0,0 +1,46 @@

+#!/usr/bin/env bash
+
+((NEW_NIX_FIRST_BUILD_UID=301))
+
+id_available(){
+    dscl . list /Users UniqueID | grep -E '\b'$1'\b' >/dev/null
+}
+
+change_nixbld_names_and_ids(){
+    local name uid next_id
+    ((next_id=NEW_NIX_FIRST_BUILD_UID))
+    echo "Attempting to migrate nixbld users."
+    echo "Each user should change from nixbld# to _nixbld#"
+    echo "and their IDs relocated to $next_id+"
+    while read -r name uid; do
+        echo "  Checking $name (uid: $uid)"
+        # iterate for a clean ID
+        while id_available "$next_id"; do
+            ((next_id++))
+            if ((next_id >= 400)); then
+                echo "We've hit UID 400 without placing all of your users :("
+                echo "You should use the commands in this script as a starting"
+                echo "point to review your UID-space and manually move the"
+                echo "remaining users (or delete them, if you don't need them)."
+                exit 1
+            fi
+        done
+
+        if [[ $name == _* ]]; then
+            echo "  It looks like $name has already been renamed--skipping."
+        else
+            # first 3 are cleanup, it's OK if they aren't here
+            sudo dscl . delete /Users/$name dsAttrTypeNative:_writers_passwd &>/dev/null || true
+            sudo dscl . change /Users/$name NFSHomeDirectory "/private/var/empty 1" "/var/empty" &>/dev/null || true
+            # remove existing user from group
+            sudo dseditgroup -o edit -t user -d $name nixbld || true
+            sudo dscl . change /Users/$name UniqueID $uid $next_id
+            sudo dscl . change /Users/$name RecordName $name _$name
+            # add renamed user to group
+            sudo dseditgroup -o edit -t user -a _$name nixbld
+            echo "  $name migrated to _$name (uid: $next_id)"
+        fi
+    done < <(dscl . list /Users UniqueID | grep nixbld | sort -n -k2)
+}
+
+change_nixbld_names_and_ids
@ -1,33 +1,262 @@
|
||||||
#!/bin/sh
|
#!/usr/bin/env bash
|
||||||
set -e
|
set -eu
|
||||||
|
set -o pipefail
|
||||||
|
|
||||||
root_disk() {
|
# I'm a little agnostic on the choices, but supporting a wide
|
||||||
diskutil info -plist /
|
# slate of uses for now, including:
|
||||||
}
|
# - import-only: `. create-darwin-volume.sh no-main[ ...]`
|
||||||
|
# - legacy: `./create-darwin-volume.sh` or `. create-darwin-volume.sh`
|
||||||
|
# (both will run main())
|
||||||
|
# - external alt-routine: `./create-darwin-volume.sh no-main func[ ...]`
|
||||||
|
if [ "${1-}" = "no-main" ]; then
|
||||||
|
shift
|
||||||
|
readonly _CREATE_VOLUME_NO_MAIN=1
|
||||||
|
else
|
||||||
|
readonly _CREATE_VOLUME_NO_MAIN=0
|
||||||
|
# declare some things we expect to inherit from install-multi-user
|
||||||
|
# I don't love this (because it's a bit of a kludge).
|
||||||
|
#
|
||||||
|
# CAUTION: (Dec 19 2020)
|
||||||
|
# This is a stopgap. It doesn't cover the full slate of
|
||||||
|
# identifiers we inherit--just those necessary to:
|
||||||
|
# - avoid breaking direct invocations of this script (here/now)
|
||||||
|
# - avoid hard-to-reverse structural changes before the call to rm
|
||||||
|
# single-user support is verified
|
||||||
|
#
|
||||||
|
# In the near-mid term, I (personally) think we should:
|
||||||
|
# - decide to deprecate the direct call and add a notice
|
||||||
|
# - fold all of this into install-darwin-multi-user.sh
|
||||||
|
# - intentionally remove the old direct-invocation form (kill the
|
||||||
|
# routine, replace this script w/ deprecation notice and a note
|
||||||
|
# on the remove-after date)
|
||||||
|
#
|
||||||
|
readonly NIX_ROOT="${NIX_ROOT:-/nix}"
|
||||||
|
|
||||||
# i.e., "disk1"
|
_sudo() {
|
||||||
|
shift # throw away the 'explanation'
|
||||||
|
/usr/bin/sudo "$@"
|
||||||
|
}
|
||||||
|
failure() {
|
||||||
|
if [ "$*" = "" ]; then
|
||||||
|
cat
|
||||||
|
else
|
||||||
|
echo "$@"
|
||||||
|
fi
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
task() {
|
||||||
|
echo "$@"
|
||||||
|
}
|
||||||
|
fi
|
||||||
|
|
||||||
|
# usually "disk1"
|
||||||
root_disk_identifier() {
|
root_disk_identifier() {
|
||||||
diskutil info -plist / | xmllint --xpath "/plist/dict/key[text()='ParentWholeDisk']/following-sibling::string[1]/text()" -
|
# For performance (~10ms vs 280ms) I'm parsing 'diskX' from stat output
|
||||||
|
# (~diskXsY)--but I'm retaining the more-semantic approach since
|
||||||
|
# it documents intent better.
|
||||||
|
# /usr/sbin/diskutil info -plist / | xmllint --xpath "/plist/dict/key[text()='ParentWholeDisk']/following-sibling::string[1]/text()" -
|
||||||
|
#
|
||||||
|
local special_device
|
||||||
|
special_device="$(/usr/bin/stat -f "%Sd" /)"
|
||||||
|
echo "${special_device%s[0-9]*}"
|
||||||
}
|
}
|
||||||
|
|
||||||
find_nix_volume() {
|
# make it easy to play w/ 'Case-sensitive APFS'
|
||||||
diskutil apfs list -plist "$1" | xmllint --xpath "(/plist/dict/array/dict/key[text()='Volumes']/following-sibling::array/dict/key[text()='Name']/following-sibling::string[starts-with(translate(text(),'N','n'),'nix')]/text())[1]" - 2>/dev/null || true
|
readonly NIX_VOLUME_FS="${NIX_VOLUME_FS:-APFS}"
|
||||||
|
readonly NIX_VOLUME_LABEL="${NIX_VOLUME_LABEL:-Nix Store}"
|
||||||
|
# Strongly assuming we'll make a volume on the device / is on
|
||||||
|
# But you can override NIX_VOLUME_USE_DISK to create it on some other device
|
||||||
|
readonly NIX_VOLUME_USE_DISK="${NIX_VOLUME_USE_DISK:-$(root_disk_identifier)}"
|
||||||
|
NIX_VOLUME_USE_SPECIAL="${NIX_VOLUME_USE_SPECIAL:-}"
|
||||||
|
NIX_VOLUME_USE_UUID="${NIX_VOLUME_USE_UUID:-}"
|
||||||
|
readonly NIX_VOLUME_MOUNTD_DEST="${NIX_VOLUME_MOUNTD_DEST:-/Library/LaunchDaemons/org.nixos.darwin-store.plist}"
|
||||||
|
|
||||||
|
if /usr/bin/fdesetup isactive >/dev/null; then
|
||||||
|
test_filevault_in_use() { return 0; }
|
||||||
|
# no readonly; we may modify if user refuses from cure_volume
|
||||||
|
NIX_VOLUME_DO_ENCRYPT="${NIX_VOLUME_DO_ENCRYPT:-1}"
|
||||||
|
else
|
||||||
|
test_filevault_in_use() { return 1; }
|
||||||
|
NIX_VOLUME_DO_ENCRYPT="${NIX_VOLUME_DO_ENCRYPT:-0}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
should_encrypt_volume() {
|
||||||
|
test_filevault_in_use && (( NIX_VOLUME_DO_ENCRYPT == 1 ))
|
||||||
|
}
|
||||||
|
|
||||||
|
substep() {
|
||||||
|
printf " %s\n" "" "- $1" "" "${@:2}"
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
volumes_labeled() {
|
||||||
|
local label="$1"
|
||||||
|
xsltproc --novalid --stringparam label "$label" - <(/usr/sbin/ioreg -ra -c "AppleAPFSVolume") <<'EOF'
|
||||||
|
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
|
||||||
|
<xsl:output method="text"/>
|
||||||
|
<xsl:template match="/">
|
||||||
|
<xsl:apply-templates select="/plist/array/dict/key[text()='IORegistryEntryName']/following-sibling::*[1][text()=$label]/.."/>
|
||||||
|
</xsl:template>
|
||||||
|
<xsl:template match="dict">
|
||||||
|
<xsl:apply-templates match="string" select="key[text()='BSD Name']/following-sibling::*[1]"/>
|
||||||
|
<xsl:text>=</xsl:text>
|
||||||
|
<xsl:apply-templates match="string" select="key[text()='UUID']/following-sibling::*[1]"/>
|
||||||
|
<xsl:text>
</xsl:text>
|
||||||
|
</xsl:template>
|
||||||
|
</xsl:stylesheet>
|
||||||
|
EOF
|
||||||
|
# I cut label out of the extracted values, but here it is for reference:
|
||||||
|
# <xsl:apply-templates match="string" select="key[text()='IORegistryEntryName']/following-sibling::*[1]"/>
|
||||||
|
# <xsl:text>=</xsl:text>
|
||||||
|
}
|
||||||
|
|
||||||
|
right_disk() {
|
||||||
|
local volume_special="$1" # (i.e., disk1s7)
|
||||||
|
[[ "$volume_special" == "$NIX_VOLUME_USE_DISK"s* ]]
|
||||||
|
}
|
||||||
|
|
||||||
|
right_volume() {
|
||||||
|
local volume_special="$1" # (i.e., disk1s7)
|
||||||
|
# if set, it must match; otherwise ensure it's on the right disk
|
||||||
|
if [ -z "$NIX_VOLUME_USE_SPECIAL" ]; then
|
||||||
|
if right_disk "$volume_special"; then
|
||||||
|
NIX_VOLUME_USE_SPECIAL="$volume_special" # latch on
|
||||||
|
return 0
|
||||||
|
else
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
[ "$volume_special" = "$NIX_VOLUME_USE_SPECIAL" ]
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
right_uuid() {
|
||||||
|
local volume_uuid="$1"
|
||||||
|
# if set, it must match; otherwise allow
|
||||||
|
if [ -z "$NIX_VOLUME_USE_UUID" ]; then
|
||||||
|
NIX_VOLUME_USE_UUID="$volume_uuid" # latch on
|
||||||
|
return 0
|
||||||
|
else
|
||||||
|
[ "$volume_uuid" = "$NIX_VOLUME_USE_UUID" ]
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
cure_volumes() {
|
||||||
|
local found volume special uuid
|
||||||
|
# loop just in case they have more than one volume
|
||||||
|
# (nothing stops you from doing this)
|
||||||
|
for volume in $(volumes_labeled "$NIX_VOLUME_LABEL"); do
|
||||||
|
# CAUTION: this could (maybe) be a more normal read
|
||||||
|
# loop like:
|
||||||
|
# while IFS== read -r special uuid; do
|
||||||
|
# # ...
|
||||||
|
# done <<<"$(volumes_labeled "$NIX_VOLUME_LABEL")"
|
||||||
|
#
|
||||||
|
# I did it with for to skirt a problem with the obvious
|
||||||
|
# pattern replacing stdin and causing user prompts
|
||||||
|
# inside (which also use read and access stdin) to skip
|
||||||
|
#
|
||||||
|
# If there's an existing encrypted volume we can't find
|
||||||
|
# in keychain, the user never gets prompted to delete
|
||||||
|
# the volume, and the install fails.
|
||||||
|
#
|
||||||
|
# If you change this, a human needs to test a very
|
||||||
|
# specific scenario: you already have an encrypted
|
||||||
|
# Nix Store volume, and have deleted its credential
|
||||||
|
# from keychain. Ensure the script asks you if it can
|
||||||
|
# delete the volume, and then prompts for your sudo
|
||||||
|
# password to confirm.
|
||||||
|
#
|
||||||
|
# shellcheck disable=SC1097
|
||||||
|
IFS== read -r special uuid <<< "$volume"
|
||||||
|
# take the first one that's on the right disk
|
||||||
|
if [ -z "${found:-}" ]; then
|
||||||
|
if right_volume "$special" && right_uuid "$uuid"; then
|
||||||
|
cure_volume "$special" "$uuid"
|
||||||
|
found="${special} (${uuid})"
|
||||||
|
else
|
||||||
|
warning <<EOF
|
||||||
|
Ignoring ${special} (${uuid}) because I am looking for:
|
||||||
|
disk=${NIX_VOLUME_USE_DISK} special=${NIX_VOLUME_USE_SPECIAL:-${NIX_VOLUME_USE_DISK}sX} uuid=${NIX_VOLUME_USE_UUID:-any}
|
||||||
|
EOF
|
||||||
|
# TODO: give chance to delete if ! headless?
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
warning <<EOF
|
||||||
|
Ignoring ${special} (${uuid}), already found target: $found
|
||||||
|
EOF
|
||||||
|
# TODO reminder? I feel like I want one
|
||||||
|
# idiom that reminds some warnings, or warns
|
||||||
|
# some reminders?
|
||||||
|
# TODO: if ! headless, chance to delete?
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
if [ -z "${found:-}" ]; then
|
||||||
|
readonly NIX_VOLUME_USE_SPECIAL NIX_VOLUME_USE_UUID
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
volume_encrypted() {
|
||||||
|
local volume_special="$1" # (i.e., disk1s7)
|
||||||
|
# Trying to match the first line of output; known first lines:
|
||||||
|
# No cryptographic users for <special>
|
||||||
|
# Cryptographic user for <special> (1 found)
|
||||||
|
# Cryptographic users for <special> (2 found)
|
||||||
|
/usr/sbin/diskutil apfs listCryptoUsers -plist "$volume_special" | /usr/bin/grep -q APFSCryptoUserUUID
|
||||||
}
|
}
|
||||||
|
|
||||||
test_fstab() {
|
test_fstab() {
|
||||||
grep -q "/nix apfs rw" /etc/fstab 2>/dev/null
|
/usr/bin/grep -q "$NIX_ROOT apfs rw" /etc/fstab 2>/dev/null
|
||||||
}
|
}
|
||||||
|
|
||||||
test_nix_symlink() {
|
test_nix_root_is_symlink() {
|
||||||
[ -L "/nix" ] || grep -q "^nix." /etc/synthetic.conf 2>/dev/null
|
[ -L "$NIX_ROOT" ]
|
||||||
}
|
}
|
||||||
|
|
||||||
test_synthetic_conf() {
|
test_synthetic_conf_either(){
|
||||||
grep -q "^nix$" /etc/synthetic.conf 2>/dev/null
|
/usr/bin/grep -qE "^${NIX_ROOT:1}($|\t.{3,}$)" /etc/synthetic.conf 2>/dev/null
|
||||||
|
}
|
||||||
|
|
||||||
|
test_synthetic_conf_mountable() {
|
||||||
|
/usr/bin/grep -q "^${NIX_ROOT:1}$" /etc/synthetic.conf 2>/dev/null
|
||||||
|
}
|
||||||
|
|
||||||
|
test_synthetic_conf_symlinked() {
|
||||||
|
/usr/bin/grep -qE "^${NIX_ROOT:1}\t.{3,}$" /etc/synthetic.conf 2>/dev/null
|
||||||
|
}
|
||||||
|
|
||||||
|
test_nix_volume_mountd_installed() {
|
||||||
|
test -e "$NIX_VOLUME_MOUNTD_DEST"
|
||||||
|
}
|
||||||
|
|
||||||
|
# current volume password
|
||||||
|
test_keychain_by_uuid() {
|
||||||
|
local volume_uuid="$1"
|
||||||
|
# Note: doesn't need sudo just to check; doesn't output pw
|
||||||
|
security find-generic-password -s "$volume_uuid" &>/dev/null
|
||||||
|
}
|
||||||
|
|
||||||
|
get_volume_pass() {
|
||||||
|
local volume_uuid="$1"
|
||||||
|
_sudo \
|
||||||
|
"to confirm keychain has a password that unlocks this volume" \
|
||||||
|
security find-generic-password -s "$volume_uuid" -w
|
||||||
|
}
|
||||||
|
|
||||||
|
verify_volume_pass() {
|
||||||
|
local volume_special="$1" # (i.e., disk1s7)
|
||||||
|
local volume_uuid="$2"
|
||||||
|
/usr/sbin/diskutil apfs unlockVolume "$volume_special" -verify -stdinpassphrase -user "$volume_uuid"
|
||||||
|
}
|
||||||
|
|
||||||
|
volume_pass_works() {
|
||||||
|
local volume_special="$1" # (i.e., disk1s7)
|
||||||
|
local volume_uuid="$2"
|
||||||
|
get_volume_pass "$volume_uuid" | verify_volume_pass "$volume_special" "$volume_uuid"
|
||||||
}
|
}
|
||||||
|
|
||||||
# Create the paths defined in synthetic.conf, saving us a reboot.
|
# Create the paths defined in synthetic.conf, saving us a reboot.
|
||||||
create_synthetic_objects(){
|
create_synthetic_objects() {
|
||||||
# Big Sur takes away the -B flag we were using and replaces it
|
# Big Sur takes away the -B flag we were using and replaces it
|
||||||
# with a -t flag that appears to do the same thing (but they
|
# with a -t flag that appears to do the same thing (but they
|
||||||
# don't behave exactly the same way in terms of return values).
|
# don't behave exactly the same way in terms of return values).
|
||||||
|
@ -41,129 +270,570 @@ create_synthetic_objects(){
|
||||||
}
|
}
|
||||||
|
|
||||||
test_nix() {
|
test_nix() {
|
||||||
test -d "/nix"
|
test -d "$NIX_ROOT"
|
||||||
}
|
}
|
||||||
|
|
||||||
test_t2_chip_present(){
|
test_voldaemon() {
|
||||||
# Use xartutil to see if system has a t2 chip.
|
test -f "$NIX_VOLUME_MOUNTD_DEST"
|
||||||
|
}
|
||||||
|
|
||||||
|
generate_mount_command() {
|
||||||
|
local cmd_type="$1" # encrypted|unencrypted
|
||||||
|
local volume_uuid mountpoint cmd=()
|
||||||
|
printf -v volume_uuid "%q" "$2"
|
||||||
|
printf -v mountpoint "%q" "$NIX_ROOT"
|
||||||
|
|
||||||
|
case "$cmd_type" in
|
||||||
|
encrypted)
|
||||||
|
cmd=(/bin/sh -c "/usr/bin/security find-generic-password -s '$volume_uuid' -w | /usr/sbin/diskutil apfs unlockVolume '$volume_uuid' -mountpoint '$mountpoint' -stdinpassphrase");;
|
||||||
|
unencrypted)
|
||||||
|
cmd=(/usr/sbin/diskutil mount -mountPoint "$mountpoint" "$volume_uuid");;
|
||||||
|
*)
|
||||||
|
failure "Invalid first arg $cmd_type to generate_mount_command";;
|
||||||
|
esac
|
||||||
|
|
||||||
|
printf " <string>%s</string>\n" "${cmd[@]}"
|
||||||
|
}
|
||||||
|
|
||||||
|
generate_mount_daemon() {
|
||||||
|
local cmd_type="$1" # encrypted|unencrypted
|
||||||
|
local volume_uuid="$2"
|
||||||
|
cat <<EOF
|
||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
|
||||||
|
<plist version="1.0">
|
||||||
|
<dict>
|
||||||
|
<key>RunAtLoad</key>
|
||||||
|
<true/>
|
||||||
|
<key>Label</key>
|
||||||
|
<string>org.nixos.darwin-store</string>
|
||||||
|
<key>ProgramArguments</key>
|
||||||
|
<array>
|
||||||
|
$(generate_mount_command "$cmd_type" "$volume_uuid")
|
||||||
|
</array>
|
||||||
|
</dict>
|
||||||
|
</plist>
|
||||||
|
EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
_eat_bootout_err() {
|
||||||
|
/usr/bin/grep -v "Boot-out failed: 36: Operation now in progress"
|
||||||
|
}
|
||||||
|
|
||||||
|
# TODO: remove with --uninstall?
|
||||||
|
uninstall_launch_daemon_directions() {
|
||||||
|
local daemon_label="$1" # i.e., org.nixos.blah-blah
|
||||||
|
local daemon_plist="$2" # abspath
|
||||||
|
substep "Uninstall LaunchDaemon $daemon_label" \
|
||||||
|
" sudo launchctl bootout system/$daemon_label" \
|
||||||
|
" sudo rm $daemon_plist"
|
||||||
|
}
|
||||||
|
|
||||||
|
uninstall_launch_daemon_prompt() {
|
||||||
|
local daemon_label="$1" # i.e., org.nixos.blah-blah
|
||||||
|
local daemon_plist="$2" # abspath
|
||||||
|
local reason_for_daemon="$3"
|
||||||
|
cat <<EOF
|
||||||
|
|
||||||
|
The installer adds a LaunchDaemon to $reason_for_daemon: $daemon_label
|
||||||
|
EOF
|
||||||
|
if ui_confirm "Can I remove it?"; then
|
||||||
|
_sudo "to terminate the daemon" \
|
||||||
|
launchctl bootout "system/$daemon_label" 2> >(_eat_bootout_err >&2) || true
|
||||||
|
# this can "fail" with a message like:
|
||||||
|
# Boot-out failed: 36: Operation now in progress
|
||||||
|
_sudo "to remove the daemon definition" rm "$daemon_plist"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
nix_volume_mountd_uninstall_directions() {
|
||||||
|
uninstall_launch_daemon_directions "org.nixos.darwin-store" \
|
||||||
|
"$NIX_VOLUME_MOUNTD_DEST"
|
||||||
|
}
|
||||||
|
|
||||||
|
nix_volume_mountd_uninstall_prompt() {
|
||||||
|
uninstall_launch_daemon_prompt "org.nixos.darwin-store" \
|
||||||
|
"$NIX_VOLUME_MOUNTD_DEST" \
|
||||||
|
"mount your Nix volume"
|
||||||
|
}
|
||||||
|
|
||||||
|
# TODO: move nix_daemon to install-darwin-multi-user if/when uninstall_launch_daemon_prompt moves up to install-multi-user
|
||||||
|
nix_daemon_uninstall_prompt() {
|
||||||
|
uninstall_launch_daemon_prompt "org.nixos.nix-daemon" \
|
||||||
|
"$NIX_DAEMON_DEST" \
|
||||||
|
"run the nix-daemon"
|
||||||
|
}
|
||||||
|
|
||||||
|
# TODO: remove with --uninstall?
|
||||||
|
nix_daemon_uninstall_directions() {
|
||||||
|
uninstall_launch_daemon_directions "org.nixos.nix-daemon" \
|
||||||
|
"$NIX_DAEMON_DEST"
|
||||||
|
}


# TODO: remove with --uninstall?
synthetic_conf_uninstall_directions() {
    # :1 to strip leading slash
    substep "Remove ${NIX_ROOT:1} from /etc/synthetic.conf" \
        " If nix is the only entry: sudo rm /etc/synthetic.conf" \
        " Otherwise: sudo /usr/bin/sed -i '' -e '/^${NIX_ROOT:1}$/d' /etc/synthetic.conf"
}

synthetic_conf_uninstall_prompt() {
    cat <<EOF

During install, I add '${NIX_ROOT:1}' to /etc/synthetic.conf, which instructs
macOS to create an empty root directory for mounting the Nix volume.
EOF
    # make the edit to a copy
    /usr/bin/grep -vE "^${NIX_ROOT:1}($|\t.{3,}$)" /etc/synthetic.conf > "$SCRATCH/synthetic.conf.edit"

    if test_synthetic_conf_symlinked; then
        warning <<EOF

/etc/synthetic.conf already contains a line instructing your system
to make '${NIX_ROOT}' a symlink:
$(/usr/bin/grep -nE "^${NIX_ROOT:1}\t.{3,}$" /etc/synthetic.conf)

This may mean your system has/had a non-standard Nix install.

The volume-creation process in this installer is *not* compatible
with a symlinked store, so I'll have to remove this instruction to
continue.

If you want/need to keep this instruction, answer 'n' to abort.

EOF
    fi

    # ask to rm if this left the file empty aside from comments, else edit
    if /usr/bin/diff -q <(:) <(/usr/bin/grep -v "^#" "$SCRATCH/synthetic.conf.edit") &>/dev/null; then
        if confirm_rm "/etc/synthetic.conf"; then
            if test_nix_root_is_symlink; then
                failure >&2 <<EOF
I removed /etc/synthetic.conf, but $NIX_ROOT is already a symlink
(-> $(readlink "$NIX_ROOT")). The system should remove it when you reboot.
Once you've rebooted, run the installer again.
EOF
            fi
            return 0
        fi
    else
        if confirm_edit "$SCRATCH/synthetic.conf.edit" "/etc/synthetic.conf"; then
            if test_nix_root_is_symlink; then
                failure >&2 <<EOF
I edited Nix out of /etc/synthetic.conf, but $NIX_ROOT is already a symlink
(-> $(readlink "$NIX_ROOT")). The system should remove it when you reboot.
Once you've rebooted, run the installer again.
EOF
            fi
            return 0
        fi
    fi
    # fallback instructions
    echo "Manually remove nix from /etc/synthetic.conf"
    return 1
}
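
# Illustration only (not installer code): /etc/synthetic.conf entries are one
# per line. A bare name creates an empty mount-point directory at /<name> on
# the read-only root; a name followed by a tab and a target path creates a
# symlink instead. The greps above distinguish those two forms for "nix".
_example_synthetic_conf() {
    cat <<'EXAMPLE'
nix
example	Users/Shared/example
EXAMPLE
}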

add_nix_vol_fstab_line() {
    local uuid="$1"
    # shellcheck disable=SC1003,SC2026
    local escaped_mountpoint="${NIX_ROOT/ /'\\\'040}"
    shift
    EDITOR="/usr/bin/ex" _sudo "to add nix to fstab" "$@" <<EOF
:a
UUID=$uuid $escaped_mountpoint apfs rw,noauto,nobrowse,suid,owners
.
:x
EOF
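    # Illustration only (not installer code): with a hypothetical volume UUID of
    # 12345678-ABCD-4321-ABCD-1234567890AB and NIX_ROOT=/nix, the line that ex
    # appends to /etc/fstab above would look roughly like:
    #   UUID=12345678-ABCD-4321-ABCD-1234567890AB /nix apfs rw,noauto,nobrowse,suid,owners
    # (the ${NIX_ROOT/ /'\\\'040} substitution only matters if the mount point
    # contained a space, which fstab requires to be escaped as \040)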
|
||||||
|
# TODO: preserving my notes on suid,owners above until resolved
|
||||||
|
# There *may* be some issue regarding volume ownership, see nix#3156
|
||||||
#
|
#
|
||||||
# This isn't well-documented on its own; until it is,
|
# It seems like the cheapest fix is adding "suid,owners" to fstab, but:
|
||||||
# let's keep track of knowledge/assumptions.
|
# - We don't have much info on this condition yet
|
||||||
|
# - I'm not certain if these cause other problems?
|
||||||
|
# - There's a "chown" component some people claim to need to fix this
|
||||||
|
# that I don't understand yet
|
||||||
|
# (Note however that I've had to add a chown step to handle
|
||||||
|
# single->multi-user reinstalls, which may cover this)
|
||||||
#
|
#
|
||||||
# Warnings:
|
# I'm not sure if it's safe to approach this way?
|
||||||
# - Don't search "xart" if porn will cause you trouble :)
|
|
||||||
# - Other xartutil flags do dangerous things. Don't run them
|
|
||||||
# naively. If you must, search "xartutil" first.
|
|
||||||
#
|
#
|
||||||
# Assumptions:
|
# I think the most-proper way to test for it is:
|
||||||
# - the "xART session seeds recovery utility"
|
# diskutil info -plist "$NIX_VOLUME_LABEL" | xmllint --xpath "(/plist/dict/key[text()='GlobalPermissionsEnabled'])/following-sibling::*[1][name()='true']" -; echo $?
|
||||||
# appears to interact with xartstorageremoted
|
#
|
||||||
# - `sudo xartutil --list` lists xART sessions
|
# There's also `sudo /usr/sbin/vsdbutil -c /path` (which is much faster, but is also
|
||||||
# and their seeds and exits 0 if successful. If
|
# deprecated and needs minor parsing).
|
||||||
# not, it exits 1 and prints an error such as:
|
#
|
||||||
# xartutil: ERROR: No supported link to the SEP present
|
# If no one finds a problem with doing so, I think the simplest approach
|
||||||
# - xART sessions/seeds are present when a T2 chip is
|
# is to just eagerly set this. I found a few imperative approaches:
|
||||||
# (and not, otherwise)
|
# (diskutil enableOwnership, ~100ms), a cheap one (/usr/sbin/vsdbutil -a, ~40-50ms),
|
||||||
# - the presence of a T2 chip means a newly-created
|
# a very cheap one (append the internal format to /var/db/volinfo.database).
|
||||||
# volume on the primary drive will be
|
#
|
||||||
# encrypted at rest
|
# But vsdbutil's deprecation notice suggests using fstab, so I want to
|
||||||
# - all together: `sudo xartutil --list`
|
# give that a whirl first.
|
||||||
# should exit 0 if a new Nix Store volume will
|
#
|
||||||
# be encrypted at rest, and exit 1 if not.
|
# TODO: when this is workable, poke infinisil about reproducing the issue
|
||||||
sudo xartutil --list >/dev/null 2>/dev/null
|
# and confirming this fix?
|
||||||
}
|
}
|
||||||
|
|
||||||
test_filevault_in_use() {
|
delete_nix_vol_fstab_line() {
|
||||||
fdesetup isactive >/dev/null
|
# TODO: I'm scaffolding this to handle the new nix volumes
|
||||||
|
# but it might be nice to generalize a smidge further to
|
||||||
|
# go ahead and set up a pattern for curing "old" things
|
||||||
|
# we no longer do?
|
||||||
|
EDITOR="/usr/bin/patch" _sudo "to cut nix from fstab" "$@" < <(/usr/bin/diff /etc/fstab <(/usr/bin/grep -v "$NIX_ROOT apfs rw" /etc/fstab))
|
||||||
|
# leaving some parts out of the grep; people may fiddle this a little?
|
||||||
}
|
}
|
||||||
|
|
||||||
# use after error msg for conditions we don't understand
|
# TODO: hope to remove with --uninstall
|
||||||
suggest_report_error(){
|
fstab_uninstall_directions() {
|
||||||
# ex "error: something sad happened :(" >&2
|
substep "Remove ${NIX_ROOT} from /etc/fstab" \
|
||||||
echo " please report this @ https://github.com/nixos/nix/issues" >&2
|
" If nix is the only entry: sudo rm /etc/fstab" \
|
||||||
|
" Otherwise, run 'sudo /usr/sbin/vifs' to remove the nix line"
|
||||||
}
|
}
|
||||||
|
|
||||||
main() {
|
fstab_uninstall_prompt() {
|
||||||
(
|
cat <<EOF
|
||||||
|
During install, I add '${NIX_ROOT}' to /etc/fstab so that macOS knows what
|
||||||
|
mount options to use for the Nix volume.
|
||||||
|
EOF
|
||||||
|
    cp /etc/fstab "$SCRATCH/fstab.edit"
    # technically doesn't need the _sudo path, but throwing away the
    # output is probably better than mostly-duplicating the code...
    delete_nix_vol_fstab_line patch "$SCRATCH/fstab.edit" &>/dev/null

    # if the patch test edit, minus comment lines, is equal to empty (:)
    if /usr/bin/diff -q <(:) <(/usr/bin/grep -v "^#" "$SCRATCH/fstab.edit") &>/dev/null; then
        # this edit would leave it empty; propose deleting it
        if confirm_rm "/etc/fstab"; then
            return 0
        else
            echo "Remove nix from /etc/fstab (or remove the file)"
        fi
    else
        echo "I might be able to help you make this edit. Here's the diff:"
        if ! _diff "/etc/fstab" "$SCRATCH/fstab.edit" && ui_confirm "Does the change above look right?"; then
            delete_nix_vol_fstab_line /usr/sbin/vifs
        else
            echo "Remove nix from /etc/fstab (or remove the file)"
        fi
    fi
}

remove_volume() {
    local volume_special="$1" # (i.e., disk1s7)
    _sudo "to unmount the Nix volume" \
        /usr/sbin/diskutil unmount force "$volume_special" || true # might not be mounted
    _sudo "to delete the Nix volume" \
        /usr/sbin/diskutil apfs deleteVolume "$volume_special"
}

# aspiration: robust enough to both fix problems
# *and* update older darwin volumes
cure_volume() {
    local volume_special="$1" # (i.e., disk1s7)
    local volume_uuid="$2"
    header "Found existing Nix volume"
    row " special" "$volume_special"
    row " uuid" "$volume_uuid"

    if volume_encrypted "$volume_special"; then
        row "encrypted" "yes"
        if volume_pass_works "$volume_special" "$volume_uuid"; then
            NIX_VOLUME_DO_ENCRYPT=0
            ok "Found a working decryption password in keychain :)"
            echo ""
        else
            # - this is a volume we made, and
            #   - the user encrypted it on their own
            #   - something deleted the credential
            # - this is an old or BYO volume and the pw
            #   just isn't somewhere we can find it.
            #
            # We're going to explain why we're freaking out
            # and prompt them to either delete the volume
            # (requiring a sudo auth), or abort to fix
            warning <<EOF

This volume is encrypted, but I don't see a password to decrypt it.
The quick fix is to let me delete this volume and make you a new one.
If that's okay, enter your (sudo) password to continue. If not, you
can ensure the decryption password is in your system keychain with a
"Where" (service) field set to this volume's UUID:
$volume_uuid
EOF
            if password_confirm "delete this volume"; then
                remove_volume "$volume_special"
            else
                # TODO: this is a good design case for a warn-and
                # remind idiom...
                failure <<EOF
Your Nix volume is encrypted, but I couldn't find its password. Either:
- Delete or rename the volume out of the way
- Ensure its decryption password is in the system keychain with a
"Where" (service) field set to this volume's UUID:
$volume_uuid
EOF
            fi
        fi
    elif test_filevault_in_use; then
        row "encrypted" "no"
        warning <<EOF
FileVault is on, but your $NIX_VOLUME_LABEL volume isn't encrypted.
EOF
        # if we're interactive, give them a chance to
        # encrypt the volume. If not, /shrug
        if ! headless && (( NIX_VOLUME_DO_ENCRYPT == 1 )); then
            if ui_confirm "Should I encrypt it and add the decryption key to your keychain?"; then
                encrypt_volume "$volume_uuid" "$NIX_VOLUME_LABEL"
                NIX_VOLUME_DO_ENCRYPT=0
            else
                NIX_VOLUME_DO_ENCRYPT=0
                reminder "FileVault is on, but your $NIX_VOLUME_LABEL volume isn't encrypted."
            fi
        fi
    else
        row "encrypted" "no"
    fi
}
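
# Illustration only (not installer code): roughly what a keychain check like
# volume_pass_works could boil down to, assuming the convention used above
# (the password item's "Where"/service field is the volume UUID, stored in
# the System keychain).
_example_volume_password_in_keychain() {
    local volume_uuid="$1"
    /usr/bin/security find-generic-password -s "$volume_uuid" "/Library/Keychains/System.keychain" >/dev/null 2>&1
}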

remove_volume_artifacts() {
    if test_synthetic_conf_either; then
        # NIX_ROOT is in synthetic.conf
        if synthetic_conf_uninstall_prompt; then
            # TODO: moot until we tackle uninstall, but when we're
            # actually uninstalling, we should issue:
            # reminder "macOS will clean up the empty mount-point directory at $NIX_ROOT on reboot."
            :
        fi
    fi
    if test_fstab; then
        fstab_uninstall_prompt
    fi

    if test_nix_volume_mountd_installed; then
        nix_volume_mountd_uninstall_prompt
    fi
}

setup_synthetic_conf() {
    if test_nix_root_is_symlink; then
        if ! test_synthetic_conf_symlinked; then
            failure >&2 <<EOF
error: $NIX_ROOT is a symlink (-> $(readlink "$NIX_ROOT")).
Please remove it. If nix is in /etc/synthetic.conf, remove it and reboot.
EOF
        fi
    fi
    if ! test_synthetic_conf_mountable; then
        task "Configuring /etc/synthetic.conf to make a mount-point at $NIX_ROOT" >&2
        # technically /etc/synthetic.d/nix is supported in Big Sur+
        # but handling both takes even more code...
        _sudo "to add Nix to /etc/synthetic.conf" \
            /usr/bin/ex /etc/synthetic.conf <<EOF
:a
${NIX_ROOT:1}
.
:x
EOF
        if ! test_synthetic_conf_mountable; then
            failure "error: failed to configure synthetic.conf" >&2
        fi
        create_synthetic_objects
        if ! test_nix; then
            failure >&2 <<EOF
error: failed to bootstrap $NIX_ROOT
If you enabled FileVault after booting, this is likely a known issue
with macOS that you'll have to reboot to fix. If you didn't enable FV,
though, please open an issue describing how the system that you see
this error on was set up.
EOF
        fi
    fi
}
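
# Quick sanity check (illustration only, not used by the installer): after
# /etc/synthetic.conf has been processed, $NIX_ROOT should exist as an empty
# directory on the read-only root, not as a symlink.
_example_check_nix_mountpoint() {
    [ -d "$NIX_ROOT" ] && [ ! -L "$NIX_ROOT" ]
}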

setup_fstab() {
    local volume_uuid="$1"
    # fstab used to be responsible for mounting the volume. Now the last
    # step adds a LaunchDaemon responsible for mounting. This is technically
    # redundant for mounting, but diskutil appears to pick up mount options
    # from fstab (and diskutil's support for specifying them directly is not
    # consistent across versions/subcommands).
    if ! test_fstab; then
        task "Configuring /etc/fstab to specify volume mount options" >&2
        add_nix_vol_fstab_line "$volume_uuid" /usr/sbin/vifs
    fi
}

encrypt_volume() {
    local volume_uuid="$1"
    local volume_label="$2"
    local password
    # Note: mount/unmount are late additions to support the right order
    # of operations for creating the volume and then baking its uuid into
    # other artifacts; not as well-trod wrt potential errors, race
    # conditions, etc.

    /usr/sbin/diskutil mount "$volume_label"

    password="$(/usr/bin/xxd -l 32 -p -c 256 /dev/random)"
    _sudo "to add your Nix volume's password to Keychain" \
        /usr/bin/security -i <<EOF
add-generic-password -a "$volume_label" -s "$volume_uuid" -l "$volume_label encryption password" -D "Encrypted volume password" -j "Added automatically by the Nix installer for use by $NIX_VOLUME_MOUNTD_DEST" -w "$password" -T /System/Library/CoreServices/APFSUserAgent -T /System/Library/CoreServices/CSUserAgent -T /usr/bin/security "/Library/Keychains/System.keychain"
EOF
    builtin printf "%s" "$password" | _sudo "to encrypt your Nix volume" \
        /usr/sbin/diskutil apfs encryptVolume "$volume_label" -user disk -stdinpassphrase

    /usr/sbin/diskutil unmount force "$volume_label"
}
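
# Illustration only (not installer code): a sketch of the decrypt-and-mount
# counterpart the LaunchDaemon performs at boot. The real command comes from
# generate_mount_command, which is outside this hunk; the keychain lookup
# assumes the convention above (service field = volume UUID).
_example_unlock_and_mount() {
    local volume_uuid="$1"
    /usr/bin/security find-generic-password -s "$volume_uuid" -w "/Library/Keychains/System.keychain" | \
        /usr/sbin/diskutil apfs unlockVolume "$volume_uuid" -mountpoint "$NIX_ROOT" -stdinpassphrase
}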

create_volume() {
    # Notes:
    # 1) using `-nomount` instead of `-mountpoint "$NIX_ROOT"` to get
    #    its UUID and set mount opts in fstab before first mount
    #
    # 2) system is in some sense less secure than user keychain... (it's
    #    possible to read the password for decrypting the keychain) but
    #    the user keychain appears to be available too late. As far as I
    #    can tell, the file with this password (/var/db/SystemKey) is
    #    inside the FileVault envelope. If that isn't true, it may make
    #    sense to store the password inside the envelope?
    #
    # 3) At some point it would be ideal to have a small binary to serve
    #    as the daemon itself, and for it to replace /usr/bin/security here.
    #
    # 4) *UserAgent exemptions should let the system seamlessly supply the
    #    password if noauto is removed from the fstab entry. This is intentional;
    #    the user will hopefully look for help if the volume stops mounting,
    #    rather than failing over into subtle race-condition problems.
    #
    # 5) If we ever get users griping about not having space to do
    #    anything useful with Nix, it is possible to specify
    #    `-reserve 10g` or something, which will fail w/o that much
    #
    # 6) getting special w/ awk may be fragile, but doing it to:
    #    - save time over running slow diskutil commands
    #    - skirt risk we grab wrong volume if multiple match
    /usr/sbin/diskutil apfs addVolume "$NIX_VOLUME_USE_DISK" "$NIX_VOLUME_FS" "$NIX_VOLUME_LABEL" -nomount | /usr/bin/awk '/Created new APFS Volume/ {print $5}'
}
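
# Illustration only (not installer code): the awk filter above plucks field 5
# from diskutil's summary line, so the function's stdout is the new volume's
# device special, which is what setup_volume captures below.
_example_parse_created_volume() {
    # given a summary line of this shape, field 5 is the device special
    echo "Created new APFS Volume disk1s7" | /usr/bin/awk '/Created new APFS Volume/ {print $5}'
    # prints: disk1s7
}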

volume_uuid_from_special() {
    local volume_special="$1" # (i.e., disk1s7)
    # For reasons I won't pretend to fathom, this returns 253 when it works
    /System/Library/Filesystems/apfs.fs/Contents/Resources/apfs.util -k "$volume_special" || true
}

# this sometimes clears immediately, and AFAIK clears
# within about 1s. diskutil info on an unmounted path
# fails in around 50-100ms and a match takes about
# 250-300ms. I suspect it's usually ~250-750ms
await_volume() {
    # caution: this could, in theory, get stuck
    until /usr/sbin/diskutil info "$NIX_ROOT" &>/dev/null; do
        :
    done
}
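
# A bounded variant (illustration only, not used by the installer): the same
# check as await_volume, but give up after roughly a minute instead of
# spinning forever if the mount never appears.
_example_await_volume_with_timeout() {
    local tries=0
    until /usr/sbin/diskutil info "$NIX_ROOT" &>/dev/null; do
        tries=$((tries + 1))
        if [ "$tries" -gt 120 ]; then
            return 1
        fi
        sleep 0.5
    done
}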

setup_volume() {
    local use_special use_uuid profile_packages
    task "Creating a Nix volume" >&2
    # DOING: I'm tempted to wrap this call in a grep to get the new disk special without doing anything too complex, but this sudo wrapper *is* a little complex, so it'll be a PITA unless maybe we can skip sudo on this. Let's just try it without.

    use_special="${NIX_VOLUME_USE_SPECIAL:-$(create_volume)}"

    use_uuid=${NIX_VOLUME_USE_UUID:-$(volume_uuid_from_special "$use_special")}

    setup_fstab "$use_uuid"

    if should_encrypt_volume; then
        encrypt_volume "$use_uuid" "$NIX_VOLUME_LABEL"
        setup_volume_daemon "encrypted" "$use_uuid"
    # TODO: might be able to save ~60ms by caching or setting
    # this somewhere rather than re-checking here.
    elif volume_encrypted "$use_special"; then
        setup_volume_daemon "encrypted" "$use_uuid"
    else
        setup_volume_daemon "unencrypted" "$use_uuid"
    fi

    await_volume

    # TODO: below is a vague kludge for now; I just don't know
    # what if any safe action there is to take here. Also, the
    # reminder isn't very helpful.
    # I'm less sure where this belongs, but it also wants mounted, pre-install
    if type -p nix-env; then
        profile_packages="$(nix-env --query --installed)"
        # TODO: can probably do below faster w/ read
        # intentionally unquoted string to eat whitespace in wc output
        # shellcheck disable=SC2046,SC2059
        if ! [ $(printf "$profile_packages" | /usr/bin/wc -l) = "0" ]; then
            reminder <<EOF
Nix now supports only multi-user installs on Darwin/macOS, and your user's
Nix profile has some packages in it. These packages may obscure those in the
default profile, including the Nix this installer will add. You should
review these packages:
$profile_packages
EOF
        fi
    fi

}

setup_volume_daemon() {
    local cmd_type="$1" # encrypted|unencrypted
    local volume_uuid="$2"
    if ! test_voldaemon; then
        task "Configuring LaunchDaemon to mount '$NIX_VOLUME_LABEL'" >&2
        _sudo "to install the Nix volume mounter" /usr/bin/ex "$NIX_VOLUME_MOUNTD_DEST" <<EOF
:a
$(generate_mount_daemon "$cmd_type" "$volume_uuid")
.
:x
EOF

        # TODO: should probably alert the user if this is disabled?
        _sudo "to launch the Nix volume mounter" \
            launchctl bootstrap system "$NIX_VOLUME_MOUNTD_DEST" || true
        # TODO: confirm whether kickstart is necessary?
        # I feel a little superstitious, but it can guard
        # against multiple problems (doesn't start, old
        # version still running for some reason...)
        _sudo "to launch the Nix volume mounter" \
            launchctl kickstart -k system/org.nixos.darwin-store
    fi
}

setup_darwin_volume() {
    setup_synthetic_conf
    setup_volume
}
|
||||||
|
|
||||||
|
if [ "$_CREATE_VOLUME_NO_MAIN" = 1 ]; then
|
||||||
|
if [ -n "$*" ]; then
|
||||||
|
"$@" # expose functions in case we want multiple routines?
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
# no reason to pay for bash to process this
|
||||||
|
main() {
|
||||||
|
{
|
||||||
echo ""
|
echo ""
|
||||||
echo " ------------------------------------------------------------------ "
|
echo " ------------------------------------------------------------------ "
|
||||||
echo " | This installer will create a volume for the nix store and |"
|
echo " | This installer will create a volume for the nix store and |"
|
||||||
echo " | configure it to mount at /nix. Follow these steps to uninstall. |"
|
echo " | configure it to mount at $NIX_ROOT. Follow these steps to uninstall. |"
|
||||||
echo " ------------------------------------------------------------------ "
|
echo " ------------------------------------------------------------------ "
|
||||||
echo ""
|
echo ""
|
||||||
echo " 1. Remove the entry from fstab using 'sudo vifs'"
|
echo " 1. Remove the entry from fstab using 'sudo /usr/sbin/vifs'"
|
||||||
echo " 2. Destroy the data volume using 'diskutil apfs deleteVolume'"
|
echo " 2. Run 'sudo launchctl bootout system/org.nixos.darwin-store'"
|
||||||
echo " 3. Remove the 'nix' line from /etc/synthetic.conf or the file"
|
echo " 3. Remove $NIX_VOLUME_MOUNTD_DEST"
|
||||||
|
echo " 4. Destroy the data volume using '/usr/sbin/diskutil apfs deleteVolume'"
|
||||||
|
echo " 5. Remove the 'nix' line from /etc/synthetic.conf (or the file)"
|
||||||
echo ""
|
echo ""
|
||||||
) >&2
|
} >&2
|
||||||
|
|
||||||
if test_nix_symlink; then
|
setup_darwin_volume
|
||||||
echo "error: /nix is a symlink, please remove it and make sure it's not in synthetic.conf (in which case a reboot is required)" >&2
|
}
|
||||||
echo " /nix -> $(readlink "/nix")" >&2
|
|
||||||
exit 2
|
|
||||||
fi
|
|
||||||
|
|
||||||
if ! test_synthetic_conf; then
|
main "$@"
|
||||||
echo "Configuring /etc/synthetic.conf..." >&2
|
fi
|
||||||
echo nix | sudo tee -a /etc/synthetic.conf
|
|
||||||
if ! test_synthetic_conf; then
|
|
||||||
echo "error: failed to configure synthetic.conf;" >&2
|
|
||||||
suggest_report_error
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
if ! test_nix; then
|
|
||||||
echo "Creating mountpoint for /nix..." >&2
|
|
||||||
create_synthetic_objects # the ones we defined in synthetic.conf
|
|
||||||
if ! test_nix; then
|
|
||||||
sudo mkdir -p /nix 2>/dev/null || true
|
|
||||||
fi
|
|
||||||
if ! test_nix; then
|
|
||||||
echo "error: failed to bootstrap /nix; if a reboot doesn't help," >&2
|
|
||||||
suggest_report_error
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
disk="$(root_disk_identifier)"
|
|
||||||
volume=$(find_nix_volume "$disk")
|
|
||||||
if [ -z "$volume" ]; then
|
|
||||||
echo "Creating a Nix Store volume..." >&2
|
|
||||||
|
|
||||||
if test_filevault_in_use; then
|
|
||||||
# TODO: Not sure if it's in-scope now, but `diskutil apfs list`
|
|
||||||
# shows both filevault and encrypted at rest status, and it
|
|
||||||
# may be the more semantic way to test for this? It'll show
|
|
||||||
# `FileVault: No (Encrypted at rest)`
|
|
||||||
# `FileVault: No`
|
|
||||||
# `FileVault: Yes (Unlocked)`
|
|
||||||
# and so on.
|
|
||||||
if test_t2_chip_present; then
|
|
||||||
echo "warning: boot volume is FileVault-encrypted, but the Nix store volume" >&2
|
|
||||||
echo " is only encrypted at rest." >&2
|
|
||||||
echo " See https://nixos.org/nix/manual/#sect-macos-installation" >&2
|
|
||||||
else
|
|
||||||
echo "error: refusing to create Nix store volume because the boot volume is" >&2
|
|
||||||
echo " FileVault encrypted, but encryption-at-rest is not available." >&2
|
|
||||||
echo " Manually create a volume for the store and re-run this script." >&2
|
|
||||||
echo " See https://nixos.org/nix/manual/#sect-macos-installation" >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
sudo diskutil apfs addVolume "$disk" APFS 'Nix Store' -mountpoint /nix
|
|
||||||
volume="Nix Store"
|
|
||||||
else
|
|
||||||
echo "Using existing '$volume' volume" >&2
|
|
||||||
fi
|
|
||||||
|
|
||||||
if ! test_fstab; then
|
|
||||||
echo "Configuring /etc/fstab..." >&2
|
|
||||||
label=$(echo "$volume" | sed 's/ /\\040/g')
|
|
||||||
# shellcheck disable=SC2209
|
|
||||||
printf "\$a\nLABEL=%s /nix apfs rw,nobrowse\n.\nwq\n" "$label" | EDITOR=ed sudo vifs
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
main "$@"
|
|
||||||
|
|
|
@ -3,57 +3,99 @@
|
||||||
set -eu
|
set -eu
|
||||||
set -o pipefail
|
set -o pipefail
|
||||||
|
|
||||||
readonly PLIST_DEST=/Library/LaunchDaemons/org.nixos.nix-daemon.plist
|
readonly NIX_DAEMON_DEST=/Library/LaunchDaemons/org.nixos.nix-daemon.plist
|
||||||
|
# create by default; set 0 to DIY, use a symlink, etc.
|
||||||
|
readonly NIX_VOLUME_CREATE=${NIX_VOLUME_CREATE:-1} # now default
|
||||||
|
NIX_FIRST_BUILD_UID="301"
|
||||||
|
NIX_BUILD_USER_NAME_TEMPLATE="_nixbld%d"
|
||||||
|
|
||||||
|
# caution: may update times on / if not run as normal non-root user
|
||||||
|
read_only_root() {
|
||||||
|
# this touch command ~should~ always produce an error
|
||||||
|
# as of this change I confirmed /usr/bin/touch emits:
|
||||||
|
# "touch: /: Read-only file system" Catalina+ and Big Sur
|
||||||
|
# "touch: /: Permission denied" Mojave
|
||||||
|
# (not matching prefix for compat w/ coreutils touch in case using
|
||||||
|
# an explicit path causes problems; its prefix differs)
|
||||||
|
[[ "$(/usr/bin/touch / 2>&1)" = *"Read-only file system" ]]
|
||||||
|
|
||||||
|
# Avoiding the slow semantic way to get this information (~330ms vs ~8ms)
|
||||||
|
# unless using touch causes problems. Just in case, that approach is:
|
||||||
|
# diskutil info -plist / | <find the Writable or WritableVolume keys>, i.e.
|
||||||
|
# diskutil info -plist / | xmllint --xpath "name(/plist/dict/key[text()='Writable']/following-sibling::*[1])" -
|
||||||
|
}
|
||||||
|
|
||||||
|
if read_only_root && [ "$NIX_VOLUME_CREATE" = 1 ]; then
|
||||||
|
should_create_volume() { return 0; }
|
||||||
|
else
|
||||||
|
should_create_volume() { return 1; }
|
||||||
|
fi
|
||||||
|
|
||||||
|
# shellcheck source=./create-darwin-volume.sh
|
||||||
|
. "$EXTRACTED_NIX_PATH/create-darwin-volume.sh" "no-main"
|
||||||
|
|
||||||
dsclattr() {
|
dsclattr() {
|
||||||
/usr/bin/dscl . -read "$1" \
|
/usr/bin/dscl . -read "$1" \
|
||||||
| awk "/$2/ { print \$2 }"
|
| /usr/bin/awk "/$2/ { print \$2 }"
|
||||||
}
|
}
|
||||||
|
|
||||||
poly_validate_assumptions() {
|
test_nix_daemon_installed() {
|
||||||
if [ "$(uname -s)" != "Darwin" ]; then
|
test -e "$NIX_DAEMON_DEST"
|
||||||
failure "This script is for use with macOS!"
|
}
|
||||||
|
|
||||||
|
poly_cure_artifacts() {
|
||||||
|
if should_create_volume; then
|
||||||
|
task "Fixing any leftover Nix volume state"
|
||||||
|
cat <<EOF
|
||||||
|
Before I try to install, I'll check for any existing Nix volume config
|
||||||
|
and ask for your permission to remove it (so that the installer can
|
||||||
|
start fresh). I'll also ask for permission to fix any issues I spot.
|
||||||
|
EOF
|
||||||
|
cure_volumes
|
||||||
|
remove_volume_artifacts
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
poly_service_installed_check() {
|
poly_service_installed_check() {
|
||||||
[ -e "$PLIST_DEST" ]
|
if should_create_volume; then
|
||||||
|
test_nix_daemon_installed || test_nix_volume_mountd_installed
|
||||||
|
else
|
||||||
|
test_nix_daemon_installed
|
||||||
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
poly_service_uninstall_directions() {
|
poly_service_uninstall_directions() {
|
||||||
cat <<EOF
|
echo "$1. Remove macOS-specific components:"
|
||||||
$1. Delete $PLIST_DEST
|
if should_create_volume && test_nix_volume_mountd_installed; then
|
||||||
|
darwin_volume_uninstall_directions
|
||||||
sudo launchctl unload $PLIST_DEST
|
fi
|
||||||
sudo rm $PLIST_DEST
|
if test_nix_daemon_installed; then
|
||||||
|
nix_daemon_uninstall_directions
|
||||||
EOF
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
poly_service_setup_note() {
|
poly_service_setup_note() {
|
||||||
cat <<EOF
|
if should_create_volume; then
|
||||||
- load and start a LaunchDaemon (at $PLIST_DEST) for nix-daemon
|
echo " - create a Nix volume and a LaunchDaemon to mount it"
|
||||||
|
fi
|
||||||
EOF
|
echo " - create a LaunchDaemon (at $NIX_DAEMON_DEST) for nix-daemon"
|
||||||
|
echo ""
|
||||||
}
|
}
|
||||||
|
|
||||||
poly_extra_try_me_commands(){
|
poly_extra_try_me_commands() {
|
||||||
:
|
|
||||||
}
|
|
||||||
poly_extra_setup_instructions(){
|
|
||||||
:
|
:
|
||||||
}
|
}
|
||||||
|
|
||||||
poly_configure_nix_daemon_service() {
|
poly_configure_nix_daemon_service() {
|
||||||
|
task "Setting up the nix-daemon LaunchDaemon"
|
||||||
_sudo "to set up the nix-daemon as a LaunchDaemon" \
|
_sudo "to set up the nix-daemon as a LaunchDaemon" \
|
||||||
cp -f "/nix/var/nix/profiles/default$PLIST_DEST" "$PLIST_DEST"
|
/bin/cp -f "/nix/var/nix/profiles/default$NIX_DAEMON_DEST" "$NIX_DAEMON_DEST"
|
||||||
|
|
||||||
_sudo "to load the LaunchDaemon plist for nix-daemon" \
|
_sudo "to load the LaunchDaemon plist for nix-daemon" \
|
||||||
launchctl load /Library/LaunchDaemons/org.nixos.nix-daemon.plist
|
launchctl load /Library/LaunchDaemons/org.nixos.nix-daemon.plist
|
||||||
|
|
||||||
_sudo "to start the nix-daemon" \
|
_sudo "to start the nix-daemon" \
|
||||||
launchctl start org.nixos.nix-daemon
|
launchctl kickstart -k system/org.nixos.nix-daemon
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
poly_group_exists() {
|
poly_group_exists() {
|
||||||
|
@ -94,6 +136,8 @@ poly_user_home_get() {
|
||||||
}
|
}
|
||||||
|
|
||||||
poly_user_home_set() {
|
poly_user_home_set() {
|
||||||
|
# This can trigger a permission prompt now:
|
||||||
|
# "Terminal" would like to administer your computer. Administration can include modifying passwords, networking, and system settings.
|
||||||
_sudo "in order to give $1 a safe home directory" \
|
_sudo "in order to give $1 a safe home directory" \
|
||||||
/usr/bin/dscl . -create "/Users/$1" "NFSHomeDirectory" "$2"
|
/usr/bin/dscl . -create "/Users/$1" "NFSHomeDirectory" "$2"
|
||||||
}
|
}
|
||||||
|
@ -119,7 +163,7 @@ poly_user_shell_set() {
|
||||||
poly_user_in_group_check() {
|
poly_user_in_group_check() {
|
||||||
username=$1
|
username=$1
|
||||||
group=$2
|
group=$2
|
||||||
dseditgroup -o checkmember -m "$username" "$group" > /dev/null 2>&1
|
/usr/sbin/dseditgroup -o checkmember -m "$username" "$group" > /dev/null 2>&1
|
||||||
}
|
}
|
||||||
|
|
||||||
poly_user_in_group_set() {
|
poly_user_in_group_set() {
|
||||||
|
@ -149,3 +193,17 @@ poly_create_build_user() {
|
||||||
/usr/bin/dscl . create "/Users/$username" \
|
/usr/bin/dscl . create "/Users/$username" \
|
||||||
UniqueID "${uid}"
|
UniqueID "${uid}"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
poly_prepare_to_install() {
|
||||||
|
if should_create_volume; then
|
||||||
|
header "Preparing a Nix volume"
|
||||||
|
# intentional indent below to match task indent
|
||||||
|
cat <<EOF
|
||||||
|
Nix traditionally stores its data in the root directory $NIX_ROOT, but
|
||||||
|
macOS now (starting in 10.15 Catalina) has a read-only root directory.
|
||||||
|
To support Nix, I will create a volume and configure macOS to mount it
|
||||||
|
at $NIX_ROOT.
|
||||||
|
EOF
|
||||||
|
setup_darwin_volume
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
|
@ -25,13 +25,15 @@ readonly RED='\033[31m'
|
||||||
readonly NIX_USER_COUNT=${NIX_USER_COUNT:-32}
|
readonly NIX_USER_COUNT=${NIX_USER_COUNT:-32}
|
||||||
readonly NIX_BUILD_GROUP_ID="30000"
|
readonly NIX_BUILD_GROUP_ID="30000"
|
||||||
readonly NIX_BUILD_GROUP_NAME="nixbld"
|
readonly NIX_BUILD_GROUP_NAME="nixbld"
|
||||||
readonly NIX_FIRST_BUILD_UID="30001"
|
# darwin installer needs to override these
|
||||||
|
NIX_FIRST_BUILD_UID="30001"
|
||||||
|
NIX_BUILD_USER_NAME_TEMPLATE="nixbld%d"
|
||||||
# Please don't change this. We don't support it, because the
|
# Please don't change this. We don't support it, because the
|
||||||
# default shell profile that comes with Nix doesn't support it.
|
# default shell profile that comes with Nix doesn't support it.
|
||||||
readonly NIX_ROOT="/nix"
|
readonly NIX_ROOT="/nix"
|
||||||
readonly NIX_EXTRA_CONF=${NIX_EXTRA_CONF:-}
|
readonly NIX_EXTRA_CONF=${NIX_EXTRA_CONF:-}
|
||||||
|
|
||||||
readonly PROFILE_TARGETS=("/etc/bashrc" "/etc/profile.d/nix.sh" "/etc/zshenv")
|
readonly PROFILE_TARGETS=("/etc/bashrc" "/etc/profile.d/nix.sh" "/etc/zshenv" "/etc/bash.bashrc" "/etc/zsh/zshenv")
|
||||||
readonly PROFILE_BACKUP_SUFFIX=".backup-before-nix"
|
readonly PROFILE_BACKUP_SUFFIX=".backup-before-nix"
|
||||||
readonly PROFILE_NIX_FILE="$NIX_ROOT/var/nix/profiles/default/etc/profile.d/nix-daemon.sh"
|
readonly PROFILE_NIX_FILE="$NIX_ROOT/var/nix/profiles/default/etc/profile.d/nix-daemon.sh"
|
||||||
|
|
||||||
|
@ -41,7 +43,7 @@ readonly NIX_INSTALLED_CACERT="@cacert@"
|
||||||
#readonly NIX_INSTALLED_CACERT="/nix/store/7dxhzymvy330i28ii676fl1pqwcahv2f-nss-cacert-3.49.2"
|
#readonly NIX_INSTALLED_CACERT="/nix/store/7dxhzymvy330i28ii676fl1pqwcahv2f-nss-cacert-3.49.2"
|
||||||
readonly EXTRACTED_NIX_PATH="$(dirname "$0")"
|
readonly EXTRACTED_NIX_PATH="$(dirname "$0")"
|
||||||
|
|
||||||
readonly ROOT_HOME=$(echo ~root)
|
readonly ROOT_HOME=~root
|
||||||
|
|
||||||
if [ -t 0 ]; then
|
if [ -t 0 ]; then
|
||||||
readonly IS_HEADLESS='no'
|
readonly IS_HEADLESS='no'
|
||||||
|
@ -57,14 +59,19 @@ headless() {
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
contactme() {
|
contact_us() {
|
||||||
|
echo "You can open an issue at https://github.com/nixos/nix/issues"
|
||||||
|
echo ""
|
||||||
|
echo "Or feel free to contact the team:"
|
||||||
|
echo " - Matrix: #nix:nixos.org"
|
||||||
|
echo " - IRC: in #nixos on irc.libera.chat"
|
||||||
|
echo " - twitter: @nixos_org"
|
||||||
|
echo " - forum: https://discourse.nixos.org"
|
||||||
|
}
|
||||||
|
get_help() {
|
||||||
echo "We'd love to help if you need it."
|
echo "We'd love to help if you need it."
|
||||||
echo ""
|
echo ""
|
||||||
echo "If you can, open an issue at https://github.com/nixos/nix/issues"
|
contact_us
|
||||||
echo ""
|
|
||||||
echo "Or feel free to contact the team,"
|
|
||||||
echo " - on IRC #nixos on irc.freenode.net"
|
|
||||||
echo " - on twitter @nixos_org"
|
|
||||||
}
|
}
|
||||||
|
|
||||||
uninstall_directions() {
|
uninstall_directions() {
|
||||||
|
@ -100,11 +107,10 @@ $step. Delete the files Nix added to your system:
|
||||||
and that is it.
|
and that is it.
|
||||||
|
|
||||||
EOF
|
EOF
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
nix_user_for_core() {
|
nix_user_for_core() {
|
||||||
printf "nixbld%d" "$1"
|
printf "$NIX_BUILD_USER_NAME_TEMPLATE" "$1"
|
||||||
}
|
}
|
||||||
|
|
||||||
nix_uid_for_core() {
|
nix_uid_for_core() {
|
||||||
|
@ -168,7 +174,7 @@ failure() {
|
||||||
header "oh no!"
|
header "oh no!"
|
||||||
_textout "$RED" "$@"
|
_textout "$RED" "$@"
|
||||||
echo ""
|
echo ""
|
||||||
_textout "$RED" "$(contactme)"
|
_textout "$RED" "$(get_help)"
|
||||||
trap finish_cleanup EXIT
|
trap finish_cleanup EXIT
|
||||||
exit 1
|
exit 1
|
||||||
}
|
}
|
||||||
|
@ -199,6 +205,95 @@ ui_confirm() {
|
||||||
return 1
|
return 1
|
||||||
}
|
}
|
||||||
|
|
||||||
|
printf -v _UNCHANGED_GRP_FMT "%b" $'\033[2m%='"$ESC" # "dim"
|
||||||
|
# bold+invert+red and bold+invert+green just for the +/- below
|
||||||
|
# red/green foreground for rest of the line
|
||||||
|
printf -v _OLD_LINE_FMT "%b" $'\033[1;7;31m-'"$ESC ${RED}%L${ESC}"
|
||||||
|
printf -v _NEW_LINE_FMT "%b" $'\033[1;7;32m+'"$ESC ${GREEN}%L${ESC}"
|
||||||
|
|
||||||
|
_diff() {
|
||||||
|
# simple colorized diff compatible w/ pre `--color` versions
|
||||||
|
diff --unchanged-group-format="$_UNCHANGED_GRP_FMT" --old-line-format="$_OLD_LINE_FMT" --new-line-format="$_NEW_LINE_FMT" --unchanged-line-format=" %L" "$@"
|
||||||
|
}
|
||||||
|
|
||||||
|
confirm_rm() {
|
||||||
|
local path="$1"
|
||||||
|
if ui_confirm "Can I remove $path?"; then
|
||||||
|
_sudo "to remove $path" rm "$path"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
confirm_edit() {
|
||||||
|
local path="$1"
|
||||||
|
local edit_path="$2"
|
||||||
|
cat <<EOF
|
||||||
|
|
||||||
|
Nix isn't the only thing in $path,
|
||||||
|
but I think I know how to edit it out.
|
||||||
|
Here's the diff:
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# could technically test the diff, but caller should do it
|
||||||
|
_diff "$path" "$edit_path"
|
||||||
|
if ui_confirm "Does the change above look right?"; then
|
||||||
|
_sudo "remove nix from $path" cp "$edit_path" "$path"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
_SERIOUS_BUSINESS="${RED}%s:${ESC} "
|
||||||
|
password_confirm() {
|
||||||
|
local do_something_consequential="$1"
|
||||||
|
if ui_confirm "Can I $do_something_consequential?"; then
|
||||||
|
# shellcheck disable=SC2059
|
||||||
|
sudo -kv --prompt="$(printf "${_SERIOUS_BUSINESS}" "Enter your password to $do_something_consequential")"
|
||||||
|
else
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Support accumulating reminders over the course of a run and showing
|
||||||
|
# them at the end. An example where this helps: the installer changes
|
||||||
|
# something, but it won't work without a reboot. If you tell the user
|
||||||
|
# when you do it, they may miss it in the stream. The value of the
|
||||||
|
# setting isn't enough to decide whether to message because you only
|
||||||
|
# need to message if you *changed* it.
|
||||||
|
|
||||||
|
# reminders stored in array delimited by empty entry; if ! headless,
|
||||||
|
# user is asked to confirm after each delimiter.
|
||||||
|
_reminders=()
|
||||||
|
((_remind_num=1))
|
||||||
|
|
||||||
|
remind() {
|
||||||
|
# (( arithmetic expression ))
|
||||||
|
if (( _remind_num > 1 )); then
|
||||||
|
header "Reminders"
|
||||||
|
for line in "${_reminders[@]}"; do
|
||||||
|
echo "$line"
|
||||||
|
if ! headless && [ "${#line}" = 0 ]; then
|
||||||
|
if read -r -p "Press enter/return to acknowledge."; then
|
||||||
|
printf $'\033[A\33[2K\r'
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
reminder() {
|
||||||
|
printf -v label "${BLUE}[ %d ]${ESC}" "$_remind_num"
|
||||||
|
_reminders+=("$label")
|
||||||
|
if [[ "$*" = "" ]]; then
|
||||||
|
while read -r line; do
|
||||||
|
_reminders+=("$line")
|
||||||
|
done
|
||||||
|
else
|
||||||
|
# this expands each arg to an array entry (and each entry will
|
||||||
|
# ultimately be a separate line in the output)
|
||||||
|
_reminders+=("$@")
|
||||||
|
fi
|
||||||
|
_reminders+=("")
|
||||||
|
((_remind_num++))
|
||||||
|
}
|
||||||
|
|
||||||
__sudo() {
|
__sudo() {
|
||||||
local expl="$1"
|
local expl="$1"
|
||||||
local cmd="$2"
|
local cmd="$2"
|
||||||
|
@ -219,18 +314,18 @@ _sudo() {
|
||||||
local expl="$1"
|
local expl="$1"
|
||||||
shift
|
shift
|
||||||
if ! headless; then
|
if ! headless; then
|
||||||
__sudo "$expl" "$*"
|
__sudo "$expl" "$*" >&2
|
||||||
fi
|
fi
|
||||||
sudo "$@"
|
sudo "$@"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
readonly SCRATCH=$(mktemp -d -t tmp.XXXXXXXXXX)
|
readonly SCRATCH=$(mktemp -d "${TMPDIR:-/tmp/}tmp.XXXXXXXXXX")
|
||||||
function finish_cleanup {
|
finish_cleanup() {
|
||||||
rm -rf "$SCRATCH"
|
rm -rf "$SCRATCH"
|
||||||
}
|
}
|
||||||
|
|
||||||
function finish_fail {
|
finish_fail() {
|
||||||
finish_cleanup
|
finish_cleanup
|
||||||
|
|
||||||
failure <<EOF
|
failure <<EOF
|
||||||
|
@ -242,45 +337,46 @@ EOF
|
||||||
}
|
}
|
||||||
trap finish_fail EXIT
|
trap finish_fail EXIT
|
||||||
|
|
||||||
channel_update_failed=0
|
finish_success() {
|
||||||
function finish_success {
|
|
||||||
finish_cleanup
|
|
||||||
|
|
||||||
ok "Alright! We're done!"
|
ok "Alright! We're done!"
|
||||||
if [ "x$channel_update_failed" = x1 ]; then
|
|
||||||
echo ""
|
|
||||||
echo "But fetching the nixpkgs channel failed. (Are you offline?)"
|
|
||||||
echo "To try again later, run \"sudo -i nix-channel --update nixpkgs\"."
|
|
||||||
fi
|
|
||||||
|
|
||||||
cat <<EOF
|
cat <<EOF
|
||||||
|
|
||||||
Before Nix will work in your existing shells, you'll need to close
|
|
||||||
them and open them again. Other than that, you should be ready to go.
|
|
||||||
|
|
||||||
Try it! Open a new terminal, and type:
|
Try it! Open a new terminal, and type:
|
||||||
$(poly_extra_try_me_commands)
|
$(poly_extra_try_me_commands)
|
||||||
$ nix-shell -p nix-info --run "nix-info -m"
|
$ nix-shell -p nix-info --run "nix-info -m"
|
||||||
$(poly_extra_setup_instructions)
|
|
||||||
Thank you for using this installer. If you have any feedback, don't
|
|
||||||
hesitate:
|
|
||||||
|
|
||||||
$(contactme)
|
Thank you for using this installer. If you have any feedback or need
|
||||||
|
help, don't hesitate:
|
||||||
|
|
||||||
|
$(contact_us)
|
||||||
EOF
|
EOF
|
||||||
|
remind
|
||||||
|
finish_cleanup
|
||||||
}
|
}
|
||||||
|
|
||||||
|
finish_uninstall_success() {
|
||||||
|
ok "Alright! Nix should be removed!"
|
||||||
|
|
||||||
|
cat <<EOF
|
||||||
|
If you spot anything this uninstaller missed or have feedback,
|
||||||
|
don't hesitate:
|
||||||
|
|
||||||
|
$(contact_us)
|
||||||
|
EOF
|
||||||
|
remind
|
||||||
|
finish_cleanup
|
||||||
|
}
|
||||||
|
|
||||||
|
remove_nix_artifacts() {
|
||||||
|
failure "Not implemented yet"
|
||||||
|
}
|
||||||
|
|
||||||
|
cure_artifacts() {
|
||||||
|
poly_cure_artifacts
|
||||||
|
# remove_nix_artifacts (LATER)
|
||||||
|
}
|
||||||
|
|
||||||
validate_starting_assumptions() {
|
validate_starting_assumptions() {
|
||||||
poly_validate_assumptions
|
|
||||||
|
|
||||||
if [ $EUID -eq 0 ]; then
|
|
||||||
failure <<EOF
|
|
||||||
Please do not run this script with root privileges. We will call sudo
|
|
||||||
when we need to.
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
|
|
||||||
if type nix-env 2> /dev/null >&2; then
|
if type nix-env 2> /dev/null >&2; then
|
||||||
warning <<EOF
|
warning <<EOF
|
||||||
Nix already appears to be installed. This installer may run into issues.
|
Nix already appears to be installed. This installer may run into issues.
|
||||||
|
@ -442,18 +538,46 @@ create_build_users() {
|
||||||
|
|
||||||
create_directories() {
|
create_directories() {
|
||||||
# FIXME: remove all of this because it duplicates LocalStore::LocalStore().
|
# FIXME: remove all of this because it duplicates LocalStore::LocalStore().
|
||||||
|
task "Setting up the basic directory structure"
|
||||||
|
if [ -d "$NIX_ROOT" ]; then
|
||||||
|
# if /nix already exists, take ownership
|
||||||
|
#
|
||||||
|
# Caution: notes below are macOS-y
|
||||||
|
# This is a bit of a goldilocks zone for taking ownership
|
||||||
|
# if there are already files on the volume; the volume is
|
||||||
|
# now mounted, but we haven't added a bunch of new files
|
||||||
|
|
||||||
|
# this is probably a bit slow; I've been seeing 3.3-4s even
|
||||||
|
# when promptly installed over a fresh single-user install.
|
||||||
|
# In case anyone's aware of a shortcut.
|
||||||
|
# `|| true`: .Trashes errors w/o full disk perm
|
||||||
|
|
||||||
|
# rumor per #4488 that macOS 11.2 may not have
|
||||||
|
# sbin on path, and that's where chown is, but
|
||||||
|
# since this bit is cross-platform:
|
||||||
|
# - first try with `command -vp` to try and find
|
||||||
|
# chown in the usual places
|
||||||
|
# - fall back on `command -v` which would find
|
||||||
|
# any chown on path
|
||||||
|
# if we don't find one, the command is already
|
||||||
|
# hiding behind || true, and the general state
|
||||||
|
# should be one the user can repair once they
|
||||||
|
# figure out where chown is...
|
||||||
|
local get_chr_own="$(command -vp chown)"
|
||||||
|
if [[ -z "$get_chr_own" ]]; then
|
||||||
|
get_chr_own="$(command -v chown)"
|
||||||
|
fi
|
||||||
|
_sudo "to take root ownership of existing Nix store files" \
|
||||||
|
"$get_chr_own" -R "root:$NIX_BUILD_GROUP_NAME" "$NIX_ROOT" || true
|
||||||
|
fi
|
||||||
_sudo "to make the basic directory structure of Nix (part 1)" \
|
_sudo "to make the basic directory structure of Nix (part 1)" \
|
||||||
mkdir -pv -m 0755 /nix /nix/var /nix/var/log /nix/var/log/nix /nix/var/log/nix/drvs /nix/var/nix{,/db,/gcroots,/profiles,/temproots,/userpool} /nix/var/nix/{gcroots,profiles}/per-user
|
install -dv -m 0755 /nix /nix/var /nix/var/log /nix/var/log/nix /nix/var/log/nix/drvs /nix/var/nix{,/db,/gcroots,/profiles,/temproots,/userpool} /nix/var/nix/{gcroots,profiles}/per-user
|
||||||
|
|
||||||
_sudo "to make the basic directory structure of Nix (part 2)" \
|
_sudo "to make the basic directory structure of Nix (part 2)" \
|
||||||
mkdir -pv -m 1775 /nix/store
|
install -dv -g "$NIX_BUILD_GROUP_NAME" -m 1775 /nix/store
|
||||||
|
|
||||||
_sudo "to make the basic directory structure of Nix (part 3)" \
|
|
||||||
chgrp "$NIX_BUILD_GROUP_NAME" /nix/store
|
|
||||||
|
|
||||||
_sudo "to place the default nix daemon configuration (part 1)" \
|
_sudo "to place the default nix daemon configuration (part 1)" \
|
||||||
mkdir -pv -m 0555 /etc/nix
|
install -dv -m 0555 /etc/nix
|
||||||
}
|
}
|
||||||
|
|
||||||
place_channel_configuration() {
|
place_channel_configuration() {
|
||||||
|
@ -473,7 +597,7 @@ This installation tool will set up your computer with the Nix package
|
||||||
manager. This will happen in a few stages:
|
manager. This will happen in a few stages:
|
||||||
|
|
||||||
1. Make sure your computer doesn't already have Nix. If it does, I
|
1. Make sure your computer doesn't already have Nix. If it does, I
|
||||||
will show you instructions on how to clean up your old one.
|
will show you instructions on how to clean up your old install.
|
||||||
|
|
||||||
2. Show you what we are going to install and where. Then we will ask
|
2. Show you what we are going to install and where. Then we will ask
|
||||||
if you are ready to continue.
|
if you are ready to continue.
|
||||||
|
@ -572,6 +696,7 @@ EOF
|
||||||
}
|
}
|
||||||
|
|
||||||
install_from_extracted_nix() {
|
install_from_extracted_nix() {
|
||||||
|
task "Installing Nix"
|
||||||
(
|
(
|
||||||
cd "$EXTRACTED_NIX_PATH"
|
cd "$EXTRACTED_NIX_PATH"
|
||||||
|
|
||||||
|
@ -587,9 +712,8 @@ $NIX_INSTALLED_NIX.
|
||||||
EOF
|
EOF
|
||||||
fi
|
fi
|
||||||
|
|
||||||
cat ./.reginfo \
|
_sudo "to load data for the first time in to the Nix Database" \
|
||||||
| _sudo "to load data for the first time in to the Nix Database" \
|
"$NIX_INSTALLED_NIX/bin/nix-store" --load-db < ./.reginfo
|
||||||
"$NIX_INSTALLED_NIX/bin/nix-store" --load-db
|
|
||||||
|
|
||||||
echo " Just finished getting the nix database ready."
|
echo " Just finished getting the nix database ready."
|
||||||
)
|
)
|
||||||
|
@ -608,6 +732,7 @@ EOF
|
||||||
}
|
}
|
||||||
|
|
||||||
configure_shell_profile() {
|
configure_shell_profile() {
|
||||||
|
task "Setting up shell profiles: ${PROFILE_TARGETS[*]}"
|
||||||
for profile_target in "${PROFILE_TARGETS[@]}"; do
|
for profile_target in "${PROFILE_TARGETS[@]}"; do
|
||||||
if [ -e "$profile_target" ]; then
|
if [ -e "$profile_target" ]; then
|
||||||
_sudo "to back up your current $profile_target to $profile_target$PROFILE_BACKUP_SUFFIX" \
|
_sudo "to back up your current $profile_target to $profile_target$PROFILE_BACKUP_SUFFIX" \
|
||||||
|
@ -627,14 +752,27 @@ configure_shell_profile() {
|
||||||
tee -a "$profile_target"
|
tee -a "$profile_target"
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
|
# TODO: should we suggest '. $PROFILE_NIX_FILE'? It would get them on
|
||||||
|
# their way less disruptively, but a counter-argument is that they won't
|
||||||
|
# immediately notice if something didn't get set up right?
|
||||||
|
reminder "Nix won't work in active shell sessions until you restart them."
|
||||||
|
}
|
||||||
|
|
||||||
|
cert_in_store() {
|
||||||
|
# in a subshell
|
||||||
|
# - change into the cert-file dir
|
||||||
|
# - get the physical pwd
|
||||||
|
# and test if this path is in the Nix store
|
||||||
|
[[ "$(cd -- "$(dirname "$NIX_SSL_CERT_FILE")" && exec pwd -P)" == "$NIX_ROOT/store/"* ]]
|
||||||
}
|
}
|
||||||
|
|
||||||
setup_default_profile() {
|
setup_default_profile() {
|
||||||
_sudo "to installing a bootstrapping Nix in to the default Profile" \
|
task "Setting up the default profile"
|
||||||
|
_sudo "to install a bootstrapping Nix in to the default profile" \
|
||||||
HOME="$ROOT_HOME" "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_NIX"
|
HOME="$ROOT_HOME" "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_NIX"
|
||||||
|
|
||||||
if [ -z "${NIX_SSL_CERT_FILE:-}" ] || ! [ -f "${NIX_SSL_CERT_FILE:-}" ]; then
|
if [ -z "${NIX_SSL_CERT_FILE:-}" ] || ! [ -f "${NIX_SSL_CERT_FILE:-}" ] || cert_in_store; then
|
||||||
_sudo "to installing a bootstrapping SSL certificate just for Nix in to the default Profile" \
|
_sudo "to install a bootstrapping SSL certificate just for Nix in to the default profile" \
|
||||||
HOME="$ROOT_HOME" "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_CACERT"
|
HOME="$ROOT_HOME" "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_CACERT"
|
||||||
export NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt
|
export NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt
|
||||||
fi
|
fi
|
||||||
|
@ -643,9 +781,13 @@ setup_default_profile() {
|
||||||
# Have to explicitly pass NIX_SSL_CERT_FILE as part of the sudo call,
|
# Have to explicitly pass NIX_SSL_CERT_FILE as part of the sudo call,
|
||||||
# otherwise it will be lost in environments where sudo doesn't pass
|
# otherwise it will be lost in environments where sudo doesn't pass
|
||||||
# all the environment variables by default.
|
# all the environment variables by default.
|
||||||
_sudo "to update the default channel in the default profile" \
|
if ! _sudo "to update the default channel in the default profile" \
|
||||||
HOME="$ROOT_HOME" NIX_SSL_CERT_FILE="$NIX_SSL_CERT_FILE" "$NIX_INSTALLED_NIX/bin/nix-channel" --update nixpkgs \
|
HOME="$ROOT_HOME" NIX_SSL_CERT_FILE="$NIX_SSL_CERT_FILE" "$NIX_INSTALLED_NIX/bin/nix-channel" --update nixpkgs; then
|
||||||
|| channel_update_failed=1
|
reminder <<EOF
|
||||||
|
I had trouble fetching the nixpkgs channel (are you offline?)
|
||||||
|
To try again later, run: sudo -i nix-channel --update nixpkgs
|
||||||
|
EOF
|
||||||
|
fi
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -660,6 +802,17 @@ EOF
|
||||||
}
|
}
|
||||||
|
|
||||||
main() {
|
main() {
|
||||||
|
# TODO: I've moved this out of validate_starting_assumptions so we
|
||||||
|
# can fail faster in this case. Sourcing install-darwin... now runs
|
||||||
|
# `touch /` to detect Read-only root, but it could update times on
|
||||||
|
# pre-Catalina macOS if run as root user.
|
||||||
|
if [ $EUID -eq 0 ]; then
|
||||||
|
failure <<EOF
|
||||||
|
Please do not run this script with root privileges. We will call sudo
|
||||||
|
when we need to.
|
||||||
|
EOF
|
||||||
|
fi
|
||||||
|
|
||||||
if [ "$(uname -s)" = "Darwin" ]; then
|
if [ "$(uname -s)" = "Darwin" ]; then
|
||||||
# shellcheck source=./install-darwin-multi-user.sh
|
# shellcheck source=./install-darwin-multi-user.sh
|
||||||
. "$EXTRACTED_NIX_PATH/install-darwin-multi-user.sh"
|
. "$EXTRACTED_NIX_PATH/install-darwin-multi-user.sh"
|
||||||
|
@ -673,17 +826,24 @@ main() {
|
||||||
welcome_to_nix
|
welcome_to_nix
|
||||||
chat_about_sudo
|
chat_about_sudo
|
||||||
|
|
||||||
|
cure_artifacts
|
||||||
|
# TODO: there's a tension between cure and validate. I moved the
|
||||||
|
# sudo/root check out of validate to the head of this func.
|
||||||
|
# Cure is *intended* to subsume the validate-and-abort approach,
|
||||||
|
# so it may eventually obsolete it.
|
||||||
validate_starting_assumptions
|
validate_starting_assumptions
|
||||||
|
|
||||||
setup_report
|
setup_report
|
||||||
|
|
||||||
if ! ui_confirm "Ready to continue?"; then
|
if ! ui_confirm "Ready to continue?"; then
|
||||||
ok "Alright, no changes have been made :)"
|
ok "Alright, no changes have been made :)"
|
||||||
contactme
|
get_help
|
||||||
trap finish_cleanup EXIT
|
trap finish_cleanup EXIT
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
poly_prepare_to_install
|
||||||
|
|
||||||
create_build_group
|
create_build_group
|
||||||
create_build_users
|
create_build_users
|
||||||
create_directories
|
create_directories
|
||||||
|
@ -693,6 +853,7 @@ main() {
|
||||||
configure_shell_profile
|
configure_shell_profile
|
||||||
|
|
||||||
set +eu
|
set +eu
|
||||||
|
# shellcheck disable=SC1091
|
||||||
. /etc/profile
|
. /etc/profile
|
||||||
set -eu
|
set -eu
|
||||||
|
|
||||||
|
@ -704,5 +865,20 @@ main() {
|
||||||
trap finish_success EXIT
|
trap finish_success EXIT
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# set an empty initial arg for bare invocations in case we need to
|
||||||
|
# disambiguate someone directly invoking this later.
|
||||||
|
if [ "${#@}" = 0 ]; then
|
||||||
|
set ""
|
||||||
|
fi
|
||||||
|
|
||||||
main
|
# ACTION for override
|
||||||
|
case "${1-}" in
|
||||||
|
# uninstall)
|
||||||
|
# shift
|
||||||
|
# uninstall "$@";;
|
||||||
|
# install == same as the no-arg condition for now (but, explicit)
|
||||||
|
""|install)
|
||||||
|
main;;
|
||||||
|
*) # holding space for future options (like uninstall + install?)
|
||||||
|
failure "install-multi-user: invalid argument";;
|
||||||
|
esac
|
||||||
|
|
|
@ -26,18 +26,9 @@ fi
|
||||||
|
|
||||||
# macOS support for 10.12.6 or higher
|
# macOS support for 10.12.6 or higher
|
||||||
if [ "$(uname -s)" = "Darwin" ]; then
|
if [ "$(uname -s)" = "Darwin" ]; then
|
||||||
IFS='.' read macos_major macos_minor macos_patch << EOF
|
IFS='.' read -r macos_major macos_minor macos_patch << EOF
|
||||||
$(sw_vers -productVersion)
|
$(sw_vers -productVersion)
|
||||||
EOF
|
EOF
|
||||||
# TODO: this is a temporary speed-bump to keep people from naively installing Nix
|
|
||||||
# on macOS Big Sur (11.0+, 10.16+) until nixpkgs updates are ready for them.
|
|
||||||
# *Ideally* this is gone before next Nix release. If you're intentionally working on
|
|
||||||
# Nix + Big Sur, just comment out this block and be on your way :)
|
|
||||||
if [ "$macos_major" -gt 10 ] || { [ "$macos_major" -eq 10 ] && [ "$macos_minor" -gt 15 ]; }; then
|
|
||||||
echo "$0: nixpkgs isn't quite ready to support macOS $(sw_vers -productVersion) yet"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ "$macos_major" -lt 10 ] || { [ "$macos_major" -eq 10 ] && [ "$macos_minor" -lt 12 ]; } || { [ "$macos_minor" -eq 12 ] && [ "$macos_patch" -lt 6 ]; }; then
|
if [ "$macos_major" -lt 10 ] || { [ "$macos_major" -eq 10 ] && [ "$macos_minor" -lt 12 ]; } || { [ "$macos_minor" -eq 12 ] && [ "$macos_patch" -lt 6 ]; }; then
|
||||||
# patch may not be present; command substitution for simplicity
|
# patch may not be present; command substitution for simplicity
|
||||||
echo "$0: macOS $(sw_vers -productVersion) is not supported, upgrade to 10.12.6 or higher"
|
echo "$0: macOS $(sw_vers -productVersion) is not supported, upgrade to 10.12.6 or higher"
|
||||||
|
@ -46,21 +37,40 @@ EOF
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Determine if we could use the multi-user installer or not
|
# Determine if we could use the multi-user installer or not
|
||||||
if [ "$(uname -s)" = "Darwin" ]; then
|
if [ "$(uname -s)" = "Linux" ]; then
|
||||||
echo "Note: a multi-user installation is possible. See https://nixos.org/nix/manual/#sect-multi-user-installation" >&2
|
|
||||||
elif [ "$(uname -s)" = "Linux" ]; then
|
|
||||||
echo "Note: a multi-user installation is possible. See https://nixos.org/nix/manual/#sect-multi-user-installation" >&2
|
echo "Note: a multi-user installation is possible. See https://nixos.org/nix/manual/#sect-multi-user-installation" >&2
|
||||||
fi
|
fi
|
||||||
|
|
||||||
INSTALL_MODE=no-daemon
|
case "$(uname -s)" in
|
||||||
CREATE_DARWIN_VOLUME=0
|
"Darwin")
|
||||||
|
INSTALL_MODE=daemon;;
|
||||||
|
*)
|
||||||
|
INSTALL_MODE=no-daemon;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
# space-separated string
|
||||||
|
ACTIONS=
|
||||||
|
|
||||||
# handle the command line flags
|
# handle the command line flags
|
||||||
while [ $# -gt 0 ]; do
|
while [ $# -gt 0 ]; do
|
||||||
case $1 in
|
case $1 in
|
||||||
--daemon)
|
--daemon)
|
||||||
INSTALL_MODE=daemon;;
|
INSTALL_MODE=daemon
|
||||||
|
ACTIONS="${ACTIONS}install "
|
||||||
|
;;
|
||||||
--no-daemon)
|
--no-daemon)
|
||||||
INSTALL_MODE=no-daemon;;
|
if [ "$(uname -s)" = "Darwin" ]; then
|
||||||
|
printf '\e[1;31mError: --no-daemon installs are no-longer supported on Darwin/macOS!\e[0m\n' >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
INSTALL_MODE=no-daemon
|
||||||
|
# intentional tail space
|
||||||
|
ACTIONS="${ACTIONS}install "
|
||||||
|
;;
|
||||||
|
# --uninstall)
|
||||||
|
# # intentional tail space
|
||||||
|
# ACTIONS="${ACTIONS}uninstall "
|
||||||
|
# ;;
|
||||||
--no-channel-add)
|
--no-channel-add)
|
||||||
export NIX_INSTALLER_NO_CHANNEL_ADD=1;;
|
export NIX_INSTALLER_NO_CHANNEL_ADD=1;;
|
||||||
--daemon-user-count)
|
--daemon-user-count)
|
||||||
|
@ -69,13 +79,18 @@ while [ $# -gt 0 ]; do
|
||||||
--no-modify-profile)
|
--no-modify-profile)
|
||||||
NIX_INSTALLER_NO_MODIFY_PROFILE=1;;
|
NIX_INSTALLER_NO_MODIFY_PROFILE=1;;
|
||||||
--darwin-use-unencrypted-nix-store-volume)
|
--darwin-use-unencrypted-nix-store-volume)
|
||||||
CREATE_DARWIN_VOLUME=1;;
|
{
|
||||||
|
echo "Warning: the flag --darwin-use-unencrypted-nix-store-volume"
|
||||||
|
echo " is no longer needed and will be removed in the future."
|
||||||
|
echo ""
|
||||||
|
} >&2;;
|
||||||
--nix-extra-conf-file)
|
--nix-extra-conf-file)
|
||||||
export NIX_EXTRA_CONF="$(cat $2)"
|
# shellcheck disable=SC2155
|
||||||
|
export NIX_EXTRA_CONF="$(cat "$2")"
|
||||||
shift;;
|
shift;;
|
||||||
*)
|
*)
|
||||||
(
|
{
|
||||||
echo "Nix Installer [--daemon|--no-daemon] [--daemon-user-count INT] [--no-channel-add] [--no-modify-profile] [--darwin-use-unencrypted-nix-store-volume] [--nix-extra-conf-file FILE]"
|
echo "Nix Installer [--daemon|--no-daemon] [--daemon-user-count INT] [--no-channel-add] [--no-modify-profile] [--nix-extra-conf-file FILE]"
|
||||||
|
|
||||||
echo "Choose installation method."
|
echo "Choose installation method."
|
||||||
echo ""
|
echo ""
|
||||||
|
@ -101,45 +116,16 @@ while [ $# -gt 0 ]; do
|
||||||
if [ -n "${INVOKED_FROM_INSTALL_IN:-}" ]; then
|
if [ -n "${INVOKED_FROM_INSTALL_IN:-}" ]; then
|
||||||
echo " --tarball-url-prefix URL: Base URL to download the Nix tarball from."
|
echo " --tarball-url-prefix URL: Base URL to download the Nix tarball from."
|
||||||
fi
|
fi
|
||||||
) >&2
|
} >&2
|
||||||
|
|
||||||
# darwin and Catalina+
|
|
||||||
if [ "$(uname -s)" = "Darwin" ] && { [ "$macos_major" -gt 10 ] || { [ "$macos_major" -eq 10 ] && [ "$macos_minor" -gt 14 ]; }; }; then
|
|
||||||
(
|
|
||||||
echo " --darwin-use-unencrypted-nix-store-volume: Create an APFS volume for the Nix"
|
|
||||||
echo " store and mount it at /nix. This is the recommended way to create"
|
|
||||||
echo " /nix with a read-only / on macOS >=10.15."
|
|
||||||
echo " See: https://nixos.org/nix/manual/#sect-macos-installation"
|
|
||||||
echo ""
|
|
||||||
) >&2
|
|
||||||
fi
|
|
||||||
exit;;
|
exit;;
|
||||||
esac
|
esac
|
||||||
shift
|
shift
|
||||||
done
|
done
|
||||||
|
|
||||||
if [ "$(uname -s)" = "Darwin" ]; then
|
|
||||||
if [ "$CREATE_DARWIN_VOLUME" = 1 ]; then
|
|
||||||
printf '\e[1;31mCreating volume and mountpoint /nix.\e[0m\n'
|
|
||||||
"$self/create-darwin-volume.sh"
|
|
||||||
fi
|
|
||||||
|
|
||||||
writable="$(diskutil info -plist / | xmllint --xpath "name(/plist/dict/key[text()='Writable']/following-sibling::*[1])" -)"
|
|
||||||
if ! [ -e $dest ] && [ "$writable" = "false" ]; then
|
|
||||||
(
|
|
||||||
echo ""
|
|
||||||
echo "Installing on macOS >=10.15 requires relocating the store to an apfs volume."
|
|
||||||
echo "Use sh <(curl -L https://nixos.org/nix/install) --darwin-use-unencrypted-nix-store-volume or run the preparation steps manually."
|
|
||||||
echo "See https://nixos.org/nix/manual/#sect-macos-installation"
|
|
||||||
echo ""
|
|
||||||
) >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ "$INSTALL_MODE" = "daemon" ]; then
|
if [ "$INSTALL_MODE" = "daemon" ]; then
|
||||||
printf '\e[1;31mSwitching to the Multi-user Installer\e[0m\n'
|
printf '\e[1;31mSwitching to the Multi-user Installer\e[0m\n'
|
||||||
exec "$self/install-multi-user"
|
exec "$self/install-multi-user" $ACTIONS # let ACTIONS split
|
||||||
exit 0
|
exit 0
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
@ -194,6 +180,7 @@ if ! "$nix/bin/nix-store" --load-db < "$self/.reginfo"; then
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# shellcheck source=./nix-profile.sh.in
|
||||||
. "$nix/etc/profile.d/nix.sh"
|
. "$nix/etc/profile.d/nix.sh"
|
||||||
|
|
||||||
if ! "$nix/bin/nix-env" -i "$nix"; then
|
if ! "$nix/bin/nix-env" -i "$nix"; then
|
||||||
|
|
|
@ -41,10 +41,8 @@ handle_network_proxy() {
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
poly_validate_assumptions() {
|
poly_cure_artifacts() {
|
||||||
if [ "$(uname -s)" != "Linux" ]; then
|
:
|
||||||
failure "This script is for use with Linux!"
|
|
||||||
fi
|
|
||||||
}
|
}
|
||||||
|
|
||||||
poly_service_installed_check() {
|
poly_service_installed_check() {
|
||||||
|
@ -72,7 +70,7 @@ poly_service_setup_note() {
|
||||||
EOF
|
EOF
|
||||||
}
|
}
|
||||||
|
|
||||||
poly_extra_try_me_commands(){
|
poly_extra_try_me_commands() {
|
||||||
if [ -e /run/systemd/system ]; then
|
if [ -e /run/systemd/system ]; then
|
||||||
:
|
:
|
||||||
else
|
else
|
||||||
|
@ -81,19 +79,10 @@ poly_extra_try_me_commands(){
|
||||||
EOF
|
EOF
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
poly_extra_setup_instructions(){
|
|
||||||
if [ -e /run/systemd/system ]; then
|
|
||||||
:
|
|
||||||
else
|
|
||||||
cat <<EOF
|
|
||||||
Additionally, you may want to add nix-daemon to your init-system.
|
|
||||||
|
|
||||||
EOF
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
poly_configure_nix_daemon_service() {
|
poly_configure_nix_daemon_service() {
|
||||||
if [ -e /run/systemd/system ]; then
|
if [ -e /run/systemd/system ]; then
|
||||||
|
task "Setting up the nix-daemon systemd service"
|
||||||
_sudo "to set up the nix-daemon service" \
|
_sudo "to set up the nix-daemon service" \
|
||||||
systemctl link "/nix/var/nix/profiles/default$SERVICE_SRC"
|
systemctl link "/nix/var/nix/profiles/default$SERVICE_SRC"
|
||||||
|
|
||||||
|
@ -110,6 +99,8 @@ poly_configure_nix_daemon_service() {
|
||||||
|
|
||||||
_sudo "to start the nix-daemon.service" \
|
_sudo "to start the nix-daemon.service" \
|
||||||
systemctl restart nix-daemon.service
|
systemctl restart nix-daemon.service
|
||||||
|
else
|
||||||
|
reminder "I don't support your init system yet; you may want to add nix-daemon manually."
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -207,3 +198,7 @@ poly_create_build_user() {
|
||||||
--password "!" \
|
--password "!" \
|
||||||
"$username"
|
"$username"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
poly_prepare_to_install() {
|
||||||
|
:
|
||||||
|
}
|
||||||
|
|
|
@ -46,15 +46,9 @@ case "$(uname -s).$(uname -m)" in
|
||||||
system=x86_64-darwin
|
system=x86_64-darwin
|
||||||
;;
|
;;
|
||||||
Darwin.arm64|Darwin.aarch64)
|
Darwin.arm64|Darwin.aarch64)
|
||||||
# check for Rosetta 2 support
|
hash=@binaryTarball_aarch64-darwin@
|
||||||
if ! [ -f /Library/Apple/System/Library/LaunchDaemons/com.apple.oahd.plist ]; then
|
path=@tarballPath_aarch64-darwin@
|
||||||
oops "Rosetta 2 is not installed on this ARM64 macOS machine. Run softwareupdate --install-rosetta then restart installation"
|
system=aarch64-darwin
|
||||||
fi
|
|
||||||
|
|
||||||
hash=@binaryTarball_x86_64-darwin@
|
|
||||||
path=@tarballPath_x86_64-darwin@
|
|
||||||
# eventually maybe: aarch64-darwin
|
|
||||||
system=x86_64-darwin
|
|
||||||
;;
|
;;
|
||||||
*) oops "sorry, there is no binary distribution of Nix for your platform";;
|
*) oops "sorry, there is no binary distribution of Nix for your platform";;
|
||||||
esac
|
esac
|
||||||
|
|
|
@ -277,6 +277,15 @@ connected:
|
||||||
|
|
||||||
auto drv = store->readDerivation(*drvPath);
|
auto drv = store->readDerivation(*drvPath);
|
||||||
auto outputHashes = staticOutputHashes(*store, drv);
|
auto outputHashes = staticOutputHashes(*store, drv);
|
||||||
|
|
||||||
|
// Hijack the inputs paths of the derivation to include all the paths
|
||||||
|
// that come from the `inputDrvs` set.
|
||||||
|
// We don’t do that for the derivations whose `inputDrvs` is empty
|
||||||
|
// because
|
||||||
|
// 1. It’s not needed
|
||||||
|
// 2. Changing the `inputSrcs` set changes the associated output ids,
|
||||||
|
// which break CA derivations
|
||||||
|
if (!drv.inputDrvs.empty())
|
||||||
drv.inputSrcs = store->parseStorePathSet(inputs);
|
drv.inputSrcs = store->parseStorePathSet(inputs);
|
||||||
|
|
||||||
auto result = sshStore->buildDerivation(*drvPath, drv);
|
auto result = sshStore->buildDerivation(*drvPath, drv);
|
||||||
|
|
|
@ -54,7 +54,7 @@ void StoreCommand::run()
|
||||||
run(getStore());
|
run(getStore());
|
||||||
}
|
}
|
||||||
|
|
||||||
RealisedPathsCommand::RealisedPathsCommand(bool recursive)
|
BuiltPathsCommand::BuiltPathsCommand(bool recursive)
|
||||||
: recursive(recursive)
|
: recursive(recursive)
|
||||||
{
|
{
|
||||||
if (recursive)
|
if (recursive)
|
||||||
|
@ -81,39 +81,45 @@ RealisedPathsCommand::RealisedPathsCommand(bool recursive)
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
void RealisedPathsCommand::run(ref<Store> store)
|
void BuiltPathsCommand::run(ref<Store> store)
|
||||||
{
|
{
|
||||||
std::vector<RealisedPath> paths;
|
BuiltPaths paths;
|
||||||
if (all) {
|
if (all) {
|
||||||
if (installables.size())
|
if (installables.size())
|
||||||
throw UsageError("'--all' does not expect arguments");
|
throw UsageError("'--all' does not expect arguments");
|
||||||
// XXX: Only uses opaque paths, ignores all the realisations
|
// XXX: Only uses opaque paths, ignores all the realisations
|
||||||
for (auto & p : store->queryAllValidPaths())
|
for (auto & p : store->queryAllValidPaths())
|
||||||
paths.push_back(p);
|
paths.push_back(BuiltPath::Opaque{p});
|
||||||
} else {
|
} else {
|
||||||
auto pathSet = toRealisedPaths(store, realiseMode, operateOn, installables);
|
paths = toBuiltPaths(store, realiseMode, operateOn, installables);
|
||||||
if (recursive) {
|
if (recursive) {
|
||||||
auto roots = std::move(pathSet);
|
// XXX: This only computes the store path closure, ignoring
|
||||||
pathSet = {};
|
// intermediate realisations
|
||||||
RealisedPath::closure(*store, roots, pathSet);
|
StorePathSet pathsRoots, pathsClosure;
|
||||||
|
for (auto & root: paths) {
|
||||||
|
auto rootFromThis = root.outPaths();
|
||||||
|
pathsRoots.insert(rootFromThis.begin(), rootFromThis.end());
|
||||||
|
}
|
||||||
|
store->computeFSClosure(pathsRoots, pathsClosure);
|
||||||
|
for (auto & path : pathsClosure)
|
||||||
|
paths.push_back(BuiltPath::Opaque{path});
|
||||||
}
|
}
|
||||||
for (auto & path : pathSet)
|
|
||||||
paths.push_back(path);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
run(store, std::move(paths));
|
run(store, std::move(paths));
|
||||||
}
|
}
|
||||||
|
|
||||||
StorePathsCommand::StorePathsCommand(bool recursive)
|
StorePathsCommand::StorePathsCommand(bool recursive)
|
||||||
: RealisedPathsCommand(recursive)
|
: BuiltPathsCommand(recursive)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
void StorePathsCommand::run(ref<Store> store, std::vector<RealisedPath> paths)
|
void StorePathsCommand::run(ref<Store> store, BuiltPaths paths)
|
||||||
{
|
{
|
||||||
StorePaths storePaths;
|
StorePaths storePaths;
|
||||||
for (auto & p : paths)
|
for (auto& builtPath : paths)
|
||||||
storePaths.push_back(p.path());
|
for (auto& p : builtPath.outPaths())
|
||||||
|
storePaths.push_back(p);
|
||||||
|
|
||||||
run(store, std::move(storePaths));
|
run(store, std::move(storePaths));
|
||||||
}
|
}
|
||||||
|
@ -162,7 +168,7 @@ void MixProfile::updateProfile(const StorePath & storePath)
|
||||||
profile2, storePath));
|
profile2, storePath));
|
||||||
}
|
}
|
||||||
|
|
||||||
void MixProfile::updateProfile(const Buildables & buildables)
|
void MixProfile::updateProfile(const BuiltPaths & buildables)
|
||||||
{
|
{
|
||||||
if (!profile) return;
|
if (!profile) return;
|
||||||
|
|
||||||
|
@ -170,18 +176,15 @@ void MixProfile::updateProfile(const Buildables & buildables)
|
||||||
|
|
||||||
for (auto & buildable : buildables) {
|
for (auto & buildable : buildables) {
|
||||||
std::visit(overloaded {
|
std::visit(overloaded {
|
||||||
[&](BuildableOpaque bo) {
|
[&](BuiltPath::Opaque bo) {
|
||||||
result.push_back(bo.path);
|
result.push_back(bo.path);
|
||||||
},
|
},
|
||||||
[&](BuildableFromDrv bfd) {
|
[&](BuiltPath::Built bfd) {
|
||||||
for (auto & output : bfd.outputs) {
|
for (auto & output : bfd.outputs) {
|
||||||
/* Output path should be known because we just tried to
|
result.push_back(output.second);
|
||||||
build it. */
|
|
||||||
assert(output.second);
|
|
||||||
result.push_back(*output.second);
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
}, buildable);
|
}, buildable.raw());
|
||||||
}
|
}
|
||||||
|
|
||||||
if (result.size() != 1)
|
if (result.size() != 1)
|
||||||
|
|
|
@ -143,7 +143,7 @@ private:
|
||||||
};
|
};
|
||||||
|
|
||||||
/* A command that operates on zero or more store paths. */
|
/* A command that operates on zero or more store paths. */
|
||||||
struct RealisedPathsCommand : public InstallablesCommand
|
struct BuiltPathsCommand : public InstallablesCommand
|
||||||
{
|
{
|
||||||
private:
|
private:
|
||||||
|
|
||||||
|
@ -156,26 +156,26 @@ protected:
|
||||||
|
|
||||||
public:
|
public:
|
||||||
|
|
||||||
RealisedPathsCommand(bool recursive = false);
|
BuiltPathsCommand(bool recursive = false);
|
||||||
|
|
||||||
using StoreCommand::run;
|
using StoreCommand::run;
|
||||||
|
|
||||||
virtual void run(ref<Store> store, std::vector<RealisedPath> paths) = 0;
|
virtual void run(ref<Store> store, BuiltPaths paths) = 0;
|
||||||
|
|
||||||
void run(ref<Store> store) override;
|
void run(ref<Store> store) override;
|
||||||
|
|
||||||
bool useDefaultInstallables() override { return !all; }
|
bool useDefaultInstallables() override { return !all; }
|
||||||
};
|
};
|
||||||
|
|
||||||
struct StorePathsCommand : public RealisedPathsCommand
|
struct StorePathsCommand : public BuiltPathsCommand
|
||||||
{
|
{
|
||||||
StorePathsCommand(bool recursive = false);
|
StorePathsCommand(bool recursive = false);
|
||||||
|
|
||||||
using RealisedPathsCommand::run;
|
using BuiltPathsCommand::run;
|
||||||
|
|
||||||
virtual void run(ref<Store> store, std::vector<StorePath> storePaths) = 0;
|
virtual void run(ref<Store> store, std::vector<StorePath> storePaths) = 0;
|
||||||
|
|
||||||
void run(ref<Store> store, std::vector<RealisedPath> paths) override;
|
void run(ref<Store> store, BuiltPaths paths) override;
|
||||||
};
|
};
|
||||||
|
|
||||||
/* A command that operates on exactly one store path. */
|
/* A command that operates on exactly one store path. */
|
||||||
|
@ -216,7 +216,7 @@ static RegisterCommand registerCommand2(std::vector<std::string> && name)
|
||||||
return RegisterCommand(std::move(name), [](){ return make_ref<T>(); });
|
return RegisterCommand(std::move(name), [](){ return make_ref<T>(); });
|
||||||
}
|
}
|
||||||
|
|
||||||
Buildables build(ref<Store> store, Realise mode,
|
BuiltPaths build(ref<Store> store, Realise mode,
|
||||||
std::vector<std::shared_ptr<Installable>> installables, BuildMode bMode = bmNormal);
|
std::vector<std::shared_ptr<Installable>> installables, BuildMode bMode = bmNormal);
|
||||||
|
|
||||||
std::set<StorePath> toStorePaths(ref<Store> store,
|
std::set<StorePath> toStorePaths(ref<Store> store,
|
||||||
|
@ -231,7 +231,7 @@ std::set<StorePath> toDerivations(ref<Store> store,
|
||||||
std::vector<std::shared_ptr<Installable>> installables,
|
std::vector<std::shared_ptr<Installable>> installables,
|
||||||
bool useDeriver = false);
|
bool useDeriver = false);
|
||||||
|
|
||||||
std::set<RealisedPath> toRealisedPaths(
|
BuiltPaths toBuiltPaths(
|
||||||
ref<Store> store,
|
ref<Store> store,
|
||||||
Realise mode,
|
Realise mode,
|
||||||
OperateOn operateOn,
|
OperateOn operateOn,
|
||||||
|
@ -252,7 +252,7 @@ struct MixProfile : virtual StoreCommand
|
||||||
|
|
||||||
/* If 'profile' is set, make it point at the store path produced
|
/* If 'profile' is set, make it point at the store path produced
|
||||||
by 'buildables'. */
|
by 'buildables'. */
|
||||||
void updateProfile(const Buildables & buildables);
|
void updateProfile(const BuiltPaths & buildables);
|
||||||
};
|
};
|
||||||
|
|
||||||
struct MixDefaultProfile : MixProfile
|
struct MixDefaultProfile : MixProfile
|
||||||
|
|
|
@ -20,31 +20,6 @@
|
||||||
|
|
||||||
namespace nix {
|
namespace nix {
|
||||||
|
|
||||||
nlohmann::json BuildableOpaque::toJSON(ref<Store> store) const {
|
|
||||||
nlohmann::json res;
|
|
||||||
res["path"] = store->printStorePath(path);
|
|
||||||
return res;
|
|
||||||
}
|
|
||||||
|
|
||||||
nlohmann::json BuildableFromDrv::toJSON(ref<Store> store) const {
|
|
||||||
nlohmann::json res;
|
|
||||||
res["drvPath"] = store->printStorePath(drvPath);
|
|
||||||
for (const auto& [output, path] : outputs) {
|
|
||||||
res["outputs"][output] = path ? store->printStorePath(*path) : "";
|
|
||||||
}
|
|
||||||
return res;
|
|
||||||
}
|
|
||||||
|
|
||||||
nlohmann::json buildablesToJSON(const Buildables & buildables, ref<Store> store) {
|
|
||||||
auto res = nlohmann::json::array();
|
|
||||||
for (const Buildable & buildable : buildables) {
|
|
||||||
std::visit([&res, store](const auto & buildable) {
|
|
||||||
res.push_back(buildable.toJSON(store));
|
|
||||||
}, buildable);
|
|
||||||
}
|
|
||||||
return res;
|
|
||||||
}
|
|
||||||
|
|
||||||
void completeFlakeInputPath(
|
void completeFlakeInputPath(
|
||||||
ref<EvalState> evalState,
|
ref<EvalState> evalState,
|
||||||
const FlakeRef & flakeRef,
|
const FlakeRef & flakeRef,
|
||||||
|
@ -111,10 +86,11 @@ MixFlakeOptions::MixFlakeOptions()
|
||||||
|
|
||||||
addFlag({
|
addFlag({
|
||||||
.longName = "override-input",
|
.longName = "override-input",
|
||||||
.description = "Override a specific flake input (e.g. `dwarffs/nixpkgs`).",
|
.description = "Override a specific flake input (e.g. `dwarffs/nixpkgs`). This implies `--no-write-lock-file`.",
|
||||||
.category = category,
|
.category = category,
|
||||||
.labels = {"input-path", "flake-url"},
|
.labels = {"input-path", "flake-url"},
|
||||||
.handler = {[&](std::string inputPath, std::string flakeRef) {
|
.handler = {[&](std::string inputPath, std::string flakeRef) {
|
||||||
|
lockFlags.writeLockFile = false;
|
||||||
lockFlags.inputOverrides.insert_or_assign(
|
lockFlags.inputOverrides.insert_or_assign(
|
||||||
flake::parseInputPath(inputPath),
|
flake::parseInputPath(inputPath),
|
||||||
parseFlakeRef(flakeRef, absPath(".")));
|
parseFlakeRef(flakeRef, absPath(".")));
|
||||||
|
@ -309,9 +285,9 @@ void completeFlakeRef(ref<Store> store, std::string_view prefix)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Buildable Installable::toBuildable()
|
DerivedPath Installable::toDerivedPath()
|
||||||
{
|
{
|
||||||
auto buildables = toBuildables();
|
auto buildables = toDerivedPaths();
|
||||||
if (buildables.size() != 1)
|
if (buildables.size() != 1)
|
||||||
throw Error("installable '%s' evaluates to %d derivations, where only one is expected", what(), buildables.size());
|
throw Error("installable '%s' evaluates to %d derivations, where only one is expected", what(), buildables.size());
|
||||||
return std::move(buildables[0]);
|
return std::move(buildables[0]);
|
||||||
|
@ -345,22 +321,19 @@ struct InstallableStorePath : Installable
|
||||||
|
|
||||||
std::string what() override { return store->printStorePath(storePath); }
|
std::string what() override { return store->printStorePath(storePath); }
|
||||||
|
|
||||||
Buildables toBuildables() override
|
DerivedPaths toDerivedPaths() override
|
||||||
{
|
{
|
||||||
if (storePath.isDerivation()) {
|
if (storePath.isDerivation()) {
|
||||||
std::map<std::string, std::optional<StorePath>> outputs;
|
|
||||||
auto drv = store->readDerivation(storePath);
|
auto drv = store->readDerivation(storePath);
|
||||||
for (auto & [name, output] : drv.outputsAndOptPaths(*store))
|
|
||||||
outputs.emplace(name, output.second);
|
|
||||||
return {
|
return {
|
||||||
BuildableFromDrv {
|
DerivedPath::Built {
|
||||||
.drvPath = storePath,
|
.drvPath = storePath,
|
||||||
.outputs = std::move(outputs)
|
.outputs = drv.outputNames(),
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
} else {
|
} else {
|
||||||
return {
|
return {
|
||||||
BuildableOpaque {
|
DerivedPath::Opaque {
|
||||||
.path = storePath,
|
.path = storePath,
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
@ -373,22 +346,22 @@ struct InstallableStorePath : Installable
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
Buildables InstallableValue::toBuildables()
|
DerivedPaths InstallableValue::toDerivedPaths()
|
||||||
{
|
{
|
||||||
Buildables res;
|
DerivedPaths res;
|
||||||
|
|
||||||
std::map<StorePath, std::map<std::string, std::optional<StorePath>>> drvsToOutputs;
|
std::map<StorePath, std::set<std::string>> drvsToOutputs;
|
||||||
|
|
||||||
// Group by derivation, helps with .all in particular
|
// Group by derivation, helps with .all in particular
|
||||||
for (auto & drv : toDerivations()) {
|
for (auto & drv : toDerivations()) {
|
||||||
auto outputName = drv.outputName;
|
auto outputName = drv.outputName;
|
||||||
if (outputName == "")
|
if (outputName == "")
|
||||||
throw Error("derivation '%s' lacks an 'outputName' attribute", state->store->printStorePath(drv.drvPath));
|
throw Error("derivation '%s' lacks an 'outputName' attribute", state->store->printStorePath(drv.drvPath));
|
||||||
drvsToOutputs[drv.drvPath].insert_or_assign(outputName, drv.outPath);
|
drvsToOutputs[drv.drvPath].insert(outputName);
|
||||||
}
|
}
|
||||||
|
|
||||||
for (auto & i : drvsToOutputs)
|
for (auto & i : drvsToOutputs)
|
||||||
res.push_back(BuildableFromDrv { i.first, i.second });
|
res.push_back(DerivedPath::Built { i.first, i.second });
|
||||||
|
|
||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
|
@ -527,7 +500,11 @@ std::tuple<std::string, FlakeRef, InstallableValue::DerivationInfo> InstallableF
|
||||||
auto root = cache->getRoot();
|
auto root = cache->getRoot();
|
||||||
|
|
||||||
for (auto & attrPath : getActualAttrPaths()) {
|
for (auto & attrPath : getActualAttrPaths()) {
|
||||||
auto attr = root->findAlongAttrPath(parseAttrPath(*state, attrPath));
|
auto attr = root->findAlongAttrPath(
|
||||||
|
parseAttrPath(*state, attrPath),
|
||||||
|
true
|
||||||
|
);
|
||||||
|
|
||||||
if (!attr) continue;
|
if (!attr) continue;
|
||||||
|
|
||||||
if (!attr->isDerivation())
|
if (!attr->isDerivation())
|
||||||
|
@ -695,31 +672,67 @@ std::shared_ptr<Installable> SourceExprCommand::parseInstallable(
|
||||||
return installables.front();
|
return installables.front();
|
||||||
}
|
}
|
||||||
|
|
||||||
Buildables build(ref<Store> store, Realise mode,
|
BuiltPaths getBuiltPaths(ref<Store> store, DerivedPaths hopefullyBuiltPaths)
|
||||||
|
{
|
||||||
|
BuiltPaths res;
|
||||||
|
for (auto& b : hopefullyBuiltPaths)
|
||||||
|
std::visit(
|
||||||
|
overloaded{
|
||||||
|
[&](DerivedPath::Opaque bo) {
|
||||||
|
res.push_back(BuiltPath::Opaque{bo.path});
|
||||||
|
},
|
||||||
|
[&](DerivedPath::Built bfd) {
|
||||||
|
OutputPathMap outputs;
|
||||||
|
auto drv = store->readDerivation(bfd.drvPath);
|
||||||
|
auto outputHashes = staticOutputHashes(*store, drv);
|
||||||
|
auto drvOutputs = drv.outputsAndOptPaths(*store);
|
||||||
|
for (auto& output : bfd.outputs) {
|
||||||
|
if (!outputHashes.count(output))
|
||||||
|
throw Error(
|
||||||
|
"the derivation '%s' doesn't have an output "
|
||||||
|
"named '%s'",
|
||||||
|
store->printStorePath(bfd.drvPath), output);
|
||||||
|
if (settings.isExperimentalFeatureEnabled(
|
||||||
|
"ca-derivations")) {
|
||||||
|
auto outputId =
|
||||||
|
DrvOutput{outputHashes.at(output), output};
|
||||||
|
auto realisation =
|
||||||
|
store->queryRealisation(outputId);
|
||||||
|
if (!realisation)
|
||||||
|
throw Error(
|
||||||
|
"cannot operate on an output of unbuilt "
|
||||||
|
"content-addresed derivation '%s'",
|
||||||
|
outputId.to_string());
|
||||||
|
outputs.insert_or_assign(
|
||||||
|
output, realisation->outPath);
|
||||||
|
} else {
|
||||||
|
// If ca-derivations isn't enabled, assume that
|
||||||
|
// the output path is statically known.
|
||||||
|
assert(drvOutputs.count(output));
|
||||||
|
assert(drvOutputs.at(output).second);
|
||||||
|
outputs.insert_or_assign(
|
||||||
|
output, *drvOutputs.at(output).second);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
res.push_back(BuiltPath::Built{bfd.drvPath, outputs});
|
||||||
|
},
|
||||||
|
},
|
||||||
|
b.raw());
|
||||||
|
|
||||||
|
return res;
|
||||||
|
}
|
||||||
|
|
||||||
|
BuiltPaths build(ref<Store> store, Realise mode,
|
||||||
std::vector<std::shared_ptr<Installable>> installables, BuildMode bMode)
|
std::vector<std::shared_ptr<Installable>> installables, BuildMode bMode)
|
||||||
{
|
{
|
||||||
if (mode == Realise::Nothing)
|
if (mode == Realise::Nothing)
|
||||||
settings.readOnlyMode = true;
|
settings.readOnlyMode = true;
|
||||||
|
|
||||||
Buildables buildables;
|
std::vector<DerivedPath> pathsToBuild;
|
||||||
|
|
||||||
std::vector<StorePathWithOutputs> pathsToBuild;
|
|
||||||
|
|
||||||
for (auto & i : installables) {
|
for (auto & i : installables) {
|
||||||
for (auto & b : i->toBuildables()) {
|
auto b = i->toDerivedPaths();
|
||||||
std::visit(overloaded {
|
pathsToBuild.insert(pathsToBuild.end(), b.begin(), b.end());
|
||||||
[&](BuildableOpaque bo) {
|
|
||||||
pathsToBuild.push_back({bo.path});
|
|
||||||
},
|
|
||||||
[&](BuildableFromDrv bfd) {
|
|
||||||
StringSet outputNames;
|
|
||||||
for (auto & output : bfd.outputs)
|
|
||||||
outputNames.insert(output.first);
|
|
||||||
pathsToBuild.push_back({bfd.drvPath, outputNames});
|
|
||||||
},
|
|
||||||
}, b);
|
|
||||||
buildables.push_back(std::move(b));
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (mode == Realise::Nothing)
|
if (mode == Realise::Nothing)
|
||||||
|
@ -727,59 +740,26 @@ Buildables build(ref<Store> store, Realise mode,
|
||||||
else if (mode == Realise::Outputs)
|
else if (mode == Realise::Outputs)
|
||||||
store->buildPaths(pathsToBuild, bMode);
|
store->buildPaths(pathsToBuild, bMode);
|
||||||
|
|
||||||
return buildables;
|
return getBuiltPaths(store, pathsToBuild);
|
||||||
}
|
}
|
||||||
|
|
||||||
std::set<RealisedPath> toRealisedPaths(
|
BuiltPaths toBuiltPaths(
|
||||||
ref<Store> store,
|
ref<Store> store,
|
||||||
Realise mode,
|
Realise mode,
|
||||||
OperateOn operateOn,
|
OperateOn operateOn,
|
||||||
std::vector<std::shared_ptr<Installable>> installables)
|
std::vector<std::shared_ptr<Installable>> installables)
|
||||||
{
|
{
|
||||||
std::set<RealisedPath> res;
|
|
||||||
if (operateOn == OperateOn::Output) {
|
if (operateOn == OperateOn::Output) {
|
||||||
for (auto & b : build(store, mode, installables))
|
return build(store, mode, installables);
|
||||||
std::visit(overloaded {
|
|
||||||
[&](BuildableOpaque bo) {
|
|
||||||
res.insert(bo.path);
|
|
||||||
},
|
|
||||||
[&](BuildableFromDrv bfd) {
|
|
||||||
auto drv = store->readDerivation(bfd.drvPath);
|
|
||||||
auto outputHashes = staticOutputHashes(*store, drv);
|
|
||||||
for (auto & output : bfd.outputs) {
|
|
||||||
if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
|
|
||||||
if (!outputHashes.count(output.first))
|
|
||||||
throw Error(
|
|
||||||
"the derivation '%s' doesn't have an output named '%s'",
|
|
||||||
store->printStorePath(bfd.drvPath),
|
|
||||||
output.first);
|
|
||||||
auto outputId = DrvOutput{outputHashes.at(output.first), output.first};
|
|
||||||
auto realisation = store->queryRealisation(outputId);
|
|
||||||
if (!realisation)
|
|
||||||
throw Error("cannot operate on an output of unbuilt content-addresed derivation '%s'", outputId.to_string());
|
|
||||||
res.insert(RealisedPath{*realisation});
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
// If ca-derivations isn't enabled, behave as if
|
|
||||||
// all the paths are opaque to keep the default
|
|
||||||
// behavior
|
|
||||||
assert(output.second);
|
|
||||||
res.insert(*output.second);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
}, b);
|
|
||||||
} else {
|
} else {
|
||||||
if (mode == Realise::Nothing)
|
if (mode == Realise::Nothing)
|
||||||
settings.readOnlyMode = true;
|
settings.readOnlyMode = true;
|
||||||
|
|
||||||
for (auto & i : installables)
|
BuiltPaths res;
|
||||||
for (auto & b : i->toBuildables())
|
for (auto & drvPath : toDerivations(store, installables, true))
|
||||||
if (auto bfd = std::get_if<BuildableFromDrv>(&b))
|
res.push_back(BuiltPath::Opaque{drvPath});
|
||||||
res.insert(bfd->drvPath);
|
|
||||||
}
|
|
||||||
|
|
||||||
return res;
|
return res;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
StorePathSet toStorePaths(ref<Store> store,
|
StorePathSet toStorePaths(ref<Store> store,
|
||||||
|
@ -787,8 +767,10 @@ StorePathSet toStorePaths(ref<Store> store,
|
||||||
std::vector<std::shared_ptr<Installable>> installables)
|
std::vector<std::shared_ptr<Installable>> installables)
|
||||||
{
|
{
|
||||||
StorePathSet outPaths;
|
StorePathSet outPaths;
|
||||||
for (auto & path : toRealisedPaths(store, mode, operateOn, installables))
|
for (auto & path : toBuiltPaths(store, mode, operateOn, installables)) {
|
||||||
outPaths.insert(path.path());
|
auto thisOutPaths = path.outPaths();
|
||||||
|
outPaths.insert(thisOutPaths.begin(), thisOutPaths.end());
|
||||||
|
}
|
||||||
return outPaths;
|
return outPaths;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -810,9 +792,9 @@ StorePathSet toDerivations(ref<Store> store,
|
||||||
StorePathSet drvPaths;
|
StorePathSet drvPaths;
|
||||||
|
|
||||||
for (auto & i : installables)
|
for (auto & i : installables)
|
||||||
for (auto & b : i->toBuildables())
|
for (auto & b : i->toDerivedPaths())
|
||||||
std::visit(overloaded {
|
std::visit(overloaded {
|
||||||
[&](BuildableOpaque bo) {
|
[&](DerivedPath::Opaque bo) {
|
||||||
if (!useDeriver)
|
if (!useDeriver)
|
||||||
throw Error("argument '%s' did not evaluate to a derivation", i->what());
|
throw Error("argument '%s' did not evaluate to a derivation", i->what());
|
||||||
auto derivers = store->queryValidDerivers(bo.path);
|
auto derivers = store->queryValidDerivers(bo.path);
|
||||||
|
@ -821,10 +803,10 @@ StorePathSet toDerivations(ref<Store> store,
|
||||||
// FIXME: use all derivers?
|
// FIXME: use all derivers?
|
||||||
drvPaths.insert(*derivers.begin());
|
drvPaths.insert(*derivers.begin());
|
||||||
},
|
},
|
||||||
[&](BuildableFromDrv bfd) {
|
[&](DerivedPath::Built bfd) {
|
||||||
drvPaths.insert(bfd.drvPath);
|
drvPaths.insert(bfd.drvPath);
|
||||||
},
|
},
|
||||||
}, b);
|
}, b.raw());
|
||||||
|
|
||||||
return drvPaths;
|
return drvPaths;
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,13 +2,13 @@
|
||||||
|
|
||||||
#include "util.hh"
|
#include "util.hh"
|
||||||
#include "path.hh"
|
#include "path.hh"
|
||||||
|
#include "path-with-outputs.hh"
|
||||||
|
#include "derived-path.hh"
|
||||||
#include "eval.hh"
|
#include "eval.hh"
|
||||||
#include "flake/flake.hh"
|
#include "flake/flake.hh"
|
||||||
|
|
||||||
#include <optional>
|
#include <optional>
|
||||||
|
|
||||||
#include <nlohmann/json_fwd.hpp>
|
|
||||||
|
|
||||||
namespace nix {
|
namespace nix {
|
||||||
|
|
||||||
struct DrvInfo;
|
struct DrvInfo;
|
||||||
|
@ -16,25 +16,6 @@ struct SourceExprCommand;
|
||||||
|
|
||||||
namespace eval_cache { class EvalCache; class AttrCursor; }
|
namespace eval_cache { class EvalCache; class AttrCursor; }
|
||||||
|
|
||||||
struct BuildableOpaque {
|
|
||||||
StorePath path;
|
|
||||||
nlohmann::json toJSON(ref<Store> store) const;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct BuildableFromDrv {
|
|
||||||
StorePath drvPath;
|
|
||||||
std::map<std::string, std::optional<StorePath>> outputs;
|
|
||||||
nlohmann::json toJSON(ref<Store> store) const;
|
|
||||||
};
|
|
||||||
|
|
||||||
typedef std::variant<
|
|
||||||
BuildableOpaque,
|
|
||||||
BuildableFromDrv
|
|
||||||
> Buildable;
|
|
||||||
|
|
||||||
typedef std::vector<Buildable> Buildables;
|
|
||||||
nlohmann::json buildablesToJSON(const Buildables & buildables, ref<Store> store);
|
|
||||||
|
|
||||||
struct App
|
struct App
|
||||||
{
|
{
|
||||||
std::vector<StorePathWithOutputs> context;
|
std::vector<StorePathWithOutputs> context;
|
||||||
|
@ -42,17 +23,23 @@ struct App
|
||||||
// FIXME: add args, sandbox settings, metadata, ...
|
// FIXME: add args, sandbox settings, metadata, ...
|
||||||
};
|
};
|
||||||
|
|
||||||
|
struct UnresolvedApp
|
||||||
|
{
|
||||||
|
App unresolved;
|
||||||
|
App resolve(ref<Store>);
|
||||||
|
};
|
||||||
|
|
||||||
struct Installable
|
struct Installable
|
||||||
{
|
{
|
||||||
virtual ~Installable() { }
|
virtual ~Installable() { }
|
||||||
|
|
||||||
virtual std::string what() = 0;
|
virtual std::string what() = 0;
|
||||||
|
|
||||||
virtual Buildables toBuildables() = 0;
|
virtual DerivedPaths toDerivedPaths() = 0;
|
||||||
|
|
||||||
Buildable toBuildable();
|
DerivedPath toDerivedPath();
|
||||||
|
|
||||||
App toApp(EvalState & state);
|
UnresolvedApp toApp(EvalState & state);
|
||||||
|
|
||||||
virtual std::pair<Value *, Pos> toValue(EvalState & state)
|
virtual std::pair<Value *, Pos> toValue(EvalState & state)
|
||||||
{
|
{
|
||||||
|
@ -93,7 +80,7 @@ struct InstallableValue : Installable
|
||||||
|
|
||||||
virtual std::vector<DerivationInfo> toDerivations() = 0;
|
virtual std::vector<DerivationInfo> toDerivations() = 0;
|
||||||
|
|
||||||
Buildables toBuildables() override;
|
DerivedPaths toDerivedPaths() override;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct InstallableFlake : InstallableValue
|
struct InstallableFlake : InstallableValue
|
||||||
|
|
|
@ -35,6 +35,7 @@ class Bindings
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
typedef uint32_t size_t;
|
typedef uint32_t size_t;
|
||||||
|
Pos *pos;
|
||||||
|
|
||||||
private:
|
private:
|
||||||
size_t size_, capacity_;
|
size_t size_, capacity_;
|
||||||
|
|
|
@ -486,11 +486,11 @@ std::shared_ptr<AttrCursor> AttrCursor::getAttr(std::string_view name)
|
||||||
return getAttr(root->state.symbols.create(name));
|
return getAttr(root->state.symbols.create(name));
|
||||||
}
|
}
|
||||||
|
|
||||||
std::shared_ptr<AttrCursor> AttrCursor::findAlongAttrPath(const std::vector<Symbol> & attrPath)
|
std::shared_ptr<AttrCursor> AttrCursor::findAlongAttrPath(const std::vector<Symbol> & attrPath, bool force)
|
||||||
{
|
{
|
||||||
auto res = shared_from_this();
|
auto res = shared_from_this();
|
||||||
for (auto & attr : attrPath) {
|
for (auto & attr : attrPath) {
|
||||||
res = res->maybeGetAttr(attr);
|
res = res->maybeGetAttr(attr, force);
|
||||||
if (!res) return {};
|
if (!res) return {};
|
||||||
}
|
}
|
||||||
return res;
|
return res;
|
||||||
|
|
|
@ -102,7 +102,7 @@ public:
|
||||||
|
|
||||||
std::shared_ptr<AttrCursor> getAttr(std::string_view name);
|
std::shared_ptr<AttrCursor> getAttr(std::string_view name);
|
||||||
|
|
||||||
std::shared_ptr<AttrCursor> findAlongAttrPath(const std::vector<Symbol> & attrPath);
|
std::shared_ptr<AttrCursor> findAlongAttrPath(const std::vector<Symbol> & attrPath, bool force = false);
|
||||||
|
|
||||||
std::string getString();
|
std::string getString();
|
||||||
|
|
||||||
|
|
|
@ -201,6 +201,15 @@ string showType(const Value & v)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Pos Value::determinePos(const Pos &pos) const
|
||||||
|
{
|
||||||
|
switch (internalType) {
|
||||||
|
case tAttrs: return *attrs->pos;
|
||||||
|
case tLambda: return lambda.fun->pos;
|
||||||
|
case tApp: return app.left->determinePos(pos);
|
||||||
|
default: return pos;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
bool Value::isTrivial() const
|
bool Value::isTrivial() const
|
||||||
{
|
{
|
||||||
|
@ -1060,6 +1069,8 @@ void ExprAttrs::eval(EvalState & state, Env & env, Value & v)
|
||||||
v.attrs->push_back(Attr(nameSym, i.valueExpr->maybeThunk(state, *dynamicEnv), &i.pos));
|
v.attrs->push_back(Attr(nameSym, i.valueExpr->maybeThunk(state, *dynamicEnv), &i.pos));
|
||||||
v.attrs->sort(); // FIXME: inefficient
|
v.attrs->sort(); // FIXME: inefficient
|
||||||
}
|
}
|
||||||
|
|
||||||
|
v.attrs->pos = &pos;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -2091,9 +2102,12 @@ Strings EvalSettings::getDefaultNixPath()
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
if (!evalSettings.restrictEval && !evalSettings.pureEval) {
|
||||||
add(getHome() + "/.nix-defexpr/channels");
|
add(getHome() + "/.nix-defexpr/channels");
|
||||||
add(settings.nixStateDir + "/profiles/per-user/root/channels/nixpkgs", "nixpkgs");
|
add(settings.nixStateDir + "/profiles/per-user/root/channels/nixpkgs", "nixpkgs");
|
||||||
add(settings.nixStateDir + "/profiles/per-user/root/channels");
|
add(settings.nixStateDir + "/profiles/per-user/root/channels");
|
||||||
|
}
|
||||||
|
|
||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -22,7 +22,9 @@ static TrustedList readTrustedList()
|
||||||
|
|
||||||
static void writeTrustedList(const TrustedList & trustedList)
|
static void writeTrustedList(const TrustedList & trustedList)
|
||||||
{
|
{
|
||||||
writeFile(trustedListPath(), nlohmann::json(trustedList).dump());
|
auto path = trustedListPath();
|
||||||
|
createDirs(dirOf(path));
|
||||||
|
writeFile(path, nlohmann::json(trustedList).dump());
|
||||||
}
|
}
|
||||||
|
|
||||||
void ConfigFile::apply()
|
void ConfigFile::apply()
|
||||||
|
|
|
@ -113,7 +113,7 @@ struct LockFlags
|
||||||
/* Whether to commit changes to flake.lock. */
|
/* Whether to commit changes to flake.lock. */
|
||||||
bool commitLockFile = false;
|
bool commitLockFile = false;
|
||||||
|
|
||||||
/* Flake inputs to be overriden. */
|
/* Flake inputs to be overridden. */
|
||||||
std::map<InputPath, FlakeRef> inputOverrides;
|
std::map<InputPath, FlakeRef> inputOverrides;
|
||||||
|
|
||||||
/* Flake inputs to be updated. This means that any existing lock
|
/* Flake inputs to be updated. This means that any existing lock
|
||||||
|
|
|
@ -2,6 +2,7 @@
|
||||||
#include "util.hh"
|
#include "util.hh"
|
||||||
#include "eval-inline.hh"
|
#include "eval-inline.hh"
|
||||||
#include "store-api.hh"
|
#include "store-api.hh"
|
||||||
|
#include "path-with-outputs.hh"
|
||||||
|
|
||||||
#include <cstring>
|
#include <cstring>
|
||||||
#include <regex>
|
#include <regex>
|
||||||
|
@ -19,7 +20,7 @@ DrvInfo::DrvInfo(EvalState & state, const string & attrPath, Bindings * attrs)
|
||||||
DrvInfo::DrvInfo(EvalState & state, ref<Store> store, const std::string & drvPathWithOutputs)
|
DrvInfo::DrvInfo(EvalState & state, ref<Store> store, const std::string & drvPathWithOutputs)
|
||||||
: state(&state), attrs(nullptr), attrPath("")
|
: state(&state), attrs(nullptr), attrPath("")
|
||||||
{
|
{
|
||||||
auto [drvPath, selectedOutputs] = store->parsePathWithOutputs(drvPathWithOutputs);
|
auto [drvPath, selectedOutputs] = parsePathWithOutputs(*store, drvPathWithOutputs);
|
||||||
|
|
||||||
this->drvPath = store->printStorePath(drvPath);
|
this->drvPath = store->printStorePath(drvPath);
|
||||||
|
|
||||||
|
|
|
@ -16,7 +16,7 @@ libexpr_CXXFLAGS += -I src/libutil -I src/libstore -I src/libfetchers -I src/lib
|
||||||
libexpr_LIBS = libutil libstore libfetchers
|
libexpr_LIBS = libutil libstore libfetchers
|
||||||
|
|
||||||
libexpr_LDFLAGS = -lboost_context
|
libexpr_LDFLAGS = -lboost_context
|
||||||
ifneq ($(OS), FreeBSD)
|
ifeq ($(OS), Linux)
|
||||||
libexpr_LDFLAGS += -ldl
|
libexpr_LDFLAGS += -ldl
|
||||||
endif
|
endif
|
||||||
|
|
||||||
|
|
|
@ -180,6 +180,7 @@ struct ExprOpHasAttr : Expr
|
||||||
struct ExprAttrs : Expr
|
struct ExprAttrs : Expr
|
||||||
{
|
{
|
||||||
bool recursive;
|
bool recursive;
|
||||||
|
Pos pos;
|
||||||
struct AttrDef {
|
struct AttrDef {
|
||||||
bool inherited;
|
bool inherited;
|
||||||
Expr * e;
|
Expr * e;
|
||||||
|
@ -199,7 +200,8 @@ struct ExprAttrs : Expr
|
||||||
};
|
};
|
||||||
typedef std::vector<DynamicAttrDef> DynamicAttrDefs;
|
typedef std::vector<DynamicAttrDef> DynamicAttrDefs;
|
||||||
DynamicAttrDefs dynamicAttrs;
|
DynamicAttrDefs dynamicAttrs;
|
||||||
ExprAttrs() : recursive(false) { };
|
ExprAttrs(const Pos &pos) : recursive(false), pos(pos) { };
|
||||||
|
ExprAttrs() : recursive(false), pos(noPos) { };
|
||||||
COMMON_METHODS
|
COMMON_METHODS
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -478,7 +478,7 @@ binds
|
||||||
$$->attrs[i.symbol] = ExprAttrs::AttrDef(new ExprSelect(CUR_POS, $4, i.symbol), makeCurPos(@6, data));
|
$$->attrs[i.symbol] = ExprAttrs::AttrDef(new ExprSelect(CUR_POS, $4, i.symbol), makeCurPos(@6, data));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
| { $$ = new ExprAttrs; }
|
| { $$ = new ExprAttrs(makeCurPos(@0, data)); }
|
||||||
;
|
;
|
||||||
|
|
||||||
attrs
|
attrs
|
||||||
|
|
|
@ -21,6 +21,8 @@
|
||||||
#include <regex>
|
#include <regex>
|
||||||
#include <dlfcn.h>
|
#include <dlfcn.h>
|
||||||
|
|
||||||
|
#include <cmath>
|
||||||
|
|
||||||
|
|
||||||
namespace nix {
|
namespace nix {
|
||||||
|
|
||||||
|
@ -35,7 +37,7 @@ InvalidPathError::InvalidPathError(const Path & path) :
|
||||||
|
|
||||||
void EvalState::realiseContext(const PathSet & context)
|
void EvalState::realiseContext(const PathSet & context)
|
||||||
{
|
{
|
||||||
std::vector<StorePathWithOutputs> drvs;
|
std::vector<DerivedPath::Built> drvs;
|
||||||
|
|
||||||
for (auto & i : context) {
|
for (auto & i : context) {
|
||||||
auto [ctxS, outputName] = decodeContext(i);
|
auto [ctxS, outputName] = decodeContext(i);
|
||||||
|
@ -43,7 +45,7 @@ void EvalState::realiseContext(const PathSet & context)
|
||||||
if (!store->isValidPath(ctx))
|
if (!store->isValidPath(ctx))
|
||||||
throw InvalidPathError(store->printStorePath(ctx));
|
throw InvalidPathError(store->printStorePath(ctx));
|
||||||
if (!outputName.empty() && ctx.isDerivation()) {
|
if (!outputName.empty() && ctx.isDerivation()) {
|
||||||
drvs.push_back(StorePathWithOutputs{ctx, {outputName}});
|
drvs.push_back({ctx, {outputName}});
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -51,14 +53,16 @@ void EvalState::realiseContext(const PathSet & context)
|
||||||
|
|
||||||
if (!evalSettings.enableImportFromDerivation)
|
if (!evalSettings.enableImportFromDerivation)
|
||||||
throw EvalError("attempted to realize '%1%' during evaluation but 'allow-import-from-derivation' is false",
|
throw EvalError("attempted to realize '%1%' during evaluation but 'allow-import-from-derivation' is false",
|
||||||
store->printStorePath(drvs.begin()->path));
|
store->printStorePath(drvs.begin()->drvPath));
|
||||||
|
|
||||||
/* For performance, prefetch all substitute info. */
|
/* For performance, prefetch all substitute info. */
|
||||||
StorePathSet willBuild, willSubstitute, unknown;
|
StorePathSet willBuild, willSubstitute, unknown;
|
||||||
uint64_t downloadSize, narSize;
|
uint64_t downloadSize, narSize;
|
||||||
store->queryMissing(drvs, willBuild, willSubstitute, unknown, downloadSize, narSize);
|
std::vector<DerivedPath> buildReqs;
|
||||||
|
for (auto & d : drvs) buildReqs.emplace_back(DerivedPath { d });
|
||||||
|
store->queryMissing(buildReqs, willBuild, willSubstitute, unknown, downloadSize, narSize);
|
||||||
|
|
||||||
store->buildPaths(drvs);
|
store->buildPaths(buildReqs);
|
||||||
|
|
||||||
/* Add the output of this derivations to the allowed
|
/* Add the output of this derivations to the allowed
|
||||||
paths. */
|
paths. */
|
||||||
|
@ -545,18 +549,56 @@ typedef list<Value *> ValueList;
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
|
||||||
|
static Bindings::iterator getAttr(
|
||||||
|
EvalState & state,
|
||||||
|
string funcName,
|
||||||
|
string attrName,
|
||||||
|
Bindings * attrSet,
|
||||||
|
const Pos & pos)
|
||||||
|
{
|
||||||
|
Bindings::iterator value = attrSet->find(state.symbols.create(attrName));
|
||||||
|
if (value == attrSet->end()) {
|
||||||
|
hintformat errorMsg = hintfmt(
|
||||||
|
"attribute '%s' missing for call to '%s'",
|
||||||
|
attrName,
|
||||||
|
funcName
|
||||||
|
);
|
||||||
|
|
||||||
|
Pos aPos = *attrSet->pos;
|
||||||
|
if (aPos == noPos) {
|
||||||
|
throw TypeError({
|
||||||
|
.msg = errorMsg,
|
||||||
|
.errPos = pos,
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
auto e = TypeError({
|
||||||
|
.msg = errorMsg,
|
||||||
|
.errPos = aPos,
|
||||||
|
});
|
||||||
|
|
||||||
|
// Adding another trace for the function name to make it clear
|
||||||
|
// which call received wrong arguments.
|
||||||
|
e.addTrace(pos, hintfmt("while invoking '%s'", funcName));
|
||||||
|
throw e;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return value;
|
||||||
|
}
|
||||||
|
|
||||||
static void prim_genericClosure(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
static void prim_genericClosure(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
||||||
{
|
{
|
||||||
state.forceAttrs(*args[0], pos);
|
state.forceAttrs(*args[0], pos);
|
||||||
|
|
||||||
/* Get the start set. */
|
/* Get the start set. */
|
||||||
Bindings::iterator startSet =
|
Bindings::iterator startSet = getAttr(
|
||||||
args[0]->attrs->find(state.symbols.create("startSet"));
|
state,
|
||||||
if (startSet == args[0]->attrs->end())
|
"genericClosure",
|
||||||
throw EvalError({
|
"startSet",
|
||||||
.msg = hintfmt("attribute 'startSet' required"),
|
args[0]->attrs,
|
||||||
.errPos = pos
|
pos
|
||||||
});
|
);
|
||||||
|
|
||||||
state.forceList(*startSet->value, pos);
|
state.forceList(*startSet->value, pos);
|
||||||
|
|
||||||
ValueList workSet;
|
ValueList workSet;
|
||||||
|
@ -564,13 +606,14 @@ static void prim_genericClosure(EvalState & state, const Pos & pos, Value * * ar
|
||||||
workSet.push_back(startSet->value->listElems()[n]);
|
workSet.push_back(startSet->value->listElems()[n]);
|
||||||
|
|
||||||
/* Get the operator. */
|
/* Get the operator. */
|
||||||
Bindings::iterator op =
|
Bindings::iterator op = getAttr(
|
||||||
args[0]->attrs->find(state.symbols.create("operator"));
|
state,
|
||||||
if (op == args[0]->attrs->end())
|
"genericClosure",
|
||||||
throw EvalError({
|
"operator",
|
||||||
.msg = hintfmt("attribute 'operator' required"),
|
args[0]->attrs,
|
||||||
.errPos = pos
|
pos
|
||||||
});
|
);
|
||||||
|
|
||||||
state.forceValue(*op->value, pos);
|
state.forceValue(*op->value, pos);
|
||||||
|
|
||||||
/* Construct the closure by applying the operator to element of
|
/* Construct the closure by applying the operator to element of
|
||||||
|
@ -673,6 +716,44 @@ static RegisterPrimOp primop_addErrorContext(RegisterPrimOp::Info {
|
||||||
.fun = prim_addErrorContext,
|
.fun = prim_addErrorContext,
|
||||||
});
|
});
|
||||||
|
|
||||||
|
static void prim_ceil(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
||||||
|
{
|
||||||
|
auto value = state.forceFloat(*args[0], args[0]->determinePos(pos));
|
||||||
|
mkInt(v, ceil(value));
|
||||||
|
}
|
||||||
|
|
||||||
|
static RegisterPrimOp primop_ceil({
|
||||||
|
.name = "__ceil",
|
||||||
|
.args = {"double"},
|
||||||
|
.doc = R"(
|
||||||
|
Converts an IEEE-754 double-precision floating-point number (*double*) to
|
||||||
|
the next higher integer.
|
||||||
|
|
||||||
|
If the datatype is neither an integer nor a "float", an evaluation error will be
|
||||||
|
thrown.
|
||||||
|
)",
|
||||||
|
.fun = prim_ceil,
|
||||||
|
});
|
||||||
|
|
||||||
|
static void prim_floor(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
||||||
|
{
|
||||||
|
auto value = state.forceFloat(*args[0], args[0]->determinePos(pos));
|
||||||
|
mkInt(v, floor(value));
|
||||||
|
}
|
||||||
|
|
||||||
|
static RegisterPrimOp primop_floor({
|
||||||
|
.name = "__floor",
|
||||||
|
.args = {"double"},
|
||||||
|
.doc = R"(
|
||||||
|
Converts an IEEE-754 double-precision floating-point number (*double*) to
|
||||||
|
the next lower integer.
|
||||||
|
|
||||||
|
If the datatype is neither an integer nor a "float", an evaluation error will be
|
||||||
|
thrown.
|
||||||
|
)",
|
||||||
|
.fun = prim_floor,
|
||||||
|
});
|
||||||
|
|
||||||
/* Try evaluating the argument. Success => {success=true; value=something;},
|
/* Try evaluating the argument. Success => {success=true; value=something;},
|
||||||
* else => {success=false; value=false;} */
|
* else => {success=false; value=false;} */
|
||||||
static void prim_tryEval(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
static void prim_tryEval(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
||||||
|
@ -814,12 +895,14 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * *
|
||||||
state.forceAttrs(*args[0], pos);
|
state.forceAttrs(*args[0], pos);
|
||||||
|
|
||||||
/* Figure out the name first (for stack backtraces). */
|
/* Figure out the name first (for stack backtraces). */
|
||||||
Bindings::iterator attr = args[0]->attrs->find(state.sName);
|
Bindings::iterator attr = getAttr(
|
||||||
if (attr == args[0]->attrs->end())
|
state,
|
||||||
throw EvalError({
|
"derivationStrict",
|
||||||
.msg = hintfmt("required attribute 'name' missing"),
|
state.sName,
|
||||||
.errPos = pos
|
args[0]->attrs,
|
||||||
});
|
pos
|
||||||
|
);
|
||||||
|
|
||||||
string drvName;
|
string drvName;
|
||||||
Pos & posDrvName(*attr->pos);
|
Pos & posDrvName(*attr->pos);
|
||||||
try {
|
try {
|
||||||
|
@ -951,7 +1034,7 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * *
|
||||||
}
|
}
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
auto s = state.coerceToString(posDrvName, *i->value, context, true);
|
auto s = state.coerceToString(*i->pos, *i->value, context, true);
|
||||||
drv.env.emplace(key, s);
|
drv.env.emplace(key, s);
|
||||||
if (i->name == state.sBuilder) drv.builder = s;
|
if (i->name == state.sBuilder) drv.builder = s;
|
||||||
else if (i->name == state.sSystem) drv.platform = s;
|
else if (i->name == state.sSystem) drv.platform = s;
|
||||||
|
@ -1208,7 +1291,10 @@ static RegisterPrimOp primop_toPath({
|
||||||
static void prim_storePath(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
static void prim_storePath(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
||||||
{
|
{
|
||||||
if (evalSettings.pureEval)
|
if (evalSettings.pureEval)
|
||||||
throw EvalError("builtins.storePath' is not allowed in pure evaluation mode");
|
throw EvalError({
|
||||||
|
.msg = hintfmt("'%s' is not allowed in pure evaluation mode", "builtins.storePath"),
|
||||||
|
.errPos = pos
|
||||||
|
});
|
||||||
|
|
||||||
PathSet context;
|
PathSet context;
|
||||||
Path path = state.checkSourcePath(state.coerceToPath(pos, *args[0], context));
|
Path path = state.checkSourcePath(state.coerceToPath(pos, *args[0], context));
|
||||||
|
@ -1367,12 +1453,13 @@ static void prim_findFile(EvalState & state, const Pos & pos, Value * * args, Va
|
||||||
if (i != v2.attrs->end())
|
if (i != v2.attrs->end())
|
||||||
prefix = state.forceStringNoCtx(*i->value, pos);
|
prefix = state.forceStringNoCtx(*i->value, pos);
|
||||||
|
|
||||||
i = v2.attrs->find(state.symbols.create("path"));
|
i = getAttr(
|
||||||
if (i == v2.attrs->end())
|
state,
|
||||||
throw EvalError({
|
"findFile",
|
||||||
.msg = hintfmt("attribute 'path' missing"),
|
"path",
|
||||||
.errPos = pos
|
v2.attrs,
|
||||||
});
|
pos
|
||||||
|
);
|
||||||
|
|
||||||
PathSet context;
|
PathSet context;
|
||||||
string path = state.coerceToString(pos, *i->value, context, false, false);
|
string path = state.coerceToString(pos, *i->value, context, false, false);
|
||||||
|
@ -1918,26 +2005,26 @@ static RegisterPrimOp primop_path({
|
||||||
An enrichment of the built-in path type, based on the attributes
|
An enrichment of the built-in path type, based on the attributes
|
||||||
present in *args*. All are optional except `path`:
|
present in *args*. All are optional except `path`:
|
||||||
|
|
||||||
- path
|
- path\
|
||||||
The underlying path.
|
The underlying path.
|
||||||
|
|
||||||
- name
|
- name\
|
||||||
The name of the path when added to the store. This can used to
|
The name of the path when added to the store. This can used to
|
||||||
reference paths that have nix-illegal characters in their names,
|
reference paths that have nix-illegal characters in their names,
|
||||||
like `@`.
|
like `@`.
|
||||||
|
|
||||||
- filter
|
- filter\
|
||||||
A function of the type expected by `builtins.filterSource`,
|
A function of the type expected by `builtins.filterSource`,
|
||||||
with the same semantics.
|
with the same semantics.
|
||||||
|
|
||||||
- recursive
|
- recursive\
|
||||||
When `false`, when `path` is added to the store it is with a
|
When `false`, when `path` is added to the store it is with a
|
||||||
flat hash, rather than a hash of the NAR serialization of the
|
flat hash, rather than a hash of the NAR serialization of the
|
||||||
file. Thus, `path` must refer to a regular file, not a
|
file. Thus, `path` must refer to a regular file, not a
|
||||||
directory. This allows similar behavior to `fetchurl`. Defaults
|
directory. This allows similar behavior to `fetchurl`. Defaults
|
||||||
to `true`.
|
to `true`.
|
||||||
|
|
||||||
- sha256
|
- sha256\
|
||||||
When provided, this is the expected hash of the file at the
|
When provided, this is the expected hash of the file at the
|
||||||
path. Evaluation will fail if the hash is incorrect, and
|
path. Evaluation will fail if the hash is incorrect, and
|
||||||
providing a hash allows `builtins.path` to be used even when the
|
providing a hash allows `builtins.path` to be used even when the
|
||||||
|
@@ -2014,12 +2101,13 @@ void prim_getAttr(EvalState & state, const Pos & pos, Value * * args, Value & v)
     string attr = state.forceStringNoCtx(*args[0], pos);
     state.forceAttrs(*args[1], pos);
     // !!! Should we create a symbol here or just do a lookup?
-    Bindings::iterator i = args[1]->attrs->find(state.symbols.create(attr));
-    if (i == args[1]->attrs->end())
-        throw EvalError({
-            .msg = hintfmt("attribute '%1%' missing", attr),
-            .errPos = pos
-        });
+    Bindings::iterator i = getAttr(
+        state,
+        "getAttr",
+        attr,
+        args[1]->attrs,
+        pos
+    );
     // !!! add to stack trace?
     if (state.countCalls && i->pos) state.attrSelects[*i->pos]++;
     state.forceValue(*i->value, pos);
@@ -2146,22 +2234,25 @@ static void prim_listToAttrs(EvalState & state, const Pos & pos, Value * * args,
         Value & v2(*args[0]->listElems()[i]);
         state.forceAttrs(v2, pos);
 
-        Bindings::iterator j = v2.attrs->find(state.sName);
-        if (j == v2.attrs->end())
-            throw TypeError({
-                .msg = hintfmt("'name' attribute missing in a call to 'listToAttrs'"),
-                .errPos = pos
-            });
-        string name = state.forceStringNoCtx(*j->value, pos);
+        Bindings::iterator j = getAttr(
+            state,
+            "listToAttrs",
+            state.sName,
+            v2.attrs,
+            pos
+        );
+
+        string name = state.forceStringNoCtx(*j->value, *j->pos);
 
         Symbol sym = state.symbols.create(name);
         if (seen.insert(sym).second) {
-            Bindings::iterator j2 = v2.attrs->find(state.symbols.create(state.sValue));
-            if (j2 == v2.attrs->end())
-                throw TypeError({
-                    .msg = hintfmt("'value' attribute missing in a call to 'listToAttrs'"),
-                    .errPos = pos
-                });
+            Bindings::iterator j2 = getAttr(
+                state,
+                "listToAttrs",
+                state.sValue,
+                v2.attrs,
+                pos
+            );
             v.attrs->push_back(Attr(sym, j2->value, j2->pos));
         }
     }
@@ -2802,7 +2893,12 @@ static void prim_concatMap(EvalState & state, const Pos & pos, Value * * args, V
     for (unsigned int n = 0; n < nrLists; ++n) {
         Value * vElem = args[1]->listElems()[n];
         state.callFunction(*args[0], *vElem, lists[n], pos);
-        state.forceList(lists[n], pos);
+        try {
+            state.forceList(lists[n], lists[n].determinePos(args[0]->determinePos(pos)));
+        } catch (TypeError &e) {
+            e.addTrace(pos, hintfmt("while invoking '%s'", "concatMap"));
+            throw e;
+        }
         len += lists[n].listSize();
     }
 
@@ -303,17 +303,17 @@ static RegisterPrimOp primop_fetchGit({
       of the repo at that URL is fetched. Otherwise, it can be an
       attribute with the following attributes (all except `url` optional):
 
-      - url
+      - url\
         The URL of the repo.
 
-      - name
+      - name\
         The name of the directory the repo should be exported to in the
         store. Defaults to the basename of the URL.
 
-      - rev
+      - rev\
         The git revision to fetch. Defaults to the tip of `ref`.
 
-      - ref
+      - ref\
         The git ref to look for the requested revision under. This is
         often a branch or tag name. Defaults to `HEAD`.
 
@@ -321,11 +321,11 @@ static RegisterPrimOp primop_fetchGit({
         of Nix 2.3.0 Nix will not prefix `refs/heads/` if `ref` starts
         with `refs/`.
 
-      - submodules
+      - submodules\
         A Boolean parameter that specifies whether submodules should be
         checked out. Defaults to `false`.
 
-      - allRefs
+      - allRefs\
         Whether to fetch all refs of the repository. With this argument being
         true, it's possible to load a `rev` from *any* `ref` (by default only
        `rev`s from the specified `ref` are supported).
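To make the attribute list quoted above concrete, here is a hedged sketch of a `fetchGit` call that uses those attributes. The repository URL, revision, and ref are illustrative placeholders rather than values taken from this change:

    builtins.fetchGit {
      url = "https://example.org/example/repo.git";       # placeholder repository URL
      ref = "refs/heads/main";                             # branch (or tag) to look the revision up under
      rev = "0000000000000000000000000000000000000000";    # placeholder 40-character commit hash
      submodules = false;                                  # do not check out submodules (the default)
      allRefs = true;                                      # allow rev to come from any ref, not just `ref`
    }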
@@ -341,6 +341,8 @@ public:
         return internalType == tList1 ? 1 : internalType == tList2 ? 2 : bigList.size;
     }
 
+    Pos determinePos(const Pos &pos) const;
+
     /* Check whether forcing this value requires a trivial amount of
        computation. In particular, function applications are
        non-trivial. */
@@ -6,6 +6,8 @@
 
 #include <nlohmann/json_fwd.hpp>
 
+#include <optional>
+
 namespace nix::fetchers {
 
 typedef std::variant<std::string, uint64_t, Explicit<bool>> Attr;
@@ -145,13 +145,7 @@ DownloadFileResult downloadFile(
     bool immutable,
     const Headers & headers = {});
 
-struct DownloadTarballMeta
-{
-    time_t lastModified;
-    std::string effectiveUrl;
-};
-
-std::pair<Tree, DownloadTarballMeta> downloadTarball(
+std::pair<Tree, time_t> downloadTarball(
     ref<Store> store,
     const std::string & url,
     const std::string & name,
@@ -6,6 +6,7 @@
 #include "url-parts.hh"
 
 #include <sys/time.h>
+#include <sys/wait.h>
 
 using namespace std::string_literals;
 
@@ -153,12 +154,14 @@ struct GitInputScheme : InputScheme
 
     std::pair<bool, std::string> getActualUrl(const Input & input) const
     {
-        // Don't clone file:// URIs (but otherwise treat them the
-        // same as remote URIs, i.e. don't use the working tree or
-        // HEAD).
+        // file:// URIs are normally not cloned (but otherwise treated the
+        // same as remote URIs, i.e. we don't use the working tree or
+        // HEAD). Exception: If _NIX_FORCE_HTTP is set, or the repo is a bare git
+        // repo, treat as a remote URI to force a clone.
         static bool forceHttp = getEnv("_NIX_FORCE_HTTP") == "1"; // for testing
         auto url = parseURL(getStrAttr(input.attrs, "url"));
-        bool isLocal = url.scheme == "file" && !forceHttp;
+        bool isBareRepository = url.scheme == "file" && !pathExists(url.path + "/.git");
+        bool isLocal = url.scheme == "file" && !forceHttp && !isBareRepository;
         return {isLocal, isLocal ? url.path : url.base};
     }
 
@@ -362,6 +365,8 @@ struct GitInputScheme : InputScheme
             auto fetchRef = allRefs
                 ? "refs/*"
                 : ref->compare(0, 5, "refs/") == 0
+                    ? *ref
+                    : ref == "HEAD"
                         ? *ref
                         : "refs/heads/" + *ref;
             runProgram("git", true, { "-C", repoDir, "fetch", "--quiet", "--force", "--", actualUrl, fmt("%s:%s", fetchRef, fetchRef) });
@@ -207,16 +207,16 @@ struct GitArchiveInputScheme : InputScheme
 
         auto url = getDownloadUrl(input);
 
-        auto [tree, meta] = downloadTarball(store, url.url, "source", true, url.headers);
+        auto [tree, lastModified] = downloadTarball(store, url.url, "source", true, url.headers);
 
-        input.attrs.insert_or_assign("lastModified", uint64_t(meta.lastModified));
+        input.attrs.insert_or_assign("lastModified", uint64_t(lastModified));
 
         getCache()->add(
             store,
             immutableAttrs,
             {
                 {"rev", rev->gitRev()},
-                {"lastModified", uint64_t(meta.lastModified)}
+                {"lastModified", uint64_t(lastModified)}
             },
             tree.storePath,
             true);
@@ -114,7 +114,7 @@ static std::shared_ptr<Registry> getSystemRegistry()
 
 Path getUserRegistryPath()
 {
-    return getHome() + "/.config/nix/registry.json";
+    return getConfigDir() + "/nix/registry.json";
 }
 
 std::shared_ptr<Registry> getUserRegistry()
@@ -109,7 +109,7 @@ DownloadFileResult downloadFile(
     };
 }
 
-std::pair<Tree, DownloadTarballMeta> downloadTarball(
+std::pair<Tree, time_t> downloadTarball(
     ref<Store> store,
     const std::string & url,
     const std::string & name,
@@ -127,10 +127,7 @@ std::pair<Tree, DownloadTarballMeta> downloadTarball(
     if (cached && !cached->expired)
         return {
             Tree(store->toRealPath(cached->storePath), std::move(cached->storePath)),
-            {
-                .lastModified = time_t(getIntAttr(cached->infoAttrs, "lastModified")),
-                .effectiveUrl = maybeGetStrAttr(cached->infoAttrs, "effectiveUrl").value_or(url),
-            },
+            getIntAttr(cached->infoAttrs, "lastModified")
         };
 
     auto res = downloadFile(store, url, name, immutable, headers);
@@ -155,7 +152,6 @@ std::pair<Tree, DownloadTarballMeta> downloadTarball(
 
     Attrs infoAttrs({
         {"lastModified", uint64_t(lastModified)},
-        {"effectiveUrl", res.effectiveUrl},
         {"etag", res.etag},
     });
 
@@ -168,10 +164,7 @@ std::pair<Tree, DownloadTarballMeta> downloadTarball(
 
     return {
         Tree(store->toRealPath(*unpackedStorePath), std::move(*unpackedStorePath)),
-        {
-            .lastModified = lastModified,
-            .effectiveUrl = res.effectiveUrl,
-        },
+        lastModified,
     };
 }
 
@@ -185,7 +178,8 @@ struct TarballInputScheme : InputScheme
             && !hasSuffix(url.path, ".tar")
             && !hasSuffix(url.path, ".tar.gz")
             && !hasSuffix(url.path, ".tar.xz")
-            && !hasSuffix(url.path, ".tar.bz2"))
+            && !hasSuffix(url.path, ".tar.bz2")
+            && !hasSuffix(url.path, ".tar.zst"))
             return {};
 
         Input input;
@@ -230,11 +224,9 @@ struct TarballInputScheme : InputScheme
         return true;
     }
 
-    std::pair<Tree, Input> fetch(ref<Store> store, const Input & _input) override
+    std::pair<Tree, Input> fetch(ref<Store> store, const Input & input) override
     {
-        Input input(_input);
-        auto [tree, meta] = downloadTarball(store, getStrAttr(input.attrs, "url"), "source", false);
-        input.attrs.insert_or_assign("url", meta.effectiveUrl);
+        auto tree = downloadTarball(store, getStrAttr(input.attrs, "url"), "source", false).first;
         return {std::move(tree), input};
     }
 };
@@ -122,6 +122,7 @@ public:
 
     void log(Verbosity lvl, const FormatOrString & fs) override
     {
+        if (lvl > verbosity) return;
        auto state(state_.lock());
        log(*state, lvl, fs.s);
     }
@@ -36,7 +36,7 @@ void printGCWarning()
 }
 
 
-void printMissing(ref<Store> store, const std::vector<StorePathWithOutputs> & paths, Verbosity lvl)
+void printMissing(ref<Store> store, const std::vector<DerivedPath> & paths, Verbosity lvl)
 {
     uint64_t downloadSize, narSize;
     StorePathSet willBuild, willSubstitute, unknown;
@@ -310,7 +310,7 @@ void printVersion(const string & programName)
 
 void showManPage(const string & name)
 {
-    restoreSignals();
+    restoreProcessContext();
     setenv("MANPATH", settings.nixManDir.c_str(), 1);
     execlp("man", "man", name.c_str(), nullptr);
     throw SysError("command 'man %1%' failed", name.c_str());
@@ -373,7 +373,7 @@ RunPager::RunPager()
         throw SysError("dupping stdin");
     if (!getenv("LESS"))
         setenv("LESS", "FRSXMK", 1);
-    restoreSignals();
+    restoreProcessContext();
     if (pager)
         execl("/bin/sh", "sh", "-c", pager, nullptr);
     execlp("pager", "pager", nullptr);
@@ -4,6 +4,7 @@
 #include "args.hh"
 #include "common-args.hh"
 #include "path.hh"
+#include "derived-path.hh"
 
 #include <signal.h>
 
@@ -42,7 +43,7 @@ struct StorePathWithOutputs;
 
 void printMissing(
     ref<Store> store,
-    const std::vector<StorePathWithOutputs> & paths,
+    const std::vector<DerivedPath> & paths,
     Verbosity lvl = lvlInfo);
 
 void printMissing(ref<Store> store, const StorePathSet & willBuild,
@@ -179,6 +179,9 @@ ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
     narInfo->url = "nar/" + narInfo->fileHash->to_string(Base32, false) + ".nar"
         + (compression == "xz" ? ".xz" :
            compression == "bzip2" ? ".bz2" :
+           compression == "zstd" ? ".zst" :
+           compression == "lzip" ? ".lzip" :
+           compression == "lz4" ? ".lz4" :
            compression == "br" ? ".br" :
            "");
 
@@ -447,18 +450,43 @@ StorePath BinaryCacheStore::addTextToStore(const string & name, const string & s
 
 std::optional<const Realisation> BinaryCacheStore::queryRealisation(const DrvOutput & id)
 {
+    if (diskCache) {
+        auto [cacheOutcome, maybeCachedRealisation] =
+            diskCache->lookupRealisation(getUri(), id);
+        switch (cacheOutcome) {
+            case NarInfoDiskCache::oValid:
+                debug("Returning a cached realisation for %s", id.to_string());
+                return *maybeCachedRealisation;
+            case NarInfoDiskCache::oInvalid:
+                debug("Returning a cached missing realisation for %s", id.to_string());
+                return {};
+            case NarInfoDiskCache::oUnknown:
+                break;
+        }
+    }
+
     auto outputInfoFilePath = realisationsPrefix + "/" + id.to_string() + ".doi";
     auto rawOutputInfo = getFile(outputInfoFilePath);
 
     if (rawOutputInfo) {
-        return {Realisation::fromJSON(
-            nlohmann::json::parse(*rawOutputInfo), outputInfoFilePath)};
+        auto realisation = Realisation::fromJSON(
+            nlohmann::json::parse(*rawOutputInfo), outputInfoFilePath);
+
+        if (diskCache)
+            diskCache->upsertRealisation(
+                getUri(), realisation);
+
+        return {realisation};
     } else {
+        if (diskCache)
+            diskCache->upsertAbsentRealisation(getUri(), id);
         return std::nullopt;
     }
 }
 
 void BinaryCacheStore::registerDrvOutput(const Realisation& info) {
+    if (diskCache)
+        diskCache->upsertRealisation(getUri(), info);
     auto filePath = realisationsPrefix + "/" + info.id.to_string() + ".doi";
     upsertFile(filePath, info.toJSON().dump(), "application/json");
 }
 
@@ -34,7 +34,7 @@ private:
 protected:
 
     // The prefix under which realisation infos will be stored
-    const std::string realisationsPrefix = "/realisations";
+    const std::string realisationsPrefix = "realisations";
 
     BinaryCacheStore(const Params & params);
 
@@ -20,6 +20,7 @@
 #include <sys/types.h>
 #include <sys/socket.h>
 #include <sys/un.h>
+#include <sys/wait.h>
 #include <netdb.h>
 #include <fcntl.h>
 #include <termios.h>
@@ -73,7 +74,7 @@ DerivationGoal::DerivationGoal(const StorePath & drvPath,
     state = &DerivationGoal::getDerivation;
     name = fmt(
         "building of '%s' from .drv file",
-        StorePathWithOutputs { drvPath, wantedOutputs }.to_string(worker.store));
+        DerivedPath::Built { drvPath, wantedOutputs }.to_string(worker.store));
     trace("created");
 
     mcExpectedBuilds = std::make_unique<MaintainCount<uint64_t>>(worker.expectedBuilds);
@@ -94,7 +95,7 @@ DerivationGoal::DerivationGoal(const StorePath & drvPath, const BasicDerivation
     state = &DerivationGoal::haveDerivation;
     name = fmt(
         "building of '%s' from in-memory derivation",
-        StorePathWithOutputs { drvPath, drv.outputNames() }.to_string(worker.store));
+        DerivedPath::Built { drvPath, drv.outputNames() }.to_string(worker.store));
     trace("created");
 
     mcExpectedBuilds = std::make_unique<MaintainCount<uint64_t>>(worker.expectedBuilds);
@@ -170,7 +171,7 @@ void DerivationGoal::getDerivation()
         return;
     }
 
-    addWaitee(upcast_goal(worker.makeSubstitutionGoal(drvPath)));
+    addWaitee(upcast_goal(worker.makePathSubstitutionGoal(drvPath)));
 
     state = &DerivationGoal::loadDerivation;
 }
@@ -246,14 +247,19 @@ void DerivationGoal::haveDerivation()
        through substitutes. If that doesn't work, we'll build
        them. */
     if (settings.useSubstitutes && parsedDrv->substitutesAllowed())
-        for (auto & [_, status] : initialOutputs) {
+        for (auto & [outputName, status] : initialOutputs) {
             if (!status.wanted) continue;
-            if (!status.known) {
-                warn("do not know how to query for unknown floating content-addressed derivation output yet");
-                /* Nothing to wait for; tail call */
-                return DerivationGoal::gaveUpOnSubstitution();
-            }
-            addWaitee(upcast_goal(worker.makeSubstitutionGoal(
+            if (!status.known)
+                addWaitee(
+                    upcast_goal(
+                        worker.makeDrvOutputSubstitutionGoal(
+                            DrvOutput{status.outputHash, outputName},
+                            buildMode == bmRepair ? Repair : NoRepair
+                        )
+                    )
+                );
+            else
+                addWaitee(upcast_goal(worker.makePathSubstitutionGoal(
                 status.known->path,
                 buildMode == bmRepair ? Repair : NoRepair,
                 getDerivationCA(*drv))));
|
||||||
if (!settings.useSubstitutes)
|
if (!settings.useSubstitutes)
|
||||||
throw Error("dependency '%s' of '%s' does not exist, and substitution is disabled",
|
throw Error("dependency '%s' of '%s' does not exist, and substitution is disabled",
|
||||||
worker.store.printStorePath(i), worker.store.printStorePath(drvPath));
|
worker.store.printStorePath(i), worker.store.printStorePath(drvPath));
|
||||||
addWaitee(upcast_goal(worker.makeSubstitutionGoal(i)));
|
addWaitee(upcast_goal(worker.makePathSubstitutionGoal(i)));
|
||||||
}
|
}
|
||||||
|
|
||||||
if (waitees.empty()) /* to prevent hang (no wake-up event) */
|
if (waitees.empty()) /* to prevent hang (no wake-up event) */
|
||||||
|
@ -388,7 +394,7 @@ void DerivationGoal::repairClosure()
|
||||||
worker.store.printStorePath(i), worker.store.printStorePath(drvPath));
|
worker.store.printStorePath(i), worker.store.printStorePath(drvPath));
|
||||||
auto drvPath2 = outputsToDrv.find(i);
|
auto drvPath2 = outputsToDrv.find(i);
|
||||||
if (drvPath2 == outputsToDrv.end())
|
if (drvPath2 == outputsToDrv.end())
|
||||||
addWaitee(upcast_goal(worker.makeSubstitutionGoal(i, Repair)));
|
addWaitee(upcast_goal(worker.makePathSubstitutionGoal(i, Repair)));
|
||||||
else
|
else
|
||||||
addWaitee(worker.makeDerivationGoal(drvPath2->second, StringSet(), bmRepair));
|
addWaitee(worker.makeDerivationGoal(drvPath2->second, StringSet(), bmRepair));
|
||||||
}
|
}
|
||||||
|
@ -920,6 +926,9 @@ void DerivationGoal::resolvedFinished() {
|
||||||
if (realisation) {
|
if (realisation) {
|
||||||
auto newRealisation = *realisation;
|
auto newRealisation = *realisation;
|
||||||
newRealisation.id = DrvOutput{initialOutputs.at(wantedOutput).outputHash, wantedOutput};
|
newRealisation.id = DrvOutput{initialOutputs.at(wantedOutput).outputHash, wantedOutput};
|
||||||
|
newRealisation.signatures.clear();
|
||||||
|
newRealisation.dependentRealisations = drvOutputReferences(worker.store, *drv, realisation->outPath);
|
||||||
|
signRealisation(newRealisation);
|
||||||
worker.store.registerDrvOutput(newRealisation);
|
worker.store.registerDrvOutput(newRealisation);
|
||||||
} else {
|
} else {
|
||||||
// If we don't have a realisation, then it must mean that something
|
// If we don't have a realisation, then it must mean that something
|
||||||
|
@ -1243,9 +1252,12 @@ OutputPathMap DerivationGoal::queryDerivationOutputMap()
|
||||||
void DerivationGoal::checkPathValidity()
|
void DerivationGoal::checkPathValidity()
|
||||||
{
|
{
|
||||||
bool checkHash = buildMode == bmRepair;
|
bool checkHash = buildMode == bmRepair;
|
||||||
|
auto wantedOutputsLeft = wantedOutputs;
|
||||||
for (auto & i : queryPartialDerivationOutputMap()) {
|
for (auto & i : queryPartialDerivationOutputMap()) {
|
||||||
InitialOutput & info = initialOutputs.at(i.first);
|
InitialOutput & info = initialOutputs.at(i.first);
|
||||||
info.wanted = wantOutput(i.first, wantedOutputs);
|
info.wanted = wantOutput(i.first, wantedOutputs);
|
||||||
|
if (info.wanted)
|
||||||
|
wantedOutputsLeft.erase(i.first);
|
||||||
if (i.second) {
|
if (i.second) {
|
||||||
auto outputPath = *i.second;
|
auto outputPath = *i.second;
|
||||||
info.known = {
|
info.known = {
|
||||||
|
@ -1258,15 +1270,33 @@ void DerivationGoal::checkPathValidity()
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
|
if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
|
||||||
if (auto real = worker.store.queryRealisation(
|
auto drvOutput = DrvOutput{initialOutputs.at(i.first).outputHash, i.first};
|
||||||
DrvOutput{initialOutputs.at(i.first).outputHash, i.first})) {
|
if (auto real = worker.store.queryRealisation(drvOutput)) {
|
||||||
info.known = {
|
info.known = {
|
||||||
.path = real->outPath,
|
.path = real->outPath,
|
||||||
.status = PathStatus::Valid,
|
.status = PathStatus::Valid,
|
||||||
};
|
};
|
||||||
|
} else if (info.known && info.known->status == PathStatus::Valid) {
|
||||||
|
// We know the output because it' a static output of the
|
||||||
|
// derivation, and the output path is valid, but we don't have
|
||||||
|
// its realisation stored (probably because it has been built
|
||||||
|
// without the `ca-derivations` experimental flag)
|
||||||
|
worker.store.registerDrvOutput(
|
||||||
|
Realisation{
|
||||||
|
drvOutput,
|
||||||
|
info.known->path,
|
||||||
|
}
|
||||||
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
// If we requested all the outputs via the empty set, we are always fine.
|
||||||
|
// If we requested specific elements, the loop above removes all the valid
|
||||||
|
// ones, so any that are left must be invalid.
|
||||||
|
if (!wantedOutputsLeft.empty())
|
||||||
|
throw Error("derivation '%s' does not have wanted outputs %s",
|
||||||
|
worker.store.printStorePath(drvPath),
|
||||||
|
concatStringsSep(", ", quoteStrings(wantedOutputsLeft)));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -180,6 +180,9 @@ struct DerivationGoal : public Goal
|
||||||
/* Open a log file and a pipe to it. */
|
/* Open a log file and a pipe to it. */
|
||||||
Path openLogFile();
|
Path openLogFile();
|
||||||
|
|
||||||
|
/* Sign the newly built realisation if the store allows it */
|
||||||
|
virtual void signRealisation(Realisation&) {}
|
||||||
|
|
||||||
/* Close the log file. */
|
/* Close the log file. */
|
||||||
void closeLogFile();
|
void closeLogFile();
|
||||||
|
|
||||||
|
|
122 src/libstore/build/drv-output-substitution-goal.cc Normal file
@@ -0,0 +1,122 @@
+#include "drv-output-substitution-goal.hh"
+#include "worker.hh"
+#include "substitution-goal.hh"
+
+namespace nix {
+
+DrvOutputSubstitutionGoal::DrvOutputSubstitutionGoal(const DrvOutput& id, Worker & worker, RepairFlag repair, std::optional<ContentAddress> ca)
+    : Goal(worker)
+    , id(id)
+{
+    state = &DrvOutputSubstitutionGoal::init;
+    name = fmt("substitution of '%s'", id.to_string());
+    trace("created");
+}
+
+
+void DrvOutputSubstitutionGoal::init()
+{
+    trace("init");
+
+    /* If the derivation already exists, we’re done */
+    if (worker.store.queryRealisation(id)) {
+        amDone(ecSuccess);
+        return;
+    }
+
+    subs = settings.useSubstitutes ? getDefaultSubstituters() : std::list<ref<Store>>();
+    tryNext();
+}
+
+void DrvOutputSubstitutionGoal::tryNext()
+{
+    trace("Trying next substituter");
+
+    if (subs.size() == 0) {
+        /* None left. Terminate this goal and let someone else deal
+           with it. */
+        debug("drv output '%s' is required, but there is no substituter that can provide it", id.to_string());
+
+        /* Hack: don't indicate failure if there were no substituters.
+           In that case the calling derivation should just do a
+           build. */
+        amDone(substituterFailed ? ecFailed : ecNoSubstituters);
+
+        if (substituterFailed) {
+            worker.failedSubstitutions++;
+            worker.updateProgress();
+        }
+
+        return;
+    }
+
+    auto sub = subs.front();
+    subs.pop_front();
+
+    // FIXME: Make async
+    outputInfo = sub->queryRealisation(id);
+    if (!outputInfo) {
+        tryNext();
+        return;
+    }
+
+    for (const auto & [depId, depPath] : outputInfo->dependentRealisations) {
+        if (depId != id) {
+            if (auto localOutputInfo = worker.store.queryRealisation(depId);
+                localOutputInfo && localOutputInfo->outPath != depPath) {
+                warn(
+                    "substituter '%s' has an incompatible realisation for '%s', ignoring.\n"
+                    "Local: %s\n"
+                    "Remote: %s",
+                    sub->getUri(),
+                    depId.to_string(),
+                    worker.store.printStorePath(localOutputInfo->outPath),
+                    worker.store.printStorePath(depPath)
+                );
+                tryNext();
+                return;
+            }
+            addWaitee(worker.makeDrvOutputSubstitutionGoal(depId));
+        }
+    }
+
+    addWaitee(worker.makePathSubstitutionGoal(outputInfo->outPath));
+
+    if (waitees.empty()) outPathValid();
+    else state = &DrvOutputSubstitutionGoal::outPathValid;
+}
+
+void DrvOutputSubstitutionGoal::outPathValid()
+{
+    assert(outputInfo);
+    trace("Output path substituted");
+
+    if (nrFailed > 0) {
+        debug("The output path of the derivation output '%s' could not be substituted", id.to_string());
+        amDone(nrNoSubstituters > 0 || nrIncompleteClosure > 0 ? ecIncompleteClosure : ecFailed);
+        return;
+    }
+
+    worker.store.registerDrvOutput(*outputInfo);
+    finished();
+}
+
+void DrvOutputSubstitutionGoal::finished()
+{
+    trace("finished");
+    amDone(ecSuccess);
+}
+
+string DrvOutputSubstitutionGoal::key()
+{
+    /* "a$" ensures substitution goals happen before derivation
+       goals. */
+    return "a$" + std::string(id.to_string());
+}
+
+void DrvOutputSubstitutionGoal::work()
+{
+    (this->*state)();
+}
+
+}
50 src/libstore/build/drv-output-substitution-goal.hh Normal file
@@ -0,0 +1,50 @@
+#pragma once
+
+#include "store-api.hh"
+#include "goal.hh"
+#include "realisation.hh"
+
+namespace nix {
+
+class Worker;
+
+// Substitution of a derivation output.
+// This is done in three steps:
+// 1. Fetch the output info from a substituter
+// 2. Substitute the corresponding output path
+// 3. Register the output info
+class DrvOutputSubstitutionGoal : public Goal {
+private:
+    // The drv output we're trying to substitue
+    DrvOutput id;
+
+    // The realisation corresponding to the given output id.
+    // Will be filled once we can get it.
+    std::optional<Realisation> outputInfo;
+
+    /* The remaining substituters. */
+    std::list<ref<Store>> subs;
+
+    /* Whether a substituter failed. */
+    bool substituterFailed = false;
+
+public:
+    DrvOutputSubstitutionGoal(const DrvOutput& id, Worker & worker, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
+
+    typedef void (DrvOutputSubstitutionGoal::*GoalState)();
+    GoalState state;
+
+    void init();
+    void tryNext();
+    void outPathValid();
+    void finished();
+
+    void timedOut(Error && ex) override { abort(); };
+
+    string key() override;
+
+    void work() override;
+
+};
+
+}
@@ -6,16 +6,20 @@
 
 namespace nix {
 
-void Store::buildPaths(const std::vector<StorePathWithOutputs> & drvPaths, BuildMode buildMode)
+void Store::buildPaths(const std::vector<DerivedPath> & reqs, BuildMode buildMode)
 {
     Worker worker(*this);
 
     Goals goals;
-    for (auto & path : drvPaths) {
-        if (path.path.isDerivation())
-            goals.insert(worker.makeDerivationGoal(path.path, path.outputs, buildMode));
-        else
-            goals.insert(worker.makeSubstitutionGoal(path.path, buildMode == bmRepair ? Repair : NoRepair));
+    for (auto & br : reqs) {
+        std::visit(overloaded {
+            [&](DerivedPath::Built bfd) {
+                goals.insert(worker.makeDerivationGoal(bfd.drvPath, bfd.outputs, buildMode));
+            },
+            [&](DerivedPath::Opaque bo) {
+                goals.insert(worker.makePathSubstitutionGoal(bo.path, buildMode == bmRepair ? Repair : NoRepair));
+            },
+        }, br.raw());
     }
 
     worker.run(goals);
@@ -31,7 +35,7 @@ void Store::buildPaths(const std::vector<StorePathWithOutputs> & drvPaths, Build
         }
         if (i->exitCode != Goal::ecSuccess) {
             if (auto i2 = dynamic_cast<DerivationGoal *>(i.get())) failed.insert(i2->drvPath);
-            else if (auto i2 = dynamic_cast<SubstitutionGoal *>(i.get())) failed.insert(i2->storePath);
+            else if (auto i2 = dynamic_cast<PathSubstitutionGoal *>(i.get())) failed.insert(i2->storePath);
         }
     }
 
@@ -90,7 +94,7 @@ void Store::ensurePath(const StorePath & path)
     if (isValidPath(path)) return;
 
     Worker worker(*this);
-    GoalPtr goal = worker.makeSubstitutionGoal(path);
+    GoalPtr goal = worker.makePathSubstitutionGoal(path);
     Goals goals = {goal};
 
     worker.run(goals);
@@ -108,7 +112,7 @@ void Store::ensurePath(const StorePath & path)
 void LocalStore::repairPath(const StorePath & path)
 {
     Worker worker(*this);
-    GoalPtr goal = worker.makeSubstitutionGoal(path, Repair);
+    GoalPtr goal = worker.makePathSubstitutionGoal(path, Repair);
     Goals goals = {goal};
 
     worker.run(goals);
@@ -78,6 +78,8 @@ void Goal::amDone(ExitCode result, std::optional<Error> ex)
     }
     waiters.clear();
     worker.removeGoal(shared_from_this());
+
+    cleanup();
 }
 
 
@@ -100,6 +100,8 @@ struct Goal : public std::enable_shared_from_this<Goal>
     virtual string key() = 0;
 
     void amDone(ExitCode result, std::optional<Error> ex = {});
+
+    virtual void cleanup() { }
 };
 
 void addToWeakGoals(WeakGoals & goals, GoalPtr p);
@@ -153,6 +153,7 @@ void LocalDerivationGoal::killChild()
 void LocalDerivationGoal::tryLocalBuild() {
     unsigned int curBuilds = worker.getNrLocalBuilds();
     if (curBuilds >= settings.maxBuildJobs) {
+        state = &DerivationGoal::tryToBuild;
         worker.waitForBuildSlot(shared_from_this());
         outputLocks.unlock();
         return;
@@ -291,7 +292,7 @@ bool LocalDerivationGoal::cleanupDecideWhetherDiskFull()
         auto & localStore = getLocalStore();
         uint64_t required = 8ULL * 1024 * 1024; // FIXME: make configurable
         struct statvfs st;
-        if (statvfs(localStore.realStoreDir.c_str(), &st) == 0 &&
+        if (statvfs(localStore.realStoreDir.get().c_str(), &st) == 0 &&
             (uint64_t) st.f_bavail * st.f_bsize < required)
             diskFull = true;
         if (statvfs(tmpDir.c_str(), &st) == 0 &&
@@ -416,7 +417,7 @@ void LocalDerivationGoal::startBuilder()
     }
 
     auto & localStore = getLocalStore();
-    if (localStore.storeDir != localStore.realStoreDir) {
+    if (localStore.storeDir != localStore.realStoreDir.get()) {
 #if __linux__
         useChroot = true;
 #else
@@ -581,7 +582,9 @@ void LocalDerivationGoal::startBuilder()
                 throw Error("derivation '%s' requested impure path '%s', but it was not in allowed-impure-host-deps",
                     worker.store.printStorePath(drvPath), i);
 
-            dirsInChroot[i] = i;
+            /* Allow files in __impureHostDeps to be missing; e.g.
+               macOS 11+ has no /usr/lib/libSystem*.dylib */
+            dirsInChroot[i] = {i, true};
         }
 
 #if __linux__
@@ -1190,6 +1193,26 @@ void LocalDerivationGoal::writeStructuredAttrs()
     chownToBuilder(tmpDir + "/.attrs.sh");
 }
 
+
+static StorePath pathPartOfReq(const DerivedPath & req)
+{
+    return std::visit(overloaded {
+        [&](DerivedPath::Opaque bo) {
+            return bo.path;
+        },
+        [&](DerivedPath::Built bfd) {
+            return bfd.drvPath;
+        },
+    }, req.raw());
+}
+
+
+bool LocalDerivationGoal::isAllowed(const DerivedPath & req)
+{
+    return this->isAllowed(pathPartOfReq(req));
+}
+
+
 struct RestrictedStoreConfig : virtual LocalFSStoreConfig
 {
     using LocalFSStoreConfig::LocalFSStoreConfig;
@@ -1310,33 +1333,52 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo
     std::optional<const Realisation> queryRealisation(const DrvOutput & id) override
     // XXX: This should probably be allowed if the realisation corresponds to
     // an allowed derivation
-    { throw Error("queryRealisation"); }
+    {
+        if (!goal.isAllowed(id))
+            throw InvalidPath("cannot query an unknown output id '%s' in recursive Nix", id.to_string());
+        return next->queryRealisation(id);
+    }
 
-    void buildPaths(const std::vector<StorePathWithOutputs> & paths, BuildMode buildMode) override
+    void buildPaths(const std::vector<DerivedPath> & paths, BuildMode buildMode) override
     {
         if (buildMode != bmNormal) throw Error("unsupported build mode");
 
         StorePathSet newPaths;
+        std::set<Realisation> newRealisations;
 
-        for (auto & path : paths) {
-            if (!goal.isAllowed(path.path))
-                throw InvalidPath("cannot build unknown path '%s' in recursive Nix", printStorePath(path.path));
+        for (auto & req : paths) {
+            if (!goal.isAllowed(req))
+                throw InvalidPath("cannot build '%s' in recursive Nix because path is unknown", req.to_string(*next));
         }
 
         next->buildPaths(paths, buildMode);
 
         for (auto & path : paths) {
-            if (!path.path.isDerivation()) continue;
-            auto outputs = next->queryDerivationOutputMap(path.path);
-            for (auto & output : outputs)
-                if (wantOutput(output.first, path.outputs))
-                    newPaths.insert(output.second);
+            auto p = std::get_if<DerivedPath::Built>(&path);
+            if (!p) continue;
+            auto & bfd = *p;
+            auto drv = readDerivation(bfd.drvPath);
+            auto drvHashes = staticOutputHashes(*this, drv);
+            auto outputs = next->queryDerivationOutputMap(bfd.drvPath);
+            for (auto & [outputName, outputPath] : outputs)
+                if (wantOutput(outputName, bfd.outputs)) {
+                    newPaths.insert(outputPath);
+                    if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
+                        auto thisRealisation = next->queryRealisation(
+                            DrvOutput{drvHashes.at(outputName), outputName}
+                        );
+                        assert(thisRealisation);
+                        newRealisations.insert(*thisRealisation);
+                    }
+                }
         }
 
         StorePathSet closure;
         next->computeFSClosure(newPaths, closure);
         for (auto & path : closure)
             goal.addDependency(path);
+        for (auto & real : Realisation::closure(*next, newRealisations))
+            goal.addedDrvOutputs.insert(real.id);
     }
 
     BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
@@ -1358,7 +1400,7 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo
     void addSignatures(const StorePath & storePath, const StringSet & sigs) override
     { unsupported("addSignatures"); }
 
-    void queryMissing(const std::vector<StorePathWithOutputs> & targets,
+    void queryMissing(const std::vector<DerivedPath> & targets,
         StorePathSet & willBuild, StorePathSet & willSubstitute, StorePathSet & unknown,
         uint64_t & downloadSize, uint64_t & narSize) override
     {
@@ -1366,12 +1408,12 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo
            client about what paths will be built/substituted or are
           already present. Probably not a big deal. */
 
-        std::vector<StorePathWithOutputs> allowed;
-        for (auto & path : targets) {
-            if (goal.isAllowed(path.path))
-                allowed.emplace_back(path);
+        std::vector<DerivedPath> allowed;
+        for (auto & req : targets) {
+            if (goal.isAllowed(req))
+                allowed.emplace_back(req);
             else
-                unknown.insert(path.path);
+                unknown.insert(pathPartOfReq(req));
         }
 
         next->queryMissing(allowed, willBuild, willSubstitute,
@@ -1703,18 +1745,18 @@ void LocalDerivationGoal::runChild()
                    network, so give them access to /etc/resolv.conf and so
                    on. */
                 if (derivationIsImpure(derivationType)) {
-                    ss.push_back("/etc/resolv.conf");
-
                     // Only use nss functions to resolve hosts and
                     // services. Don’t use it for anything else that may
                     // be configured for this system. This limits the
                     // potential impurities introduced in fixed-outputs.
                     writeFile(chrootRootDir + "/etc/nsswitch.conf", "hosts: files dns\nservices: files\n");
 
-                    ss.push_back("/etc/services");
-                    ss.push_back("/etc/hosts");
-                    if (pathExists("/var/run/nscd/socket"))
-                        ss.push_back("/var/run/nscd/socket");
+                    /* N.B. it is realistic that these paths might not exist. It
+                       happens when testing Nix building fixed-output derivations
+                       within a pure derivation. */
+                    for (auto & path : { "/etc/resolv.conf", "/etc/services", "/etc/hosts", "/var/run/nscd/socket" })
+                        if (pathExists(path))
+                            ss.push_back(path);
                 }
 
                 for (auto & i : ss) dirsInChroot.emplace(i, i);
@@ -2276,10 +2318,6 @@ void LocalDerivationGoal::registerOutputs()
                 sink.s = make_ref<std::string>(rewriteStrings(*sink.s, outputRewrites));
                 StringSource source(*sink.s);
                 restorePath(actualPath, source);
-
-                /* FIXME: set proper permissions in restorePath() so
-                   we don't have to do another traversal. */
-                canonicalisePathMetaData(actualPath, -1, inodesSeen);
             }
         };
 
@@ -2333,32 +2371,19 @@ void LocalDerivationGoal::registerOutputs()
             }
             auto got = caSink.finish().first;
             auto refs = rewriteRefs();
-            HashModuloSink narSink { htSHA256, oldHashPart };
-            dumpPath(actualPath, narSink);
-            auto narHashAndSize = narSink.finish();
-            ValidPathInfo newInfo0 {
-                worker.store.makeFixedOutputPath(
+
+            auto finalPath = worker.store.makeFixedOutputPath(
                     outputHash.method,
                     got,
                     outputPathName(drv->name, outputName),
                     refs.second,
-                    refs.first),
-                narHashAndSize.first,
-            };
-            newInfo0.narSize = narHashAndSize.second;
-            newInfo0.ca = FixedOutputHash {
-                .method = outputHash.method,
-                .hash = got,
-            };
-            newInfo0.references = refs.second;
-            if (refs.first)
-                newInfo0.references.insert(newInfo0.path);
-            if (scratchPath != newInfo0.path) {
+                    refs.first);
+            if (scratchPath != finalPath) {
                 // Also rewrite the output path
                 auto source = sinkToSource([&](Sink & nextSink) {
                     StringSink sink;
                     dumpPath(actualPath, sink);
-                    RewritingSink rsink2(oldHashPart, std::string(newInfo0.path.hashPart()), nextSink);
+                    RewritingSink rsink2(oldHashPart, std::string(finalPath.hashPart()), nextSink);
                     rsink2(*sink.s);
                     rsink2.flush();
                 });
|
||||||
movePath(tmpPath, actualPath);
|
movePath(tmpPath, actualPath);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
HashResult narHashAndSize = hashPath(htSHA256, actualPath);
|
||||||
|
ValidPathInfo newInfo0 {
|
||||||
|
finalPath,
|
||||||
|
narHashAndSize.first,
|
||||||
|
};
|
||||||
|
|
||||||
|
newInfo0.narSize = narHashAndSize.second;
|
||||||
|
newInfo0.ca = FixedOutputHash {
|
||||||
|
.method = outputHash.method,
|
||||||
|
.hash = got,
|
||||||
|
};
|
||||||
|
newInfo0.references = refs.second;
|
||||||
|
if (refs.first)
|
||||||
|
newInfo0.references.insert(newInfo0.path);
|
||||||
|
|
||||||
assert(newInfo0.ca);
|
assert(newInfo0.ca);
|
||||||
return newInfo0;
|
return newInfo0;
|
||||||
};
|
};
|
||||||
|
@ -2428,6 +2468,10 @@ void LocalDerivationGoal::registerOutputs()
|
||||||
},
|
},
|
||||||
}, output.output);
|
}, output.output);
|
||||||
|
|
||||||
|
/* FIXME: set proper permissions in restorePath() so
|
||||||
|
we don't have to do another traversal. */
|
||||||
|
canonicalisePathMetaData(actualPath, -1, inodesSeen);
|
||||||
|
|
||||||
/* Calculate where we'll move the output files. In the checking case we
|
/* Calculate where we'll move the output files. In the checking case we
|
||||||
will leave leave them where they are, for now, rather than move to
|
will leave leave them where they are, for now, rather than move to
|
||||||
their usual "final destination" */
|
their usual "final destination" */
|
||||||
|
@ -2460,6 +2504,7 @@ void LocalDerivationGoal::registerOutputs()
|
||||||
assert(newInfo.ca);
|
assert(newInfo.ca);
|
||||||
} else {
|
} else {
|
||||||
auto destPath = worker.store.toRealPath(finalDestPath);
|
auto destPath = worker.store.toRealPath(finalDestPath);
|
||||||
|
deletePath(destPath);
|
||||||
movePath(actualPath, destPath);
|
movePath(actualPath, destPath);
|
||||||
actualPath = destPath;
|
actualPath = destPath;
|
||||||
}
|
}
|
||||||
|
@ -2615,11 +2660,20 @@ void LocalDerivationGoal::registerOutputs()
|
||||||
but it's fine to do in all cases. */
|
but it's fine to do in all cases. */
|
||||||
|
|
||||||
if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
|
if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
|
||||||
for (auto& [outputName, newInfo] : infos)
|
for (auto& [outputName, newInfo] : infos) {
|
||||||
worker.store.registerDrvOutput(Realisation{
|
auto thisRealisation = Realisation{
|
||||||
.id = DrvOutput{initialOutputs.at(outputName).outputHash, outputName},
|
.id = DrvOutput{initialOutputs.at(outputName).outputHash,
|
||||||
.outPath = newInfo.path});
|
outputName},
|
||||||
|
.outPath = newInfo.path};
|
||||||
|
signRealisation(thisRealisation);
|
||||||
|
worker.store.registerDrvOutput(thisRealisation);
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void LocalDerivationGoal::signRealisation(Realisation & realisation)
|
||||||
|
{
|
||||||
|
getLocalStore().signRealisation(realisation);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@@ -108,6 +108,9 @@ struct LocalDerivationGoal : public DerivationGoal
     /* Paths that were added via recursive Nix calls. */
     StorePathSet addedPaths;
 
+    /* Realisations that were added via recursive Nix calls. */
+    std::set<DrvOutput> addedDrvOutputs;
+
     /* Recursive Nix calls are only allowed to build or realize paths
        in the original input closure or added via a recursive Nix call
        (so e.g. you can't do 'nix-store -r /nix/store/<bla>' where
@@ -116,6 +119,12 @@ struct LocalDerivationGoal : public DerivationGoal
     {
         return inputPaths.count(path) || addedPaths.count(path);
     }
+    bool isAllowed(const DrvOutput & id)
+    {
+        return addedDrvOutputs.count(id);
+    }
+
+    bool isAllowed(const DerivedPath & req);
 
     friend struct RestrictedStore;
 
@@ -161,6 +170,8 @@ struct LocalDerivationGoal : public DerivationGoal
        as valid. */
     void registerOutputs() override;
 
+    void signRealisation(Realisation &) override;
+
     /* Check that an output meets the requirements specified by the
        'outputChecks' attribute (or the legacy
        '{allowed,disallowed}{References,Requisites}' attributes). */
@@ -5,40 +5,32 @@
 
 namespace nix {
 
-SubstitutionGoal::SubstitutionGoal(const StorePath & storePath, Worker & worker, RepairFlag repair, std::optional<ContentAddress> ca)
+PathSubstitutionGoal::PathSubstitutionGoal(const StorePath & storePath, Worker & worker, RepairFlag repair, std::optional<ContentAddress> ca)
     : Goal(worker)
     , storePath(storePath)
     , repair(repair)
     , ca(ca)
 {
-    state = &SubstitutionGoal::init;
+    state = &PathSubstitutionGoal::init;
     name = fmt("substitution of '%s'", worker.store.printStorePath(this->storePath));
     trace("created");
     maintainExpectedSubstitutions = std::make_unique<MaintainCount<uint64_t>>(worker.expectedSubstitutions);
 }
 
 
-SubstitutionGoal::~SubstitutionGoal()
+PathSubstitutionGoal::~PathSubstitutionGoal()
 {
-    try {
-        if (thr.joinable()) {
-            // FIXME: signal worker thread to quit.
-            thr.join();
-            worker.childTerminated(this);
-        }
-    } catch (...) {
-        ignoreException();
-    }
+    cleanup();
 }
 
 
-void SubstitutionGoal::work()
+void PathSubstitutionGoal::work()
 {
     (this->*state)();
 }
 
 
-void SubstitutionGoal::init()
+void PathSubstitutionGoal::init()
 {
     trace("init");
 
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void SubstitutionGoal::tryNext()
|
void PathSubstitutionGoal::tryNext()
|
||||||
{
|
{
|
||||||
trace("trying next substituter");
|
trace("trying next substituter");
|
||||||
|
|
||||||
|
cleanup();
|
||||||
|
|
||||||
if (subs.size() == 0) {
|
if (subs.size() == 0) {
|
||||||
/* None left. Terminate this goal and let someone else deal
|
/* None left. Terminate this goal and let someone else deal
|
||||||
with it. */
|
with it. */
|
||||||
|
@ -142,7 +136,7 @@ void SubstitutionGoal::tryNext()
|
||||||
/* Bail out early if this substituter lacks a valid
|
/* Bail out early if this substituter lacks a valid
|
||||||
signature. LocalStore::addToStore() also checks for this, but
|
signature. LocalStore::addToStore() also checks for this, but
|
||||||
only after we've downloaded the path. */
|
only after we've downloaded the path. */
|
||||||
if (!sub->isTrusted && worker.store.pathInfoIsTrusted(*info))
|
if (!sub->isTrusted && worker.store.pathInfoIsUntrusted(*info))
|
||||||
{
|
{
|
||||||
warn("substituter '%s' does not have a valid signature for path '%s'",
|
warn("substituter '%s' does not have a valid signature for path '%s'",
|
||||||
sub->getUri(), worker.store.printStorePath(storePath));
|
sub->getUri(), worker.store.printStorePath(storePath));
|
||||||
|
@ -154,16 +148,16 @@ void SubstitutionGoal::tryNext()
|
||||||
paths referenced by this one. */
|
paths referenced by this one. */
|
||||||
for (auto & i : info->references)
|
for (auto & i : info->references)
|
||||||
if (i != storePath) /* ignore self-references */
|
if (i != storePath) /* ignore self-references */
|
||||||
addWaitee(worker.makeSubstitutionGoal(i));
|
addWaitee(worker.makePathSubstitutionGoal(i));
|
||||||
|
|
||||||
if (waitees.empty()) /* to prevent hang (no wake-up event) */
|
if (waitees.empty()) /* to prevent hang (no wake-up event) */
|
||||||
referencesValid();
|
referencesValid();
|
||||||
else
|
else
|
||||||
state = &SubstitutionGoal::referencesValid;
|
state = &PathSubstitutionGoal::referencesValid;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void SubstitutionGoal::referencesValid()
|
void PathSubstitutionGoal::referencesValid()
|
||||||
{
|
{
|
||||||
trace("all references realised");
|
trace("all references realised");
|
||||||
|
|
||||||
|
@ -177,12 +171,12 @@ void SubstitutionGoal::referencesValid()
|
||||||
if (i != storePath) /* ignore self-references */
|
if (i != storePath) /* ignore self-references */
|
||||||
assert(worker.store.isValidPath(i));
|
assert(worker.store.isValidPath(i));
|
||||||
|
|
||||||
state = &SubstitutionGoal::tryToRun;
|
state = &PathSubstitutionGoal::tryToRun;
|
||||||
worker.wakeUp(shared_from_this());
|
worker.wakeUp(shared_from_this());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void SubstitutionGoal::tryToRun()
|
void PathSubstitutionGoal::tryToRun()
|
||||||
{
|
{
|
||||||
trace("trying to run");
|
trace("trying to run");
|
||||||
|
|
||||||
|
@ -205,7 +199,7 @@ void SubstitutionGoal::tryToRun()
|
||||||
thr = std::thread([this]() {
|
thr = std::thread([this]() {
|
||||||
try {
|
try {
|
||||||
/* Wake up the worker loop when we're done. */
|
/* Wake up the worker loop when we're done. */
|
||||||
Finally updateStats([this]() { outPipe.writeSide = -1; });
|
Finally updateStats([this]() { outPipe.writeSide.close(); });
|
||||||
|
|
||||||
Activity act(*logger, actSubstitute, Logger::Fields{worker.store.printStorePath(storePath), sub->getUri()});
|
Activity act(*logger, actSubstitute, Logger::Fields{worker.store.printStorePath(storePath), sub->getUri()});
|
||||||
PushActivity pact(act.id);
|
PushActivity pact(act.id);
|
||||||
|
@ -221,11 +215,11 @@ void SubstitutionGoal::tryToRun()
|
||||||
|
|
||||||
worker.childStarted(shared_from_this(), {outPipe.readSide.get()}, true, false);
|
worker.childStarted(shared_from_this(), {outPipe.readSide.get()}, true, false);
|
||||||
|
|
||||||
state = &SubstitutionGoal::finished;
|
state = &PathSubstitutionGoal::finished;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void SubstitutionGoal::finished()
|
void PathSubstitutionGoal::finished()
|
||||||
{
|
{
|
||||||
trace("substitute finished");
|
trace("substitute finished");
|
||||||
|
|
||||||
|
@ -249,7 +243,7 @@ void SubstitutionGoal::finished()
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Try the next substitute. */
|
/* Try the next substitute. */
|
||||||
state = &SubstitutionGoal::tryNext;
|
state = &PathSubstitutionGoal::tryNext;
|
||||||
worker.wakeUp(shared_from_this());
|
worker.wakeUp(shared_from_this());
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
@ -278,14 +272,31 @@ void SubstitutionGoal::finished()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void SubstitutionGoal::handleChildOutput(int fd, const string & data)
|
void PathSubstitutionGoal::handleChildOutput(int fd, const string & data)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void SubstitutionGoal::handleEOF(int fd)
|
void PathSubstitutionGoal::handleEOF(int fd)
|
||||||
{
|
{
|
||||||
if (fd == outPipe.readSide.get()) worker.wakeUp(shared_from_this());
|
if (fd == outPipe.readSide.get()) worker.wakeUp(shared_from_this());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void PathSubstitutionGoal::cleanup()
|
||||||
|
{
|
||||||
|
try {
|
||||||
|
if (thr.joinable()) {
|
||||||
|
// FIXME: signal worker thread to quit.
|
||||||
|
thr.join();
|
||||||
|
worker.childTerminated(this);
|
||||||
|
}
|
||||||
|
|
||||||
|
outPipe.close();
|
||||||
|
} catch (...) {
|
||||||
|
ignoreException();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@@ -8,13 +8,13 @@ namespace nix {

 class Worker;

-struct SubstitutionGoal : public Goal
+struct PathSubstitutionGoal : public Goal
 {
     /* The store path that should be realised through a substitute. */
     StorePath storePath;

     /* The path the substituter refers to the path as. This will be
-     * different when the stores have different names. */
+       different when the stores have different names. */
     std::optional<StorePath> subPath;

     /* The remaining substituters. */
@@ -47,14 +47,15 @@ struct SubstitutionGoal : public Goal
     std::unique_ptr<MaintainCount<uint64_t>> maintainExpectedSubstitutions,
         maintainRunningSubstitutions, maintainExpectedNar, maintainExpectedDownload;

-    typedef void (SubstitutionGoal::*GoalState)();
+    typedef void (PathSubstitutionGoal::*GoalState)();
     GoalState state;

     /* Content address for recomputing store path */
     std::optional<ContentAddress> ca;

-    SubstitutionGoal(const StorePath & storePath, Worker & worker, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
-    ~SubstitutionGoal();
+public:
+    PathSubstitutionGoal(const StorePath & storePath, Worker & worker, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
+    ~PathSubstitutionGoal();

     void timedOut(Error && ex) override { abort(); };

@@ -78,6 +79,8 @@ struct SubstitutionGoal : public Goal
     /* Callback used by the worker to write to the log. */
     void handleChildOutput(int fd, const string & data) override;
     void handleEOF(int fd) override;
+
+    void cleanup() override;
 };

 }
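
Note: the renamed goal class keeps the pointer-to-member state machine that all build goals use: `work()` calls whatever member function `state` currently points at, and each step sets `state` to the next handler. A minimal standalone sketch of that pattern, with a hypothetical `DemoGoal` that is not part of this patch:

```cpp
#include <cstdio>

struct DemoGoal
{
    typedef void (DemoGoal::*GoalState)();
    GoalState state = &DemoGoal::init;

    void work() { (this->*state)(); }   // dispatch to the current step

    void init()
    {
        std::printf("init\n");
        state = &DemoGoal::tryNext;     // advance to the next state
    }

    void tryNext()
    {
        std::printf("tryNext\n");
        state = &DemoGoal::finished;
    }

    void finished() { std::printf("finished\n"); }
};

int main()
{
    DemoGoal g;
    g.work();  // init
    g.work();  // tryNext
    g.work();  // finished
}
```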
@@ -1,6 +1,7 @@
 #include "machines.hh"
 #include "worker.hh"
 #include "substitution-goal.hh"
+#include "drv-output-substitution-goal.hh"
 #include "local-derivation-goal.hh"
 #include "hook-instance.hh"

@@ -78,20 +79,32 @@ std::shared_ptr<DerivationGoal> Worker::makeBasicDerivationGoal(const StorePath
 }


-std::shared_ptr<SubstitutionGoal> Worker::makeSubstitutionGoal(const StorePath & path, RepairFlag repair, std::optional<ContentAddress> ca)
+std::shared_ptr<PathSubstitutionGoal> Worker::makePathSubstitutionGoal(const StorePath & path, RepairFlag repair, std::optional<ContentAddress> ca)
 {
-    std::weak_ptr<SubstitutionGoal> & goal_weak = substitutionGoals[path];
+    std::weak_ptr<PathSubstitutionGoal> & goal_weak = substitutionGoals[path];
     auto goal = goal_weak.lock(); // FIXME
     if (!goal) {
-        goal = std::make_shared<SubstitutionGoal>(path, *this, repair, ca);
+        goal = std::make_shared<PathSubstitutionGoal>(path, *this, repair, ca);
         goal_weak = goal;
         wakeUp(goal);
     }
     return goal;
 }

-template<typename G>
-static void removeGoal(std::shared_ptr<G> goal, std::map<StorePath, std::weak_ptr<G>> & goalMap)
+std::shared_ptr<DrvOutputSubstitutionGoal> Worker::makeDrvOutputSubstitutionGoal(const DrvOutput& id, RepairFlag repair, std::optional<ContentAddress> ca)
+{
+    std::weak_ptr<DrvOutputSubstitutionGoal> & goal_weak = drvOutputSubstitutionGoals[id];
+    auto goal = goal_weak.lock(); // FIXME
+    if (!goal) {
+        goal = std::make_shared<DrvOutputSubstitutionGoal>(id, *this, repair, ca);
+        goal_weak = goal;
+        wakeUp(goal);
+    }
+    return goal;
+}
+
+template<typename K, typename G>
+static void removeGoal(std::shared_ptr<G> goal, std::map<K, std::weak_ptr<G>> & goalMap)
 {
     /* !!! inefficient */
     for (auto i = goalMap.begin();
@@ -109,10 +122,13 @@ void Worker::removeGoal(GoalPtr goal)
 {
     if (auto drvGoal = std::dynamic_pointer_cast<DerivationGoal>(goal))
         nix::removeGoal(drvGoal, derivationGoals);
-    else if (auto subGoal = std::dynamic_pointer_cast<SubstitutionGoal>(goal))
+    else if (auto subGoal = std::dynamic_pointer_cast<PathSubstitutionGoal>(goal))
         nix::removeGoal(subGoal, substitutionGoals);
+    else if (auto subGoal = std::dynamic_pointer_cast<DrvOutputSubstitutionGoal>(goal))
+        nix::removeGoal(subGoal, drvOutputSubstitutionGoals);
     else
         assert(false);

     if (topGoals.find(goal) != topGoals.end()) {
         topGoals.erase(goal);
         /* If a top-level goal failed, then kill all other goals
@@ -211,14 +227,14 @@ void Worker::waitForAWhile(GoalPtr goal)

 void Worker::run(const Goals & _topGoals)
 {
-    std::vector<nix::StorePathWithOutputs> topPaths;
+    std::vector<nix::DerivedPath> topPaths;

     for (auto & i : _topGoals) {
         topGoals.insert(i);
         if (auto goal = dynamic_cast<DerivationGoal *>(i.get())) {
-            topPaths.push_back({goal->drvPath, goal->wantedOutputs});
-        } else if (auto goal = dynamic_cast<SubstitutionGoal *>(i.get())) {
-            topPaths.push_back({goal->storePath});
+            topPaths.push_back(DerivedPath::Built{goal->drvPath, goal->wantedOutputs});
+        } else if (auto goal = dynamic_cast<PathSubstitutionGoal *>(i.get())) {
+            topPaths.push_back(DerivedPath::Opaque{goal->storePath});
         }
     }

@@ -471,7 +487,10 @@ void Worker::markContentsGood(const StorePath & path)
 }


-GoalPtr upcast_goal(std::shared_ptr<SubstitutionGoal> subGoal) {
+GoalPtr upcast_goal(std::shared_ptr<PathSubstitutionGoal> subGoal) {
+    return subGoal;
+}
+GoalPtr upcast_goal(std::shared_ptr<DrvOutputSubstitutionGoal> subGoal) {
     return subGoal;
 }
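
Note: both factory methods above use the same caching idiom — the worker keeps a map of `std::weak_ptr` keyed by store path (or by `DrvOutput`), so a second request for the same goal reuses the live instance instead of creating a duplicate. A stripped-down sketch of that idiom, with placeholder `Key`/`Goal` types standing in for the real ones:

```cpp
#include <map>
#include <memory>
#include <string>

struct Goal { std::string key; };
using Key = std::string;

std::map<Key, std::weak_ptr<Goal>> goalCache;

std::shared_ptr<Goal> makeGoal(const Key & key)
{
    std::weak_ptr<Goal> & slot = goalCache[key];
    auto goal = slot.lock();              // still alive from an earlier request?
    if (!goal) {
        goal = std::make_shared<Goal>(Goal{key});
        slot = goal;                      // cache it for later callers
    }
    return goal;
}

int main()
{
    auto a = makeGoal("/nix/store/aaaa-foo");
    auto b = makeGoal("/nix/store/aaaa-foo");   // same key -> same instance
    return a == b ? 0 : 1;
}
```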
@@ -4,6 +4,7 @@
 #include "lock.hh"
 #include "store-api.hh"
 #include "goal.hh"
+#include "realisation.hh"

 #include <future>
 #include <thread>
@@ -12,18 +13,20 @@ namespace nix {

 /* Forward definition. */
 struct DerivationGoal;
-struct SubstitutionGoal;
+struct PathSubstitutionGoal;
+class DrvOutputSubstitutionGoal;

 /* Workaround for not being able to declare a something like

-     class SubstitutionGoal : public Goal;
+     class PathSubstitutionGoal : public Goal;

    even when Goal is a complete type.

    This is still a static cast. The purpose of exporting it is to define it in
-   a place where `SubstitutionGoal` is concrete, and use it in a place where it
+   a place where `PathSubstitutionGoal` is concrete, and use it in a place where it
    is opaque. */
-GoalPtr upcast_goal(std::shared_ptr<SubstitutionGoal> subGoal);
+GoalPtr upcast_goal(std::shared_ptr<PathSubstitutionGoal> subGoal);
+GoalPtr upcast_goal(std::shared_ptr<DrvOutputSubstitutionGoal> subGoal);

 typedef std::chrono::time_point<std::chrono::steady_clock> steady_time_point;

@@ -72,7 +75,8 @@ private:
     /* Maps used to prevent multiple instantiations of a goal for the
        same derivation / path. */
     std::map<StorePath, std::weak_ptr<DerivationGoal>> derivationGoals;
-    std::map<StorePath, std::weak_ptr<SubstitutionGoal>> substitutionGoals;
+    std::map<StorePath, std::weak_ptr<PathSubstitutionGoal>> substitutionGoals;
+    std::map<DrvOutput, std::weak_ptr<DrvOutputSubstitutionGoal>> drvOutputSubstitutionGoals;

     /* Goals waiting for busy paths to be unlocked. */
     WeakGoals waitingForAnyGoal;
@@ -146,7 +150,8 @@ public:
         const StringSet & wantedOutputs, BuildMode buildMode = bmNormal);

     /* substitution goal */
-    std::shared_ptr<SubstitutionGoal> makeSubstitutionGoal(const StorePath & storePath, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
+    std::shared_ptr<PathSubstitutionGoal> makePathSubstitutionGoal(const StorePath & storePath, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
+    std::shared_ptr<DrvOutputSubstitutionGoal> makeDrvOutputSubstitutionGoal(const DrvOutput & id, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);

     /* Remove a dead goal. */
     void removeGoal(GoalPtr goal);
@@ -3,9 +3,19 @@
 -- is enabled

 create table if not exists Realisations (
+    id integer primary key autoincrement not null,
     drvPath text not null,
     outputName text not null, -- symbolic output id, usually "out"
     outputPath integer not null,
-    primary key (drvPath, outputName),
+    signatures text, -- space-separated list
     foreign key (outputPath) references ValidPaths(id) on delete cascade
 );

+create index if not exists IndexRealisations on Realisations(drvPath, outputName);
+
+create table if not exists RealisationsRefs (
+    referrer integer not null,
+    realisationReference integer,
+    foreign key (referrer) references Realisations(id) on delete cascade,
+    foreign key (realisationReference) references Realisations(id) on delete restrict
+);
@@ -2,6 +2,7 @@
 #include "monitor-fd.hh"
 #include "worker-protocol.hh"
 #include "store-api.hh"
+#include "path-with-outputs.hh"
 #include "finally.hh"
 #include "affinity.hh"
 #include "archive.hh"
@@ -259,6 +260,18 @@ static void writeValidPathInfo(
     }
 }

+static std::vector<DerivedPath> readDerivedPaths(Store & store, unsigned int clientVersion, Source & from)
+{
+    std::vector<DerivedPath> reqs;
+    if (GET_PROTOCOL_MINOR(clientVersion) >= 30) {
+        reqs = worker_proto::read(store, from, Phantom<std::vector<DerivedPath>> {});
+    } else {
+        for (auto & s : readStrings<Strings>(from))
+            reqs.push_back(parsePathWithOutputs(store, s).toDerivedPath());
+    }
+    return reqs;
+}
+
 static void performOp(TunnelLogger * logger, ref<Store> store,
     TrustedFlag trusted, RecursiveFlag recursive, unsigned int clientVersion,
     Source & from, BufferedSink & to, unsigned int op)
@@ -493,9 +506,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
     }

     case wopBuildPaths: {
-        std::vector<StorePathWithOutputs> drvs;
-        for (auto & s : readStrings<Strings>(from))
-            drvs.push_back(store->parsePathWithOutputs(s));
+        auto drvs = readDerivedPaths(*store, clientVersion, from);
         BuildMode mode = bmNormal;
         if (GET_PROTOCOL_MINOR(clientVersion) >= 15) {
             mode = (BuildMode) readInt(from);
@@ -575,7 +586,10 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
         auto res = store->buildDerivation(drvPath, drv, buildMode);
         logger->stopWork();
         to << res.status << res.errorMsg;
-        if (GET_PROTOCOL_MINOR(clientVersion) >= 0xc) {
+        if (GET_PROTOCOL_MINOR(clientVersion) >= 29) {
+            to << res.timesBuilt << res.isNonDeterministic << res.startTime << res.stopTime;
+        }
+        if (GET_PROTOCOL_MINOR(clientVersion) >= 28) {
             worker_proto::write(*store, to, res.builtOutputs);
         }
         break;
@@ -856,9 +870,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
     }

     case wopQueryMissing: {
-        std::vector<StorePathWithOutputs> targets;
-        for (auto & s : readStrings<Strings>(from))
-            targets.push_back(store->parsePathWithOutputs(s));
+        auto targets = readDerivedPaths(*store, clientVersion, from);
         logger->startWork();
         StorePathSet willBuild, willSubstitute, unknown;
         uint64_t downloadSize, narSize;
@@ -873,11 +885,15 @@ static void performOp(TunnelLogger * logger, ref<Store> store,

     case wopRegisterDrvOutput: {
         logger->startWork();
+        if (GET_PROTOCOL_MINOR(clientVersion) < 31) {
             auto outputId = DrvOutput::parse(readString(from));
             auto outputPath = StorePath(readString(from));
-            auto resolvedDrv = StorePath(readString(from));
             store->registerDrvOutput(Realisation{
                 .id = outputId, .outPath = outputPath});
+        } else {
+            auto realisation = worker_proto::read(*store, from, Phantom<Realisation>());
+            store->registerDrvOutput(realisation);
+        }
         logger->stopWork();
         break;
     }
@@ -887,9 +903,15 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
         auto outputId = DrvOutput::parse(readString(from));
         auto info = store->queryRealisation(outputId);
         logger->stopWork();
+        if (GET_PROTOCOL_MINOR(clientVersion) < 31) {
             std::set<StorePath> outPaths;
             if (info) outPaths.insert(info->outPath);
             worker_proto::write(*store, to, outPaths);
+        } else {
+            std::set<Realisation> realisations;
+            if (info) realisations.insert(*info);
+            worker_proto::write(*store, to, realisations);
+        }
         break;
     }
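
Note: the `readDerivedPaths` helper above is the usual pattern for evolving the daemon protocol — a newer client gets the structured encoding, an older one falls back to the legacy `path!output` strings. The gate itself is just a comparison on the low byte of the negotiated version word. A small, self-contained illustration (the `protocolMinor` helper and the version value are made up for the example):

```cpp
#include <iostream>

// Mirrors how the worker-protocol minor version is extracted in the hunk above:
// the low byte of the negotiated version word carries the minor version.
constexpr unsigned int protocolMinor(unsigned int version) { return version & 0x00ff; }

int main()
{
    unsigned int clientVersion = 0x11e;   // hypothetical: minor version 30
    if (protocolMinor(clientVersion) >= 30)
        std::cout << "read DerivedPaths in the structured encoding\n";
    else
        std::cout << "fall back to parsing 'path!output' strings\n";
}
```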
@@ -590,14 +590,6 @@ std::map<std::string, Hash> staticOutputHashes(Store& store, const Derivation& d
 }


-std::string StorePathWithOutputs::to_string(const Store & store) const
-{
-    return outputs.empty()
-        ? store.printStorePath(path)
-        : store.printStorePath(path) + "!" + concatStringsSep(",", outputs);
-}
-
-
 bool wantOutput(const string & output, const std::set<string> & wanted)
 {
     return wanted.empty() || wanted.find(output) != wanted.end();

@@ -52,7 +52,7 @@ struct DerivationOutput
         DerivationOutputCAFloating,
         DerivationOutputDeferred
     > output;
-    std::optional<HashType> hashAlgoOpt(const Store & store) const;
+
     /* Note, when you use this function you should make sure that you're passing
        the right derivation name. When in doubt, you should use the safer
        interface provided by BasicDerivation::outputsAndOptPaths */
src/libstore/derived-path.cc (new file, 118 lines)
@@ -0,0 +1,118 @@
+#include "derived-path.hh"
+#include "store-api.hh"
+
+#include <nlohmann/json.hpp>
+
+namespace nix {
+
+nlohmann::json DerivedPath::Opaque::toJSON(ref<Store> store) const {
+    nlohmann::json res;
+    res["path"] = store->printStorePath(path);
+    return res;
+}
+
+nlohmann::json BuiltPath::Built::toJSON(ref<Store> store) const {
+    nlohmann::json res;
+    res["drvPath"] = store->printStorePath(drvPath);
+    for (const auto& [output, path] : outputs) {
+        res["outputs"][output] = store->printStorePath(path);
+    }
+    return res;
+}
+
+StorePathSet BuiltPath::outPaths() const
+{
+    return std::visit(
+        overloaded{
+            [](BuiltPath::Opaque p) { return StorePathSet{p.path}; },
+            [](BuiltPath::Built b) {
+                StorePathSet res;
+                for (auto & [_, path] : b.outputs)
+                    res.insert(path);
+                return res;
+            },
+        }, raw()
+    );
+}
+
+nlohmann::json derivedPathsWithHintsToJSON(const BuiltPaths & buildables, ref<Store> store) {
+    auto res = nlohmann::json::array();
+    for (const BuiltPath & buildable : buildables) {
+        std::visit([&res, store](const auto & buildable) {
+            res.push_back(buildable.toJSON(store));
+        }, buildable.raw());
+    }
+    return res;
+}
+
+
+std::string DerivedPath::Opaque::to_string(const Store & store) const {
+    return store.printStorePath(path);
+}
+
+std::string DerivedPath::Built::to_string(const Store & store) const {
+    return store.printStorePath(drvPath)
+        + "!"
+        + (outputs.empty() ? std::string { "*" } : concatStringsSep(",", outputs));
+}
+
+std::string DerivedPath::to_string(const Store & store) const
+{
+    return std::visit(
+        [&](const auto & req) { return req.to_string(store); },
+        this->raw());
+}
+
+
+DerivedPath::Opaque DerivedPath::Opaque::parse(const Store & store, std::string_view s)
+{
+    return {store.parseStorePath(s)};
+}
+
+DerivedPath::Built DerivedPath::Built::parse(const Store & store, std::string_view s)
+{
+    size_t n = s.find("!");
+    assert(n != s.npos);
+    auto drvPath = store.parseStorePath(s.substr(0, n));
+    auto outputsS = s.substr(n + 1);
+    std::set<string> outputs;
+    if (outputsS != "*")
+        outputs = tokenizeString<std::set<string>>(outputsS, ",");
+    return {drvPath, outputs};
+}
+
+DerivedPath DerivedPath::parse(const Store & store, std::string_view s)
+{
+    size_t n = s.find("!");
+    return n == s.npos
+        ? (DerivedPath) DerivedPath::Opaque::parse(store, s)
+        : (DerivedPath) DerivedPath::Built::parse(store, s);
+}
+
+RealisedPath::Set BuiltPath::toRealisedPaths(Store & store) const
+{
+    RealisedPath::Set res;
+    std::visit(
+        overloaded{
+            [&](BuiltPath::Opaque p) { res.insert(p.path); },
+            [&](BuiltPath::Built p) {
+                auto drvHashes =
+                    staticOutputHashes(store, store.readDerivation(p.drvPath));
+                for (auto& [outputName, outputPath] : p.outputs) {
+                    if (settings.isExperimentalFeatureEnabled(
+                            "ca-derivations")) {
+                        auto thisRealisation = store.queryRealisation(
+                            DrvOutput{drvHashes.at(outputName), outputName});
+                        assert(thisRealisation); // We've built it, so we must
+                                                 // have the realisation
+                        res.insert(*thisRealisation);
+                    } else {
+                        res.insert(outputPath);
+                    }
+                }
+            },
+        },
+        raw());
+    return res;
+}
+}
src/libstore/derived-path.hh (new file, 123 lines)
@@ -0,0 +1,123 @@
+#pragma once
+
+#include "util.hh"
+#include "path.hh"
+#include "realisation.hh"
+
+#include <optional>
+
+#include <nlohmann/json_fwd.hpp>
+
+namespace nix {
+
+class Store;
+
+/**
+ * An opaque derived path.
+ *
+ * Opaque derived paths are just store paths, and fully evaluated. They
+ * cannot be simplified further. Since they are opaque, they cannot be
+ * built, but they can fetched.
+ */
+struct DerivedPathOpaque {
+    StorePath path;
+
+    nlohmann::json toJSON(ref<Store> store) const;
+    std::string to_string(const Store & store) const;
+    static DerivedPathOpaque parse(const Store & store, std::string_view);
+};
+
+/**
+ * A derived path that is built from a derivation
+ *
+ * Built derived paths are pair of a derivation and some output names.
+ * They are evaluated by building the derivation, and then replacing the
+ * output names with the resulting outputs.
+ *
+ * Note that does mean a derived store paths evaluates to multiple
+ * opaque paths, which is sort of icky as expressions are supposed to
+ * evaluate to single values. Perhaps this should have just a single
+ * output name.
+ */
+struct DerivedPathBuilt {
+    StorePath drvPath;
+    std::set<std::string> outputs;
+
+    std::string to_string(const Store & store) const;
+    static DerivedPathBuilt parse(const Store & store, std::string_view);
+};
+
+using _DerivedPathRaw = std::variant<
+    DerivedPathOpaque,
+    DerivedPathBuilt
+>;
+
+/**
+ * A "derived path" is a very simple sort of expression that evaluates
+ * to (concrete) store path. It is either:
+ *
+ * - opaque, in which case it is just a concrete store path with
+ *   possibly no known derivation
+ *
+ * - built, in which case it is a pair of a derivation path and an
+ *   output name.
+ */
+struct DerivedPath : _DerivedPathRaw {
+    using Raw = _DerivedPathRaw;
+    using Raw::Raw;
+
+    using Opaque = DerivedPathOpaque;
+    using Built = DerivedPathBuilt;
+
+    inline const Raw & raw() const {
+        return static_cast<const Raw &>(*this);
+    }
+
+    std::string to_string(const Store & store) const;
+    static DerivedPath parse(const Store & store, std::string_view);
+};
+
+/**
+ * A built derived path with hints in the form of optional concrete output paths.
+ *
+ * See 'BuiltPath' for more an explanation.
+ */
+struct BuiltPathBuilt {
+    StorePath drvPath;
+    std::map<std::string, StorePath> outputs;
+
+    nlohmann::json toJSON(ref<Store> store) const;
+    static BuiltPathBuilt parse(const Store & store, std::string_view);
+};
+
+using _BuiltPathRaw = std::variant<
+    DerivedPath::Opaque,
+    BuiltPathBuilt
+>;
+
+/**
+ * A built path. Similar to a `DerivedPath`, but enriched with the corresponding
+ * output path(s).
+ */
+struct BuiltPath : _BuiltPathRaw {
+    using Raw = _BuiltPathRaw;
+    using Raw::Raw;
+
+    using Opaque = DerivedPathOpaque;
+    using Built = BuiltPathBuilt;
+
+    inline const Raw & raw() const {
+        return static_cast<const Raw &>(*this);
+    }
+
+    StorePathSet outPaths() const;
+    RealisedPath::Set toRealisedPaths(Store & store) const;
+
+};
+
+typedef std::vector<DerivedPath> DerivedPaths;
+typedef std::vector<BuiltPath> BuiltPaths;
+
+nlohmann::json derivedPathsWithHintsToJSON(const BuiltPaths & buildables, ref<Store> store);
+
+}
@@ -7,7 +7,7 @@
 #include "finally.hh"
 #include "callback.hh"

-#ifdef ENABLE_S3
+#if ENABLE_S3
 #include <aws/core/client/ClientConfiguration.h>
 #endif

@@ -148,7 +148,7 @@ struct curlFileTransfer : public FileTransfer
         }

         LambdaSink finalSink;
-        std::shared_ptr<CompressionSink> decompressionSink;
+        std::shared_ptr<FinishSink> decompressionSink;
         std::optional<StringSink> errorSink;

         std::exception_ptr writeException;
@@ -665,7 +665,7 @@ struct curlFileTransfer : public FileTransfer
             writeFull(wakeupPipe.writeSide.get(), " ");
         }

-#ifdef ENABLE_S3
+#if ENABLE_S3
     std::tuple<std::string, std::string, Store::Params> parseS3Uri(std::string uri)
     {
         auto [path, params] = splitUriAndParams(uri);
@@ -688,7 +688,7 @@ struct curlFileTransfer : public FileTransfer
         if (hasPrefix(request.uri, "s3://")) {
             // FIXME: do this on a worker thread
             try {
-#ifdef ENABLE_S3
+#if ENABLE_S3
                 auto [bucketName, key, params] = parseS3Uri(request.uri);

                 std::string profile = get(params, "profile").value_or("");
@@ -775,7 +775,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)

     try {

-        AutoCloseDir dir(opendir(realStoreDir.c_str()));
+        AutoCloseDir dir(opendir(realStoreDir.get().c_str()));
         if (!dir) throw SysError("opening directory '%1%'", realStoreDir);

         /* Read the store and immediately delete all paths that
@@ -856,7 +856,7 @@ void LocalStore::autoGC(bool sync)
             return std::stoll(readFile(*fakeFreeSpaceFile));

         struct statvfs st;
-        if (statvfs(realStoreDir.c_str(), &st))
+        if (statvfs(realStoreDir.get().c_str(), &st))
             throw SysError("getting filesystem info about '%s'", realStoreDir);

         return (uint64_t) st.f_bavail * st.f_frsize;
@@ -81,7 +81,7 @@ void loadConfFile()

     /* We only want to send overrides to the daemon, i.e. stuff from
        ~/.nix/nix.conf or the command line. */
-    globalConfig.resetOverriden();
+    globalConfig.resetOverridden();

     auto files = settings.nixUserConfFiles;
     for (auto file = files.rbegin(); file != files.rend(); file++) {

@@ -206,7 +206,10 @@ public:

     Setting<std::string> builders{
         this, "@" + nixConfDir + "/machines", "builders",
-        "A semicolon-separated list of build machines, in the format of `nix.machines`."};
+        R"(
+          A semicolon-separated list of build machines.
+          For the exact format and examples, see [the manual chapter on remote builds](../advanced-topics/distributed-builds.md)
+        )"};

     Setting<bool> buildersUseSubstitutes{
         this, false, "builders-use-substitutes",
@@ -614,8 +617,10 @@ public:
         Strings{"https://cache.nixos.org/"},
         "substituters",
         R"(
-          A list of URLs of substituters, separated by whitespace. The default
-          is `https://cache.nixos.org`.
+          A list of URLs of substituters, separated by whitespace. Substituters
+          are tried based on their Priority value, which each substituter can set
+          independently. Lower value means higher priority.
+          The default is `https://cache.nixos.org`, with a Priority of 40.
         )",
         {"binary-caches"}};

@@ -698,7 +703,7 @@ public:
          send a series of commands to modify various settings to stdout. The
          currently recognized commands are:

-          - `extra-sandbox-paths`
+          - `extra-sandbox-paths`\
            Pass a list of files and directories to be included in the
            sandbox for this build. One entry per line, terminated by an
            empty line. Entries have the same format as `sandbox-paths`.
@@ -3,6 +3,7 @@
 #include "remote-store.hh"
 #include "serve-protocol.hh"
 #include "store-api.hh"
+#include "path-with-outputs.hh"
 #include "worker-protocol.hh"
 #include "ssh.hh"
 #include "derivations.hh"
@@ -266,14 +267,23 @@ public:
         return status;
     }

-    void buildPaths(const std::vector<StorePathWithOutputs> & drvPaths, BuildMode buildMode) override
+    void buildPaths(const std::vector<DerivedPath> & drvPaths, BuildMode buildMode) override
     {
         auto conn(connections->get());

         conn->to << cmdBuildPaths;
         Strings ss;
-        for (auto & p : drvPaths)
-            ss.push_back(p.to_string(*this));
+        for (auto & p : drvPaths) {
+            auto sOrDrvPath = StorePathWithOutputs::tryFromDerivedPath(p);
+            std::visit(overloaded {
+                [&](StorePathWithOutputs s) {
+                    ss.push_back(s.to_string(*this));
+                },
+                [&](StorePath drvPath) {
+                    throw Error("wanted to fetch '%s' but the legacy ssh protocol doesn't support merely substituting drv files via the build paths command. It would build them instead. Try using ssh-ng://", printStorePath(drvPath));
+                },
+            }, sOrDrvPath);
+        }
         conn->to << ss;

         putBuildSettings(*conn);
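
Note: the change above is needed because the legacy serve protocol only understands `path!outputs` strings; a `DerivedPath::Built` can be downgraded to that form, while a request to merely substitute a bare `.drv` path cannot be expressed and has to be rejected. A rough sketch of that downgrade decision with simplified stand-in types (not the real Nix classes):

```cpp
#include <iostream>
#include <optional>
#include <set>
#include <string>
#include <type_traits>
#include <variant>

struct Opaque { std::string path; };
struct Built { std::string drvPath; std::set<std::string> outputs; };
using DerivedPath = std::variant<Opaque, Built>;

// Roughly the decision StorePathWithOutputs::tryFromDerivedPath makes: a Built
// request (and a non-.drv opaque path) fits the old syntax, a bare .drv does not.
std::optional<std::string> toLegacyString(const DerivedPath & p)
{
    return std::visit([](auto && v) -> std::optional<std::string> {
        using T = std::decay_t<decltype(v)>;
        if constexpr (std::is_same_v<T, Built>) {
            std::string s = v.drvPath + "!";
            for (auto & o : v.outputs) s += o + ",";
            if (!v.outputs.empty()) s.pop_back();      // drop trailing comma
            return s;
        } else {
            if (v.path.size() > 4 && v.path.substr(v.path.size() - 4) == ".drv")
                return std::nullopt;                   // cannot be merely substituted
            return v.path;
        }
    }, p);
}

int main()
{
    DerivedPath p = Built{"/nix/store/aaaa-foo.drv", {"out"}};
    if (auto s = toLegacyString(p)) std::cout << *s << "\n";
}
```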
@@ -2,6 +2,8 @@
 #include "globals.hh"
 #include "nar-info-disk-cache.hh"

+#include <atomic>
+
 namespace nix {

 struct LocalBinaryCacheStoreConfig : virtual BinaryCacheStoreConfig
@@ -50,7 +52,8 @@ protected:
         const std::string & mimeType) override
     {
         auto path2 = binaryCacheDir + "/" + path;
-        Path tmp = path2 + ".tmp." + std::to_string(getpid());
+        static std::atomic<int> counter{0};
+        Path tmp = fmt("%s.tmp.%d.%d", path2, getpid(), ++counter);
         AutoDelete del(tmp, false);
         StreamToSourceAdapter source(istream);
         writeFile(tmp, source);
@@ -90,7 +93,7 @@ protected:
 void LocalBinaryCacheStore::init()
 {
     createDirs(binaryCacheDir + "/nar");
-    createDirs(binaryCacheDir + realisationsPrefix);
+    createDirs(binaryCacheDir + "/" + realisationsPrefix);
     if (writeDebugInfo)
         createDirs(binaryCacheDir + "/debuginfo");
     BinaryCacheStore::init();
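
Note: the upload path above gains a per-process atomic counter so that two threads writing the same cache entry never reuse one temporary name; the usual pattern is to write to a unique temporary file and then rename it into place. A rough standalone sketch of that idea (file names are made up, and no payload is actually written here):

```cpp
#include <atomic>
#include <cstdio>
#include <string>
#include <unistd.h>

// Unique temporary name per process *and* per writer, so concurrent uploads
// to the same destination never clobber each other's partial file.
std::string makeTmpName(const std::string & dest)
{
    static std::atomic<int> counter{0};
    return dest + ".tmp." + std::to_string(getpid()) + "." + std::to_string(++counter);
}

int main()
{
    std::string dest = "/tmp/example.narinfo";    // hypothetical target path
    std::string tmp = makeTmpName(dest);
    // ... write the payload to 'tmp' here ...
    if (std::rename(tmp.c_str(), dest.c_str()) != 0)
        std::perror("rename");                    // fails here since nothing was written
}
```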
@@ -18,6 +18,9 @@ struct LocalFSStoreConfig : virtual StoreConfig
     const PathSetting logDir{(StoreConfig*) this, false,
         rootDir != "" ? rootDir + "/nix/var/log/nix" : settings.nixLogDir,
         "log", "directory where Nix will store state"};
+    const PathSetting realStoreDir{(StoreConfig*) this, false,
+        rootDir != "" ? rootDir + "/nix/store" : storeDir, "real",
+        "physical path to the Nix store"};
 };

 class LocalFSStore : public virtual LocalFSStoreConfig, public virtual Store
@@ -34,7 +37,7 @@ public:
     /* Register a permanent GC root. */
     Path addPermRoot(const StorePath & storePath, const Path & gcRoot);

-    virtual Path getRealStoreDir() { return storeDir; }
+    virtual Path getRealStoreDir() { return realStoreDir; }

     Path toRealPath(const Path & storePath) override
     {
@@ -53,12 +53,15 @@ struct LocalStore::State::Stmts {
     SQLiteStmt InvalidatePath;
     SQLiteStmt AddDerivationOutput;
     SQLiteStmt RegisterRealisedOutput;
+    SQLiteStmt UpdateRealisedOutput;
     SQLiteStmt QueryValidDerivers;
     SQLiteStmt QueryDerivationOutputs;
     SQLiteStmt QueryRealisedOutput;
     SQLiteStmt QueryAllRealisedOutputs;
     SQLiteStmt QueryPathFromHashPart;
     SQLiteStmt QueryValidPaths;
+    SQLiteStmt QueryRealisationReferences;
+    SQLiteStmt AddRealisationReference;
 };

 int getSchema(Path schemaPath)
@@ -76,7 +79,7 @@ int getSchema(Path schemaPath)

 void migrateCASchema(SQLite& db, Path schemaPath, AutoCloseFD& lockFd)
 {
-    const int nixCASchemaVersion = 1;
+    const int nixCASchemaVersion = 2;
     int curCASchema = getSchema(schemaPath);
     if (curCASchema != nixCASchemaVersion) {
         if (curCASchema > nixCASchemaVersion) {
@@ -94,7 +97,39 @@ void migrateCASchema(SQLite& db, Path schemaPath, AutoCloseFD& lockFd)
                 #include "ca-specific-schema.sql.gen.hh"
                 ;
             db.exec(schema);
+            curCASchema = nixCASchemaVersion;
         }
+
+        if (curCASchema < 2) {
+            SQLiteTxn txn(db);
+            // Ugly little sql dance to add a new `id` column and make it the primary key
+            db.exec(R"(
+                create table Realisations2 (
+                    id integer primary key autoincrement not null,
+                    drvPath text not null,
+                    outputName text not null, -- symbolic output id, usually "out"
+                    outputPath integer not null,
+                    signatures text, -- space-separated list
+                    foreign key (outputPath) references ValidPaths(id) on delete cascade
+                );
+                insert into Realisations2 (drvPath, outputName, outputPath, signatures)
+                    select drvPath, outputName, outputPath, signatures from Realisations;
+                drop table Realisations;
+                alter table Realisations2 rename to Realisations;
+            )");
+            db.exec(R"(
+                create index if not exists IndexRealisations on Realisations(drvPath, outputName);
+
+                create table if not exists RealisationsRefs (
+                    referrer integer not null,
+                    realisationReference integer,
+                    foreign key (referrer) references Realisations(id) on delete cascade,
+                    foreign key (realisationReference) references Realisations(id) on delete restrict
+                );
+            )");
+            txn.commit();
+        }
+
         writeFile(schemaPath, fmt("%d", nixCASchemaVersion));
         lockFile(lockFd.get(), ltRead, true);
     }
@@ -106,9 +141,6 @@ LocalStore::LocalStore(const Params & params)
     , LocalStoreConfig(params)
     , Store(params)
     , LocalFSStore(params)
-    , realStoreDir_{this, false, rootDir != "" ? rootDir + "/nix/store" : storeDir, "real",
-        "physical path to the Nix store"}
-    , realStoreDir(realStoreDir_)
     , dbDir(stateDir + "/db")
     , linksDir(realStoreDir + "/.links")
     , reservedPath(dbDir + "/reserved")
@@ -153,13 +185,13 @@ LocalStore::LocalStore(const Params & params)
             printError("warning: the group '%1%' specified in 'build-users-group' does not exist", settings.buildUsersGroup);
         else {
             struct stat st;
-            if (stat(realStoreDir.c_str(), &st))
+            if (stat(realStoreDir.get().c_str(), &st))
                 throw SysError("getting attributes of path '%1%'", realStoreDir);

             if (st.st_uid != 0 || st.st_gid != gr->gr_gid || (st.st_mode & ~S_IFMT) != perm) {
-                if (chown(realStoreDir.c_str(), 0, gr->gr_gid) == -1)
+                if (chown(realStoreDir.get().c_str(), 0, gr->gr_gid) == -1)
                     throw SysError("changing ownership of path '%1%'", realStoreDir);
-                if (chmod(realStoreDir.c_str(), perm) == -1)
+                if (chmod(realStoreDir.get().c_str(), perm) == -1)
                     throw SysError("changing permissions on path '%1%'", realStoreDir);
             }
         }
@@ -310,13 +342,22 @@ LocalStore::LocalStore(const Params & params)
     if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
         state->stmts->RegisterRealisedOutput.create(state->db,
             R"(
-                insert or replace into Realisations (drvPath, outputName, outputPath)
-                values (?, ?, (select id from ValidPaths where path = ?))
+                insert or replace into Realisations (drvPath, outputName, outputPath, signatures)
+                values (?, ?, (select id from ValidPaths where path = ?), ?)
+                ;
+            )");
+        state->stmts->UpdateRealisedOutput.create(state->db,
+            R"(
+                update Realisations
+                    set signatures = ?
+                    where
+                        drvPath = ? and
+                        outputName = ?
                 ;
             )");
         state->stmts->QueryRealisedOutput.create(state->db,
             R"(
-                select Output.path from Realisations
+                select Realisations.id, Output.path, Realisations.signatures from Realisations
                     inner join ValidPaths as Output on Output.id = Realisations.outputPath
                     where drvPath = ? and outputName = ?
                     ;
@@ -328,6 +369,19 @@ LocalStore::LocalStore(const Params & params)
                     where drvPath = ?
                     ;
             )");
+        state->stmts->QueryRealisationReferences.create(state->db,
+            R"(
+                select drvPath, outputName from Realisations
+                    join RealisationsRefs on realisationReference = Realisations.id
+                    where referrer = ?;
+            )");
+        state->stmts->AddRealisationReference.create(state->db,
+            R"(
+                insert or replace into RealisationsRefs (referrer, realisationReference)
+                values (
+                    ?,
+                    (select id from Realisations where drvPath = ? and outputName = ?));
+            )");
     }
 }

@@ -437,14 +491,14 @@ void LocalStore::makeStoreWritable()
     if (getuid() != 0) return;
     /* Check if /nix/store is on a read-only mount. */
     struct statvfs stat;
-    if (statvfs(realStoreDir.c_str(), &stat) != 0)
+    if (statvfs(realStoreDir.get().c_str(), &stat) != 0)
         throw SysError("getting info about the Nix store mount point");

     if (stat.f_flag & ST_RDONLY) {
         if (unshare(CLONE_NEWNS) == -1)
             throw SysError("setting up a private mount namespace");

-        if (mount(0, realStoreDir.c_str(), "none", MS_REMOUNT | MS_BIND, 0) == -1)
+        if (mount(0, realStoreDir.get().c_str(), "none", MS_REMOUNT | MS_BIND, 0) == -1)
             throw SysError("remounting %1% writable", realStoreDir);
     }
 #endif
@@ -652,17 +706,66 @@ void LocalStore::checkDerivationOutputs(const StorePath & drvPath, const Derivat
     }
 }

+void LocalStore::registerDrvOutput(const Realisation & info, CheckSigsFlag checkSigs)
+{
+    settings.requireExperimentalFeature("ca-derivations");
+    if (checkSigs == NoCheckSigs || !realisationIsUntrusted(info))
+        registerDrvOutput(info);
+    else
+        throw Error("cannot register realisation '%s' because it lacks a valid signature", info.outPath.to_string());
+}
+
 void LocalStore::registerDrvOutput(const Realisation & info)
 {
     settings.requireExperimentalFeature("ca-derivations");
-    auto state(_state.lock());
     retrySQLite<void>([&]() {
+        auto state(_state.lock());
+        if (auto oldR = queryRealisation_(*state, info.id)) {
+            if (info.isCompatibleWith(*oldR)) {
+                auto combinedSignatures = oldR->signatures;
+                combinedSignatures.insert(info.signatures.begin(),
+                    info.signatures.end());
+                state->stmts->UpdateRealisedOutput.use()
+                    (concatStringsSep(" ", combinedSignatures))
+                    (info.id.strHash())
+                    (info.id.outputName)
+                    .exec();
+            } else {
+                throw Error("Trying to register a realisation of '%s', but we already "
+                            "have another one locally.\n"
+                            "Local: %s\n"
+                            "Remote: %s",
+                    info.id.to_string(),
+                    printStorePath(oldR->outPath),
+                    printStorePath(info.outPath)
+                );
+            }
+        } else {
             state->stmts->RegisterRealisedOutput.use()
                 (info.id.strHash())
                 (info.id.outputName)
                 (printStorePath(info.outPath))
+                (concatStringsSep(" ", info.signatures))
                 .exec();
+        }
+        uint64_t myId = state->db.getLastInsertedRowId();
+        for (auto & [outputId, depPath] : info.dependentRealisations) {
+            auto localRealisation = queryRealisationCore_(*state, outputId);
+            if (!localRealisation)
+                throw Error("unable to register the derivation '%s' as it "
+                            "depends on the non existent '%s'",
+                    info.id.to_string(), outputId.to_string());
+            if (localRealisation->second.outPath != depPath)
+                throw Error("unable to register the derivation '%s' as it "
+                            "depends on a realisation of '%s' that doesn’t"
+                            "match what we have locally",
+                    info.id.to_string(), outputId.to_string());
+            state->stmts->AddRealisationReference.use()
+                (myId)
+                (outputId.strHash())
+                (outputId.outputName)
+                .exec();
+        }
     });
 }
@ -1102,15 +1205,20 @@ const PublicKeys & LocalStore::getPublicKeys()
|
||||||
return *state->publicKeys;
|
return *state->publicKeys;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool LocalStore::pathInfoIsTrusted(const ValidPathInfo & info)
|
bool LocalStore::pathInfoIsUntrusted(const ValidPathInfo & info)
|
||||||
{
|
{
|
||||||
return requireSigs && !info.checkSignatures(*this, getPublicKeys());
|
return requireSigs && !info.checkSignatures(*this, getPublicKeys());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool LocalStore::realisationIsUntrusted(const Realisation & realisation)
|
||||||
|
{
|
||||||
|
return requireSigs && !realisation.checkSignatures(getPublicKeys());
|
||||||
|
}
|
||||||
|
|
||||||
void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
|
void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
|
||||||
RepairFlag repair, CheckSigsFlag checkSigs)
|
RepairFlag repair, CheckSigsFlag checkSigs)
|
||||||
{
|
{
|
||||||
if (checkSigs && pathInfoIsTrusted(info))
|
if (checkSigs && pathInfoIsUntrusted(info))
|
||||||
throw Error("cannot add path '%s' because it lacks a valid signature", printStorePath(info.path));
|
throw Error("cannot add path '%s' because it lacks a valid signature", printStorePath(info.path));
|
||||||
|
|
||||||
addTempRoot(info.path);
|
addTempRoot(info.path);
|
||||||
|
@ -1138,17 +1246,13 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
|
||||||
|
|
||||||
/* While restoring the path from the NAR, compute the hash
|
/* While restoring the path from the NAR, compute the hash
|
||||||
of the NAR. */
|
of the NAR. */
|
||||||
std::unique_ptr<AbstractHashSink> hashSink;
|
HashSink hashSink(htSHA256);
|
||||||
if (!info.ca.has_value() || !info.references.count(info.path))
|
|
||||||
hashSink = std::make_unique<HashSink>(htSHA256);
|
|
||||||
else
|
|
||||||
hashSink = std::make_unique<HashModuloSink>(htSHA256, std::string(info.path.hashPart()));
|
|
||||||
|
|
||||||
TeeSource wrapperSource { source, *hashSink };
|
TeeSource wrapperSource { source, hashSink };
|
||||||
|
|
||||||
restorePath(realPath, wrapperSource);
|
restorePath(realPath, wrapperSource);
|
||||||
|
|
||||||
auto hashResult = hashSink->finish();
|
auto hashResult = hashSink.finish();
|
||||||
|
|
||||||
if (hashResult.first != info.narHash)
|
if (hashResult.first != info.narHash)
|
||||||
throw Error("hash mismatch importing path '%s';\n specified: %s\n got: %s",
|
throw Error("hash mismatch importing path '%s';\n specified: %s\n got: %s",
|
||||||
|
@@ -1158,6 +1262,31 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
             throw Error("size mismatch importing path '%s';\n specified: %s\n got: %s",
                 printStorePath(info.path), info.narSize, hashResult.second);
 
+        if (info.ca) {
+            if (auto foHash = std::get_if<FixedOutputHash>(&*info.ca)) {
+                auto actualFoHash = hashCAPath(
+                    foHash->method,
+                    foHash->hash.type,
+                    info.path
+                );
+                if (foHash->hash != actualFoHash.hash) {
+                    throw Error("ca hash mismatch importing path '%s';\n specified: %s\n got: %s",
+                        printStorePath(info.path),
+                        foHash->hash.to_string(Base32, true),
+                        actualFoHash.hash.to_string(Base32, true));
+                }
+            }
+            if (auto textHash = std::get_if<TextHash>(&*info.ca)) {
+                auto actualTextHash = hashString(htSHA256, readFile(realPath));
+                if (textHash->hash != actualTextHash) {
+                    throw Error("ca hash mismatch importing path '%s';\n specified: %s\n got: %s",
+                        printStorePath(info.path),
+                        textHash->hash.to_string(Base32, true),
+                        actualTextHash.to_string(Base32, true));
+                }
+            }
+        }
+
         autoGC();
 
         canonicalisePathMetaData(realPath, -1);
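The added block dispatches on the kind of content address stored in info.ca: a fixed-output hash is recomputed from the imported path via hashCAPath, a text hash from the file contents. Outside the diff, the same std::get_if-on-a-variant pattern looks like the following sketch; ToyTextHash, ToyFixedOutputHash, toyHash and verifyCA are invented stand-ins (std::hash instead of SHA-256, plain strings instead of store paths):

    #include <functional>
    #include <iostream>
    #include <optional>
    #include <stdexcept>
    #include <string>
    #include <variant>

    struct ToyTextHash        { size_t hash; };
    struct ToyFixedOutputHash { bool recursive; size_t hash; };
    using ToyContentAddress = std::variant<ToyTextHash, ToyFixedOutputHash>;

    // Stand-in for hashing a path's contents.
    static size_t toyHash(const std::string & contents) {
        return std::hash<std::string>{}(contents);
    }

    static void verifyCA(const std::string & path, const std::string & contents,
        const std::optional<ToyContentAddress> & ca)
    {
        if (!ca) return;
        if (auto fo = std::get_if<ToyFixedOutputHash>(&*ca)) {
            // The real code recomputes the hash with hashCAPath (recursive or flat).
            if (fo->hash != toyHash(contents))
                throw std::runtime_error("ca hash mismatch importing path '" + path + "'");
        }
        if (auto text = std::get_if<ToyTextHash>(&*ca)) {
            // The real code hashes the file contents with htSHA256.
            if (text->hash != toyHash(contents))
                throw std::runtime_error("ca hash mismatch importing path '" + path + "'");
        }
    }

    int main()
    {
        std::string contents = "hello";
        ToyContentAddress ca = ToyTextHash{ toyHash(contents) };
        verifyCA("/toy/store/hello", contents, ca);   // passes; a wrong hash would throw
        std::cout << "content address verified\n";
    }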
@@ -1426,14 +1555,10 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair)
                 /* Check the content hash (optionally - slow). */
                 printMsg(lvlTalkative, "checking contents of '%s'", printStorePath(i));
 
-                std::unique_ptr<AbstractHashSink> hashSink;
-                if (!info->ca || !info->references.count(info->path))
-                    hashSink = std::make_unique<HashSink>(info->narHash.type);
-                else
-                    hashSink = std::make_unique<HashModuloSink>(info->narHash.type, std::string(info->path.hashPart()));
+                auto hashSink = HashSink(info->narHash.type);
 
-                dumpPath(Store::toRealPath(i), *hashSink);
-                auto current = hashSink->finish();
+                dumpPath(Store::toRealPath(i), hashSink);
+                auto current = hashSink.finish();
 
                 if (info->narHash != nullHash && info->narHash != current.first) {
                     printError("path '%s' was modified! expected hash '%s', got '%s'",
@@ -1612,6 +1737,18 @@ void LocalStore::addSignatures(const StorePath & storePath, const StringSet & si
 }
 
 
+void LocalStore::signRealisation(Realisation & realisation)
+{
+    // FIXME: keep secret keys in memory.
+
+    auto secretKeyFiles = settings.secretKeyFiles;
+
+    for (auto & secretKeyFile : secretKeyFiles.get()) {
+        SecretKey secretKey(readFile(secretKeyFile));
+        realisation.sign(secretKey);
+    }
+}
+
 void LocalStore::signPathInfo(ValidPathInfo & info)
 {
     // FIXME: keep secret keys in memory.
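The new signRealisation mirrors signPathInfo just below it: every key file listed in the secret-key-files setting is read and used to append one signature to the realisation, so a store signs its drv-output-to-store-path mappings with the same keys it already uses for path infos.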
@@ -1639,18 +1776,97 @@ void LocalStore::createUser(const std::string & userName, uid_t userId)
     }
 }
 
-std::optional<const Realisation> LocalStore::queryRealisation(
-    const DrvOutput& id) {
-    typedef std::optional<const Realisation> Ret;
-    return retrySQLite<Ret>([&]() -> Ret {
-        auto state(_state.lock());
-        auto use(state->stmts->QueryRealisedOutput.use()(id.strHash())(
-            id.outputName));
-        if (!use.next())
-            return std::nullopt;
-        auto outputPath = parseStorePath(use.getStr(0));
-        return Ret{
-            Realisation{.id = id, .outPath = outputPath}};
+std::optional<std::pair<int64_t, Realisation>> LocalStore::queryRealisationCore_(
+    LocalStore::State & state,
+    const DrvOutput & id)
+{
+    auto useQueryRealisedOutput(
+        state.stmts->QueryRealisedOutput.use()
+            (id.strHash())
+            (id.outputName));
+    if (!useQueryRealisedOutput.next())
+        return std::nullopt;
+    auto realisationDbId = useQueryRealisedOutput.getInt(0);
+    auto outputPath = parseStorePath(useQueryRealisedOutput.getStr(1));
+    auto signatures =
+        tokenizeString<StringSet>(useQueryRealisedOutput.getStr(2));
+
+    return {{
+        realisationDbId,
+        Realisation{
+            .id = id,
+            .outPath = outputPath,
+            .signatures = signatures,
+        }
+    }};
+}
+
+std::optional<const Realisation> LocalStore::queryRealisation_(
+    LocalStore::State & state,
+    const DrvOutput & id)
+{
+    auto maybeCore = queryRealisationCore_(state, id);
+    if (!maybeCore)
+        return std::nullopt;
+    auto [realisationDbId, res] = *maybeCore;
+
+    std::map<DrvOutput, StorePath> dependentRealisations;
+    auto useRealisationRefs(
+        state.stmts->QueryRealisationReferences.use()
+            (realisationDbId));
+    while (useRealisationRefs.next()) {
+        auto depId = DrvOutput {
+            Hash::parseAnyPrefixed(useRealisationRefs.getStr(0)),
+            useRealisationRefs.getStr(1),
+        };
+        auto dependentRealisation = queryRealisationCore_(state, depId);
+        assert(dependentRealisation); // Enforced by the db schema
+        auto outputPath = dependentRealisation->second.outPath;
+        dependentRealisations.insert({depId, outputPath});
+    }
+
+    res.dependentRealisations = dependentRealisations;
+
+    return { res };
+}
+
+std::optional<const Realisation>
+LocalStore::queryRealisation(const DrvOutput & id)
+{
+    return retrySQLite<std::optional<const Realisation>>([&]() {
+        auto state(_state.lock());
+        return queryRealisation_(*state, id);
     });
 }
 
+FixedOutputHash LocalStore::hashCAPath(
+    const FileIngestionMethod & method, const HashType & hashType,
+    const StorePath & path)
+{
+    return hashCAPath(method, hashType, Store::toRealPath(path), path.hashPart());
+}
+
+FixedOutputHash LocalStore::hashCAPath(
+    const FileIngestionMethod & method,
+    const HashType & hashType,
+    const Path & path,
+    const std::string_view pathHash
+)
+{
+    HashModuloSink caSink ( hashType, std::string(pathHash) );
+    switch (method) {
+    case FileIngestionMethod::Recursive:
+        dumpPath(path, caSink);
+        break;
+    case FileIngestionMethod::Flat:
+        readFile(path, caSink);
+        break;
+    }
+    auto hash = caSink.finish().first;
+    return FixedOutputHash{
+        .method = method,
+        .hash = hash,
+    };
+}
+
 } // namespace nix
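The refactored query is layered: queryRealisationCore_ fetches a single row (database id, output path, signatures), queryRealisation_ builds on it to resolve the realisation's recorded dependencies, and the public queryRealisation only adds locking and the SQLite retry loop. A toy, in-memory version of that layering, with maps in place of the SQLite tables and a plain mutex in place of Nix's Sync/retrySQLite (all names here, ToyState, ToyRealisation and friends, are invented):

    #include <cassert>
    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <mutex>
    #include <optional>
    #include <string>
    #include <utility>
    #include <vector>

    using DrvOutputId  = std::string;
    using StorePathStr = std::string;

    struct ToyRealisation {
        DrvOutputId id;
        StorePathStr outPath;
        std::map<DrvOutputId, StorePathStr> dependentRealisations;
    };

    struct ToyState {
        // "Realisations" table: drv output id -> (row id, output path)
        std::map<DrvOutputId, std::pair<int64_t, StorePathStr>> realisations;
        // "RealisationsRefs" table: row id -> drv outputs it depends on
        std::map<int64_t, std::vector<DrvOutputId>> references;
    };

    // Layer 1: a single row, no dependency resolution.
    std::optional<std::pair<int64_t, ToyRealisation>>
    queryRealisationCore_(ToyState & state, const DrvOutputId & id)
    {
        auto it = state.realisations.find(id);
        if (it == state.realisations.end()) return std::nullopt;
        return {{ it->second.first, ToyRealisation{ id, it->second.second, {} } }};
    }

    // Layer 2: also resolve the realisation's recorded dependencies.
    std::optional<ToyRealisation> queryRealisation_(ToyState & state, const DrvOutputId & id)
    {
        auto core = queryRealisationCore_(state, id);
        if (!core) return std::nullopt;
        auto [rowId, res] = *core;
        for (auto & depId : state.references[rowId]) {
            auto dep = queryRealisationCore_(state, depId);
            assert(dep); // enforced by the database schema in the real store
            res.dependentRealisations.insert({depId, dep->second.outPath});
        }
        return res;
    }

    // Layer 3: the public entry point only takes the lock (and, in Nix, retries on SQLITE_BUSY).
    std::mutex stateLock;
    ToyState globalState;

    std::optional<ToyRealisation> queryRealisation(const DrvOutputId & id)
    {
        std::lock_guard<std::mutex> lock(stateLock);
        return queryRealisation_(globalState, id);
    }

    int main()
    {
        globalState.realisations = {
            {"sha256:aaa!out", {1, "/toy/store/dep"}},
            {"sha256:bbb!out", {2, "/toy/store/hello"}},
        };
        globalState.references[2] = {"sha256:aaa!out"};
        auto r = queryRealisation("sha256:bbb!out");
        std::cout << r->outPath << " depends on " << r->dependentRealisations.size() << " realisation(s)\n";
    }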
@@ -83,9 +83,6 @@ private:
 
 public:
 
-    PathSetting realStoreDir_;
-
-    const Path realStoreDir;
     const Path dbDir;
     const Path linksDir;
     const Path reservedPath;
@@ -136,7 +133,8 @@ public:
     void querySubstitutablePathInfos(const StorePathCAMap & paths,
         SubstitutablePathInfos & infos) override;
 
-    bool pathInfoIsTrusted(const ValidPathInfo &) override;
+    bool pathInfoIsUntrusted(const ValidPathInfo &) override;
+    bool realisationIsUntrusted(const Realisation & ) override;
 
     void addToStore(const ValidPathInfo & info, Source & source,
         RepairFlag repair, CheckSigsFlag checkSigs) override;
@@ -202,8 +200,11 @@ public:
     /* Register the store path 'output' as the output named 'outputName' of
        derivation 'deriver'. */
     void registerDrvOutput(const Realisation & info) override;
+    void registerDrvOutput(const Realisation & info, CheckSigsFlag checkSigs) override;
     void cacheDrvOutputMapping(State & state, const uint64_t deriver, const string & outputName, const StorePath & output);
 
+    std::optional<const Realisation> queryRealisation_(State & state, const DrvOutput & id);
+    std::optional<std::pair<int64_t, Realisation>> queryRealisationCore_(State & state, const DrvOutput & id);
     std::optional<const Realisation> queryRealisation(const DrvOutput&) override;
 
 private:
@@ -272,16 +273,30 @@ private:
     bool isValidPath_(State & state, const StorePath & path);
     void queryReferrers(State & state, const StorePath & path, StorePathSet & referrers);
 
-    /* Add signatures to a ValidPathInfo using the secret keys
+    /* Add signatures to a ValidPathInfo or Realisation using the secret keys
        specified by the ‘secret-key-files’ option. */
     void signPathInfo(ValidPathInfo & info);
-
-    Path getRealStoreDir() override { return realStoreDir; }
+    void signRealisation(Realisation &);
 
     void createUser(const std::string & userName, uid_t userId) override;
 
+    // XXX: Make a generic `Store` method
+    FixedOutputHash hashCAPath(
+        const FileIngestionMethod & method,
+        const HashType & hashType,
+        const StorePath & path);
+
+    FixedOutputHash hashCAPath(
+        const FileIngestionMethod & method,
+        const HashType & hashType,
+        const Path & path,
+        const std::string_view pathHash
+    );
+
     friend struct LocalDerivationGoal;
+    friend struct PathSubstitutionGoal;
     friend struct SubstitutionGoal;
+    friend struct DerivationGoal;
 };
@@ -9,7 +9,7 @@ libstore_SOURCES := $(wildcard $(d)/*.cc $(d)/builtins/*.cc $(d)/build/*.cc)
 libstore_LIBS = libutil
 
 libstore_LDFLAGS = $(SQLITE3_LIBS) -lbz2 $(LIBCURL_LIBS) $(SODIUM_LIBS) -pthread
-ifneq ($(OS), FreeBSD)
+ifeq ($(OS), Linux)
 libstore_LDFLAGS += -ldl
 endif
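The makefile tweak narrows the -ldl link flag from "every OS except FreeBSD" to "Linux only", presumably because a separate libdl only exists on Linux; FreeBSD and Darwin provide dlopen directly in the system C library, so linking -ldl there is unnecessary.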
Some files were not shown because too many files have changed in this diff.