mirror of
https://github.com/privatevoid-net/nix-super.git
synced 2024-11-23 14:36:16 +02:00
Merge remote-tracking branch 'upstream/master' into ca-drv
This commit is contained in:
commit
6b7f4ec4ab
210 changed files with 8090 additions and 2358 deletions
27
.github/ISSUE_TEMPLATE.md
vendored
27
.github/ISSUE_TEMPLATE.md
vendored
|
@ -1,27 +0,0 @@
|
|||
<!--
|
||||
|
||||
# Filing a Nix issue
|
||||
|
||||
*WAIT* Are you sure you're filing your issue in the right repository?
|
||||
|
||||
We appreciate you taking the time to tell us about issues you encounter, but routing the issue to the right place will get you help sooner and save everyone time.
|
||||
|
||||
This is the Nix repository, and issues here should be about Nix the build and package management *_tool_*.
|
||||
|
||||
If you have a problem with a specific package on NixOS or when using Nix, you probably want to file an issue with _nixpkgs_, whose issue tracker is over at https://github.com/NixOS/nixpkgs/issues.
|
||||
|
||||
Examples of _Nix_ issues:
|
||||
|
||||
- Nix segfaults when I run `nix-build -A blahblah`
|
||||
- The Nix language needs a new builtin: `builtins.foobar`
|
||||
- Regression in the behavior of `nix-env` in Nix 2.0
|
||||
|
||||
Examples of _nixpkgs_ issues:
|
||||
|
||||
- glibc is b0rked on aarch64
|
||||
- chromium in NixOS doesn't support U2F but google-chrome does!
|
||||
- The OpenJDK package on macOS is missing a key component
|
||||
|
||||
Chances are if you're a newcomer to the Nix world, you'll probably want the [nixpkgs tracker](https://github.com/NixOS/nixpkgs/issues). It also gets a lot more eyeball traffic so you'll probably get a response a lot more quickly.
|
||||
|
||||
-->
|
32
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
32
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
|
@ -0,0 +1,32 @@
|
|||
---
|
||||
name: Bug report
|
||||
about: Create a report to help us improve
|
||||
title: ''
|
||||
labels: bug
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**Describe the bug**
|
||||
|
||||
A clear and concise description of what the bug is.
|
||||
|
||||
If you have a problem with a specific package or NixOS,
|
||||
you probably want to file an issue at https://github.com/NixOS/nixpkgs/issues.
|
||||
|
||||
**Steps To Reproduce**
|
||||
|
||||
1. Go to '...'
|
||||
2. Click on '....'
|
||||
3. Scroll down to '....'
|
||||
4. See error
|
||||
|
||||
**Expected behavior**
|
||||
|
||||
A clear and concise description of what you expected to happen.
|
||||
|
||||
**`nix-env --version` output**
|
||||
|
||||
**Additional context**
|
||||
|
||||
Add any other context about the problem here.
|
20
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
20
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
|
@ -0,0 +1,20 @@
|
|||
---
|
||||
name: Feature request
|
||||
about: Suggest an idea for this project
|
||||
title: ''
|
||||
labels: improvement
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**Is your feature request related to a problem? Please describe.**
|
||||
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
|
||||
|
||||
**Describe the solution you'd like**
|
||||
A clear and concise description of what you want to happen.
|
||||
|
||||
**Describe alternatives you've considered**
|
||||
A clear and concise description of any alternative solutions or features you've considered.
|
||||
|
||||
**Additional context**
|
||||
Add any other context or screenshots about the feature request here.
|
2
.github/workflows/test.yml
vendored
2
.github/workflows/test.yml
vendored
|
@ -6,7 +6,7 @@ jobs:
|
|||
tests:
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ubuntu-18.04, macos]
|
||||
os: [ubuntu-latest, macos-latest]
|
||||
runs-on: ${{ matrix.os }}
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
|
4
.gitignore
vendored
4
.gitignore
vendored
|
@ -47,7 +47,7 @@ perl/Makefile.config
|
|||
/src/libexpr/nix.tbl
|
||||
|
||||
# /src/libstore/
|
||||
/src/libstore/*.gen.hh
|
||||
*.gen.*
|
||||
|
||||
/src/nix/nix
|
||||
|
||||
|
@ -75,6 +75,8 @@ perl/Makefile.config
|
|||
|
||||
/src/nix-copy-closure/nix-copy-closure
|
||||
|
||||
/src/error-demo/error-demo
|
||||
|
||||
/src/build-remote/build-remote
|
||||
|
||||
# /tests/
|
||||
|
|
|
@ -1,8 +0,0 @@
|
|||
matrix:
|
||||
include:
|
||||
- language: osx
|
||||
script: ./tests/install-darwin.sh
|
||||
- language: nix
|
||||
script: nix-build release.nix -A build.x86_64-linux
|
||||
notifications:
|
||||
email: false
|
2
Makefile
2
Makefile
|
@ -3,7 +3,9 @@ makefiles = \
|
|||
local.mk \
|
||||
nix-rust/local.mk \
|
||||
src/libutil/local.mk \
|
||||
src/libutil/tests/local.mk \
|
||||
src/libstore/local.mk \
|
||||
src/libfetchers/local.mk \
|
||||
src/libmain/local.mk \
|
||||
src/libexpr/local.mk \
|
||||
src/nix/local.mk \
|
||||
|
|
|
@ -1,42 +1,44 @@
|
|||
AR = @AR@
|
||||
BDW_GC_LIBS = @BDW_GC_LIBS@
|
||||
BOOST_LDFLAGS = @BOOST_LDFLAGS@
|
||||
BUILD_SHARED_LIBS = @BUILD_SHARED_LIBS@
|
||||
CC = @CC@
|
||||
CFLAGS = @CFLAGS@
|
||||
CXX = @CXX@
|
||||
CXXFLAGS = @CXXFLAGS@
|
||||
LDFLAGS = @LDFLAGS@
|
||||
EDITLINE_LIBS = @EDITLINE_LIBS@
|
||||
ENABLE_S3 = @ENABLE_S3@
|
||||
HAVE_SODIUM = @HAVE_SODIUM@
|
||||
GTEST_LIBS = @GTEST_LIBS@
|
||||
HAVE_SECCOMP = @HAVE_SECCOMP@
|
||||
BOOST_LDFLAGS = @BOOST_LDFLAGS@
|
||||
HAVE_SODIUM = @HAVE_SODIUM@
|
||||
LDFLAGS = @LDFLAGS@
|
||||
LIBARCHIVE_LIBS = @LIBARCHIVE_LIBS@
|
||||
LIBBROTLI_LIBS = @LIBBROTLI_LIBS@
|
||||
LIBCURL_LIBS = @LIBCURL_LIBS@
|
||||
LIBLZMA_LIBS = @LIBLZMA_LIBS@
|
||||
OPENSSL_LIBS = @OPENSSL_LIBS@
|
||||
PACKAGE_NAME = @PACKAGE_NAME@
|
||||
PACKAGE_VERSION = @PACKAGE_VERSION@
|
||||
SODIUM_LIBS = @SODIUM_LIBS@
|
||||
LIBLZMA_LIBS = @LIBLZMA_LIBS@
|
||||
SQLITE3_LIBS = @SQLITE3_LIBS@
|
||||
LIBBROTLI_LIBS = @LIBBROTLI_LIBS@
|
||||
LIBARCHIVE_LIBS = @LIBARCHIVE_LIBS@
|
||||
EDITLINE_LIBS = @EDITLINE_LIBS@
|
||||
bash = @bash@
|
||||
bindir = @bindir@
|
||||
lsof = @lsof@
|
||||
datadir = @datadir@
|
||||
datarootdir = @datarootdir@
|
||||
doc_generate = @doc_generate@
|
||||
docdir = @docdir@
|
||||
exec_prefix = @exec_prefix@
|
||||
includedir = @includedir@
|
||||
libdir = @libdir@
|
||||
libexecdir = @libexecdir@
|
||||
localstatedir = @localstatedir@
|
||||
lsof = @lsof@
|
||||
mandir = @mandir@
|
||||
pkglibdir = $(libdir)/$(PACKAGE_NAME)
|
||||
prefix = @prefix@
|
||||
sandbox_shell = @sandbox_shell@
|
||||
storedir = @storedir@
|
||||
sysconfdir = @sysconfdir@
|
||||
doc_generate = @doc_generate@
|
||||
system = @system@
|
||||
xmllint = @xmllint@
|
||||
xsltproc = @xsltproc@
|
||||
|
|
57
README.md
57
README.md
|
@ -1,21 +1,54 @@
|
|||
# Nix
|
||||
|
||||
[![Open Collective supporters](https://opencollective.com/nixos/tiers/supporter/badge.svg?label=Supporters&color=brightgreen)](https://opencollective.com/nixos)
|
||||
[![Test](https://github.com/NixOS/nix/workflows/Test/badge.svg)](https://github.com/NixOS/nix/actions)
|
||||
|
||||
Nix, the purely functional package manager
|
||||
------------------------------------------
|
||||
Nix is a powerful package manager for Linux and other Unix systems that makes package
|
||||
management reliable and reproducible. Please refer to the [Nix manual](https://nixos.org/nix/manual)
|
||||
for more details.
|
||||
|
||||
Nix is a new take on package management that is fairly unique. Because of its
|
||||
purity aspects, a lot of issues found in traditional package managers don't
|
||||
appear with Nix.
|
||||
## Installation
|
||||
|
||||
To find out more about the tool, usage and installation instructions, please
|
||||
read the manual, which is available on the Nix website at
|
||||
<https://nixos.org/nix/manual>.
|
||||
On Linux and macOS the easiest way to Install Nix is to run the following shell command
|
||||
(as a user other than root):
|
||||
|
||||
## Contributing
|
||||
```
|
||||
$ curl -L https://nixos.org/nix/install | sh
|
||||
```
|
||||
|
||||
Take a look at the [Hacking Section](https://nixos.org/nix/manual/#chap-hacking)
|
||||
of the manual. It helps you to get started with building Nix from source.
|
||||
Information on additional installation methods is available on the [Nix download page](https://nixos.org/download.html).
|
||||
|
||||
## Building And Developing
|
||||
|
||||
### Building Nix
|
||||
|
||||
You can build Nix using one of the targets provided by [release.nix](./release.nix):
|
||||
|
||||
```
|
||||
$ nix-build ./release.nix -A build.aarch64-linux
|
||||
$ nix-build ./release.nix -A build.x86_64-darwin
|
||||
$ nix-build ./release.nix -A build.i686-linux
|
||||
$ nix-build ./release.nix -A build.x86_64-linux
|
||||
```
|
||||
|
||||
### Development Environment
|
||||
|
||||
You can use the provided `shell.nix` to get a working development environment:
|
||||
|
||||
```
|
||||
$ nix-shell
|
||||
$ ./bootstrap.sh
|
||||
$ ./configure
|
||||
$ make
|
||||
```
|
||||
|
||||
## Additional Resources
|
||||
|
||||
- [Nix manual](https://nixos.org/nix/manual)
|
||||
- [Nix jobsets on hydra.nixos.org](https://hydra.nixos.org/project/nix)
|
||||
- [NixOS Discourse](https://discourse.nixos.org/)
|
||||
- [IRC - #nixos on freenode.net](irc://irc.freenode.net/#nixos)
|
||||
|
||||
## License
|
||||
|
||||
Nix is released under the LGPL v2.1
|
||||
Nix is released under the [LGPL v2.1](./COPYING).
|
||||
|
|
|
@ -266,6 +266,10 @@ if test "$gc" = yes; then
|
|||
fi
|
||||
|
||||
|
||||
# Look for gtest.
|
||||
PKG_CHECK_MODULES([GTEST], [gtest_main])
|
||||
|
||||
|
||||
# documentation generation switch
|
||||
AC_ARG_ENABLE(doc-gen, AC_HELP_STRING([--disable-doc-gen],
|
||||
[disable documentation generation]),
|
||||
|
|
|
@ -61,7 +61,7 @@ substituters = https://cache.nixos.org/ s3://example-nix-cache
|
|||
trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= example-nix-cache-1:1/cKDz3QCCOmwcztD2eV6Coggp6rqc9DGjWv7C0G+rM=
|
||||
</programlisting>
|
||||
|
||||
<para>we will restart the Nix daemon a later step.</para>
|
||||
<para>We will restart the Nix daemon in a later step.</para>
|
||||
</section>
|
||||
|
||||
<section>
|
||||
|
@ -139,7 +139,7 @@ $ nix-store --delete /nix/store/ibcyipq5gf91838ldx40mjsp0b8w9n18-example
|
|||
|
||||
<para>Now, copy the path back from the cache:</para>
|
||||
<screen>
|
||||
$ nix store --realize /nix/store/ibcyipq5gf91838ldx40mjsp0b8w9n18-example
|
||||
$ nix-store --realise /nix/store/ibcyipq5gf91838ldx40mjsp0b8w9n18-example
|
||||
copying path '/nix/store/m8bmqwrch6l3h8s0k3d673xpmipcdpsa-example from 's3://example-nix-cache'...
|
||||
warning: you did not specify '--add-root'; the result might be removed by the garbage collector
|
||||
/nix/store/m8bmqwrch6l3h8s0k3d673xpmipcdpsa-example
|
||||
|
|
|
@ -19,26 +19,30 @@
|
|||
|
||||
<refsection><title>Description</title>
|
||||
|
||||
<para>Nix reads settings from two configuration files:</para>
|
||||
<para>By default Nix reads settings from the following places:</para>
|
||||
|
||||
<itemizedlist>
|
||||
<para>The system-wide configuration file
|
||||
<filename><replaceable>sysconfdir</replaceable>/nix/nix.conf</filename>
|
||||
(i.e. <filename>/etc/nix/nix.conf</filename> on most systems), or
|
||||
<filename>$NIX_CONF_DIR/nix.conf</filename> if
|
||||
<envar>NIX_CONF_DIR</envar> is set. Values loaded in this file are not forwarded to the Nix daemon. The
|
||||
client assumes that the daemon has already loaded them.
|
||||
</para>
|
||||
|
||||
<listitem>
|
||||
<para>The system-wide configuration file
|
||||
<filename><replaceable>sysconfdir</replaceable>/nix/nix.conf</filename>
|
||||
(i.e. <filename>/etc/nix/nix.conf</filename> on most systems), or
|
||||
<filename>$NIX_CONF_DIR/nix.conf</filename> if
|
||||
<envar>NIX_CONF_DIR</envar> is set.</para>
|
||||
</listitem>
|
||||
<para>User-specific configuration files:</para>
|
||||
|
||||
<listitem>
|
||||
<para>The user configuration file
|
||||
<filename>$XDG_CONFIG_HOME/nix/nix.conf</filename>, or
|
||||
<filename>~/.config/nix/nix.conf</filename> if
|
||||
<envar>XDG_CONFIG_HOME</envar> is not set.</para>
|
||||
</listitem>
|
||||
<para>
|
||||
If <envar>NIX_USER_CONF_FILES</envar> is set, then each path separated by
|
||||
<literal>:</literal> will be loaded in reverse order.
|
||||
</para>
|
||||
|
||||
</itemizedlist>
|
||||
<para>
|
||||
Otherwise it will look for <filename>nix/nix.conf</filename> files in
|
||||
<envar>XDG_CONFIG_DIRS</envar> and <envar>XDG_CONFIG_HOME</envar>.
|
||||
|
||||
The default location is <filename>$HOME/.config/nix.conf</filename> if
|
||||
those environment variables are unset.
|
||||
</para>
|
||||
|
||||
<para>The configuration files consist of
|
||||
<literal><replaceable>name</replaceable> =
|
||||
|
@ -382,7 +386,7 @@ false</literal>.</para>
|
|||
|
||||
<programlisting>
|
||||
builtins.fetchurl {
|
||||
url = https://example.org/foo-1.2.3.tar.xz;
|
||||
url = "https://example.org/foo-1.2.3.tar.xz";
|
||||
sha256 = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae";
|
||||
}
|
||||
</programlisting>
|
||||
|
|
|
@ -33,7 +33,7 @@
|
|||
|
||||
will cause Nix to look for paths relative to
|
||||
<filename>/home/eelco/Dev</filename> and
|
||||
<filename>/etc/nixos</filename>, in that order. It is also
|
||||
<filename>/etc/nixos</filename>, in this order. It is also
|
||||
possible to match paths against a prefix. For example, the value
|
||||
|
||||
<screen>
|
||||
|
@ -53,13 +53,13 @@ nixpkgs=/home/eelco/Dev/nixpkgs-branch:/etc/nixos</screen>
|
|||
<envar>NIX_PATH</envar> to
|
||||
|
||||
<screen>
|
||||
nixpkgs=https://github.com/NixOS/nixpkgs-channels/archive/nixos-15.09.tar.gz</screen>
|
||||
nixpkgs=https://github.com/NixOS/nixpkgs/archive/nixos-15.09.tar.gz</screen>
|
||||
|
||||
tells Nix to download the latest revision in the Nixpkgs/NixOS
|
||||
15.09 channel.</para>
|
||||
|
||||
<para>A following shorthand can be used to refer to the official channels:
|
||||
|
||||
|
||||
<screen>nixpkgs=channel:nixos-15.09</screen>
|
||||
</para>
|
||||
|
||||
|
@ -137,12 +137,19 @@ $ mount -o bind /mnt/otherdisk/nix /nix</screen>
|
|||
|
||||
<varlistentry><term><envar>NIX_CONF_DIR</envar></term>
|
||||
|
||||
<listitem><para>Overrides the location of the Nix configuration
|
||||
<listitem><para>Overrides the location of the system Nix configuration
|
||||
directory (default
|
||||
<filename><replaceable>prefix</replaceable>/etc/nix</filename>).</para></listitem>
|
||||
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry><term><envar>NIX_USER_CONF_FILES</envar></term>
|
||||
|
||||
<listitem><para>Overrides the location of the user Nix configuration files
|
||||
to load from (defaults to the XDG spec locations). The variable is treated
|
||||
as a list separated by the <literal>:</literal> token.</para></listitem>
|
||||
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry><term><envar>TMPDIR</envar></term>
|
||||
|
||||
|
|
|
@ -526,13 +526,10 @@ these paths will be fetched (0.04 MiB download, 0.19 MiB unpacked):
|
|||
14.12 channel:
|
||||
|
||||
<screen>
|
||||
$ nix-env -f https://github.com/NixOS/nixpkgs-channels/archive/nixos-14.12.tar.gz -iA firefox
|
||||
$ nix-env -f https://github.com/NixOS/nixpkgs/archive/nixos-14.12.tar.gz -iA firefox
|
||||
</screen>
|
||||
|
||||
(The GitHub repository <literal>nixpkgs-channels</literal> is updated
|
||||
automatically from the main <literal>nixpkgs</literal> repository
|
||||
after certain tests have succeeded and binaries have been built and
|
||||
uploaded to the binary cache at <uri>cache.nixos.org</uri>.)</para>
|
||||
</para>
|
||||
|
||||
</refsection>
|
||||
|
||||
|
|
|
@ -258,7 +258,7 @@ path. You can override it by passing <option>-I</option> or setting
|
|||
containing the Pan package from a specific revision of Nixpkgs:
|
||||
|
||||
<screen>
|
||||
$ nix-shell -p pan -I nixpkgs=https://github.com/NixOS/nixpkgs-channels/archive/8a3eea054838b55aca962c3fbde9c83c102b8bf2.tar.gz
|
||||
$ nix-shell -p pan -I nixpkgs=https://github.com/NixOS/nixpkgs/archive/8a3eea054838b55aca962c3fbde9c83c102b8bf2.tar.gz
|
||||
|
||||
[nix-shell:~]$ pan --version
|
||||
Pan 0.139
|
||||
|
@ -352,7 +352,7 @@ following Haskell script uses a specific branch of Nixpkgs/NixOS (the
|
|||
<programlisting><![CDATA[
|
||||
#! /usr/bin/env nix-shell
|
||||
#! nix-shell -i runghc -p "haskellPackages.ghcWithPackages (ps: [ps.HTTP ps.tagsoup])"
|
||||
#! nix-shell -I nixpkgs=https://github.com/NixOS/nixpkgs-channels/archive/nixos-18.03.tar.gz
|
||||
#! nix-shell -I nixpkgs=https://github.com/NixOS/nixpkgs/archive/nixos-18.03.tar.gz
|
||||
|
||||
import Network.HTTP
|
||||
import Text.HTML.TagSoup
|
||||
|
@ -370,7 +370,7 @@ If you want to be even more precise, you can specify a specific
|
|||
revision of Nixpkgs:
|
||||
|
||||
<programlisting>
|
||||
#! nix-shell -I nixpkgs=https://github.com/NixOS/nixpkgs-channels/archive/0672315759b3e15e2121365f067c1c8c56bb4722.tar.gz
|
||||
#! nix-shell -I nixpkgs=https://github.com/NixOS/nixpkgs/archive/0672315759b3e15e2121365f067c1c8c56bb4722.tar.gz
|
||||
</programlisting>
|
||||
|
||||
</para>
|
||||
|
|
|
@ -360,7 +360,6 @@ EOF
|
|||
<arg choice='plain'><option>--print-roots</option></arg>
|
||||
<arg choice='plain'><option>--print-live</option></arg>
|
||||
<arg choice='plain'><option>--print-dead</option></arg>
|
||||
<arg choice='plain'><option>--delete</option></arg>
|
||||
</group>
|
||||
<arg><option>--max-freed</option> <replaceable>bytes</replaceable></arg>
|
||||
</cmdsynopsis>
|
||||
|
@ -407,14 +406,6 @@ the Nix store not reachable via file system references from a set of
|
|||
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry><term><option>--delete</option></term>
|
||||
|
||||
<listitem><para>This operation performs an actual garbage
|
||||
collection. All dead paths are removed from the
|
||||
store. This is the default.</para></listitem>
|
||||
|
||||
</varlistentry>
|
||||
|
||||
</variablelist>
|
||||
|
||||
<para>By default, all unreachable paths are deleted. The following
|
||||
|
@ -444,10 +435,10 @@ and <link
|
|||
linkend="conf-keep-derivations"><literal>keep-derivations</literal></link>
|
||||
variables in the Nix configuration file.</para>
|
||||
|
||||
<para>With <option>--delete</option>, the collector prints the total
|
||||
number of freed bytes when it finishes (or when it is interrupted).
|
||||
With <option>--print-dead</option>, it prints the number of bytes that
|
||||
would be freed.</para>
|
||||
<para>By default, the collector prints the total number of freed bytes
|
||||
when it finishes (or when it is interrupted). With
|
||||
<option>--print-dead</option>, it prints the number of bytes that would
|
||||
be freed.</para>
|
||||
|
||||
</refsection>
|
||||
|
||||
|
@ -945,7 +936,7 @@ $ nix-store --add ./foo.c
|
|||
|
||||
<para>The operation <option>--add-fixed</option> adds the specified paths to
|
||||
the Nix store. Unlike <option>--add</option> paths are registered using the
|
||||
specified hashing algorithm, resulting in the same output path as a fixed output
|
||||
specified hashing algorithm, resulting in the same output path as a fixed-output
|
||||
derivation. This can be used for sources that are not available from a public
|
||||
url or broke since the download expression was written.
|
||||
</para>
|
||||
|
@ -1148,7 +1139,7 @@ the information that Nix considers important. For instance,
|
|||
timestamps are elided because all files in the Nix store have their
|
||||
timestamp set to 0 anyway. Likewise, all permissions are left out
|
||||
except for the execute bit, because all files in the Nix store have
|
||||
644 or 755 permission.</para>
|
||||
444 or 555 permission.</para>
|
||||
|
||||
<para>Also, a NAR archive is <emphasis>canonical</emphasis>, meaning
|
||||
that “equal” paths always produce the same NAR archive. For instance,
|
||||
|
|
|
@ -178,7 +178,7 @@ impureEnvVars = [ "http_proxy" "https_proxy" <replaceable>...</replaceable> ];
|
|||
|
||||
<programlisting>
|
||||
fetchurl {
|
||||
url = http://ftp.gnu.org/pub/gnu/hello/hello-2.1.1.tar.gz;
|
||||
url = "http://ftp.gnu.org/pub/gnu/hello/hello-2.1.1.tar.gz";
|
||||
sha256 = "1md7jsfd8pa45z73bz1kszpp01yw6x5ljkjk2hx7wl800any6465";
|
||||
}
|
||||
</programlisting>
|
||||
|
@ -189,7 +189,7 @@ fetchurl {
|
|||
|
||||
<programlisting>
|
||||
fetchurl {
|
||||
url = ftp://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz;
|
||||
url = "ftp://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz";
|
||||
sha256 = "1md7jsfd8pa45z73bz1kszpp01yw6x5ljkjk2hx7wl800any6465";
|
||||
}
|
||||
</programlisting>
|
||||
|
|
|
@ -324,7 +324,7 @@ if builtins ? getEnv then builtins.getEnv "PATH" else ""</programlisting>
|
|||
particular version of Nixpkgs, e.g.
|
||||
|
||||
<programlisting>
|
||||
with import (fetchTarball https://github.com/NixOS/nixpkgs-channels/archive/nixos-14.12.tar.gz) {};
|
||||
with import (fetchTarball https://github.com/NixOS/nixpkgs/archive/nixos-14.12.tar.gz) {};
|
||||
|
||||
stdenv.mkDerivation { … }
|
||||
</programlisting>
|
||||
|
@ -349,7 +349,7 @@ stdenv.mkDerivation { … }
|
|||
|
||||
<programlisting>
|
||||
with import (fetchTarball {
|
||||
url = https://github.com/NixOS/nixpkgs-channels/archive/nixos-14.12.tar.gz;
|
||||
url = "https://github.com/NixOS/nixpkgs/archive/nixos-14.12.tar.gz";
|
||||
sha256 = "1jppksrfvbk5ypiqdz4cddxdl8z6zyzdb2srq8fcffr327ld5jj2";
|
||||
}) {};
|
||||
|
||||
|
@ -422,6 +422,16 @@ stdenv.mkDerivation { … }
|
|||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
<varlistentry>
|
||||
<term>submodules</term>
|
||||
<listitem>
|
||||
<para>
|
||||
A Boolean parameter that specifies whether submodules
|
||||
should be checked out. Defaults to
|
||||
<literal>false</literal>.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
</variablelist>
|
||||
|
||||
<example>
|
||||
|
@ -1396,7 +1406,7 @@ stdenv.mkDerivation {
|
|||
";
|
||||
|
||||
src = fetchurl {
|
||||
url = http://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz;
|
||||
url = "http://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz";
|
||||
sha256 = "1md7jsfd8pa45z73bz1kszpp01yw6x5ljkjk2hx7wl800any6465";
|
||||
};
|
||||
inherit perl;
|
||||
|
|
|
@ -15,7 +15,7 @@ stdenv.mkDerivation { <co xml:id='ex-hello-nix-co-2' />
|
|||
name = "hello-2.1.1"; <co xml:id='ex-hello-nix-co-3' />
|
||||
builder = ./builder.sh; <co xml:id='ex-hello-nix-co-4' />
|
||||
src = fetchurl { <co xml:id='ex-hello-nix-co-5' />
|
||||
url = ftp://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz;
|
||||
url = "ftp://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz";
|
||||
sha256 = "1md7jsfd8pa45z73bz1kszpp01yw6x5ljkjk2hx7wl800any6465";
|
||||
};
|
||||
inherit perl; <co xml:id='ex-hello-nix-co-6' />
|
||||
|
|
|
@ -73,12 +73,4 @@ waiting for lock on `/nix/store/0h5b7hp8d4hqfrw8igvx97x1xawrjnac-hello-2.1.1x'</
|
|||
So it is always safe to run multiple instances of Nix in parallel
|
||||
(which isn’t the case with, say, <command>make</command>).</para>
|
||||
|
||||
<para>If you have a system with multiple CPUs, you may want to have
|
||||
Nix build different derivations in parallel (insofar as possible).
|
||||
Just pass the option <link linkend='opt-max-jobs'><option>-j
|
||||
<replaceable>N</replaceable></option></link>, where
|
||||
<replaceable>N</replaceable> is the maximum number of jobs to be run
|
||||
in parallel, or set. Typically this should be the number of
|
||||
CPUs.</para>
|
||||
|
||||
</section>
|
||||
|
|
|
@ -6,16 +6,30 @@
|
|||
|
||||
<title>Installing a Binary Distribution</title>
|
||||
|
||||
<para>If you are using Linux or macOS, the easiest way to install Nix
|
||||
is to run the following command:
|
||||
<para>
|
||||
If you are using Linux or macOS versions up to 10.14 (Mojave), the
|
||||
easiest way to install Nix is to run the following command:
|
||||
</para>
|
||||
|
||||
<screen>
|
||||
$ sh <(curl https://nixos.org/nix/install)
|
||||
</screen>
|
||||
|
||||
As of Nix 2.1.0, the Nix installer will always default to creating a
|
||||
single-user installation, however opting in to the multi-user
|
||||
installation is highly recommended.
|
||||
<para>
|
||||
If you're using macOS 10.15 (Catalina) or newer, consult
|
||||
<link linkend="sect-macos-installation">the macOS installation instructions</link>
|
||||
before installing.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
As of Nix 2.1.0, the Nix installer will always default to creating a
|
||||
single-user installation, however opting in to the multi-user
|
||||
installation is highly recommended.
|
||||
<!-- TODO: this explains *neither* why the default version is
|
||||
single-user, nor why we'd recommend multi-user over the default.
|
||||
True prospective users don't have much basis for evaluating this.
|
||||
What's it to me? Who should pick which? Why? What if I pick wrong?
|
||||
-->
|
||||
</para>
|
||||
|
||||
<section xml:id="sect-single-user-installation">
|
||||
|
@ -36,7 +50,7 @@ run this under your usual user account, <emphasis>not</emphasis> as
|
|||
root. The script will invoke <command>sudo</command> to create
|
||||
<filename>/nix</filename> if it doesn’t already exist. If you don’t
|
||||
have <command>sudo</command>, you should manually create
|
||||
<command>/nix</command> first as root, e.g.:
|
||||
<filename>/nix</filename> first as root, e.g.:
|
||||
|
||||
<screen>
|
||||
$ mkdir /nix
|
||||
|
@ -47,7 +61,7 @@ The install script will modify the first writable file from amongst
|
|||
<filename>.bash_profile</filename>, <filename>.bash_login</filename>
|
||||
and <filename>.profile</filename> to source
|
||||
<filename>~/.nix-profile/etc/profile.d/nix.sh</filename>. You can set
|
||||
the <command>NIX_INSTALLER_NO_MODIFY_PROFILE</command> environment
|
||||
the <envar>NIX_INSTALLER_NO_MODIFY_PROFILE</envar> environment
|
||||
variable before executing the install script to disable this
|
||||
behaviour.
|
||||
</para>
|
||||
|
@ -81,12 +95,10 @@ $ rm -rf /nix
|
|||
<para>
|
||||
You can instruct the installer to perform a multi-user
|
||||
installation on your system:
|
||||
|
||||
<screen>
|
||||
sh <(curl https://nixos.org/nix/install) --daemon
|
||||
</screen>
|
||||
</para>
|
||||
|
||||
<screen>sh <(curl https://nixos.org/nix/install) --daemon</screen>
|
||||
|
||||
<para>
|
||||
The multi-user installation of Nix will create build users between
|
||||
the user IDs 30001 and 30032, and a group with the group ID 30000.
|
||||
|
@ -136,6 +148,273 @@ sudo rm /Library/LaunchDaemons/org.nixos.nix-daemon.plist
|
|||
|
||||
</section>
|
||||
|
||||
<section xml:id="sect-macos-installation">
|
||||
<title>macOS Installation</title>
|
||||
|
||||
<para>
|
||||
Starting with macOS 10.15 (Catalina), the root filesystem is read-only.
|
||||
This means <filename>/nix</filename> can no longer live on your system
|
||||
volume, and that you'll need a workaround to install Nix.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The recommended approach, which creates an unencrypted APFS volume
|
||||
for your Nix store and a "synthetic" empty directory to mount it
|
||||
over at <filename>/nix</filename>, is least likely to impair Nix
|
||||
or your system.
|
||||
</para>
|
||||
|
||||
<note><para>
|
||||
With all separate-volume approaches, it's possible something on
|
||||
your system (particularly daemons/services and restored apps) may
|
||||
need access to your Nix store before the volume is mounted. Adding
|
||||
additional encryption makes this more likely.
|
||||
</para></note>
|
||||
|
||||
<para>
|
||||
If you're using a recent Mac with a
|
||||
<link xlink:href="https://www.apple.com/euro/mac/shared/docs/Apple_T2_Security_Chip_Overview.pdf">T2 chip</link>,
|
||||
your drive will still be encrypted at rest (in which case "unencrypted"
|
||||
is a bit of a misnomer). To use this approach, just install Nix with:
|
||||
</para>
|
||||
|
||||
<screen>$ sh <(curl https://nixos.org/nix/install) --darwin-use-unencrypted-nix-store-volume</screen>
|
||||
|
||||
<para>
|
||||
If you don't like the sound of this, you'll want to weigh the
|
||||
other approaches and tradeoffs detailed in this section.
|
||||
</para>
|
||||
|
||||
<note>
|
||||
<title>Eventual solutions?</title>
|
||||
<para>
|
||||
All of the known workarounds have drawbacks, but we hope
|
||||
better solutions will be available in the future. Some that
|
||||
we have our eye on are:
|
||||
</para>
|
||||
<orderedlist>
|
||||
<listitem>
|
||||
<para>
|
||||
A true firmlink would enable the Nix store to live on the
|
||||
primary data volume without the build problems caused by
|
||||
the symlink approach. End users cannot currently
|
||||
create true firmlinks.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
If the Nix store volume shared FileVault encryption
|
||||
with the primary data volume (probably by using the same
|
||||
volume group and role), FileVault encryption could be
|
||||
easily supported by the installer without requiring
|
||||
manual setup by each user.
|
||||
</para>
|
||||
</listitem>
|
||||
</orderedlist>
|
||||
</note>
|
||||
|
||||
<section xml:id="sect-macos-installation-change-store-prefix">
|
||||
<title>Change the Nix store path prefix</title>
|
||||
<para>
|
||||
Changing the default prefix for the Nix store is a simple
|
||||
approach which enables you to leave it on your root volume,
|
||||
where it can take full advantage of FileVault encryption if
|
||||
enabled. Unfortunately, this approach also opts your device out
|
||||
of some benefits that are enabled by using the same prefix
|
||||
across systems:
|
||||
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>
|
||||
Your system won't be able to take advantage of the binary
|
||||
cache (unless someone is able to stand up and support
|
||||
duplicate caching infrastructure), which means you'll
|
||||
spend more time waiting for builds.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
It's harder to build and deploy packages to Linux systems.
|
||||
</para>
|
||||
</listitem>
|
||||
<!-- TODO: may be more here -->
|
||||
</itemizedlist>
|
||||
|
||||
<!-- TODO: Yes, but how?! -->
|
||||
|
||||
It would also possible (and often requested) to just apply this
|
||||
change ecosystem-wide, but it's an intrusive process that has
|
||||
side effects we want to avoid for now.
|
||||
<!-- magnificent hand-wavy gesture -->
|
||||
</para>
|
||||
<para>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section xml:id="sect-macos-installation-encrypted-volume">
|
||||
<title>Use a separate encrypted volume</title>
|
||||
<para>
|
||||
If you like, you can also add encryption to the recommended
|
||||
approach taken by the installer. You can do this by pre-creating
|
||||
an encrypted volume before you run the installer--or you can
|
||||
run the installer and encrypt the volume it creates later.
|
||||
<!-- TODO: see later note about whether this needs both add-encryption and from-scratch directions -->
|
||||
</para>
|
||||
<para>
|
||||
In either case, adding encryption to a second volume isn't quite
|
||||
as simple as enabling FileVault for your boot volume. Before you
|
||||
dive in, there are a few things to weigh:
|
||||
</para>
|
||||
<orderedlist>
|
||||
<listitem>
|
||||
<para>
|
||||
The additional volume won't be encrypted with your existing
|
||||
FileVault key, so you'll need another mechanism to decrypt
|
||||
the volume.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
You can store the password in Keychain to automatically
|
||||
decrypt the volume on boot--but it'll have to wait on Keychain
|
||||
and may not mount before your GUI apps restore. If any of
|
||||
your launchd agents or apps depend on Nix-installed software
|
||||
(for example, if you use a Nix-installed login shell), the
|
||||
restore may fail or break.
|
||||
</para>
|
||||
<para>
|
||||
On a case-by-case basis, you may be able to work around this
|
||||
problem by using <command>wait4path</command> to block
|
||||
execution until your executable is available.
|
||||
</para>
|
||||
<para>
|
||||
It's also possible to decrypt and mount the volume earlier
|
||||
with a login hook--but this mechanism appears to be
|
||||
deprecated and its future is unclear.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
You can hard-code the password in the clear, so that your
|
||||
store volume can be decrypted before Keychain is available.
|
||||
</para>
|
||||
</listitem>
|
||||
</orderedlist>
|
||||
<para>
|
||||
If you are comfortable navigating these tradeoffs, you can encrypt the volume with
|
||||
something along the lines of:
|
||||
<!-- TODO:
|
||||
I don't know if this also needs from-scratch instructions?
|
||||
can we just recommend use-the-installer-and-then-encrypt?
|
||||
-->
|
||||
</para>
|
||||
<!--
|
||||
TODO: it looks like this option can be encryptVolume|encrypt|enableFileVault
|
||||
|
||||
It may be more clear to use encryptVolume, here? FileVault seems
|
||||
heavily associated with the boot-volume behavior; I worry
|
||||
a little that it can mislead here, especially as it gets
|
||||
copied around minus doc context...?
|
||||
-->
|
||||
<screen>alice$ diskutil apfs enableFileVault /nix -user disk</screen>
|
||||
|
||||
<!-- TODO: and then go into detail on the mount/decrypt approaches? -->
|
||||
</section>
|
||||
|
||||
<section xml:id="sect-macos-installation-symlink">
|
||||
<!--
|
||||
Maybe a good razor is: if we'd hate having to support someone who
|
||||
installed Nix this way, it shouldn't even be detailed?
|
||||
-->
|
||||
<title>Symlink the Nix store to a custom location</title>
|
||||
<para>
|
||||
Another simple approach is using <filename>/etc/synthetic.conf</filename>
|
||||
to symlink the Nix store to the data volume. This option also
|
||||
enables your store to share any configured FileVault encryption.
|
||||
Unfortunately, builds that resolve the symlink may leak the
|
||||
canonical path or even fail.
|
||||
</para>
|
||||
<para>
|
||||
Because of these downsides, we can't recommend this approach.
|
||||
</para>
|
||||
<!-- Leaving out instructions for this one. -->
|
||||
</section>
|
||||
|
||||
<section xml:id="sect-macos-installation-recommended-notes">
|
||||
<title>Notes on the recommended approach</title>
|
||||
<para>
|
||||
This section goes into a little more detail on the recommended
|
||||
approach. You don't need to understand it to run the installer,
|
||||
but it can serve as a helpful reference if you run into trouble.
|
||||
</para>
|
||||
<orderedlist>
|
||||
<listitem>
|
||||
<para>
|
||||
In order to compose user-writable locations into the new
|
||||
read-only system root, Apple introduced a new concept called
|
||||
<literal>firmlinks</literal>, which it describes as a
|
||||
"bi-directional wormhole" between two filesystems. You can
|
||||
see the current firmlinks in <filename>/usr/share/firmlinks</filename>.
|
||||
Unfortunately, firmlinks aren't (currently?) user-configurable.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
For special cases like NFS mount points or package manager roots,
|
||||
<link xlink:href="https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man5/synthetic.conf.5.html">synthetic.conf(5)</link>
|
||||
supports limited user-controlled file-creation (of symlinks,
|
||||
and synthetic empty directories) at <filename>/</filename>.
|
||||
To create a synthetic empty directory for mounting at <filename>/nix</filename>,
|
||||
add the following line to <filename>/etc/synthetic.conf</filename>
|
||||
(create it if necessary):
|
||||
</para>
|
||||
|
||||
<screen>nix</screen>
|
||||
</listitem>
|
||||
|
||||
<listitem>
|
||||
<para>
|
||||
This configuration is applied at boot time, but you can use
|
||||
<command>apfs.util</command> to trigger creation (not deletion)
|
||||
of new entries without a reboot:
|
||||
</para>
|
||||
|
||||
<screen>alice$ /System/Library/Filesystems/apfs.fs/Contents/Resources/apfs.util -B</screen>
|
||||
</listitem>
|
||||
|
||||
<listitem>
|
||||
<para>
|
||||
Create the new APFS volume with diskutil:
|
||||
</para>
|
||||
|
||||
<screen>alice$ sudo diskutil apfs addVolume diskX APFS 'Nix Store' -mountpoint /nix</screen>
|
||||
</listitem>
|
||||
|
||||
<listitem>
|
||||
<para>
|
||||
Using <command>vifs</command>, add the new mount to
|
||||
<filename>/etc/fstab</filename>. If it doesn't already have
|
||||
other entries, it should look something like:
|
||||
</para>
|
||||
|
||||
<screen>
|
||||
#
|
||||
# Warning - this file should only be modified with vifs(8)
|
||||
#
|
||||
# Failure to do so is unsupported and may be destructive.
|
||||
#
|
||||
LABEL=Nix\040Store /nix apfs rw,nobrowse
|
||||
</screen>
|
||||
|
||||
<para>
|
||||
The nobrowse setting will keep Spotlight from indexing this
|
||||
volume, and keep it from showing up on your desktop.
|
||||
</para>
|
||||
</listitem>
|
||||
</orderedlist>
|
||||
</section>
|
||||
|
||||
</section>
|
||||
|
||||
<section xml:id="sect-nix-install-pinned-version-url">
|
||||
<title>Installing a pinned Nix version from a URL</title>
|
||||
|
||||
|
|
|
@ -17,6 +17,11 @@
|
|||
|
||||
<para>
|
||||
Single-user installations of Nix should run this:
|
||||
<command>nix-channel --update; nix-env -iA nixpkgs.nix</command>
|
||||
<command>nix-channel --update; nix-env -iA nixpkgs.nix nixpkgs.cacert</command>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Multi-user Nix users on Linux should run this with sudo:
|
||||
<command>nix-channel --update; nix-env -iA nixpkgs.nix nixpkgs.cacert; systemctl daemon-reload; systemctl restart nix-daemon</command>
|
||||
</para>
|
||||
</chapter>
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
|
||||
<para>NOTE: the hashing scheme in Nix 0.8 changed (as detailed below).
|
||||
As a result, <command>nix-pull</command> manifests and channels built
|
||||
for Nix 0.7 and below will now work anymore. However, the Nix
|
||||
for Nix 0.7 and below will not work anymore. However, the Nix
|
||||
expression language has not changed, so you can still build from
|
||||
source. Also, existing user environments continue to work. Nix 0.8
|
||||
will automatically upgrade the database schema of previous
|
||||
|
|
4
local.mk
4
local.mk
|
@ -6,9 +6,11 @@ dist-files += configure config.h.in perl/configure
|
|||
|
||||
clean-files += Makefile.config
|
||||
|
||||
GLOBAL_CXXFLAGS += -I . -I src -I src/libutil -I src/libstore -I src/libmain -I src/libexpr -I src/nix -Wno-deprecated-declarations
|
||||
GLOBAL_CXXFLAGS += -Wno-deprecated-declarations
|
||||
|
||||
$(foreach i, config.h $(call rwildcard, src/lib*, *.hh), \
|
||||
$(eval $(call install-file-in, $(i), $(includedir)/nix, 0644)))
|
||||
|
||||
$(GCH) $(PCH): src/libutil/util.hh config.h
|
||||
|
||||
GCH_CXXFLAGS = -I src/libutil
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
#! /usr/bin/env nix-shell
|
||||
#! nix-shell -i perl -p perl perlPackages.LWPUserAgent perlPackages.LWPProtocolHttps perlPackages.FileSlurp gnupg1
|
||||
#! nix-shell -i perl -p perl perlPackages.LWPUserAgent perlPackages.LWPProtocolHttps perlPackages.FileSlurp perlPackages.NetAmazonS3 gnupg1
|
||||
|
||||
use strict;
|
||||
use Data::Dumper;
|
||||
|
@ -9,12 +9,16 @@ use File::Slurp;
|
|||
use File::Copy;
|
||||
use JSON::PP;
|
||||
use LWP::UserAgent;
|
||||
use Net::Amazon::S3;
|
||||
|
||||
my $evalId = $ARGV[0] or die "Usage: $0 EVAL-ID\n";
|
||||
|
||||
my $releasesDir = "/home/eelco/mnt/releases";
|
||||
my $releasesBucketName = "nix-releases";
|
||||
my $channelsBucketName = "nix-channels";
|
||||
my $nixpkgsDir = "/home/eelco/Dev/nixpkgs-pristine";
|
||||
|
||||
my $TMPDIR = $ENV{'TMPDIR'} // "/tmp";
|
||||
|
||||
# FIXME: cut&paste from nixos-channel-scripts.
|
||||
sub fetch {
|
||||
my ($url, $type) = @_;
|
||||
|
@ -42,13 +46,31 @@ my $version = $1;
|
|||
|
||||
print STDERR "Nix revision is $nixRev, version is $version\n";
|
||||
|
||||
File::Path::make_path($releasesDir);
|
||||
if (system("mountpoint -q $releasesDir") != 0) {
|
||||
system("sshfs hydra-mirror\@nixos.org:/releases $releasesDir") == 0 or die;
|
||||
}
|
||||
my $releaseDir = "nix/$releaseName";
|
||||
|
||||
my $releaseDir = "$releasesDir/nix/$releaseName";
|
||||
File::Path::make_path($releaseDir);
|
||||
my $tmpDir = "$TMPDIR/nix-release/$releaseName";
|
||||
File::Path::make_path($tmpDir);
|
||||
|
||||
# S3 setup.
|
||||
my $aws_access_key_id = $ENV{'AWS_ACCESS_KEY_ID'} or die "No AWS_ACCESS_KEY_ID given.";
|
||||
my $aws_secret_access_key = $ENV{'AWS_SECRET_ACCESS_KEY'} or die "No AWS_SECRET_ACCESS_KEY given.";
|
||||
|
||||
my $s3 = Net::Amazon::S3->new(
|
||||
{ aws_access_key_id => $aws_access_key_id,
|
||||
aws_secret_access_key => $aws_secret_access_key,
|
||||
retry => 1,
|
||||
host => "s3-eu-west-1.amazonaws.com",
|
||||
});
|
||||
|
||||
my $releasesBucket = $s3->bucket($releasesBucketName) or die;
|
||||
|
||||
my $s3_us = Net::Amazon::S3->new(
|
||||
{ aws_access_key_id => $aws_access_key_id,
|
||||
aws_secret_access_key => $aws_secret_access_key,
|
||||
retry => 1,
|
||||
});
|
||||
|
||||
my $channelsBucket = $s3_us->bucket($channelsBucketName) or die;
|
||||
|
||||
sub downloadFile {
|
||||
my ($jobName, $productNr, $dstName) = @_;
|
||||
|
@ -57,40 +79,49 @@ sub downloadFile {
|
|||
|
||||
my $srcFile = $buildInfo->{buildproducts}->{$productNr}->{path} or die "job '$jobName' lacks product $productNr\n";
|
||||
$dstName //= basename($srcFile);
|
||||
my $dstFile = "$releaseDir/" . $dstName;
|
||||
my $tmpFile = "$tmpDir/$dstName";
|
||||
|
||||
if (! -e $dstFile) {
|
||||
print STDERR "downloading $srcFile to $dstFile...\n";
|
||||
system("NIX_REMOTE=https://cache.nixos.org/ nix cat-store '$srcFile' > '$dstFile.tmp'") == 0
|
||||
if (!-e $tmpFile) {
|
||||
print STDERR "downloading $srcFile to $tmpFile...\n";
|
||||
system("NIX_REMOTE=https://cache.nixos.org/ nix cat-store '$srcFile' > '$tmpFile'") == 0
|
||||
or die "unable to fetch $srcFile\n";
|
||||
rename("$dstFile.tmp", $dstFile) or die;
|
||||
}
|
||||
|
||||
my $sha256_expected = $buildInfo->{buildproducts}->{$productNr}->{sha256hash} or die;
|
||||
my $sha256_actual = `nix hash-file --base16 --type sha256 '$dstFile'`;
|
||||
my $sha256_actual = `nix hash-file --base16 --type sha256 '$tmpFile'`;
|
||||
chomp $sha256_actual;
|
||||
if ($sha256_expected ne $sha256_actual) {
|
||||
print STDERR "file $dstFile is corrupt, got $sha256_actual, expected $sha256_expected\n";
|
||||
print STDERR "file $tmpFile is corrupt, got $sha256_actual, expected $sha256_expected\n";
|
||||
exit 1;
|
||||
}
|
||||
|
||||
write_file("$dstFile.sha256", $sha256_expected);
|
||||
write_file("$tmpFile.sha256", $sha256_expected);
|
||||
|
||||
if (! -e "$dstFile.asc") {
|
||||
system("gpg2 --detach-sign --armor $dstFile") == 0 or die "unable to sign $dstFile\n";
|
||||
if (! -e "$tmpFile.asc") {
|
||||
system("gpg2 --detach-sign --armor $tmpFile") == 0 or die "unable to sign $tmpFile\n";
|
||||
}
|
||||
|
||||
return ($dstFile, $sha256_expected);
|
||||
return $sha256_expected;
|
||||
}
|
||||
|
||||
downloadFile("tarball", "2"); # .tar.bz2
|
||||
my ($tarball, $tarballHash) = downloadFile("tarball", "3"); # .tar.xz
|
||||
my $tarballHash = downloadFile("tarball", "3"); # .tar.xz
|
||||
downloadFile("binaryTarball.i686-linux", "1");
|
||||
downloadFile("binaryTarball.x86_64-linux", "1");
|
||||
downloadFile("binaryTarball.aarch64-linux", "1");
|
||||
downloadFile("binaryTarball.x86_64-darwin", "1");
|
||||
downloadFile("installerScript", "1");
|
||||
|
||||
for my $fn (glob "$tmpDir/*") {
|
||||
my $name = basename($fn);
|
||||
my $dstKey = "$releaseDir/" . $name;
|
||||
unless (defined $releasesBucket->head_key($dstKey)) {
|
||||
print STDERR "uploading $fn to s3://$releasesBucketName/$dstKey...\n";
|
||||
$releasesBucket->add_key_filename($dstKey, $fn)
|
||||
or die $releasesBucket->err . ": " . $releasesBucket->errstr;
|
||||
}
|
||||
}
|
||||
|
||||
exit if $version =~ /pre/;
|
||||
|
||||
# Update Nixpkgs in a very hacky way.
|
||||
|
@ -125,18 +156,11 @@ write_file("$nixpkgsDir/nixos/modules/installer/tools/nix-fallback-paths.nix",
|
|||
|
||||
system("cd $nixpkgsDir && git commit -a -m 'nix: $oldName -> $version'") == 0 or die;
|
||||
|
||||
# Extract the HTML manual.
|
||||
File::Path::make_path("$releaseDir/manual");
|
||||
|
||||
system("tar xvf $tarball --strip-components=3 -C $releaseDir/manual --wildcards '*/doc/manual/*.html' '*/doc/manual/*.css' '*/doc/manual/*.gif' '*/doc/manual/*.png'") == 0 or die;
|
||||
|
||||
if (! -e "$releaseDir/manual/index.html") {
|
||||
symlink("manual.html", "$releaseDir/manual/index.html") or die;
|
||||
}
|
||||
|
||||
# Update the "latest" symlink.
|
||||
symlink("$releaseName", "$releasesDir/nix/latest-tmp") or die;
|
||||
rename("$releasesDir/nix/latest-tmp", "$releasesDir/nix/latest") or die;
|
||||
$channelsBucket->add_key(
|
||||
"nix-latest/install", "",
|
||||
{ "x-amz-website-redirect-location" => "https://releases.nixos.org/$releaseDir/install" })
|
||||
or die $channelsBucket->err . ": " . $channelsBucket->errstr;
|
||||
|
||||
# Tag the release in Git.
|
||||
chdir("/home/eelco/Dev/nix-pristine") or die;
|
||||
|
|
|
@ -8,14 +8,14 @@ GCH = $(buildprefix)precompiled-headers.h.gch
|
|||
$(GCH): precompiled-headers.h
|
||||
@rm -f $@
|
||||
@mkdir -p "$(dir $@)"
|
||||
$(trace-gen) $(CXX) -x c++-header -o $@ $< $(GLOBAL_CXXFLAGS)
|
||||
$(trace-gen) $(CXX) -x c++-header -o $@ $< $(GLOBAL_CXXFLAGS) $(GCH_CXXFLAGS)
|
||||
|
||||
PCH = $(buildprefix)precompiled-headers.h.pch
|
||||
|
||||
$(PCH): precompiled-headers.h
|
||||
@rm -f $@
|
||||
@mkdir -p "$(dir $@)"
|
||||
$(trace-gen) $(CXX) -x c++-header -o $@ $< $(GLOBAL_CXXFLAGS)
|
||||
$(trace-gen) $(CXX) -x c++-header -o $@ $< $(GLOBAL_CXXFLAGS) $(GCH_CXXFLAGS)
|
||||
|
||||
clean-files += $(GCH) $(PCH)
|
||||
|
||||
|
|
|
@ -35,24 +35,28 @@ define build-program
|
|||
$$(trace-ld) $(CXX) -o $$@ $$(LDFLAGS) $$(GLOBAL_LDFLAGS) $$($(1)_OBJS) $$($(1)_LDFLAGS) $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_LDFLAGS_USE))
|
||||
|
||||
$(1)_INSTALL_DIR ?= $$(bindir)
|
||||
$(1)_INSTALL_PATH := $$($(1)_INSTALL_DIR)/$(1)
|
||||
|
||||
$$(eval $$(call create-dir, $$($(1)_INSTALL_DIR)))
|
||||
ifdef $(1)_INSTALL_DIR
|
||||
|
||||
install: $(DESTDIR)$$($(1)_INSTALL_PATH)
|
||||
$(1)_INSTALL_PATH := $$($(1)_INSTALL_DIR)/$(1)
|
||||
|
||||
ifeq ($(BUILD_SHARED_LIBS), 1)
|
||||
$$(eval $$(call create-dir, $$($(1)_INSTALL_DIR)))
|
||||
|
||||
_libs_final := $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_INSTALL_PATH))
|
||||
install: $(DESTDIR)$$($(1)_INSTALL_PATH)
|
||||
|
||||
$(DESTDIR)$$($(1)_INSTALL_PATH): $$($(1)_OBJS) $$(_libs_final) | $(DESTDIR)$$($(1)_INSTALL_DIR)/
|
||||
ifeq ($(BUILD_SHARED_LIBS), 1)
|
||||
|
||||
_libs_final := $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_INSTALL_PATH))
|
||||
|
||||
$(DESTDIR)$$($(1)_INSTALL_PATH): $$($(1)_OBJS) $$(_libs_final) | $(DESTDIR)$$($(1)_INSTALL_DIR)/
|
||||
$$(trace-ld) $(CXX) -o $$@ $$(LDFLAGS) $$(GLOBAL_LDFLAGS) $$($(1)_OBJS) $$($(1)_LDFLAGS) $$(foreach lib, $$($(1)_LIBS), $$($$(lib)_LDFLAGS_USE_INSTALLED))
|
||||
|
||||
else
|
||||
else
|
||||
|
||||
$(DESTDIR)$$($(1)_INSTALL_PATH): $$($(1)_PATH) | $(DESTDIR)$$($(1)_INSTALL_DIR)/
|
||||
$(DESTDIR)$$($(1)_INSTALL_PATH): $$($(1)_PATH) | $(DESTDIR)$$($(1)_INSTALL_DIR)/
|
||||
install -t $(DESTDIR)$$($(1)_INSTALL_DIR) $$<
|
||||
|
||||
endif
|
||||
endif
|
||||
|
||||
# Propagate CFLAGS and CXXFLAGS to the individual object files.
|
||||
|
@ -76,4 +80,10 @@ define build-program
|
|||
programs-list += $$($(1)_PATH)
|
||||
clean-files += $$($(1)_PATH) $$(_d)/*.o $$(_d)/.*.dep $$($(1)_DEPS) $$($(1)_OBJS)
|
||||
dist-files += $$(_srcs)
|
||||
|
||||
# Phony target to run this program (typically as a dependency of 'check').
|
||||
.PHONY: $(1)_RUN
|
||||
$(1)_RUN: $$($(1)_PATH)
|
||||
$(trace-test) $$($(1)_PATH)
|
||||
|
||||
endef
|
||||
|
|
|
@ -11,6 +11,7 @@ ifeq ($(V), 0)
|
|||
trace-javac = @echo " JAVAC " $@;
|
||||
trace-jar = @echo " JAR " $@;
|
||||
trace-mkdir = @echo " MKDIR " $@;
|
||||
trace-test = @echo " TEST " $@;
|
||||
|
||||
suppress = @
|
||||
|
||||
|
|
|
@ -41,5 +41,5 @@ ifneq ($(OS), Darwin)
|
|||
check: rust-tests
|
||||
|
||||
rust-tests:
|
||||
cd nix-rust && CARGO_HOME=$$(if [[ -d vendor ]]; then echo vendor; fi) cargo test --release $$(if [[ -d vendor ]]; then echo --offline; fi)
|
||||
$(trace-test) cd nix-rust && CARGO_HOME=$$(if [[ -d vendor ]]; then echo vendor; fi) cargo test --release $$(if [[ -d vendor ]]; then echo --offline; fi)
|
||||
endif
|
||||
|
|
|
@ -274,7 +274,8 @@ int checkSignature(SV * publicKey_, SV * sig_, char * msg)
|
|||
SV * addToStore(char * srcPath, int recursive, char * algo)
|
||||
PPCODE:
|
||||
try {
|
||||
auto path = store()->addToStore(std::string(baseNameOf(srcPath)), srcPath, recursive, parseHashType(algo));
|
||||
auto method = recursive ? FileIngestionMethod::Recursive : FileIngestionMethod::Flat;
|
||||
auto path = store()->addToStore(std::string(baseNameOf(srcPath)), srcPath, method, parseHashType(algo));
|
||||
XPUSHs(sv_2mortal(newSVpv(store()->printStorePath(path).c_str(), 0)));
|
||||
} catch (Error & e) {
|
||||
croak("%s", e.what());
|
||||
|
@ -285,7 +286,8 @@ SV * makeFixedOutputPath(int recursive, char * algo, char * hash, char * name)
|
|||
PPCODE:
|
||||
try {
|
||||
Hash h(hash, parseHashType(algo));
|
||||
auto path = store()->makeFixedOutputPath(recursive, h, name);
|
||||
auto method = recursive ? FileIngestionMethod::Recursive : FileIngestionMethod::Flat;
|
||||
auto path = store()->makeFixedOutputPath(method, h, name);
|
||||
XPUSHs(sv_2mortal(newSVpv(store()->printStorePath(path).c_str(), 0)));
|
||||
} catch (Error & e) {
|
||||
croak("%s", e.what());
|
||||
|
|
|
@ -56,6 +56,3 @@
|
|||
#include <sys/wait.h>
|
||||
#include <termios.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include "util.hh"
|
||||
#include "args.hh"
|
||||
|
|
|
@ -55,6 +55,7 @@ rec {
|
|||
# Tests
|
||||
git
|
||||
mercurial
|
||||
gmock
|
||||
]
|
||||
++ lib.optionals stdenv.isLinux [libseccomp utillinuxMinimal]
|
||||
++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium
|
||||
|
|
121
release.nix
121
release.nix
|
@ -12,52 +12,64 @@ let
|
|||
builtins.readFile ./.version
|
||||
+ (if officialRelease then "" else "pre${toString nix.revCount}_${nix.shortRev}");
|
||||
|
||||
# Create a "vendor" directory that contains the crates listed in
|
||||
# Cargo.lock. This allows Nix to be built without network access.
|
||||
vendoredCrates' =
|
||||
let
|
||||
lockFile = builtins.fromTOML (builtins.readFile nix-rust/Cargo.lock);
|
||||
|
||||
files = map (pkg: import <nix/fetchurl.nix> {
|
||||
url = "https://crates.io/api/v1/crates/${pkg.name}/${pkg.version}/download";
|
||||
sha256 = lockFile.metadata."checksum ${pkg.name} ${pkg.version} (registry+https://github.com/rust-lang/crates.io-index)";
|
||||
}) (builtins.filter (pkg: pkg.source or "" == "registry+https://github.com/rust-lang/crates.io-index") lockFile.package);
|
||||
|
||||
in pkgs.runCommand "cargo-vendor-dir" {}
|
||||
''
|
||||
mkdir -p $out/vendor
|
||||
|
||||
cat > $out/vendor/config <<EOF
|
||||
[source.crates-io]
|
||||
replace-with = "vendored-sources"
|
||||
|
||||
[source.vendored-sources]
|
||||
directory = "vendor"
|
||||
EOF
|
||||
|
||||
${toString (builtins.map (file: ''
|
||||
mkdir $out/vendor/tmp
|
||||
tar xvf ${file} -C $out/vendor/tmp
|
||||
dir=$(echo $out/vendor/tmp/*)
|
||||
|
||||
# Add just enough metadata to keep Cargo happy.
|
||||
printf '{"files":{},"package":"${file.outputHash}"}' > "$dir/.cargo-checksum.json"
|
||||
|
||||
# Clean up some cruft from the winapi crates. FIXME: find
|
||||
# a way to remove winapi* from our dependencies.
|
||||
if [[ $dir =~ /winapi ]]; then
|
||||
find $dir -name "*.a" -print0 | xargs -0 rm -f --
|
||||
fi
|
||||
|
||||
mv "$dir" $out/vendor/
|
||||
|
||||
rm -rf $out/vendor/tmp
|
||||
'') files)}
|
||||
'';
|
||||
|
||||
jobs = rec {
|
||||
|
||||
# Create a "vendor" directory that contains the crates listed in
|
||||
# Cargo.lock. This allows Nix to be built without network access.
|
||||
vendoredCrates =
|
||||
let
|
||||
lockFile = builtins.fromTOML (builtins.readFile nix-rust/Cargo.lock);
|
||||
|
||||
files = map (pkg: import <nix/fetchurl.nix> {
|
||||
url = "https://crates.io/api/v1/crates/${pkg.name}/${pkg.version}/download";
|
||||
sha256 = lockFile.metadata."checksum ${pkg.name} ${pkg.version} (registry+https://github.com/rust-lang/crates.io-index)";
|
||||
}) (builtins.filter (pkg: pkg.source or "" == "registry+https://github.com/rust-lang/crates.io-index") lockFile.package);
|
||||
|
||||
in pkgs.runCommand "cargo-vendor-dir" {}
|
||||
with pkgs;
|
||||
runCommand "vendored-crates" {}
|
||||
''
|
||||
mkdir -p $out/vendor
|
||||
|
||||
cat > $out/vendor/config <<EOF
|
||||
[source.crates-io]
|
||||
replace-with = "vendored-sources"
|
||||
|
||||
[source.vendored-sources]
|
||||
directory = "vendor"
|
||||
EOF
|
||||
|
||||
${toString (builtins.map (file: ''
|
||||
mkdir $out/vendor/tmp
|
||||
tar xvf ${file} -C $out/vendor/tmp
|
||||
dir=$(echo $out/vendor/tmp/*)
|
||||
|
||||
# Add just enough metadata to keep Cargo happy.
|
||||
printf '{"files":{},"package":"${file.outputHash}"}' > "$dir/.cargo-checksum.json"
|
||||
|
||||
# Clean up some cruft from the winapi crates. FIXME: find
|
||||
# a way to remove winapi* from our dependencies.
|
||||
if [[ $dir =~ /winapi ]]; then
|
||||
find $dir -name "*.a" -print0 | xargs -0 rm -f --
|
||||
fi
|
||||
|
||||
mv "$dir" $out/vendor/
|
||||
|
||||
rm -rf $out/vendor/tmp
|
||||
'') files)}
|
||||
mkdir -p $out/nix-support
|
||||
name=nix-vendored-crates-${version}
|
||||
fn=$out/$name.tar.xz
|
||||
tar cvfJ $fn -C ${vendoredCrates'} vendor \
|
||||
--owner=0 --group=0 --mode=u+rw,uga+r \
|
||||
--transform "s,vendor,$name,"
|
||||
echo "file crates-tarball $fn" >> $out/nix-support/hydra-build-products
|
||||
'';
|
||||
|
||||
|
||||
build = pkgs.lib.genAttrs systems (system:
|
||||
|
||||
let pkgs = import nixpkgs { inherit system; }; in
|
||||
|
@ -89,7 +101,7 @@ let
|
|||
patchelf --set-rpath $out/lib:${stdenv.cc.cc.lib}/lib $out/lib/libboost_thread.so.*
|
||||
''}
|
||||
|
||||
ln -sfn ${vendoredCrates}/vendor/ nix-rust/vendor
|
||||
ln -sfn ${vendoredCrates'}/vendor/ nix-rust/vendor
|
||||
|
||||
(cd perl; autoreconf --install --force --verbose)
|
||||
'';
|
||||
|
@ -103,17 +115,17 @@ let
|
|||
|
||||
installFlags = "sysconfdir=$(out)/etc";
|
||||
|
||||
postInstall = ''
|
||||
mkdir -p $doc/nix-support
|
||||
echo "doc manual $doc/share/doc/nix/manual" >> $doc/nix-support/hydra-build-products
|
||||
'';
|
||||
|
||||
doCheck = true;
|
||||
|
||||
doInstallCheck = true;
|
||||
installCheckFlags = "sysconfdir=$(out)/etc";
|
||||
|
||||
separateDebugInfo = true;
|
||||
|
||||
preDist = ''
|
||||
mkdir -p $doc/nix-support
|
||||
echo "doc manual $doc/share/doc/nix/manual" >> $doc/nix-support/hydra-build-products
|
||||
'';
|
||||
});
|
||||
|
||||
|
||||
|
@ -165,10 +177,10 @@ let
|
|||
}
|
||||
''
|
||||
cp ${installerClosureInfo}/registration $TMPDIR/reginfo
|
||||
cp ${./scripts/create-darwin-volume.sh} $TMPDIR/create-darwin-volume.sh
|
||||
substitute ${./scripts/install-nix-from-closure.sh} $TMPDIR/install \
|
||||
--subst-var-by nix ${toplevel} \
|
||||
--subst-var-by cacert ${cacert}
|
||||
|
||||
substitute ${./scripts/install-darwin-multi-user.sh} $TMPDIR/install-darwin-multi-user.sh \
|
||||
--subst-var-by nix ${toplevel} \
|
||||
--subst-var-by cacert ${cacert}
|
||||
|
@ -183,6 +195,7 @@ let
|
|||
# SC1090: Don't worry about not being able to find
|
||||
# $nix/etc/profile.d/nix.sh
|
||||
shellcheck --exclude SC1090 $TMPDIR/install
|
||||
shellcheck $TMPDIR/create-darwin-volume.sh
|
||||
shellcheck $TMPDIR/install-darwin-multi-user.sh
|
||||
shellcheck $TMPDIR/install-systemd-multi-user.sh
|
||||
|
||||
|
@ -198,6 +211,7 @@ let
|
|||
fi
|
||||
|
||||
chmod +x $TMPDIR/install
|
||||
chmod +x $TMPDIR/create-darwin-volume.sh
|
||||
chmod +x $TMPDIR/install-darwin-multi-user.sh
|
||||
chmod +x $TMPDIR/install-systemd-multi-user.sh
|
||||
chmod +x $TMPDIR/install-multi-user
|
||||
|
@ -210,11 +224,15 @@ let
|
|||
--absolute-names \
|
||||
--hard-dereference \
|
||||
--transform "s,$TMPDIR/install,$dir/install," \
|
||||
--transform "s,$TMPDIR/create-darwin-volume.sh,$dir/create-darwin-volume.sh," \
|
||||
--transform "s,$TMPDIR/reginfo,$dir/.reginfo," \
|
||||
--transform "s,$NIX_STORE,$dir/store,S" \
|
||||
$TMPDIR/install $TMPDIR/install-darwin-multi-user.sh \
|
||||
$TMPDIR/install \
|
||||
$TMPDIR/create-darwin-volume.sh \
|
||||
$TMPDIR/install-darwin-multi-user.sh \
|
||||
$TMPDIR/install-systemd-multi-user.sh \
|
||||
$TMPDIR/install-multi-user $TMPDIR/reginfo \
|
||||
$TMPDIR/install-multi-user \
|
||||
$TMPDIR/reginfo \
|
||||
$(cat ${installerClosureInfo}/store-paths)
|
||||
'');
|
||||
|
||||
|
@ -229,6 +247,11 @@ let
|
|||
|
||||
src = nix;
|
||||
|
||||
preConfigure =
|
||||
''
|
||||
ln -sfn ${vendoredCrates'}/vendor/ nix-rust/vendor
|
||||
'';
|
||||
|
||||
enableParallelBuilding = true;
|
||||
|
||||
buildInputs = buildDeps ++ propagatedDeps;
|
||||
|
|
185
scripts/create-darwin-volume.sh
Executable file
@@ -0,0 +1,185 @@
|
|||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
root_disk() {
|
||||
diskutil info -plist /
|
||||
}
|
||||
|
||||
apfs_volumes_for() {
|
||||
disk=$1
|
||||
diskutil apfs list -plist "$disk"
|
||||
}
|
||||
|
||||
disk_identifier() {
|
||||
xpath "/plist/dict/key[text()='ParentWholeDisk']/following-sibling::string[1]/text()" 2>/dev/null
|
||||
}
|
||||
|
||||
volume_list_true() {
|
||||
key=$1
|
||||
xpath "/plist/dict/array/dict/key[text()='Volumes']/following-sibling::array/dict/key[text()='$key']/following-sibling::true[1]" 2> /dev/null
|
||||
}
|
||||
|
||||
volume_get_string() {
|
||||
key=$1 i=$2
|
||||
xpath "/plist/dict/array/dict/key[text()='Volumes']/following-sibling::array/dict[$i]/key[text()='$key']/following-sibling::string[1]/text()" 2> /dev/null
|
||||
}
|
||||
|
||||
find_nix_volume() {
|
||||
disk=$1
|
||||
i=1
|
||||
volumes=$(apfs_volumes_for "$disk")
|
||||
while true; do
|
||||
name=$(echo "$volumes" | volume_get_string "Name" "$i")
|
||||
if [ -z "$name" ]; then
|
||||
break
|
||||
fi
|
||||
case "$name" in
|
||||
[Nn]ix*)
|
||||
echo "$name"
|
||||
break
|
||||
;;
|
||||
esac
|
||||
i=$((i+1))
|
||||
done
|
||||
}
|
||||
|
||||
test_fstab() {
|
||||
grep -q "/nix apfs rw" /etc/fstab 2>/dev/null
|
||||
}
|
||||
|
||||
test_nix_symlink() {
|
||||
[ -L "/nix" ] || grep -q "^nix." /etc/synthetic.conf 2>/dev/null
|
||||
}
|
||||
|
||||
test_synthetic_conf() {
|
||||
grep -q "^nix$" /etc/synthetic.conf 2>/dev/null
|
||||
}
|
||||
|
||||
test_nix() {
|
||||
test -d "/nix"
|
||||
}
|
||||
|
||||
test_t2_chip_present(){
|
||||
# Use xartutil to see if system has a t2 chip.
|
||||
#
|
||||
# This isn't well-documented on its own; until it is,
|
||||
# let's keep track of knowledge/assumptions.
|
||||
#
|
||||
# Warnings:
|
||||
# - Don't search "xart" if porn will cause you trouble :)
|
||||
# - Other xartutil flags do dangerous things. Don't run them
|
||||
# naively. If you must, search "xartutil" first.
|
||||
#
|
||||
# Assumptions:
|
||||
# - the "xART session seeds recovery utility"
|
||||
# appears to interact with xartstorageremoted
|
||||
# - `sudo xartutil --list` lists xART sessions
|
||||
# and their seeds and exits 0 if successful. If
|
||||
# not, it exits 1 and prints an error such as:
|
||||
# xartutil: ERROR: No supported link to the SEP present
|
||||
# - xART sessions/seeds are present when a T2 chip is
|
||||
# (and not, otherwise)
|
||||
# - the presence of a T2 chip means a newly-created
|
||||
# volume on the primary drive will be
|
||||
# encrypted at rest
|
||||
# - all together: `sudo xartutil --list`
|
||||
# should exit 0 if a new Nix Store volume will
|
||||
# be encrypted at rest, and exit 1 if not.
|
||||
sudo xartutil --list >/dev/null 2>/dev/null
|
||||
}
|
||||
|
||||
test_filevault_in_use() {
|
||||
disk=$1
|
||||
# list vols on disk | get value of Filevault key | value is true
|
||||
apfs_volumes_for "$disk" | volume_list_true FileVault | grep -q true
|
||||
}
|
||||
|
||||
# use after error msg for conditions we don't understand
|
||||
suggest_report_error(){
|
||||
# ex "error: something sad happened :(" >&2
|
||||
echo " please report this @ https://github.com/nixos/nix/issues" >&2
|
||||
}
|
||||
|
||||
main() {
|
||||
(
|
||||
echo ""
|
||||
echo " ------------------------------------------------------------------ "
|
||||
echo " | This installer will create a volume for the nix store and |"
|
||||
echo " | configure it to mount at /nix. Follow these steps to uninstall. |"
|
||||
echo " ------------------------------------------------------------------ "
|
||||
echo ""
|
||||
echo " 1. Remove the entry from fstab using 'sudo vifs'"
|
||||
echo " 2. Destroy the data volume using 'diskutil apfs deleteVolume'"
|
||||
echo " 3. Remove the 'nix' line from /etc/synthetic.conf or the file"
|
||||
echo ""
|
||||
) >&2
|
||||
|
||||
if test_nix_symlink; then
|
||||
echo "error: /nix is a symlink, please remove it and make sure it's not in synthetic.conf (in which case a reboot is required)" >&2
|
||||
echo " /nix -> $(readlink "/nix")" >&2
|
||||
exit 2
|
||||
fi
|
||||
|
||||
if ! test_synthetic_conf; then
|
||||
echo "Configuring /etc/synthetic.conf..." >&2
|
||||
echo nix | sudo tee -a /etc/synthetic.conf
|
||||
if ! test_synthetic_conf; then
|
||||
echo "error: failed to configure synthetic.conf;" >&2
|
||||
suggest_report_error
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if ! test_nix; then
|
||||
echo "Creating mountpoint for /nix..." >&2
|
||||
/System/Library/Filesystems/apfs.fs/Contents/Resources/apfs.util -B || true
|
||||
if ! test_nix; then
|
||||
sudo mkdir -p /nix 2>/dev/null || true
|
||||
fi
|
||||
if ! test_nix; then
|
||||
echo "error: failed to bootstrap /nix; if a reboot doesn't help," >&2
|
||||
suggest_report_error
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
disk=$(root_disk | disk_identifier)
|
||||
volume=$(find_nix_volume "$disk")
|
||||
if [ -z "$volume" ]; then
|
||||
echo "Creating a Nix Store volume..." >&2
|
||||
|
||||
if test_filevault_in_use "$disk"; then
|
||||
# TODO: Not sure if it's in-scope now, but `diskutil apfs list`
|
||||
# shows both filevault and encrypted at rest status, and it
|
||||
# may be the more semantic way to test for this? It'll show
|
||||
# `FileVault: No (Encrypted at rest)`
|
||||
# `FileVault: No`
|
||||
# `FileVault: Yes (Unlocked)`
|
||||
# and so on.
|
||||
if test_t2_chip_present; then
|
||||
echo "warning: boot volume is FileVault-encrypted, but the Nix store volume" >&2
|
||||
echo " is only encrypted at rest." >&2
|
||||
echo " See https://nixos.org/nix/manual/#sect-macos-installation" >&2
|
||||
else
|
||||
echo "error: refusing to create Nix store volume because the boot volume is" >&2
|
||||
echo " FileVault encrypted, but encryption-at-rest is not available." >&2
|
||||
echo " Manually create a volume for the store and re-run this script." >&2
|
||||
echo " See https://nixos.org/nix/manual/#sect-macos-installation" >&2
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
sudo diskutil apfs addVolume "$disk" APFS 'Nix Store' -mountpoint /nix
|
||||
volume="Nix Store"
|
||||
else
|
||||
echo "Using existing '$volume' volume" >&2
|
||||
fi
|
||||
|
||||
if ! test_fstab; then
|
||||
echo "Configuring /etc/fstab..." >&2
|
||||
label=$(echo "$volume" | sed 's/ /\\040/g')
|
||||
printf "\$a\nLABEL=%s /nix apfs rw,nobrowse\n.\nwq\n" "$label" | EDITOR=ed sudo vifs
|
||||
fi
|
||||
}
|
||||
|
||||
main "$@"
|
|
@@ -13,22 +13,25 @@ set -o pipefail
# however tracking which bits came from which would be impossible.

readonly ESC='\033[0m'
readonly BOLD='\033[38;1m'
readonly BLUE='\033[38;34m'
readonly BLUE_UL='\033[38;4;34m'
readonly GREEN='\033[38;32m'
readonly GREEN_UL='\033[38;4;32m'
readonly RED='\033[38;31m'
readonly BOLD='\033[1m'
readonly BLUE='\033[34m'
readonly BLUE_UL='\033[4;34m'
readonly GREEN='\033[32m'
readonly GREEN_UL='\033[4;32m'
readonly RED='\033[31m'

readonly NIX_USER_COUNT="32"
# installer allows overriding build user count to speed up installation
# as creating each user takes non-trivial amount of time on macos
readonly NIX_USER_COUNT=${NIX_USER_COUNT:-32}
readonly NIX_BUILD_GROUP_ID="30000"
readonly NIX_BUILD_GROUP_NAME="nixbld"
readonly NIX_FIRST_BUILD_UID="30001"
# Please don't change this. We don't support it, because the
# default shell profile that comes with Nix doesn't support it.
readonly NIX_ROOT="/nix"
readonly NIX_EXTRA_CONF=${NIX_EXTRA_CONF:-}

readonly PROFILE_TARGETS=("/etc/bashrc" "/etc/profile.d/nix.sh" "/etc/zshrc")
readonly PROFILE_TARGETS=("/etc/bashrc" "/etc/profile.d/nix.sh" "/etc/zshenv")
readonly PROFILE_BACKUP_SUFFIX=".backup-before-nix"
readonly PROFILE_NIX_FILE="$NIX_ROOT/var/nix/profiles/default/etc/profile.d/nix-daemon.sh"
@ -450,9 +453,11 @@ create_directories() {
|
|||
}
|
||||
|
||||
place_channel_configuration() {
|
||||
echo "https://nixos.org/channels/nixpkgs-unstable nixpkgs" > "$SCRATCH/.nix-channels"
|
||||
_sudo "to set up the default system channel (part 1)" \
|
||||
install -m 0664 "$SCRATCH/.nix-channels" "$ROOT_HOME/.nix-channels"
|
||||
if [ -z "${NIX_INSTALLER_NO_CHANNEL_ADD:-}" ]; then
|
||||
echo "https://nixos.org/channels/nixpkgs-unstable nixpkgs" > "$SCRATCH/.nix-channels"
|
||||
_sudo "to set up the default system channel (part 1)" \
|
||||
install -m 0664 "$SCRATCH/.nix-channels" "$ROOT_HOME/.nix-channels"
|
||||
fi
|
||||
}
|
||||
|
||||
welcome_to_nix() {
|
||||
|
@ -567,7 +572,7 @@ install_from_extracted_nix() {
|
|||
cd "$EXTRACTED_NIX_PATH"
|
||||
|
||||
_sudo "to copy the basic Nix files to the new store at $NIX_ROOT/store" \
|
||||
rsync -rlpt ./store/* "$NIX_ROOT/store/"
|
||||
rsync -rlpt --chmod=-w ./store/* "$NIX_ROOT/store/"
|
||||
|
||||
if [ -d "$NIX_INSTALLED_NIX" ]; then
|
||||
echo " Alright! We have our first nix at $NIX_INSTALLED_NIX"
|
||||
|
@ -634,18 +639,20 @@ setup_default_profile() {
|
|||
export NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt
|
||||
fi
|
||||
|
||||
# Have to explicitly pass NIX_SSL_CERT_FILE as part of the sudo call,
|
||||
# otherwise it will be lost in environments where sudo doesn't pass
|
||||
# all the environment variables by default.
|
||||
_sudo "to update the default channel in the default profile" \
|
||||
HOME="$ROOT_HOME" NIX_SSL_CERT_FILE="$NIX_SSL_CERT_FILE" "$NIX_INSTALLED_NIX/bin/nix-channel" --update nixpkgs \
|
||||
|| channel_update_failed=1
|
||||
|
||||
if [ -z "${NIX_INSTALLER_NO_CHANNEL_ADD:-}" ]; then
|
||||
# Have to explicitly pass NIX_SSL_CERT_FILE as part of the sudo call,
|
||||
# otherwise it will be lost in environments where sudo doesn't pass
|
||||
# all the environment variables by default.
|
||||
_sudo "to update the default channel in the default profile" \
|
||||
HOME="$ROOT_HOME" NIX_SSL_CERT_FILE="$NIX_SSL_CERT_FILE" "$NIX_INSTALLED_NIX/bin/nix-channel" --update nixpkgs \
|
||||
|| channel_update_failed=1
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
place_nix_configuration() {
|
||||
cat <<EOF > "$SCRATCH/nix.conf"
|
||||
$NIX_EXTRA_CONF
|
||||
build-users-group = $NIX_BUILD_GROUP_NAME
|
||||
EOF
|
||||
_sudo "to place the default nix daemon configuration (part 2)" \
|
||||
|
|
|
@ -40,29 +40,85 @@ elif [ "$(uname -s)" = "Linux" ] && [ -e /run/systemd/system ]; then
|
|||
fi
|
||||
|
||||
INSTALL_MODE=no-daemon
|
||||
# Trivially handle the --daemon / --no-daemon options
|
||||
if [ "x${1:-}" = "x--no-daemon" ]; then
|
||||
INSTALL_MODE=no-daemon
|
||||
elif [ "x${1:-}" = "x--daemon" ]; then
|
||||
INSTALL_MODE=daemon
|
||||
elif [ "x${1:-}" != "x" ]; then
|
||||
(
|
||||
echo "Nix Installer [--daemon|--no-daemon]"
|
||||
CREATE_DARWIN_VOLUME=0
|
||||
# handle the command line flags
|
||||
while [ $# -gt 0 ]; do
|
||||
case $1 in
|
||||
--daemon)
|
||||
INSTALL_MODE=daemon;;
|
||||
--no-daemon)
|
||||
INSTALL_MODE=no-daemon;;
|
||||
--no-channel-add)
|
||||
export NIX_INSTALLER_NO_CHANNEL_ADD=1;;
|
||||
--daemon-user-count)
|
||||
export NIX_USER_COUNT=$2
|
||||
shift;;
|
||||
--no-modify-profile)
|
||||
NIX_INSTALLER_NO_MODIFY_PROFILE=1;;
|
||||
--darwin-use-unencrypted-nix-store-volume)
|
||||
CREATE_DARWIN_VOLUME=1;;
|
||||
--nix-extra-conf-file)
|
||||
export NIX_EXTRA_CONF="$(cat $2)"
|
||||
shift;;
|
||||
*)
|
||||
(
|
||||
echo "Nix Installer [--daemon|--no-daemon] [--daemon-user-count INT] [--no-channel-add] [--no-modify-profile] [--darwin-use-unencrypted-nix-store-volume] [--nix-extra-conf-file FILE]"
|
||||
|
||||
echo "Choose installation method."
|
||||
echo ""
|
||||
echo " --daemon: Installs and configures a background daemon that manages the store,"
|
||||
echo " providing multi-user support and better isolation for local builds."
|
||||
echo " Both for security and reproducibility, this method is recommended if"
|
||||
echo " supported on your platform."
|
||||
echo " See https://nixos.org/nix/manual/#sect-multi-user-installation"
|
||||
echo ""
|
||||
echo " --no-daemon: Simple, single-user installation that does not require root and is"
|
||||
echo " trivial to uninstall."
|
||||
echo " (default)"
|
||||
echo ""
|
||||
) >&2
|
||||
exit
|
||||
echo "Choose installation method."
|
||||
echo ""
|
||||
echo " --daemon: Installs and configures a background daemon that manages the store,"
|
||||
echo " providing multi-user support and better isolation for local builds."
|
||||
echo " Both for security and reproducibility, this method is recommended if"
|
||||
echo " supported on your platform."
|
||||
echo " See https://nixos.org/nix/manual/#sect-multi-user-installation"
|
||||
echo ""
|
||||
echo " --no-daemon: Simple, single-user installation that does not require root and is"
|
||||
echo " trivial to uninstall."
|
||||
echo " (default)"
|
||||
echo ""
|
||||
echo " --no-channel-add: Don't add any channels. nixpkgs-unstable is installed by default."
|
||||
echo ""
|
||||
echo " --no-modify-profile: Skip channel installation. When not provided nixpkgs-unstable"
|
||||
echo " is installed by default."
|
||||
echo ""
|
||||
echo " --daemon-user-count: Number of build users to create. Defaults to 32."
|
||||
echo ""
|
||||
echo " --nix-extra-conf-file: Path to nix.conf to prepend when installing /etc/nix.conf"
|
||||
echo ""
|
||||
) >&2
|
||||
|
||||
# darwin and Catalina+
|
||||
if [ "$(uname -s)" = "Darwin" ] && [ "$macos_major" -gt 14 ]; then
|
||||
(
|
||||
echo " --darwin-use-unencrypted-nix-store-volume: Create an APFS volume for the Nix"
|
||||
echo " store and mount it at /nix. This is the recommended way to create"
|
||||
echo " /nix with a read-only / on macOS >=10.15."
|
||||
echo " See: https://nixos.org/nix/manual/#sect-macos-installation"
|
||||
echo ""
|
||||
) >&2
|
||||
fi
|
||||
exit;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
if [ "$(uname -s)" = "Darwin" ]; then
|
||||
if [ "$CREATE_DARWIN_VOLUME" = 1 ]; then
|
||||
printf '\e[1;31mCreating volume and mountpoint /nix.\e[0m\n'
|
||||
"$self/create-darwin-volume.sh"
|
||||
fi
|
||||
|
||||
info=$(diskutil info -plist / | xpath "/plist/dict/key[text()='Writable']/following-sibling::true[1]" 2> /dev/null)
|
||||
if ! [ -e $dest ] && [ -n "$info" ] && [ "$macos_major" -gt 14 ]; then
|
||||
(
|
||||
echo ""
|
||||
echo "Installing on macOS >=10.15 requires relocating the store to an apfs volume."
|
||||
echo "Use sh <(curl https://nixos.org/nix/install) --darwin-use-unencrypted-nix-store-volume or run the preparation steps manually."
|
||||
echo "See https://nixos.org/nix/manual/#sect-macos-installation"
|
||||
echo ""
|
||||
) >&2
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$INSTALL_MODE" = "daemon" ]; then
|
||||
|
@ -130,13 +186,15 @@ if [ -z "$NIX_SSL_CERT_FILE" ] || ! [ -f "$NIX_SSL_CERT_FILE" ]; then
|
|||
fi
|
||||
|
||||
# Subscribe the user to the Nixpkgs channel and fetch it.
|
||||
if ! $nix/bin/nix-channel --list | grep -q "^nixpkgs "; then
|
||||
$nix/bin/nix-channel --add https://nixos.org/channels/nixpkgs-unstable
|
||||
fi
|
||||
if [ -z "$_NIX_INSTALLER_TEST" ]; then
|
||||
if ! $nix/bin/nix-channel --update nixpkgs; then
|
||||
echo "Fetching the nixpkgs channel failed. (Are you offline?)"
|
||||
echo "To try again later, run \"nix-channel --update nixpkgs\"."
|
||||
if [ -z "$NIX_INSTALLER_NO_CHANNEL_ADD" ]; then
|
||||
if ! $nix/bin/nix-channel --list | grep -q "^nixpkgs "; then
|
||||
$nix/bin/nix-channel --add https://nixos.org/channels/nixpkgs-unstable
|
||||
fi
|
||||
if [ -z "$_NIX_INSTALLER_TEST" ]; then
|
||||
if ! $nix/bin/nix-channel --update nixpkgs; then
|
||||
echo "Fetching the nixpkgs channel failed. (Are you offline?)"
|
||||
echo "To try again later, run \"nix-channel --update nixpkgs\"."
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
|
@ -155,6 +213,17 @@ if [ -z "$NIX_INSTALLER_NO_MODIFY_PROFILE" ]; then
|
|||
break
|
||||
fi
|
||||
done
|
||||
for i in .zshenv .zshrc; do
|
||||
fn="$HOME/$i"
|
||||
if [ -w "$fn" ]; then
|
||||
if ! grep -q "$p" "$fn"; then
|
||||
echo "modifying $fn..." >&2
|
||||
echo "if [ -e $p ]; then . $p; fi # added by Nix installer" >> "$fn"
|
||||
fi
|
||||
added=1
|
||||
break
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
if [ -z "$added" ]; then
|
||||
|
|
|
@ -36,6 +36,9 @@ tarball="$tmpDir/$(basename "$tmpDir/nix-@nixVersion@-$system.tar.xz")"
|
|||
|
||||
require_util curl "download the binary tarball"
|
||||
require_util tar "unpack the binary tarball"
|
||||
if [ "$(uname -s)" != "Darwin" ]; then
|
||||
require_util xz "unpack the binary tarball"
|
||||
fi
|
||||
|
||||
echo "downloading Nix @nixVersion@ binary tarball for $system from '$url' to '$tmpDir'..."
|
||||
curl -L "$url" -o "$tarball" || oops "failed to download '$url'"
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
#include "store-api.hh"
|
||||
#include "derivations.hh"
|
||||
#include "local-store.hh"
|
||||
#include "legacy.hh"
|
||||
#include "../nix/legacy.hh"
|
||||
|
||||
using namespace nix;
|
||||
using std::cin;
|
||||
|
|
66
src/error-demo/error-demo.cc
Normal file
@@ -0,0 +1,66 @@
#include "error.hh"
|
||||
#include "nixexpr.hh"
|
||||
|
||||
#include <iostream>
|
||||
#include <optional>
|
||||
|
||||
int main()
|
||||
{
|
||||
using namespace nix;
|
||||
|
||||
// In each program where errors occur, this has to be set.
|
||||
ErrorInfo::programName = std::optional("error-demo");
|
||||
|
||||
// Error in a program; no hint and no nix code.
|
||||
printErrorInfo(
|
||||
ErrorInfo { .level = elError,
|
||||
.name = "name",
|
||||
.description = "error description",
|
||||
});
|
||||
|
||||
// Warning with name, description, and hint.
|
||||
// The hintfmt function makes all the substituted text yellow.
|
||||
printErrorInfo(
|
||||
ErrorInfo { .level = elWarning,
|
||||
.name = "name",
|
||||
.description = "error description",
|
||||
.hint = std::optional(
|
||||
hintfmt("there was a %1%", "warning")),
|
||||
});
|
||||
|
||||
|
||||
// Warning with nix file, line number, column, and the lines of
|
||||
// code where a warning occurred.
|
||||
SymbolTable testTable;
|
||||
auto problem_file = testTable.create("myfile.nix");
|
||||
|
||||
printErrorInfo(
|
||||
ErrorInfo{
|
||||
.level = elWarning,
|
||||
.name = "warning name",
|
||||
.description = "warning description",
|
||||
.hint = hintfmt("this hint has %1% templated %2%!!", "yellow", "values"),
|
||||
.nixCode = NixCode {
|
||||
.errPos = Pos(problem_file, 40, 13),
|
||||
.prevLineOfCode = std::nullopt,
|
||||
.errLineOfCode = "this is the problem line of code",
|
||||
.nextLineOfCode = std::nullopt
|
||||
}});
|
||||
|
||||
// Error with previous and next lines of code.
|
||||
printErrorInfo(
|
||||
ErrorInfo{
|
||||
.level = elError,
|
||||
.name = "error name",
|
||||
.description = "error description",
|
||||
.hint = hintfmt("this hint has %1% templated %2%!!", "yellow", "values"),
|
||||
.nixCode = NixCode {
|
||||
.errPos = Pos(problem_file, 40, 13),
|
||||
.prevLineOfCode = std::optional("previous line of code"),
|
||||
.errLineOfCode = "this is the problem line of code",
|
||||
.nextLineOfCode = std::optional("next line of code"),
|
||||
}});
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
12
src/error-demo/local.mk
Normal file
@@ -0,0 +1,12 @@
programs += error-demo

error-demo_DIR := $(d)

error-demo_SOURCES := \
  $(wildcard $(d)/*.cc) \

error-demo_CXXFLAGS += -I src/libutil -I src/libexpr

error-demo_LIBS = libutil libexpr

error-demo_LDFLAGS = -pthread $(SODIUM_LIBS) $(EDITLINE_LIBS) $(BOOST_LDFLAGS) -lboost_context -lboost_thread -lboost_system
|
|
@ -32,15 +32,13 @@ static Strings parseAttrPath(const string & s)
|
|||
}
|
||||
|
||||
|
||||
Value * findAlongAttrPath(EvalState & state, const string & attrPath,
|
||||
std::pair<Value *, Pos> findAlongAttrPath(EvalState & state, const string & attrPath,
|
||||
Bindings & autoArgs, Value & vIn)
|
||||
{
|
||||
Strings tokens = parseAttrPath(attrPath);
|
||||
|
||||
Error attrError =
|
||||
Error(format("attribute selection path '%1%' does not match expression") % attrPath);
|
||||
|
||||
Value * v = &vIn;
|
||||
Pos pos = noPos;
|
||||
|
||||
for (auto & attr : tokens) {
|
||||
|
||||
|
@ -70,8 +68,9 @@ Value * findAlongAttrPath(EvalState & state, const string & attrPath,
|
|||
|
||||
Bindings::iterator a = v->attrs->find(state.symbols.create(attr));
|
||||
if (a == v->attrs->end())
|
||||
throw Error(format("attribute '%1%' in selection path '%2%' not found") % attr % attrPath);
|
||||
throw AttrPathNotFound("attribute '%1%' in selection path '%2%' not found", attr, attrPath);
|
||||
v = &*a->value;
|
||||
pos = *a->pos;
|
||||
}
|
||||
|
||||
else if (apType == apIndex) {
|
||||
|
@ -82,14 +81,15 @@ Value * findAlongAttrPath(EvalState & state, const string & attrPath,
|
|||
% attrPath % showType(*v));
|
||||
|
||||
if (attrIndex >= v->listSize())
|
||||
throw Error(format("list index %1% in selection path '%2%' is out of range") % attrIndex % attrPath);
|
||||
throw AttrPathNotFound("list index %1% in selection path '%2%' is out of range", attrIndex, attrPath);
|
||||
|
||||
v = v->listElems()[attrIndex];
|
||||
pos = noPos;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return v;
|
||||
return {v, pos};
|
||||
}
|
||||
|
||||
|
||||
|
@ -98,9 +98,9 @@ Pos findDerivationFilename(EvalState & state, Value & v, std::string what)
|
|||
Value * v2;
|
||||
try {
|
||||
auto dummyArgs = state.allocBindings(0);
|
||||
v2 = findAlongAttrPath(state, "meta.position", *dummyArgs, v);
|
||||
v2 = findAlongAttrPath(state, "meta.position", *dummyArgs, v).first;
|
||||
} catch (Error &) {
|
||||
throw Error("package '%s' has no source location information", what);
|
||||
throw NoPositionInfo("package '%s' has no source location information", what);
|
||||
}
|
||||
|
||||
// FIXME: is it possible to extract the Pos object instead of doing this
|
||||
|
|
|
@@ -7,7 +7,10 @@

namespace nix {

Value * findAlongAttrPath(EvalState & state, const string & attrPath,
MakeError(AttrPathNotFound, Error);
MakeError(NoPositionInfo, Error);

std::pair<Value *, Pos> findAlongAttrPath(EvalState & state, const string & attrPath,
    Bindings & autoArgs, Value & vIn);

/* Heuristic to find the filename and lineno or a nix value. */
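findAlongAttrPath now returns both the selected value and the position at which it was defined, and reports lookup failures through the dedicated AttrPathNotFound error rather than a generic Error. A minimal sketch of how a caller might use the new signature; it assumes the usual libexpr headers, an initialized EvalState, and an attribute path ("foo.bar") chosen purely for illustration:

    // Sketch only: consuming the pair-returning findAlongAttrPath.
    #include "attr-path.hh"
    #include "eval.hh"
    #include <iostream>

    void showAttrPosition(EvalState & state, Bindings & autoArgs, Value & vRoot)
    {
        try {
            auto [v, pos] = findAlongAttrPath(state, "foo.bar", autoArgs, vRoot);
            // 'pos' is noPos when no definition site was recorded.
            std::cout << "selected a " << showType(*v) << " defined at " << pos << "\n";
        } catch (AttrPathNotFound &) {
            std::cerr << "attribute path 'foo.bar' did not match the expression\n";
        }
    }

Callers that only need the value, such as findDerivationFilename earlier in this diff, simply take the .first element of the returned pair.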
|
||||
|
|
|
@ -43,6 +43,12 @@ Value * EvalState::allocAttr(Value & vAttrs, const Symbol & name)
|
|||
}
|
||||
|
||||
|
||||
Value * EvalState::allocAttr(Value & vAttrs, const std::string & name)
|
||||
{
|
||||
return allocAttr(vAttrs, symbols.create(name));
|
||||
}
|
||||
|
||||
|
||||
void Bindings::sort()
|
||||
{
|
||||
std::sort(begin(), end());
|
||||
|
|
|
@ -1,31 +1,36 @@
|
|||
#include "common-eval-args.hh"
|
||||
#include "shared.hh"
|
||||
#include "download.hh"
|
||||
#include "filetransfer.hh"
|
||||
#include "util.hh"
|
||||
#include "eval.hh"
|
||||
#include "fetchers.hh"
|
||||
#include "store-api.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
MixEvalArgs::MixEvalArgs()
|
||||
{
|
||||
mkFlag()
|
||||
.longName("arg")
|
||||
.description("argument to be passed to Nix functions")
|
||||
.labels({"name", "expr"})
|
||||
.handler([&](std::vector<std::string> ss) { autoArgs[ss[0]] = 'E' + ss[1]; });
|
||||
addFlag({
|
||||
.longName = "arg",
|
||||
.description = "argument to be passed to Nix functions",
|
||||
.labels = {"name", "expr"},
|
||||
.handler = {[&](std::string name, std::string expr) { autoArgs[name] = 'E' + expr; }}
|
||||
});
|
||||
|
||||
mkFlag()
|
||||
.longName("argstr")
|
||||
.description("string-valued argument to be passed to Nix functions")
|
||||
.labels({"name", "string"})
|
||||
.handler([&](std::vector<std::string> ss) { autoArgs[ss[0]] = 'S' + ss[1]; });
|
||||
addFlag({
|
||||
.longName = "argstr",
|
||||
.description = "string-valued argument to be passed to Nix functions",
|
||||
.labels = {"name", "string"},
|
||||
.handler = {[&](std::string name, std::string s) { autoArgs[name] = 'S' + s; }},
|
||||
});
|
||||
|
||||
mkFlag()
|
||||
.shortName('I')
|
||||
.longName("include")
|
||||
.description("add a path to the list of locations used to look up <...> file names")
|
||||
.label("path")
|
||||
.handler([&](std::string s) { searchPath.push_back(s); });
|
||||
addFlag({
|
||||
.longName = "include",
|
||||
.shortName = 'I',
|
||||
.description = "add a path to the list of locations used to look up <...> file names",
|
||||
.labels = {"path"},
|
||||
.handler = {[&](std::string s) { searchPath.push_back(s); }}
|
||||
});
|
||||
}
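The options above migrate from the chained mkFlag() builder to the declarative addFlag() form with designated initializers, which keeps a flag's name, description, labels and handler together in one literal. As a hedged illustration only, one more (made-up) option registered in the same style would look like this:

    // Illustrative only: a hypothetical flag in the new declarative style,
    // mirroring the '--argstr' handling shown above.
    addFlag({
        .longName = "example-argstr",
        .description = "string-valued example argument (hypothetical)",
        .labels = {"name", "value"},
        .handler = {[&](std::string name, std::string value) {
            autoArgs[name] = 'S' + value;
        }}
    });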
|
||||
|
||||
Bindings * MixEvalArgs::getAutoArgs(EvalState & state)
|
||||
|
@ -46,9 +51,9 @@ Bindings * MixEvalArgs::getAutoArgs(EvalState & state)
|
|||
Path lookupFileArg(EvalState & state, string s)
|
||||
{
|
||||
if (isUri(s)) {
|
||||
CachedDownloadRequest request(s);
|
||||
request.unpack = true;
|
||||
return getDownloader()->downloadCached(state.store, request).path;
|
||||
return state.store->toRealPath(
|
||||
fetchers::downloadTarball(
|
||||
state.store, resolveUri(s), "source", false).storePath);
|
||||
} else if (s.size() > 2 && s.at(0) == '<' && s.at(s.size() - 1) == '>') {
|
||||
Path p = s.substr(1, s.size() - 2);
|
||||
return state.findFile(p);
|
||||
|
|
|
@ -57,7 +57,7 @@ inline void EvalState::forceAttrs(Value & v)
|
|||
|
||||
inline void EvalState::forceAttrs(Value & v, const Pos & pos)
|
||||
{
|
||||
forceValue(v);
|
||||
forceValue(v, pos);
|
||||
if (v.type != tAttrs)
|
||||
throwTypeError("value is %1% while a set was expected, at %2%", v, pos);
|
||||
}
|
||||
|
@ -73,7 +73,7 @@ inline void EvalState::forceList(Value & v)
|
|||
|
||||
inline void EvalState::forceList(Value & v, const Pos & pos)
|
||||
{
|
||||
forceValue(v);
|
||||
forceValue(v, pos);
|
||||
if (!v.isList())
|
||||
throwTypeError("value is %1% while a list was expected, at %2%", v, pos);
|
||||
}
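forceAttrs and forceList now pass the caller's Pos down into forceValue as well, so an error raised while forcing the underlying thunk is reported at the expression that demanded the value. A self-contained sketch of the pattern, using simplified stand-in types rather than the real libexpr definitions:

    // Standalone sketch: position-aware forcing with simplified stand-in types.
    #include <stdexcept>
    #include <string>

    struct Pos { std::string file; unsigned line = 0; };

    enum ValueType { tThunk, tList };

    struct Value { ValueType type = tThunk; };

    [[noreturn]] static void throwTypeError(const std::string & msg, const Pos & pos)
    {
        throw std::runtime_error(msg + ", at " + pos.file + ":" + std::to_string(pos.line));
    }

    static void forceValue(Value &, const Pos &)
    {
        // Stub: the real forceValue evaluates thunks and, with this change,
        // can blame 'pos' if that evaluation fails.
    }

    static void forceList(Value & v, const Pos & pos)
    {
        forceValue(v, pos);
        if (v.type != tList)
            throwTypeError("a list was expected", pos);
    }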
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
#include "derivations.hh"
|
||||
#include "globals.hh"
|
||||
#include "eval-inline.hh"
|
||||
#include "download.hh"
|
||||
#include "filetransfer.hh"
|
||||
#include "json.hh"
|
||||
#include "function-trace.hh"
|
||||
|
||||
|
@ -22,6 +22,8 @@
|
|||
|
||||
#if HAVE_BOEHMGC
|
||||
|
||||
#define GC_INCLUDE_NEW
|
||||
|
||||
#include <gc/gc.h>
|
||||
#include <gc/gc_cpp.h>
|
||||
|
||||
|
@ -56,6 +58,12 @@ static char * dupStringWithLen(const char * s, size_t size)
|
|||
}
|
||||
|
||||
|
||||
RootValue allocRootValue(Value * v)
|
||||
{
|
||||
return std::allocate_shared<Value *>(traceable_allocator<Value *>(), v);
|
||||
}
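allocRootValue wraps a Value * in a shared_ptr created with Boehm's traceable_allocator, so a Value referenced only from memory the conservative collector does not scan (a static, for instance) stays rooted. A small sketch of the intended usage, modelled on the static-cache change to prim_scopedImport later in this diff; the cache and helper names are illustrative:

    // Illustrative sketch: keeping a lazily created Value rooted across calls.
    static RootValue cachedValue;

    Value & getCachedTrue(EvalState & state)
    {
        if (!cachedValue) {
            // Allocate once; the RootValue keeps the pointer visible to the GC.
            cachedValue = allocRootValue(state.allocValue());
            mkBool(**cachedValue, true);
        }
        return **cachedValue;
    }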
|
||||
|
||||
|
||||
static void printValue(std::ostream & str, std::set<const Value *> & active, const Value & v)
|
||||
{
|
||||
checkInterrupt();
|
||||
|
@ -1256,7 +1264,7 @@ void ExprWith::eval(EvalState & state, Env & env, Value & v)
|
|||
|
||||
void ExprIf::eval(EvalState & state, Env & env, Value & v)
|
||||
{
|
||||
(state.evalBool(env, cond) ? then : else_)->eval(state, env, v);
|
||||
(state.evalBool(env, cond, pos) ? then : else_)->eval(state, env, v);
|
||||
}
|
||||
|
||||
|
||||
|
@ -1502,7 +1510,7 @@ NixFloat EvalState::forceFloat(Value & v, const Pos & pos)
|
|||
|
||||
bool EvalState::forceBool(Value & v, const Pos & pos)
|
||||
{
|
||||
forceValue(v);
|
||||
forceValue(v, pos);
|
||||
if (v.type != tBool)
|
||||
throwTypeError("value is %1% while a Boolean was expected, at %2%", v, pos);
|
||||
return v.boolean;
|
||||
|
@ -1517,7 +1525,7 @@ bool EvalState::isFunctor(Value & fun)
|
|||
|
||||
void EvalState::forceFunction(Value & v, const Pos & pos)
|
||||
{
|
||||
forceValue(v);
|
||||
forceValue(v, pos);
|
||||
if (v.type != tLambda && v.type != tPrimOp && v.type != tPrimOpApp && !isFunctor(v))
|
||||
throwTypeError("value is %1% while a function was expected, at %2%", v, pos);
|
||||
}
|
||||
|
@ -1594,7 +1602,7 @@ std::optional<string> EvalState::tryAttrsToString(const Pos & pos, Value & v,
|
|||
string EvalState::coerceToString(const Pos & pos, Value & v, PathSet & context,
|
||||
bool coerceMore, bool copyToStore)
|
||||
{
|
||||
forceValue(v);
|
||||
forceValue(v, pos);
|
||||
|
||||
string s;
|
||||
|
||||
|
@ -1661,7 +1669,7 @@ string EvalState::copyPathToStore(PathSet & context, const Path & path)
|
|||
else {
|
||||
auto p = settings.readOnlyMode
|
||||
? store->computeStorePathForPath(std::string(baseNameOf(path)), checkSourcePath(path)).first
|
||||
: store->addToStore(std::string(baseNameOf(path)), checkSourcePath(path), true, htSHA256, defaultPathFilter, repair);
|
||||
: store->addToStore(std::string(baseNameOf(path)), checkSourcePath(path), FileIngestionMethod::Recursive, htSHA256, defaultPathFilter, repair);
|
||||
dstPath = store->printStorePath(p);
|
||||
srcToStore.insert_or_assign(path, std::move(p));
|
||||
printMsg(lvlChatty, "copied source '%1%' -> '%2%'", path, dstPath);
|
||||
|
|
|
@ -272,6 +272,7 @@ public:
|
|||
Env & allocEnv(size_t size);
|
||||
|
||||
Value * allocAttr(Value & vAttrs, const Symbol & name);
|
||||
Value * allocAttr(Value & vAttrs, const std::string & name);
|
||||
|
||||
Bindings * allocBindings(size_t capacity);
|
||||
|
||||
|
@ -367,7 +368,7 @@ struct EvalSettings : Config
|
|||
"Prefixes of URIs that builtin functions such as fetchurl and fetchGit are allowed to fetch."};
|
||||
|
||||
Setting<bool> traceFunctionCalls{this, false, "trace-function-calls",
|
||||
"Emit log messages for each function entry and exit at the 'vomit' log level (-vvvv)"};
|
||||
"Emit log messages for each function entry and exit at the 'vomit' log level (-vvvv)."};
|
||||
};
|
||||
|
||||
extern EvalSettings evalSettings;
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
#include "function-trace.hh"
|
||||
#include "logging.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
|
|
@ -4,7 +4,6 @@
|
|||
#include <nlohmann/json.hpp>
|
||||
|
||||
using json = nlohmann::json;
|
||||
using std::unique_ptr;
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
@ -13,69 +12,69 @@ namespace nix {
|
|||
class JSONSax : nlohmann::json_sax<json> {
|
||||
class JSONState {
|
||||
protected:
|
||||
unique_ptr<JSONState> parent;
|
||||
Value * v;
|
||||
std::unique_ptr<JSONState> parent;
|
||||
RootValue v;
|
||||
public:
|
||||
virtual unique_ptr<JSONState> resolve(EvalState &)
|
||||
virtual std::unique_ptr<JSONState> resolve(EvalState &)
|
||||
{
|
||||
throw std::logic_error("tried to close toplevel json parser state");
|
||||
};
|
||||
explicit JSONState(unique_ptr<JSONState>&& p) : parent(std::move(p)), v(nullptr) {};
|
||||
explicit JSONState(Value* v) : v(v) {};
|
||||
JSONState(JSONState& p) = delete;
|
||||
Value& value(EvalState & state)
|
||||
}
|
||||
explicit JSONState(std::unique_ptr<JSONState> && p) : parent(std::move(p)) {}
|
||||
explicit JSONState(Value * v) : v(allocRootValue(v)) {}
|
||||
JSONState(JSONState & p) = delete;
|
||||
Value & value(EvalState & state)
|
||||
{
|
||||
if (v == nullptr)
|
||||
v = state.allocValue();
|
||||
return *v;
|
||||
};
|
||||
virtual ~JSONState() {};
|
||||
virtual void add() {};
|
||||
if (!v)
|
||||
v = allocRootValue(state.allocValue());
|
||||
return **v;
|
||||
}
|
||||
virtual ~JSONState() {}
|
||||
virtual void add() {}
|
||||
};
|
||||
|
||||
class JSONObjectState : public JSONState {
|
||||
using JSONState::JSONState;
|
||||
ValueMap attrs = ValueMap();
|
||||
virtual unique_ptr<JSONState> resolve(EvalState & state) override
|
||||
ValueMap attrs;
|
||||
std::unique_ptr<JSONState> resolve(EvalState & state) override
|
||||
{
|
||||
Value& v = parent->value(state);
|
||||
Value & v = parent->value(state);
|
||||
state.mkAttrs(v, attrs.size());
|
||||
for (auto & i : attrs)
|
||||
v.attrs->push_back(Attr(i.first, i.second));
|
||||
return std::move(parent);
|
||||
}
|
||||
virtual void add() override { v = nullptr; };
|
||||
void add() override { v = nullptr; }
|
||||
public:
|
||||
void key(string_t& name, EvalState & state)
|
||||
void key(string_t & name, EvalState & state)
|
||||
{
|
||||
attrs[state.symbols.create(name)] = &value(state);
|
||||
attrs.insert_or_assign(state.symbols.create(name), &value(state));
|
||||
}
|
||||
};
|
||||
|
||||
class JSONListState : public JSONState {
|
||||
ValueVector values = ValueVector();
|
||||
virtual unique_ptr<JSONState> resolve(EvalState & state) override
|
||||
ValueVector values;
|
||||
std::unique_ptr<JSONState> resolve(EvalState & state) override
|
||||
{
|
||||
Value& v = parent->value(state);
|
||||
Value & v = parent->value(state);
|
||||
state.mkList(v, values.size());
|
||||
for (size_t n = 0; n < values.size(); ++n) {
|
||||
v.listElems()[n] = values[n];
|
||||
}
|
||||
return std::move(parent);
|
||||
}
|
||||
virtual void add() override {
|
||||
values.push_back(v);
|
||||
void add() override {
|
||||
values.push_back(*v);
|
||||
v = nullptr;
|
||||
};
|
||||
}
|
||||
public:
|
||||
JSONListState(unique_ptr<JSONState>&& p, std::size_t reserve) : JSONState(std::move(p))
|
||||
JSONListState(std::unique_ptr<JSONState> && p, std::size_t reserve) : JSONState(std::move(p))
|
||||
{
|
||||
values.reserve(reserve);
|
||||
}
|
||||
};
|
||||
|
||||
EvalState & state;
|
||||
unique_ptr<JSONState> rs;
|
||||
std::unique_ptr<JSONState> rs;
|
||||
|
||||
template<typename T, typename... Args> inline bool handle_value(T f, Args... args)
|
||||
{
|
||||
|
@ -107,12 +106,12 @@ public:
|
|||
return handle_value(mkInt, val);
|
||||
}
|
||||
|
||||
bool number_float(number_float_t val, const string_t& s)
|
||||
bool number_float(number_float_t val, const string_t & s)
|
||||
{
|
||||
return handle_value(mkFloat, val);
|
||||
}
|
||||
|
||||
bool string(string_t& val)
|
||||
bool string(string_t & val)
|
||||
{
|
||||
return handle_value<void(Value&, const char*)>(mkString, val.c_str());
|
||||
}
|
||||
|
@ -123,7 +122,7 @@ public:
|
|||
return true;
|
||||
}
|
||||
|
||||
bool key(string_t& name)
|
||||
bool key(string_t & name)
|
||||
{
|
||||
dynamic_cast<JSONObjectState*>(rs.get())->key(name, state);
|
||||
return true;
|
||||
|
|
|
@ -6,7 +6,9 @@ libexpr_DIR := $(d)
|
|||
|
||||
libexpr_SOURCES := $(wildcard $(d)/*.cc) $(wildcard $(d)/primops/*.cc) $(d)/lexer-tab.cc $(d)/parser-tab.cc
|
||||
|
||||
libexpr_LIBS = libutil libstore libnixrust
|
||||
libexpr_CXXFLAGS += -I src/libutil -I src/libstore -I src/libfetchers -I src/libmain -I src/libexpr
|
||||
|
||||
libexpr_LIBS = libutil libstore libfetchers libnixrust
|
||||
|
||||
libexpr_LDFLAGS =
|
||||
ifneq ($(OS), FreeBSD)
|
||||
|
|
|
@ -209,9 +209,10 @@ struct ExprList : Expr
|
|||
|
||||
struct Formal
|
||||
{
|
||||
Pos pos;
|
||||
Symbol name;
|
||||
Expr * def;
|
||||
Formal(const Symbol & name, Expr * def) : name(name), def(def) { };
|
||||
Formal(const Pos & pos, const Symbol & name, Expr * def) : pos(pos), name(name), def(def) { };
|
||||
};
|
||||
|
||||
struct Formals
|
||||
|
@ -261,8 +262,9 @@ struct ExprWith : Expr
|
|||
|
||||
struct ExprIf : Expr
|
||||
{
|
||||
Pos pos;
|
||||
Expr * cond, * then, * else_;
|
||||
ExprIf(Expr * cond, Expr * then, Expr * else_) : cond(cond), then(then), else_(else_) { };
|
||||
ExprIf(const Pos & pos, Expr * cond, Expr * then, Expr * else_) : pos(pos), cond(cond), then(then), else_(else_) { };
|
||||
COMMON_METHODS
|
||||
};
|
||||
|
||||
|
|
|
@ -335,7 +335,7 @@ expr_function
|
|||
;
|
||||
|
||||
expr_if
|
||||
: IF expr THEN expr ELSE expr { $$ = new ExprIf($2, $4, $6); }
|
||||
: IF expr THEN expr ELSE expr { $$ = new ExprIf(CUR_POS, $2, $4, $6); }
|
||||
| expr_op
|
||||
;
|
||||
|
||||
|
@ -531,8 +531,8 @@ formals
|
|||
;
|
||||
|
||||
formal
|
||||
: ID { $$ = new Formal(data->symbols.create($1), 0); }
|
||||
| ID '?' expr { $$ = new Formal(data->symbols.create($1), $3); }
|
||||
: ID { $$ = new Formal(CUR_POS, data->symbols.create($1), 0); }
|
||||
| ID '?' expr { $$ = new Formal(CUR_POS, data->symbols.create($1), $3); }
|
||||
;
|
||||
|
||||
%%
|
||||
|
@ -544,7 +544,8 @@ formal
|
|||
#include <unistd.h>
|
||||
|
||||
#include "eval.hh"
|
||||
#include "download.hh"
|
||||
#include "filetransfer.hh"
|
||||
#include "fetchers.hh"
|
||||
#include "store-api.hh"
|
||||
|
||||
|
||||
|
@ -687,10 +688,9 @@ std::pair<bool, std::string> EvalState::resolveSearchPathElem(const SearchPathEl
|
|||
|
||||
if (isUri(elem.second)) {
|
||||
try {
|
||||
CachedDownloadRequest request(elem.second);
|
||||
request.unpack = true;
|
||||
res = { true, getDownloader()->downloadCached(store, request).path };
|
||||
} catch (DownloadError & e) {
|
||||
res = { true, store->toRealPath(fetchers::downloadTarball(
|
||||
store, resolveUri(elem.second), "source", false).storePath) };
|
||||
} catch (FileTransferError & e) {
|
||||
printError(format("warning: Nix search path entry '%1%' cannot be downloaded, ignoring") % elem.second);
|
||||
res = { false, "" };
|
||||
}
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
#include "archive.hh"
|
||||
#include "derivations.hh"
|
||||
#include "download.hh"
|
||||
#include "eval-inline.hh"
|
||||
#include "eval.hh"
|
||||
#include "globals.hh"
|
||||
|
@ -122,16 +121,16 @@ static void prim_scopedImport(EvalState & state, const Pos & pos, Value * * args
|
|||
}
|
||||
w.attrs->sort();
|
||||
|
||||
static Value * fun = nullptr;
|
||||
static RootValue fun;
|
||||
if (!fun) {
|
||||
fun = state.allocValue();
|
||||
fun = allocRootValue(state.allocValue());
|
||||
state.eval(state.parseExprFromString(
|
||||
#include "imported-drv-to-derivation.nix.gen.hh"
|
||||
, "/"), *fun);
|
||||
, "/"), **fun);
|
||||
}
|
||||
|
||||
state.forceFunction(*fun, pos);
|
||||
mkApp(v, *fun, w);
|
||||
state.forceFunction(**fun, pos);
|
||||
mkApp(v, **fun, w);
|
||||
state.forceAttrs(v, pos);
|
||||
} else {
|
||||
state.forceAttrs(*args[0]);
|
||||
|
@ -242,7 +241,7 @@ void prim_exec(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
|||
/* Return a string representing the type of the expression. */
|
||||
static void prim_typeOf(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
||||
{
|
||||
state.forceValue(*args[0]);
|
||||
state.forceValue(*args[0], pos);
|
||||
string t;
|
||||
switch (args[0]->type) {
|
||||
case tInt: t = "int"; break;
|
||||
|
@ -270,7 +269,7 @@ static void prim_typeOf(EvalState & state, const Pos & pos, Value * * args, Valu
|
|||
/* Determine whether the argument is the null value. */
|
||||
static void prim_isNull(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
||||
{
|
||||
state.forceValue(*args[0]);
|
||||
state.forceValue(*args[0], pos);
|
||||
mkBool(v, args[0]->type == tNull);
|
||||
}
|
||||
|
||||
|
@ -278,7 +277,7 @@ static void prim_isNull(EvalState & state, const Pos & pos, Value * * args, Valu
|
|||
/* Determine whether the argument is a function. */
|
||||
static void prim_isFunction(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
||||
{
|
||||
state.forceValue(*args[0]);
|
||||
state.forceValue(*args[0], pos);
|
||||
bool res;
|
||||
switch (args[0]->type) {
|
||||
case tLambda:
|
||||
|
@ -297,21 +296,21 @@ static void prim_isFunction(EvalState & state, const Pos & pos, Value * * args,
|
|||
/* Determine whether the argument is an integer. */
|
||||
static void prim_isInt(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
||||
{
|
||||
state.forceValue(*args[0]);
|
||||
state.forceValue(*args[0], pos);
|
||||
mkBool(v, args[0]->type == tInt);
|
||||
}
|
||||
|
||||
/* Determine whether the argument is a float. */
|
||||
static void prim_isFloat(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
||||
{
|
||||
state.forceValue(*args[0]);
|
||||
state.forceValue(*args[0], pos);
|
||||
mkBool(v, args[0]->type == tFloat);
|
||||
}
|
||||
|
||||
/* Determine whether the argument is a string. */
|
||||
static void prim_isString(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
||||
{
|
||||
state.forceValue(*args[0]);
|
||||
state.forceValue(*args[0], pos);
|
||||
mkBool(v, args[0]->type == tString);
|
||||
}
|
||||
|
||||
|
@ -319,14 +318,14 @@ static void prim_isString(EvalState & state, const Pos & pos, Value * * args, Va
|
|||
/* Determine whether the argument is a Boolean. */
|
||||
static void prim_isBool(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
||||
{
|
||||
state.forceValue(*args[0]);
|
||||
state.forceValue(*args[0], pos);
|
||||
mkBool(v, args[0]->type == tBool);
|
||||
}
|
||||
|
||||
/* Determine whether the argument is a path. */
|
||||
static void prim_isPath(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
||||
{
|
||||
state.forceValue(*args[0]);
|
||||
state.forceValue(*args[0], pos);
|
||||
mkBool(v, args[0]->type == tPath);
|
||||
}
|
||||
|
||||
|
@ -383,7 +382,7 @@ static void prim_genericClosure(EvalState & state, const Pos & pos, Value * * ar
|
|||
args[0]->attrs->find(state.symbols.create("operator"));
|
||||
if (op == args[0]->attrs->end())
|
||||
throw EvalError(format("attribute 'operator' required, at %1%") % pos);
|
||||
state.forceValue(*op->value);
|
||||
state.forceValue(*op->value, pos);
|
||||
|
||||
/* Construct the closure by applying the operator to element of
|
||||
`workSet', adding the result to `workSet', continuing until
|
||||
|
@ -402,7 +401,7 @@ static void prim_genericClosure(EvalState & state, const Pos & pos, Value * * ar
|
|||
e->attrs->find(state.symbols.create("key"));
|
||||
if (key == e->attrs->end())
|
||||
throw EvalError(format("attribute 'key' required, at %1%") % pos);
|
||||
state.forceValue(*key->value);
|
||||
state.forceValue(*key->value, pos);
|
||||
|
||||
if (!doneKeys.insert(key->value).second) continue;
|
||||
res.push_back(e);
|
||||
|
@ -414,7 +413,7 @@ static void prim_genericClosure(EvalState & state, const Pos & pos, Value * * ar
|
|||
|
||||
/* Add the values returned by the operator to the work set. */
|
||||
for (unsigned int n = 0; n < call.listSize(); ++n) {
|
||||
state.forceValue(*call.listElems()[n]);
|
||||
state.forceValue(*call.listElems()[n], pos);
|
||||
workSet.push_back(call.listElems()[n]);
|
||||
}
|
||||
}
|
||||
|
@ -446,7 +445,7 @@ static void prim_throw(EvalState & state, const Pos & pos, Value * * args, Value
|
|||
static void prim_addErrorContext(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
||||
{
|
||||
try {
|
||||
state.forceValue(*args[1]);
|
||||
state.forceValue(*args[1], pos);
|
||||
v = *args[1];
|
||||
} catch (Error & e) {
|
||||
PathSet context;
|
||||
|
@ -462,7 +461,7 @@ static void prim_tryEval(EvalState & state, const Pos & pos, Value * * args, Val
|
|||
{
|
||||
state.mkAttrs(v, 2);
|
||||
try {
|
||||
state.forceValue(*args[0]);
|
||||
state.forceValue(*args[0], pos);
|
||||
v.attrs->push_back(Attr(state.sValue, args[0]));
|
||||
mkBool(*state.allocAttr(v, state.symbols.create("success")), true);
|
||||
} catch (AssertionError & e) {
|
||||
|
@ -484,8 +483,8 @@ static void prim_getEnv(EvalState & state, const Pos & pos, Value * * args, Valu
|
|||
/* Evaluate the first argument, then return the second argument. */
|
||||
static void prim_seq(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
||||
{
|
||||
state.forceValue(*args[0]);
|
||||
state.forceValue(*args[1]);
|
||||
state.forceValue(*args[0], pos);
|
||||
state.forceValue(*args[1], pos);
|
||||
v = *args[1];
|
||||
}
|
||||
|
||||
|
@ -495,7 +494,7 @@ static void prim_seq(EvalState & state, const Pos & pos, Value * * args, Value &
|
|||
static void prim_deepSeq(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
||||
{
|
||||
state.forceValueDeep(*args[0]);
|
||||
state.forceValue(*args[1]);
|
||||
state.forceValue(*args[1], pos);
|
||||
v = *args[1];
|
||||
}
|
||||
|
||||
|
@ -504,12 +503,12 @@ static void prim_deepSeq(EvalState & state, const Pos & pos, Value * * args, Val
|
|||
return the second expression. Useful for debugging. */
|
||||
static void prim_trace(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
||||
{
|
||||
state.forceValue(*args[0]);
|
||||
state.forceValue(*args[0], pos);
|
||||
if (args[0]->type == tString)
|
||||
printError(format("trace: %1%") % args[0]->string.s);
|
||||
else
|
||||
printError(format("trace: %1%") % *args[0]);
|
||||
state.forceValue(*args[1]);
|
||||
state.forceValue(*args[1], pos);
|
||||
v = *args[1];
|
||||
}
|
||||
|
||||
|
@ -563,7 +562,7 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * *
|
|||
|
||||
std::optional<std::string> outputHash;
|
||||
std::string outputHashAlgo;
|
||||
bool outputHashRecursive = false;
|
||||
auto ingestionMethod = FileIngestionMethod::Flat;
|
||||
|
||||
StringSet outputs;
|
||||
outputs.insert("out");
|
||||
|
@ -574,8 +573,8 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * *
|
|||
vomit("processing attribute '%1%'", key);
|
||||
|
||||
auto handleHashMode = [&](const std::string & s) {
|
||||
if (s == "recursive") outputHashRecursive = true;
|
||||
else if (s == "flat") outputHashRecursive = false;
|
||||
if (s == "recursive") ingestionMethod = FileIngestionMethod::Recursive;
|
||||
else if (s == "flat") ingestionMethod = FileIngestionMethod::Flat;
|
||||
else throw EvalError("invalid value '%s' for 'outputHashMode' attribute, at %s", s, posDrvName);
|
||||
};
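The boolean outputHashRecursive flag is replaced by the FileIngestionMethod enumeration, which names the two fixed-output ingestion modes explicitly and can still be bridged from a legacy boolean (as prim_path does further down). A standalone sketch of the pattern; the enum body is written out here only for illustration:

    // Standalone sketch of the bool -> scoped-enum migration pattern.
    #include <cstdint>
    #include <string>

    enum struct FileIngestionMethod : uint8_t {
        Flat = false,       // hash the file contents as-is
        Recursive = true,   // hash a recursive (NAR-style) serialisation
    };

    FileIngestionMethod methodFromHashMode(const std::string & s)
    {
        return s == "recursive" ? FileIngestionMethod::Recursive
                                : FileIngestionMethod::Flat;
    }

    // Bridging an existing boolean, as the 'recursive' attribute of
    // builtins.path is bridged below:
    FileIngestionMethod methodFromBool(bool recursive)
    {
        return FileIngestionMethod { recursive };
    }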
|
||||
|
||||
|
@ -600,7 +599,7 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * *
|
|||
try {
|
||||
|
||||
if (ignoreNulls) {
|
||||
state.forceValue(*i->value);
|
||||
state.forceValue(*i->value, pos);
|
||||
if (i->value->type == tNull) continue;
|
||||
}
|
||||
|
||||
|
@ -722,11 +721,14 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * *
|
|||
HashType ht = outputHashAlgo.empty() ? htUnknown : parseHashType(outputHashAlgo);
|
||||
Hash h(*outputHash, ht);
|
||||
|
||||
auto outPath = state.store->makeFixedOutputPath(outputHashRecursive, h, drvName);
|
||||
auto outPath = state.store->makeFixedOutputPath(ingestionMethod, h, drvName);
|
||||
if (!jsonObject) drv.env["out"] = state.store->printStorePath(outPath);
|
||||
drv.outputs.insert_or_assign("out", DerivationOutput(std::move(outPath),
|
||||
(outputHashRecursive ? "r:" : "") + printHashType(h.type),
|
||||
h.to_string(Base16, false)));
|
||||
drv.outputs.insert_or_assign("out", DerivationOutput {
|
||||
std::move(outPath),
|
||||
(ingestionMethod == FileIngestionMethod::Recursive ? "r:" : "")
|
||||
+ printHashType(h.type),
|
||||
h.to_string(Base16, false),
|
||||
});
|
||||
}
|
||||
|
||||
else {
|
||||
|
@ -1021,7 +1023,9 @@ static void prim_toFile(EvalState & state, const Pos & pos, Value * * args, Valu
|
|||
|
||||
for (auto path : context) {
|
||||
if (path.at(0) != '/')
|
||||
throw EvalError(format("in 'toFile': the file '%1%' cannot refer to derivation outputs, at %2%") % name % pos);
|
||||
throw EvalError(format(
|
||||
"in 'toFile': the file named '%1%' must not contain a reference "
|
||||
"to a derivation but contains (%2%), at %3%") % name % path % pos);
|
||||
refs.insert(state.store->parseStorePath(path));
|
||||
}
|
||||
|
||||
|
@ -1038,7 +1042,7 @@ static void prim_toFile(EvalState & state, const Pos & pos, Value * * args, Valu
|
|||
|
||||
|
||||
static void addPath(EvalState & state, const Pos & pos, const string & name, const Path & path_,
|
||||
Value * filterFun, bool recursive, const Hash & expectedHash, Value & v)
|
||||
Value * filterFun, FileIngestionMethod method, const Hash & expectedHash, Value & v)
|
||||
{
|
||||
const auto path = evalSettings.pureEval && expectedHash ?
|
||||
path_ :
|
||||
|
@ -1069,12 +1073,12 @@ static void addPath(EvalState & state, const Pos & pos, const string & name, con
|
|||
|
||||
std::optional<StorePath> expectedStorePath;
|
||||
if (expectedHash)
|
||||
expectedStorePath = state.store->makeFixedOutputPath(recursive, expectedHash, name);
|
||||
expectedStorePath = state.store->makeFixedOutputPath(method, expectedHash, name);
|
||||
Path dstPath;
|
||||
if (!expectedHash || !state.store->isValidPath(*expectedStorePath)) {
|
||||
dstPath = state.store->printStorePath(settings.readOnlyMode
|
||||
? state.store->computeStorePathForPath(name, path, recursive, htSHA256, filter).first
|
||||
: state.store->addToStore(name, path, recursive, htSHA256, filter, state.repair));
|
||||
? state.store->computeStorePathForPath(name, path, method, htSHA256, filter).first
|
||||
: state.store->addToStore(name, path, method, htSHA256, filter, state.repair));
|
||||
if (expectedHash && expectedStorePath != state.store->parseStorePath(dstPath))
|
||||
throw Error("store path mismatch in (possibly filtered) path added from '%s'", path);
|
||||
} else
|
||||
|
@ -1091,11 +1095,11 @@ static void prim_filterSource(EvalState & state, const Pos & pos, Value * * args
|
|||
if (!context.empty())
|
||||
throw EvalError(format("string '%1%' cannot refer to other paths, at %2%") % path % pos);
|
||||
|
||||
state.forceValue(*args[0]);
|
||||
state.forceValue(*args[0], pos);
|
||||
if (args[0]->type != tLambda)
|
||||
throw TypeError(format("first argument in call to 'filterSource' is not a function but %1%, at %2%") % showType(*args[0]) % pos);
|
||||
|
||||
addPath(state, pos, std::string(baseNameOf(path)), path, args[0], true, Hash(), v);
|
||||
addPath(state, pos, std::string(baseNameOf(path)), path, args[0], FileIngestionMethod::Recursive, Hash(), v);
|
||||
}
|
||||
|
||||
static void prim_path(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
||||
|
@ -1104,7 +1108,7 @@ static void prim_path(EvalState & state, const Pos & pos, Value * * args, Value
|
|||
Path path;
|
||||
string name;
|
||||
Value * filterFun = nullptr;
|
||||
auto recursive = true;
|
||||
auto method = FileIngestionMethod::Recursive;
|
||||
Hash expectedHash;
|
||||
|
||||
for (auto & attr : *args[0]->attrs) {
|
||||
|
@ -1117,10 +1121,10 @@ static void prim_path(EvalState & state, const Pos & pos, Value * * args, Value
|
|||
} else if (attr.name == state.sName)
|
||||
name = state.forceStringNoCtx(*attr.value, *attr.pos);
|
||||
else if (n == "filter") {
|
||||
state.forceValue(*attr.value);
|
||||
state.forceValue(*attr.value, pos);
|
||||
filterFun = attr.value;
|
||||
} else if (n == "recursive")
|
||||
recursive = state.forceBool(*attr.value, *attr.pos);
|
||||
method = FileIngestionMethod { state.forceBool(*attr.value, *attr.pos) };
|
||||
else if (n == "sha256")
|
||||
expectedHash = Hash(state.forceStringNoCtx(*attr.value, *attr.pos), htSHA256);
|
||||
else
|
||||
|
@ -1131,7 +1135,7 @@ static void prim_path(EvalState & state, const Pos & pos, Value * * args, Value
|
|||
if (name.empty())
|
||||
name = baseNameOf(path);
|
||||
|
||||
addPath(state, pos, name, path, filterFun, recursive, expectedHash, v);
|
||||
addPath(state, pos, name, path, filterFun, method, expectedHash, v);
|
||||
}
|
||||
|
||||
|
||||
|
@ -1188,7 +1192,7 @@ void prim_getAttr(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
|||
throw EvalError(format("attribute '%1%' missing, at %2%") % attr % pos);
|
||||
// !!! add to stack trace?
|
||||
if (state.countCalls && i->pos) state.attrSelects[*i->pos]++;
|
||||
state.forceValue(*i->value);
|
||||
state.forceValue(*i->value, pos);
|
||||
v = *i->value;
|
||||
}
|
||||
|
||||
|
@ -1218,7 +1222,7 @@ static void prim_hasAttr(EvalState & state, const Pos & pos, Value * * args, Val
|
|||
/* Determine whether the argument is a set. */
|
||||
static void prim_isAttrs(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
||||
{
|
||||
state.forceValue(*args[0]);
|
||||
state.forceValue(*args[0], pos);
|
||||
mkBool(v, args[0]->type == tAttrs);
|
||||
}
|
||||
|
||||
|
@ -1344,7 +1348,7 @@ static void prim_catAttrs(EvalState & state, const Pos & pos, Value * * args, Va
|
|||
*/
|
||||
static void prim_functionArgs(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
||||
{
|
||||
state.forceValue(*args[0]);
|
||||
state.forceValue(*args[0], pos);
|
||||
if (args[0]->type != tLambda)
|
||||
throw TypeError(format("'functionArgs' requires a function, at %1%") % pos);
|
||||
|
||||
|
@ -1354,9 +1358,12 @@ static void prim_functionArgs(EvalState & state, const Pos & pos, Value * * args
|
|||
}
|
||||
|
||||
state.mkAttrs(v, args[0]->lambda.fun->formals->formals.size());
|
||||
for (auto & i : args[0]->lambda.fun->formals->formals)
|
||||
for (auto & i : args[0]->lambda.fun->formals->formals) {
|
||||
// !!! should optimise booleans (allocate only once)
|
||||
mkBool(*state.allocAttr(v, i.name), i.def);
|
||||
Value * value = state.allocValue();
|
||||
v.attrs->push_back(Attr(i.name, value, &i.pos));
|
||||
mkBool(*value, i.def);
|
||||
}
|
||||
v.attrs->sort();
|
||||
}
|
||||
|
||||
|
@ -1387,7 +1394,7 @@ static void prim_mapAttrs(EvalState & state, const Pos & pos, Value * * args, Va
|
|||
/* Determine whether the argument is a list. */
|
||||
static void prim_isList(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
||||
{
|
||||
state.forceValue(*args[0]);
|
||||
state.forceValue(*args[0], pos);
|
||||
mkBool(v, args[0]->isList());
|
||||
}
|
||||
|
||||
|
@ -1397,7 +1404,7 @@ static void elemAt(EvalState & state, const Pos & pos, Value & list, int n, Valu
|
|||
state.forceList(list, pos);
|
||||
if (n < 0 || (unsigned int) n >= list.listSize())
|
||||
throw Error(format("list index %1% is out of bounds, at %2%") % n % pos);
|
||||
state.forceValue(*list.listElems()[n]);
|
||||
state.forceValue(*list.listElems()[n], pos);
|
||||
v = *list.listElems()[n];
|
||||
}
|
||||
|
||||
|
@ -1520,9 +1527,9 @@ static void prim_foldlStrict(EvalState & state, const Pos & pos, Value * * args,
|
|||
vCur = n == args[2]->listSize() - 1 ? &v : state.allocValue();
|
||||
state.callFunction(vTmp, *args[2]->listElems()[n], *vCur, pos);
|
||||
}
|
||||
state.forceValue(v);
|
||||
state.forceValue(v, pos);
|
||||
} else {
|
||||
state.forceValue(*args[1]);
|
||||
state.forceValue(*args[1], pos);
|
||||
v = *args[1];
|
||||
}
|
||||
}
|
||||
|
@ -1587,7 +1594,7 @@ static void prim_sort(EvalState & state, const Pos & pos, Value * * args, Value
|
|||
auto len = args[1]->listSize();
|
||||
state.mkList(v, len);
|
||||
for (unsigned int n = 0; n < len; ++n) {
|
||||
state.forceValue(*args[1]->listElems()[n]);
|
||||
state.forceValue(*args[1]->listElems()[n], pos);
|
||||
v.listElems()[n] = args[1]->listElems()[n];
|
||||
}
|
||||
|
||||
|
@ -1622,7 +1629,7 @@ static void prim_partition(EvalState & state, const Pos & pos, Value * * args, V
|
|||
|
||||
for (unsigned int n = 0; n < len; ++n) {
|
||||
auto vElem = args[1]->listElems()[n];
|
||||
state.forceValue(*vElem);
|
||||
state.forceValue(*vElem, pos);
|
||||
Value res;
|
||||
state.callFunction(*args[0], *vElem, res, pos);
|
||||
if (state.forceBool(res, pos))
|
||||
|
@ -1753,8 +1760,8 @@ static void prim_bitXor(EvalState & state, const Pos & pos, Value * * args, Valu
|
|||
|
||||
static void prim_lessThan(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
||||
{
|
||||
state.forceValue(*args[0]);
|
||||
state.forceValue(*args[1]);
|
||||
state.forceValue(*args[0], pos);
|
||||
state.forceValue(*args[1], pos);
|
||||
CompareValues comp;
|
||||
mkBool(v, comp(args[0], args[1]));
|
||||
}
|
||||
|
@@ -2045,68 +2052,6 @@ static void prim_splitVersion(EvalState & state, const Pos & pos, Value * * args
}


/*************************************************************
 * Networking
 *************************************************************/


void fetch(EvalState & state, const Pos & pos, Value * * args, Value & v,
    const string & who, bool unpack, const std::string & defaultName)
{
    CachedDownloadRequest request("");
    request.unpack = unpack;
    request.name = defaultName;

    state.forceValue(*args[0]);

    if (args[0]->type == tAttrs) {

        state.forceAttrs(*args[0], pos);

        for (auto & attr : *args[0]->attrs) {
            string n(attr.name);
            if (n == "url")
                request.uri = state.forceStringNoCtx(*attr.value, *attr.pos);
            else if (n == "sha256")
                request.expectedHash = Hash(state.forceStringNoCtx(*attr.value, *attr.pos), htSHA256);
            else if (n == "name")
                request.name = state.forceStringNoCtx(*attr.value, *attr.pos);
            else
                throw EvalError(format("unsupported argument '%1%' to '%2%', at %3%") % attr.name % who % attr.pos);
        }

        if (request.uri.empty())
            throw EvalError(format("'url' argument required, at %1%") % pos);

    } else
        request.uri = state.forceStringNoCtx(*args[0], pos);

    state.checkURI(request.uri);

    if (evalSettings.pureEval && !request.expectedHash)
        throw Error("in pure evaluation mode, '%s' requires a 'sha256' argument", who);

    auto res = getDownloader()->downloadCached(state.store, request);

    if (state.allowedPaths)
        state.allowedPaths->insert(res.path);

    mkString(v, res.storePath, PathSet({res.storePath}));
}


static void prim_fetchurl(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    fetch(state, pos, args, v, "fetchurl", false, "");
}


static void prim_fetchTarball(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    fetch(state, pos, args, v, "fetchTarball", true, "source");
}


/*************************************************************
 * Primop registration
 *************************************************************/
@@ -2289,10 +2234,6 @@ void EvalState::createBaseEnv()
    addPrimOp("derivationStrict", 1, prim_derivationStrict);
    addPrimOp("placeholder", 1, prim_placeholder);

    // Networking
    addPrimOp("__fetchurl", 1, prim_fetchurl);
    addPrimOp("fetchTarball", 1, prim_fetchTarball);

    /* Add a wrapper around the derivation primop that computes the
       `drvPath' and `outPath' attributes lazily. */
    string path = canonPath(settings.nixDataDir + "/nix/corepkgs/derivation.nix", true);
@@ -20,6 +20,7 @@ struct RegisterPrimOp
   them. */
/* Load a ValueInitializer from a DSO and return whatever it initializes */
void prim_importNative(EvalState & state, const Pos & pos, Value * * args, Value & v);

/* Execute a program and parse its output */
void prim_exec(EvalState & state, const Pos & pos, Value * * args, Value & v);
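The hunk above touches primops.hh next to struct RegisterPrimOp, the helper that the new fetcher primops use to register themselves instead of the explicit addPrimOp calls removed from createBaseEnv. The diff does not show how RegisterPrimOp works; the following is a minimal, self-contained sketch of the static-registration idiom it is built on (all names except RegisterPrimOp itself are illustrative, not the actual Nix definitions):

// Hypothetical sketch of the static-registration idiom behind RegisterPrimOp.
#include <functional>
#include <string>
#include <vector>

struct EvalState;          // stand-ins for the real Nix types
struct Pos;
struct Value;

using PrimOpFun = void (*)(EvalState &, const Pos &, Value **, Value &);

struct RegisterPrimOp
{
    struct Entry { std::string name; size_t arity; PrimOpFun fun; };

    // One global list, filled by static constructors before main() runs.
    static std::vector<Entry> & entries()
    {
        static std::vector<Entry> v;
        return v;
    }

    RegisterPrimOp(std::string name, size_t arity, PrimOpFun fun)
    {
        entries().push_back({std::move(name), arity, fun});
    }
};

// A file-scope object per primop is all a fetcher file has to declare:
static void prim_example(EvalState &, const Pos &, Value **, Value &) { /* ... */ }
static RegisterPrimOp rExample("example", 1, prim_example);
// createBaseEnv() can then walk RegisterPrimOp::entries() and call addPrimOp
// for each entry, so adding a primop needs no edits to the evaluator core.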
@ -1,203 +1,19 @@
|
|||
#include "primops.hh"
|
||||
#include "eval-inline.hh"
|
||||
#include "download.hh"
|
||||
#include "store-api.hh"
|
||||
#include "pathlocks.hh"
|
||||
#include "hash.hh"
|
||||
#include "tarfile.hh"
|
||||
|
||||
#include <sys/time.h>
|
||||
|
||||
#include <regex>
|
||||
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
using namespace std::string_literals;
|
||||
#include "fetchers.hh"
|
||||
#include "url.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
struct GitInfo
|
||||
{
|
||||
Path storePath;
|
||||
std::string rev;
|
||||
std::string shortRev;
|
||||
uint64_t revCount = 0;
|
||||
};
|
||||
|
||||
std::regex revRegex("^[0-9a-fA-F]{40}$");
|
||||
|
||||
GitInfo exportGit(ref<Store> store, const std::string & uri,
|
||||
std::optional<std::string> ref, std::string rev,
|
||||
const std::string & name)
|
||||
{
|
||||
if (evalSettings.pureEval && rev == "")
|
||||
throw Error("in pure evaluation mode, 'fetchGit' requires a Git revision");
|
||||
|
||||
if (!ref && rev == "" && hasPrefix(uri, "/") && pathExists(uri + "/.git")) {
|
||||
|
||||
bool clean = true;
|
||||
|
||||
try {
|
||||
runProgram("git", true, { "-C", uri, "diff-index", "--quiet", "HEAD", "--" });
|
||||
} catch (ExecError & e) {
|
||||
if (!WIFEXITED(e.status) || WEXITSTATUS(e.status) != 1) throw;
|
||||
clean = false;
|
||||
}
|
||||
|
||||
if (!clean) {
|
||||
|
||||
/* This is an unclean working tree. So copy all tracked files. */
|
||||
GitInfo gitInfo;
|
||||
gitInfo.rev = "0000000000000000000000000000000000000000";
|
||||
gitInfo.shortRev = std::string(gitInfo.rev, 0, 7);
|
||||
|
||||
auto files = tokenizeString<std::set<std::string>>(
|
||||
runProgram("git", true, { "-C", uri, "ls-files", "-z" }), "\0"s);
|
||||
|
||||
PathFilter filter = [&](const Path & p) -> bool {
|
||||
assert(hasPrefix(p, uri));
|
||||
std::string file(p, uri.size() + 1);
|
||||
|
||||
auto st = lstat(p);
|
||||
|
||||
if (S_ISDIR(st.st_mode)) {
|
||||
auto prefix = file + "/";
|
||||
auto i = files.lower_bound(prefix);
|
||||
return i != files.end() && hasPrefix(*i, prefix);
|
||||
}
|
||||
|
||||
return files.count(file);
|
||||
};
|
||||
|
||||
gitInfo.storePath = store->printStorePath(store->addToStore("source", uri, true, htSHA256, filter));
|
||||
|
||||
return gitInfo;
|
||||
}
|
||||
|
||||
// clean working tree, but no ref or rev specified. Use 'HEAD'.
|
||||
rev = chomp(runProgram("git", true, { "-C", uri, "rev-parse", "HEAD" }));
|
||||
ref = "HEAD"s;
|
||||
}
|
||||
|
||||
if (!ref) ref = "HEAD"s;
|
||||
|
||||
if (rev != "" && !std::regex_match(rev, revRegex))
|
||||
throw Error("invalid Git revision '%s'", rev);
|
||||
|
||||
deletePath(getCacheDir() + "/nix/git");
|
||||
|
||||
Path cacheDir = getCacheDir() + "/nix/gitv2/" + hashString(htSHA256, uri).to_string(Base32, false);
|
||||
|
||||
if (!pathExists(cacheDir)) {
|
||||
createDirs(dirOf(cacheDir));
|
||||
runProgram("git", true, { "init", "--bare", cacheDir });
|
||||
}
|
||||
|
||||
Path localRefFile;
|
||||
if (ref->compare(0, 5, "refs/") == 0)
|
||||
localRefFile = cacheDir + "/" + *ref;
|
||||
else
|
||||
localRefFile = cacheDir + "/refs/heads/" + *ref;
|
||||
|
||||
bool doFetch;
|
||||
time_t now = time(0);
|
||||
/* If a rev was specified, we need to fetch if it's not in the
|
||||
repo. */
|
||||
if (rev != "") {
|
||||
try {
|
||||
runProgram("git", true, { "-C", cacheDir, "cat-file", "-e", rev });
|
||||
doFetch = false;
|
||||
} catch (ExecError & e) {
|
||||
if (WIFEXITED(e.status)) {
|
||||
doFetch = true;
|
||||
} else {
|
||||
throw;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
/* If the local ref is older than ‘tarball-ttl’ seconds, do a
|
||||
git fetch to update the local ref to the remote ref. */
|
||||
struct stat st;
|
||||
doFetch = stat(localRefFile.c_str(), &st) != 0 ||
|
||||
(uint64_t) st.st_mtime + settings.tarballTtl <= (uint64_t) now;
|
||||
}
|
||||
if (doFetch)
|
||||
{
|
||||
Activity act(*logger, lvlTalkative, actUnknown, fmt("fetching Git repository '%s'", uri));
|
||||
|
||||
// FIXME: git stderr messes up our progress indicator, so
|
||||
// we're using --quiet for now. Should process its stderr.
|
||||
runProgram("git", true, { "-C", cacheDir, "fetch", "--quiet", "--force", "--", uri, fmt("%s:%s", *ref, *ref) });
|
||||
|
||||
struct timeval times[2];
|
||||
times[0].tv_sec = now;
|
||||
times[0].tv_usec = 0;
|
||||
times[1].tv_sec = now;
|
||||
times[1].tv_usec = 0;
|
||||
|
||||
utimes(localRefFile.c_str(), times);
|
||||
}
|
||||
|
||||
// FIXME: check whether rev is an ancestor of ref.
|
||||
GitInfo gitInfo;
|
||||
gitInfo.rev = rev != "" ? rev : chomp(readFile(localRefFile));
|
||||
gitInfo.shortRev = std::string(gitInfo.rev, 0, 7);
|
||||
|
||||
printTalkative("using revision %s of repo '%s'", gitInfo.rev, uri);
|
||||
|
||||
std::string storeLinkName = hashString(htSHA512, name + std::string("\0"s) + gitInfo.rev).to_string(Base32, false);
|
||||
Path storeLink = cacheDir + "/" + storeLinkName + ".link";
|
||||
PathLocks storeLinkLock({storeLink}, fmt("waiting for lock on '%1%'...", storeLink)); // FIXME: broken
|
||||
|
||||
try {
|
||||
auto json = nlohmann::json::parse(readFile(storeLink));
|
||||
|
||||
assert(json["name"] == name && json["rev"] == gitInfo.rev);
|
||||
|
||||
gitInfo.storePath = json["storePath"];
|
||||
|
||||
if (store->isValidPath(store->parseStorePath(gitInfo.storePath))) {
|
||||
gitInfo.revCount = json["revCount"];
|
||||
return gitInfo;
|
||||
}
|
||||
|
||||
} catch (SysError & e) {
|
||||
if (e.errNo != ENOENT) throw;
|
||||
}
|
||||
|
||||
auto source = sinkToSource([&](Sink & sink) {
|
||||
RunOptions gitOptions("git", { "-C", cacheDir, "archive", gitInfo.rev });
|
||||
gitOptions.standardOut = &sink;
|
||||
runProgram2(gitOptions);
|
||||
});
|
||||
|
||||
Path tmpDir = createTempDir();
|
||||
AutoDelete delTmpDir(tmpDir, true);
|
||||
|
||||
unpackTarfile(*source, tmpDir);
|
||||
|
||||
gitInfo.storePath = store->printStorePath(store->addToStore(name, tmpDir));
|
||||
|
||||
gitInfo.revCount = std::stoull(runProgram("git", true, { "-C", cacheDir, "rev-list", "--count", gitInfo.rev }));
|
||||
|
||||
nlohmann::json json;
|
||||
json["storePath"] = gitInfo.storePath;
|
||||
json["uri"] = uri;
|
||||
json["name"] = name;
|
||||
json["rev"] = gitInfo.rev;
|
||||
json["revCount"] = gitInfo.revCount;
|
||||
|
||||
writeFile(storeLink, json.dump());
|
||||
|
||||
return gitInfo;
|
||||
}
|
||||
|
||||
static void prim_fetchGit(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
||||
{
|
||||
std::string url;
|
||||
std::optional<std::string> ref;
|
||||
std::string rev;
|
||||
std::optional<Hash> rev;
|
||||
std::string name = "source";
|
||||
bool fetchSubmodules = false;
|
||||
PathSet context;
|
||||
|
||||
state.forceValue(*args[0]);
|
||||
|
@ -213,9 +29,11 @@ static void prim_fetchGit(EvalState & state, const Pos & pos, Value * * args, Va
|
|||
else if (n == "ref")
|
||||
ref = state.forceStringNoCtx(*attr.value, *attr.pos);
|
||||
else if (n == "rev")
|
||||
rev = state.forceStringNoCtx(*attr.value, *attr.pos);
|
||||
rev = Hash(state.forceStringNoCtx(*attr.value, *attr.pos), htSHA1);
|
||||
else if (n == "name")
|
||||
name = state.forceStringNoCtx(*attr.value, *attr.pos);
|
||||
else if (n == "submodules")
|
||||
fetchSubmodules = state.forceBool(*attr.value, *attr.pos);
|
||||
else
|
||||
throw EvalError("unsupported argument '%s' to 'fetchGit', at %s", attr.name, *attr.pos);
|
||||
}
|
||||
|
@ -230,17 +48,36 @@ static void prim_fetchGit(EvalState & state, const Pos & pos, Value * * args, Va
|
|||
// whitelist. Ah well.
|
||||
state.checkURI(url);
|
||||
|
||||
auto gitInfo = exportGit(state.store, url, ref, rev, name);
|
||||
if (evalSettings.pureEval && !rev)
|
||||
throw Error("in pure evaluation mode, 'fetchGit' requires a Git revision");
|
||||
|
||||
fetchers::Attrs attrs;
|
||||
attrs.insert_or_assign("type", "git");
|
||||
attrs.insert_or_assign("url", url.find("://") != std::string::npos ? url : "file://" + url);
|
||||
if (ref) attrs.insert_or_assign("ref", *ref);
|
||||
if (rev) attrs.insert_or_assign("rev", rev->gitRev());
|
||||
if (fetchSubmodules) attrs.insert_or_assign("submodules", true);
|
||||
auto input = fetchers::inputFromAttrs(attrs);
|
||||
|
||||
// FIXME: use name?
|
||||
auto [tree, input2] = input->fetchTree(state.store);
|
||||
|
||||
state.mkAttrs(v, 8);
|
||||
mkString(*state.allocAttr(v, state.sOutPath), gitInfo.storePath, PathSet({gitInfo.storePath}));
|
||||
mkString(*state.allocAttr(v, state.symbols.create("rev")), gitInfo.rev);
|
||||
mkString(*state.allocAttr(v, state.symbols.create("shortRev")), gitInfo.shortRev);
|
||||
mkInt(*state.allocAttr(v, state.symbols.create("revCount")), gitInfo.revCount);
|
||||
auto storePath = state.store->printStorePath(tree.storePath);
|
||||
mkString(*state.allocAttr(v, state.sOutPath), storePath, PathSet({storePath}));
|
||||
// Backward compatibility: set 'rev' to
|
||||
// 0000000000000000000000000000000000000000 for a dirty tree.
|
||||
auto rev2 = input2->getRev().value_or(Hash(htSHA1));
|
||||
mkString(*state.allocAttr(v, state.symbols.create("rev")), rev2.gitRev());
|
||||
mkString(*state.allocAttr(v, state.symbols.create("shortRev")), rev2.gitShortRev());
|
||||
// Backward compatibility: set 'revCount' to 0 for a dirty tree.
|
||||
mkInt(*state.allocAttr(v, state.symbols.create("revCount")),
|
||||
tree.info.revCount.value_or(0));
|
||||
mkBool(*state.allocAttr(v, state.symbols.create("submodules")), fetchSubmodules);
|
||||
v.attrs->sort();
|
||||
|
||||
if (state.allowedPaths)
|
||||
state.allowedPaths->insert(state.store->toRealPath(gitInfo.storePath));
|
||||
state.allowedPaths->insert(tree.actualPath);
|
||||
}
|
||||
|
||||
static RegisterPrimOp r("fetchGit", 1, prim_fetchGit);
|
||||
|
|
|
@ -1,174 +1,18 @@
|
|||
#include "primops.hh"
|
||||
#include "eval-inline.hh"
|
||||
#include "download.hh"
|
||||
#include "store-api.hh"
|
||||
#include "pathlocks.hh"
|
||||
|
||||
#include <sys/time.h>
|
||||
#include "fetchers.hh"
|
||||
#include "url.hh"
|
||||
|
||||
#include <regex>
|
||||
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
using namespace std::string_literals;
|
||||
|
||||
namespace nix {
|
||||
|
||||
struct HgInfo
|
||||
{
|
||||
Path storePath;
|
||||
std::string branch;
|
||||
std::string rev;
|
||||
uint64_t revCount = 0;
|
||||
};
|
||||
|
||||
std::regex commitHashRegex("^[0-9a-fA-F]{40}$");
|
||||
|
||||
HgInfo exportMercurial(ref<Store> store, const std::string & uri,
|
||||
std::string rev, const std::string & name)
|
||||
{
|
||||
if (evalSettings.pureEval && rev == "")
|
||||
throw Error("in pure evaluation mode, 'fetchMercurial' requires a Mercurial revision");
|
||||
|
||||
if (rev == "" && hasPrefix(uri, "/") && pathExists(uri + "/.hg")) {
|
||||
|
||||
bool clean = runProgram("hg", true, { "status", "-R", uri, "--modified", "--added", "--removed" }) == "";
|
||||
|
||||
if (!clean) {
|
||||
|
||||
/* This is an unclean working tree. So copy all tracked
|
||||
files. */
|
||||
|
||||
printTalkative("copying unclean Mercurial working tree '%s'", uri);
|
||||
|
||||
HgInfo hgInfo;
|
||||
hgInfo.rev = "0000000000000000000000000000000000000000";
|
||||
hgInfo.branch = chomp(runProgram("hg", true, { "branch", "-R", uri }));
|
||||
|
||||
auto files = tokenizeString<std::set<std::string>>(
|
||||
runProgram("hg", true, { "status", "-R", uri, "--clean", "--modified", "--added", "--no-status", "--print0" }), "\0"s);
|
||||
|
||||
PathFilter filter = [&](const Path & p) -> bool {
|
||||
assert(hasPrefix(p, uri));
|
||||
std::string file(p, uri.size() + 1);
|
||||
|
||||
auto st = lstat(p);
|
||||
|
||||
if (S_ISDIR(st.st_mode)) {
|
||||
auto prefix = file + "/";
|
||||
auto i = files.lower_bound(prefix);
|
||||
return i != files.end() && hasPrefix(*i, prefix);
|
||||
}
|
||||
|
||||
return files.count(file);
|
||||
};
|
||||
|
||||
hgInfo.storePath = store->printStorePath(store->addToStore("source", uri, true, htSHA256, filter));
|
||||
|
||||
return hgInfo;
|
||||
}
|
||||
}
|
||||
|
||||
if (rev == "") rev = "default";
|
||||
|
||||
Path cacheDir = fmt("%s/nix/hg/%s", getCacheDir(), hashString(htSHA256, uri).to_string(Base32, false));
|
||||
|
||||
Path stampFile = fmt("%s/.hg/%s.stamp", cacheDir, hashString(htSHA512, rev).to_string(Base32, false));
|
||||
|
||||
/* If we haven't pulled this repo less than ‘tarball-ttl’ seconds,
|
||||
do so now. */
|
||||
time_t now = time(0);
|
||||
struct stat st;
|
||||
if (stat(stampFile.c_str(), &st) != 0 ||
|
||||
(uint64_t) st.st_mtime + settings.tarballTtl <= (uint64_t) now)
|
||||
{
|
||||
/* Except that if this is a commit hash that we already have,
|
||||
we don't have to pull again. */
|
||||
if (!(std::regex_match(rev, commitHashRegex)
|
||||
&& pathExists(cacheDir)
|
||||
&& runProgram(
|
||||
RunOptions("hg", { "log", "-R", cacheDir, "-r", rev, "--template", "1" })
|
||||
.killStderr(true)).second == "1"))
|
||||
{
|
||||
Activity act(*logger, lvlTalkative, actUnknown, fmt("fetching Mercurial repository '%s'", uri));
|
||||
|
||||
if (pathExists(cacheDir)) {
|
||||
try {
|
||||
runProgram("hg", true, { "pull", "-R", cacheDir, "--", uri });
|
||||
}
|
||||
catch (ExecError & e) {
|
||||
string transJournal = cacheDir + "/.hg/store/journal";
|
||||
/* hg throws "abandoned transaction" error only if this file exists */
|
||||
if (pathExists(transJournal)) {
|
||||
runProgram("hg", true, { "recover", "-R", cacheDir });
|
||||
runProgram("hg", true, { "pull", "-R", cacheDir, "--", uri });
|
||||
} else {
|
||||
throw ExecError(e.status, fmt("'hg pull' %s", statusToString(e.status)));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
createDirs(dirOf(cacheDir));
|
||||
runProgram("hg", true, { "clone", "--noupdate", "--", uri, cacheDir });
|
||||
}
|
||||
}
|
||||
|
||||
writeFile(stampFile, "");
|
||||
}
|
||||
|
||||
auto tokens = tokenizeString<std::vector<std::string>>(
|
||||
runProgram("hg", true, { "log", "-R", cacheDir, "-r", rev, "--template", "{node} {rev} {branch}" }));
|
||||
assert(tokens.size() == 3);
|
||||
|
||||
HgInfo hgInfo;
|
||||
hgInfo.rev = tokens[0];
|
||||
hgInfo.revCount = std::stoull(tokens[1]);
|
||||
hgInfo.branch = tokens[2];
|
||||
|
||||
std::string storeLinkName = hashString(htSHA512, name + std::string("\0"s) + hgInfo.rev).to_string(Base32, false);
|
||||
Path storeLink = fmt("%s/.hg/%s.link", cacheDir, storeLinkName);
|
||||
|
||||
try {
|
||||
auto json = nlohmann::json::parse(readFile(storeLink));
|
||||
|
||||
assert(json["name"] == name && json["rev"] == hgInfo.rev);
|
||||
|
||||
hgInfo.storePath = json["storePath"];
|
||||
|
||||
if (store->isValidPath(store->parseStorePath(hgInfo.storePath))) {
|
||||
printTalkative("using cached Mercurial store path '%s'", hgInfo.storePath);
|
||||
return hgInfo;
|
||||
}
|
||||
|
||||
} catch (SysError & e) {
|
||||
if (e.errNo != ENOENT) throw;
|
||||
}
|
||||
|
||||
Path tmpDir = createTempDir();
|
||||
AutoDelete delTmpDir(tmpDir, true);
|
||||
|
||||
runProgram("hg", true, { "archive", "-R", cacheDir, "-r", rev, tmpDir });
|
||||
|
||||
deletePath(tmpDir + "/.hg_archival.txt");
|
||||
|
||||
hgInfo.storePath = store->printStorePath(store->addToStore(name, tmpDir));
|
||||
|
||||
nlohmann::json json;
|
||||
json["storePath"] = hgInfo.storePath;
|
||||
json["uri"] = uri;
|
||||
json["name"] = name;
|
||||
json["branch"] = hgInfo.branch;
|
||||
json["rev"] = hgInfo.rev;
|
||||
json["revCount"] = hgInfo.revCount;
|
||||
|
||||
writeFile(storeLink, json.dump());
|
||||
|
||||
return hgInfo;
|
||||
}
|
||||
|
||||
static void prim_fetchMercurial(EvalState & state, const Pos & pos, Value * * args, Value & v)
|
||||
{
|
||||
std::string url;
|
||||
std::string rev;
|
||||
std::optional<Hash> rev;
|
||||
std::optional<std::string> ref;
|
||||
std::string name = "source";
|
||||
PathSet context;
|
||||
|
||||
|
@ -182,8 +26,15 @@ static void prim_fetchMercurial(EvalState & state, const Pos & pos, Value * * ar
|
|||
string n(attr.name);
|
||||
if (n == "url")
|
||||
url = state.coerceToString(*attr.pos, *attr.value, context, false, false);
|
||||
else if (n == "rev")
|
||||
rev = state.forceStringNoCtx(*attr.value, *attr.pos);
|
||||
else if (n == "rev") {
|
||||
// Ugly: unlike fetchGit, here the "rev" attribute can
|
||||
// be both a revision or a branch/tag name.
|
||||
auto value = state.forceStringNoCtx(*attr.value, *attr.pos);
|
||||
if (std::regex_match(value, revRegex))
|
||||
rev = Hash(value, htSHA1);
|
||||
else
|
||||
ref = value;
|
||||
}
|
||||
else if (n == "name")
|
||||
name = state.forceStringNoCtx(*attr.value, *attr.pos);
|
||||
else
|
||||
|
@ -200,18 +51,35 @@ static void prim_fetchMercurial(EvalState & state, const Pos & pos, Value * * ar
|
|||
// whitelist. Ah well.
|
||||
state.checkURI(url);
|
||||
|
||||
auto hgInfo = exportMercurial(state.store, url, rev, name);
|
||||
if (evalSettings.pureEval && !rev)
|
||||
throw Error("in pure evaluation mode, 'fetchMercurial' requires a Mercurial revision");
|
||||
|
||||
fetchers::Attrs attrs;
|
||||
attrs.insert_or_assign("type", "hg");
|
||||
attrs.insert_or_assign("url", url.find("://") != std::string::npos ? url : "file://" + url);
|
||||
if (ref) attrs.insert_or_assign("ref", *ref);
|
||||
if (rev) attrs.insert_or_assign("rev", rev->gitRev());
|
||||
auto input = fetchers::inputFromAttrs(attrs);
|
||||
|
||||
// FIXME: use name
|
||||
auto [tree, input2] = input->fetchTree(state.store);
|
||||
|
||||
state.mkAttrs(v, 8);
|
||||
mkString(*state.allocAttr(v, state.sOutPath), hgInfo.storePath, PathSet({hgInfo.storePath}));
|
||||
mkString(*state.allocAttr(v, state.symbols.create("branch")), hgInfo.branch);
|
||||
mkString(*state.allocAttr(v, state.symbols.create("rev")), hgInfo.rev);
|
||||
mkString(*state.allocAttr(v, state.symbols.create("shortRev")), std::string(hgInfo.rev, 0, 12));
|
||||
mkInt(*state.allocAttr(v, state.symbols.create("revCount")), hgInfo.revCount);
|
||||
auto storePath = state.store->printStorePath(tree.storePath);
|
||||
mkString(*state.allocAttr(v, state.sOutPath), storePath, PathSet({storePath}));
|
||||
if (input2->getRef())
|
||||
mkString(*state.allocAttr(v, state.symbols.create("branch")), *input2->getRef());
|
||||
// Backward compatibility: set 'rev' to
|
||||
// 0000000000000000000000000000000000000000 for a dirty tree.
|
||||
auto rev2 = input2->getRev().value_or(Hash(htSHA1));
|
||||
mkString(*state.allocAttr(v, state.symbols.create("rev")), rev2.gitRev());
|
||||
mkString(*state.allocAttr(v, state.symbols.create("shortRev")), std::string(rev2.gitRev(), 0, 12));
|
||||
if (tree.info.revCount)
|
||||
mkInt(*state.allocAttr(v, state.symbols.create("revCount")), *tree.info.revCount);
|
||||
v.attrs->sort();
|
||||
|
||||
if (state.allowedPaths)
|
||||
state.allowedPaths->insert(state.store->toRealPath(hgInfo.storePath));
|
||||
state.allowedPaths->insert(tree.actualPath);
|
||||
}
|
||||
|
||||
static RegisterPrimOp r("fetchMercurial", 1, prim_fetchMercurial);
|
||||
|
|
165
src/libexpr/primops/fetchTree.cc
Normal file
@@ -0,0 +1,165 @@
#include "primops.hh"
#include "eval-inline.hh"
#include "store-api.hh"
#include "fetchers.hh"
#include "filetransfer.hh"

#include <ctime>
#include <iomanip>

namespace nix {

void emitTreeAttrs(
    EvalState & state,
    const fetchers::Tree & tree,
    std::shared_ptr<const fetchers::Input> input,
    Value & v)
{
    state.mkAttrs(v, 8);

    auto storePath = state.store->printStorePath(tree.storePath);

    mkString(*state.allocAttr(v, state.sOutPath), storePath, PathSet({storePath}));

    assert(tree.info.narHash);
    mkString(*state.allocAttr(v, state.symbols.create("narHash")),
        tree.info.narHash.to_string(SRI));

    if (input->getRev()) {
        mkString(*state.allocAttr(v, state.symbols.create("rev")), input->getRev()->gitRev());
        mkString(*state.allocAttr(v, state.symbols.create("shortRev")), input->getRev()->gitShortRev());
    }

    if (tree.info.revCount)
        mkInt(*state.allocAttr(v, state.symbols.create("revCount")), *tree.info.revCount);

    if (tree.info.lastModified)
        mkString(*state.allocAttr(v, state.symbols.create("lastModified")),
            fmt("%s", std::put_time(std::gmtime(&*tree.info.lastModified), "%Y%m%d%H%M%S")));

    v.attrs->sort();
}

static void prim_fetchTree(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    settings.requireExperimentalFeature("flakes");

    std::shared_ptr<const fetchers::Input> input;
    PathSet context;

    state.forceValue(*args[0]);

    if (args[0]->type == tAttrs) {
        state.forceAttrs(*args[0], pos);

        fetchers::Attrs attrs;

        for (auto & attr : *args[0]->attrs) {
            state.forceValue(*attr.value);
            if (attr.value->type == tString)
                attrs.emplace(attr.name, attr.value->string.s);
            else if (attr.value->type == tBool)
                attrs.emplace(attr.name, attr.value->boolean);
            else
                throw TypeError("fetchTree argument '%s' is %s while a string or Boolean is expected",
                    attr.name, showType(*attr.value));
        }

        if (!attrs.count("type"))
            throw Error("attribute 'type' is missing in call to 'fetchTree', at %s", pos);

        input = fetchers::inputFromAttrs(attrs);
    } else
        input = fetchers::inputFromURL(state.coerceToString(pos, *args[0], context, false, false));

    if (evalSettings.pureEval && !input->isImmutable())
        throw Error("in pure evaluation mode, 'fetchTree' requires an immutable input");

    // FIXME: use fetchOrSubstituteTree
    auto [tree, input2] = input->fetchTree(state.store);

    if (state.allowedPaths)
        state.allowedPaths->insert(tree.actualPath);

    emitTreeAttrs(state, tree, input2, v);
}

static RegisterPrimOp r("fetchTree", 1, prim_fetchTree);

static void fetch(EvalState & state, const Pos & pos, Value * * args, Value & v,
    const string & who, bool unpack, std::string name)
{
    std::optional<std::string> url;
    std::optional<Hash> expectedHash;

    state.forceValue(*args[0]);

    if (args[0]->type == tAttrs) {

        state.forceAttrs(*args[0], pos);

        for (auto & attr : *args[0]->attrs) {
            string n(attr.name);
            if (n == "url")
                url = state.forceStringNoCtx(*attr.value, *attr.pos);
            else if (n == "sha256")
                expectedHash = Hash(state.forceStringNoCtx(*attr.value, *attr.pos), htSHA256);
            else if (n == "name")
                name = state.forceStringNoCtx(*attr.value, *attr.pos);
            else
                throw EvalError("unsupported argument '%s' to '%s', at %s",
                    attr.name, who, *attr.pos);
        }

        if (!url)
            throw EvalError("'url' argument required, at %s", pos);

    } else
        url = state.forceStringNoCtx(*args[0], pos);

    url = resolveUri(*url);

    state.checkURI(*url);

    if (name == "")
        name = baseNameOf(*url);

    if (evalSettings.pureEval && !expectedHash)
        throw Error("in pure evaluation mode, '%s' requires a 'sha256' argument", who);

    auto storePath =
        unpack
        ? fetchers::downloadTarball(state.store, *url, name, (bool) expectedHash).storePath
        : fetchers::downloadFile(state.store, *url, name, (bool) expectedHash).storePath;

    auto path = state.store->toRealPath(storePath);

    if (expectedHash) {
        auto hash = unpack
            ? state.store->queryPathInfo(storePath)->narHash
            : hashFile(htSHA256, path);
        if (hash != *expectedHash)
            throw Error((unsigned int) 102, "hash mismatch in file downloaded from '%s':\n wanted: %s\n got: %s",
                *url, expectedHash->to_string(), hash.to_string());
    }

    if (state.allowedPaths)
        state.allowedPaths->insert(path);

    mkString(v, path, PathSet({path}));
}

static void prim_fetchurl(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    fetch(state, pos, args, v, "fetchurl", false, "");
}

static void prim_fetchTarball(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    fetch(state, pos, args, v, "fetchTarball", true, "source");
}

static RegisterPrimOp r2("__fetchurl", 1, prim_fetchurl);
static RegisterPrimOp r3("fetchTarball", 1, prim_fetchTarball);

}
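prim_fetchTree, prim_fetchurl and prim_fetchTarball above are thin wrappers over the new libfetchers API (inputFromURL/inputFromAttrs, Input::fetchTree, downloadFile, downloadTarball). A hedged sketch of driving that API directly from C++, outside the evaluator; the URL is only an example and error handling is omitted:

#include "fetchers.hh"
#include "store-api.hh"

#include <iostream>

using namespace nix;

// Sketch of using the fetchers API directly, mirroring what prim_fetchTree
// does once the Nix-level arguments have been turned into an Input.
void fetchExample()
{
    auto store = openStore();

    auto input = fetchers::inputFromURL("git+https://github.com/NixOS/patchelf");

    // fetchTree() materializes the tree in the store and returns the "locked"
    // input, e.g. with the resolved revision filled in.
    auto [tree, lockedInput] = input->fetchTree(store);

    std::cout << store->printStorePath(tree.storePath) << "\n";
    if (auto rev = lockedInput->getRev())
        std::cout << rev->gitRev() << "\n";
}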
@@ -1,7 +1,7 @@
#include "primops.hh"
#include "eval-inline.hh"

#include "cpptoml/cpptoml.h"
#include "../../cpptoml/cpptoml.h"

namespace nix {
@@ -253,12 +253,17 @@ void mkPath(Value & v, const char * s);


#if HAVE_BOEHMGC
typedef std::vector<Value *, gc_allocator<Value *> > ValueVector;
typedef std::map<Symbol, Value *, std::less<Symbol>, gc_allocator<std::pair<const Symbol, Value *> > > ValueMap;
typedef std::vector<Value *, traceable_allocator<Value *> > ValueVector;
typedef std::map<Symbol, Value *, std::less<Symbol>, traceable_allocator<std::pair<const Symbol, Value *> > > ValueMap;
#else
typedef std::vector<Value *> ValueVector;
typedef std::map<Symbol, Value *> ValueMap;
#endif


/* A value allocated in traceable memory. */
typedef std::shared_ptr<Value *> RootValue;

RootValue allocRootValue(Value * v);

}
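The new RootValue alias and allocRootValue declaration give callers a way to keep a Value * alive across Boehm GC collections by storing the pointer itself in traceable memory. The definition is not part of this hunk; a minimal sketch consistent with the declarations above could look like this (the non-GC branch is an assumption):

// Sketch only: allocate the cell holding the Value* with the GC-traceable
// allocator, so the collector sees the pointer and keeps the Value alive.
RootValue allocRootValue(Value * v)
{
#if HAVE_BOEHMGC
    return std::allocate_shared<Value *>(traceable_allocator<Value *>(), v);
#else
    return std::make_shared<Value *>(v);
#endif
}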
107
src/libfetchers/attrs.cc
Normal file
@ -0,0 +1,107 @@
|
|||
#include "attrs.hh"
|
||||
#include "fetchers.hh"
|
||||
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
namespace nix::fetchers {
|
||||
|
||||
Attrs jsonToAttrs(const nlohmann::json & json)
|
||||
{
|
||||
Attrs attrs;
|
||||
|
||||
for (auto & i : json.items()) {
|
||||
if (i.value().is_number())
|
||||
attrs.emplace(i.key(), i.value().get<int64_t>());
|
||||
else if (i.value().is_string())
|
||||
attrs.emplace(i.key(), i.value().get<std::string>());
|
||||
else if (i.value().is_boolean())
|
||||
attrs.emplace(i.key(), i.value().get<bool>());
|
||||
else
|
||||
throw Error("unsupported input attribute type in lock file");
|
||||
}
|
||||
|
||||
return attrs;
|
||||
}
|
||||
|
||||
nlohmann::json attrsToJson(const Attrs & attrs)
|
||||
{
|
||||
nlohmann::json json;
|
||||
for (auto & attr : attrs) {
|
||||
if (auto v = std::get_if<int64_t>(&attr.second)) {
|
||||
json[attr.first] = *v;
|
||||
} else if (auto v = std::get_if<std::string>(&attr.second)) {
|
||||
json[attr.first] = *v;
|
||||
} else if (auto v = std::get_if<Explicit<bool>>(&attr.second)) {
|
||||
json[attr.first] = v->t;
|
||||
} else abort();
|
||||
}
|
||||
return json;
|
||||
}
|
||||
|
||||
std::optional<std::string> maybeGetStrAttr(const Attrs & attrs, const std::string & name)
|
||||
{
|
||||
auto i = attrs.find(name);
|
||||
if (i == attrs.end()) return {};
|
||||
if (auto v = std::get_if<std::string>(&i->second))
|
||||
return *v;
|
||||
throw Error("input attribute '%s' is not a string %s", name, attrsToJson(attrs).dump());
|
||||
}
|
||||
|
||||
std::string getStrAttr(const Attrs & attrs, const std::string & name)
|
||||
{
|
||||
auto s = maybeGetStrAttr(attrs, name);
|
||||
if (!s)
|
||||
throw Error("input attribute '%s' is missing", name);
|
||||
return *s;
|
||||
}
|
||||
|
||||
std::optional<int64_t> maybeGetIntAttr(const Attrs & attrs, const std::string & name)
|
||||
{
|
||||
auto i = attrs.find(name);
|
||||
if (i == attrs.end()) return {};
|
||||
if (auto v = std::get_if<int64_t>(&i->second))
|
||||
return *v;
|
||||
throw Error("input attribute '%s' is not an integer", name);
|
||||
}
|
||||
|
||||
int64_t getIntAttr(const Attrs & attrs, const std::string & name)
|
||||
{
|
||||
auto s = maybeGetIntAttr(attrs, name);
|
||||
if (!s)
|
||||
throw Error("input attribute '%s' is missing", name);
|
||||
return *s;
|
||||
}
|
||||
|
||||
std::optional<bool> maybeGetBoolAttr(const Attrs & attrs, const std::string & name)
|
||||
{
|
||||
auto i = attrs.find(name);
|
||||
if (i == attrs.end()) return {};
|
||||
if (auto v = std::get_if<int64_t>(&i->second))
|
||||
return *v;
|
||||
throw Error("input attribute '%s' is not a Boolean", name);
|
||||
}
|
||||
|
||||
bool getBoolAttr(const Attrs & attrs, const std::string & name)
|
||||
{
|
||||
auto s = maybeGetBoolAttr(attrs, name);
|
||||
if (!s)
|
||||
throw Error("input attribute '%s' is missing", name);
|
||||
return *s;
|
||||
}
|
||||
|
||||
std::map<std::string, std::string> attrsToQuery(const Attrs & attrs)
|
||||
{
|
||||
std::map<std::string, std::string> query;
|
||||
for (auto & attr : attrs) {
|
||||
if (auto v = std::get_if<int64_t>(&attr.second)) {
|
||||
query.insert_or_assign(attr.first, fmt("%d", *v));
|
||||
} else if (auto v = std::get_if<std::string>(&attr.second)) {
|
||||
query.insert_or_assign(attr.first, *v);
|
||||
} else if (auto v = std::get_if<Explicit<bool>>(&attr.second)) {
|
||||
query.insert_or_assign(attr.first, v->t ? "1" : "0");
|
||||
} else abort();
|
||||
}
|
||||
return query;
|
||||
}
|
||||
|
||||
}
|
39
src/libfetchers/attrs.hh
Normal file
@@ -0,0 +1,39 @@
#pragma once

#include "types.hh"

#include <variant>

#include <nlohmann/json_fwd.hpp>

namespace nix::fetchers {

/* Wrap bools to prevent string literals (i.e. 'char *') from being
   cast to a bool in Attr. */
template<typename T>
struct Explicit {
    T t;
};

typedef std::variant<std::string, int64_t, Explicit<bool>> Attr;
typedef std::map<std::string, Attr> Attrs;

Attrs jsonToAttrs(const nlohmann::json & json);

nlohmann::json attrsToJson(const Attrs & attrs);

std::optional<std::string> maybeGetStrAttr(const Attrs & attrs, const std::string & name);

std::string getStrAttr(const Attrs & attrs, const std::string & name);

std::optional<int64_t> maybeGetIntAttr(const Attrs & attrs, const std::string & name);

int64_t getIntAttr(const Attrs & attrs, const std::string & name);

std::optional<bool> maybeGetBoolAttr(const Attrs & attrs, const std::string & name);

bool getBoolAttr(const Attrs & attrs, const std::string & name);

std::map<std::string, std::string> attrsToQuery(const Attrs & attrs);

}
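The comment on Explicit is easy to miss: with a plain bool alternative in Attr, a string literal passed as an attribute value could be picked up through the char * to bool conversion instead of becoming a std::string. A small stand-alone illustration of the wrapper's effect (this example is not part of the source tree):

// Stand-alone illustration of why Attr wraps bool in Explicit<bool>.
#include <cassert>
#include <cstdint>
#include <string>
#include <variant>

template<typename T> struct Explicit { T t; };

// With Explicit<bool>, a string literal can only become the string
// alternative, and booleans have to be spelled out explicitly.
using Attr = std::variant<std::string, int64_t, Explicit<bool>>;

int main()
{
    Attr ref = "master";                 // unambiguously a std::string
    Attr shallow = Explicit<bool>{true}; // booleans are always intentional

    assert(std::holds_alternative<std::string>(ref));
    assert(std::holds_alternative<Explicit<bool>>(shallow));
}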
121
src/libfetchers/cache.cc
Normal file
@ -0,0 +1,121 @@
|
|||
#include "cache.hh"
|
||||
#include "sqlite.hh"
|
||||
#include "sync.hh"
|
||||
#include "store-api.hh"
|
||||
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
namespace nix::fetchers {
|
||||
|
||||
static const char * schema = R"sql(
|
||||
|
||||
create table if not exists Cache (
|
||||
input text not null,
|
||||
info text not null,
|
||||
path text not null,
|
||||
immutable integer not null,
|
||||
timestamp integer not null,
|
||||
primary key (input)
|
||||
);
|
||||
)sql";
|
||||
|
||||
struct CacheImpl : Cache
|
||||
{
|
||||
struct State
|
||||
{
|
||||
SQLite db;
|
||||
SQLiteStmt add, lookup;
|
||||
};
|
||||
|
||||
Sync<State> _state;
|
||||
|
||||
CacheImpl()
|
||||
{
|
||||
auto state(_state.lock());
|
||||
|
||||
auto dbPath = getCacheDir() + "/nix/fetcher-cache-v1.sqlite";
|
||||
createDirs(dirOf(dbPath));
|
||||
|
||||
state->db = SQLite(dbPath);
|
||||
state->db.isCache();
|
||||
state->db.exec(schema);
|
||||
|
||||
state->add.create(state->db,
|
||||
"insert or replace into Cache(input, info, path, immutable, timestamp) values (?, ?, ?, ?, ?)");
|
||||
|
||||
state->lookup.create(state->db,
|
||||
"select info, path, immutable, timestamp from Cache where input = ?");
|
||||
}
|
||||
|
||||
void add(
|
||||
ref<Store> store,
|
||||
const Attrs & inAttrs,
|
||||
const Attrs & infoAttrs,
|
||||
const StorePath & storePath,
|
||||
bool immutable) override
|
||||
{
|
||||
_state.lock()->add.use()
|
||||
(attrsToJson(inAttrs).dump())
|
||||
(attrsToJson(infoAttrs).dump())
|
||||
(store->printStorePath(storePath))
|
||||
(immutable)
|
||||
(time(0)).exec();
|
||||
}
|
||||
|
||||
std::optional<std::pair<Attrs, StorePath>> lookup(
|
||||
ref<Store> store,
|
||||
const Attrs & inAttrs) override
|
||||
{
|
||||
if (auto res = lookupExpired(store, inAttrs)) {
|
||||
if (!res->expired)
|
||||
return std::make_pair(std::move(res->infoAttrs), std::move(res->storePath));
|
||||
debug("ignoring expired cache entry '%s'",
|
||||
attrsToJson(inAttrs).dump());
|
||||
}
|
||||
return {};
|
||||
}
|
||||
|
||||
std::optional<Result> lookupExpired(
|
||||
ref<Store> store,
|
||||
const Attrs & inAttrs) override
|
||||
{
|
||||
auto state(_state.lock());
|
||||
|
||||
auto inAttrsJson = attrsToJson(inAttrs).dump();
|
||||
|
||||
auto stmt(state->lookup.use()(inAttrsJson));
|
||||
if (!stmt.next()) {
|
||||
debug("did not find cache entry for '%s'", inAttrsJson);
|
||||
return {};
|
||||
}
|
||||
|
||||
auto infoJson = stmt.getStr(0);
|
||||
auto storePath = store->parseStorePath(stmt.getStr(1));
|
||||
auto immutable = stmt.getInt(2) != 0;
|
||||
auto timestamp = stmt.getInt(3);
|
||||
|
||||
store->addTempRoot(storePath);
|
||||
if (!store->isValidPath(storePath)) {
|
||||
// FIXME: we could try to substitute 'storePath'.
|
||||
debug("ignoring disappeared cache entry '%s'", inAttrsJson);
|
||||
return {};
|
||||
}
|
||||
|
||||
debug("using cache entry '%s' -> '%s', '%s'",
|
||||
inAttrsJson, infoJson, store->printStorePath(storePath));
|
||||
|
||||
return Result {
|
||||
.expired = !immutable && (settings.tarballTtl.get() == 0 || timestamp + settings.tarballTtl < time(0)),
|
||||
.infoAttrs = jsonToAttrs(nlohmann::json::parse(infoJson)),
|
||||
.storePath = std::move(storePath)
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
ref<Cache> getCache()
|
||||
{
|
||||
static auto cache = std::make_shared<CacheImpl>();
|
||||
return ref<Cache>(cache);
|
||||
}
|
||||
|
||||
}
|
34
src/libfetchers/cache.hh
Normal file
@@ -0,0 +1,34 @@
#pragma once

#include "fetchers.hh"

namespace nix::fetchers {

struct Cache
{
    virtual void add(
        ref<Store> store,
        const Attrs & inAttrs,
        const Attrs & infoAttrs,
        const StorePath & storePath,
        bool immutable) = 0;

    virtual std::optional<std::pair<Attrs, StorePath>> lookup(
        ref<Store> store,
        const Attrs & inAttrs) = 0;

    struct Result
    {
        bool expired = false;
        Attrs infoAttrs;
        StorePath storePath;
    };

    virtual std::optional<Result> lookupExpired(
        ref<Store> store,
        const Attrs & inAttrs) = 0;
};

ref<Cache> getCache();

}
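Cache is keyed on the serialized input attributes: fetchers first ask the cache and only hit the network on a miss or an expired entry, then record what they fetched. A hedged sketch of that lookup-then-add pattern, using only the interface declared above (expensiveFetch is a hypothetical stand-in for the real fetch):

#include "cache.hh"
#include "store-api.hh"

#include <utility>

using namespace nix;
using namespace nix::fetchers;

// Hypothetical stand-in for the network fetch performed on a cache miss.
std::pair<Attrs, StorePath> expensiveFetch(ref<Store> store, const Attrs & inAttrs);

StorePath fetchCached(ref<Store> store, const Attrs & inAttrs)
{
    // Fast path: reuse a non-expired entry keyed on these input attributes.
    if (auto res = getCache()->lookup(store, inAttrs))
        return std::move(res->second);

    // Miss or expired entry: do the real work, then record it.
    // immutable = true means the entry never expires via tarball-ttl.
    auto [infoAttrs, storePath] = expensiveFetch(store, inAttrs);
    getCache()->add(store, inAttrs, infoAttrs, storePath, true);
    return storePath;
}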
75
src/libfetchers/fetchers.cc
Normal file
@ -0,0 +1,75 @@
|
|||
#include "fetchers.hh"
|
||||
#include "store-api.hh"
|
||||
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
namespace nix::fetchers {
|
||||
|
||||
std::unique_ptr<std::vector<std::unique_ptr<InputScheme>>> inputSchemes = nullptr;
|
||||
|
||||
void registerInputScheme(std::unique_ptr<InputScheme> && inputScheme)
|
||||
{
|
||||
if (!inputSchemes) inputSchemes = std::make_unique<std::vector<std::unique_ptr<InputScheme>>>();
|
||||
inputSchemes->push_back(std::move(inputScheme));
|
||||
}
|
||||
|
||||
std::unique_ptr<Input> inputFromURL(const ParsedURL & url)
|
||||
{
|
||||
for (auto & inputScheme : *inputSchemes) {
|
||||
auto res = inputScheme->inputFromURL(url);
|
||||
if (res) return res;
|
||||
}
|
||||
throw Error("input '%s' is unsupported", url.url);
|
||||
}
|
||||
|
||||
std::unique_ptr<Input> inputFromURL(const std::string & url)
|
||||
{
|
||||
return inputFromURL(parseURL(url));
|
||||
}
|
||||
|
||||
std::unique_ptr<Input> inputFromAttrs(const Attrs & attrs)
|
||||
{
|
||||
auto attrs2(attrs);
|
||||
attrs2.erase("narHash");
|
||||
for (auto & inputScheme : *inputSchemes) {
|
||||
auto res = inputScheme->inputFromAttrs(attrs2);
|
||||
if (res) {
|
||||
if (auto narHash = maybeGetStrAttr(attrs, "narHash"))
|
||||
// FIXME: require SRI hash.
|
||||
res->narHash = Hash(*narHash);
|
||||
return res;
|
||||
}
|
||||
}
|
||||
throw Error("input '%s' is unsupported", attrsToJson(attrs));
|
||||
}
|
||||
|
||||
Attrs Input::toAttrs() const
|
||||
{
|
||||
auto attrs = toAttrsInternal();
|
||||
if (narHash)
|
||||
attrs.emplace("narHash", narHash->to_string(SRI));
|
||||
attrs.emplace("type", type());
|
||||
return attrs;
|
||||
}
|
||||
|
||||
std::pair<Tree, std::shared_ptr<const Input>> Input::fetchTree(ref<Store> store) const
|
||||
{
|
||||
auto [tree, input] = fetchTreeInternal(store);
|
||||
|
||||
if (tree.actualPath == "")
|
||||
tree.actualPath = store->toRealPath(tree.storePath);
|
||||
|
||||
if (!tree.info.narHash)
|
||||
tree.info.narHash = store->queryPathInfo(tree.storePath)->narHash;
|
||||
|
||||
if (input->narHash)
|
||||
assert(input->narHash == tree.info.narHash);
|
||||
|
||||
if (narHash && narHash != input->narHash)
|
||||
throw Error("NAR hash mismatch in input '%s' (%s), expected '%s', got '%s'",
|
||||
to_string(), tree.actualPath, narHash->to_string(SRI), input->narHash->to_string(SRI));
|
||||
|
||||
return {std::move(tree), input};
|
||||
}
|
||||
|
||||
}
|
103
src/libfetchers/fetchers.hh
Normal file
@ -0,0 +1,103 @@
|
|||
#pragma once
|
||||
|
||||
#include "types.hh"
|
||||
#include "hash.hh"
|
||||
#include "path.hh"
|
||||
#include "tree-info.hh"
|
||||
#include "attrs.hh"
|
||||
#include "url.hh"
|
||||
|
||||
#include <memory>
|
||||
|
||||
namespace nix { class Store; }
|
||||
|
||||
namespace nix::fetchers {
|
||||
|
||||
struct Input;
|
||||
|
||||
struct Tree
|
||||
{
|
||||
Path actualPath;
|
||||
StorePath storePath;
|
||||
TreeInfo info;
|
||||
};
|
||||
|
||||
struct Input : std::enable_shared_from_this<Input>
|
||||
{
|
||||
std::optional<Hash> narHash; // FIXME: implement
|
||||
|
||||
virtual std::string type() const = 0;
|
||||
|
||||
virtual ~Input() { }
|
||||
|
||||
virtual bool operator ==(const Input & other) const { return false; }
|
||||
|
||||
/* Check whether this is a "direct" input, that is, not
|
||||
one that goes through a registry. */
|
||||
virtual bool isDirect() const { return true; }
|
||||
|
||||
/* Check whether this is an "immutable" input, that is,
|
||||
one that contains a commit hash or content hash. */
|
||||
virtual bool isImmutable() const { return (bool) narHash; }
|
||||
|
||||
virtual bool contains(const Input & other) const { return false; }
|
||||
|
||||
virtual std::optional<std::string> getRef() const { return {}; }
|
||||
|
||||
virtual std::optional<Hash> getRev() const { return {}; }
|
||||
|
||||
virtual ParsedURL toURL() const = 0;
|
||||
|
||||
std::string to_string() const
|
||||
{
|
||||
return toURL().to_string();
|
||||
}
|
||||
|
||||
Attrs toAttrs() const;
|
||||
|
||||
std::pair<Tree, std::shared_ptr<const Input>> fetchTree(ref<Store> store) const;
|
||||
|
||||
private:
|
||||
|
||||
virtual std::pair<Tree, std::shared_ptr<const Input>> fetchTreeInternal(ref<Store> store) const = 0;
|
||||
|
||||
virtual Attrs toAttrsInternal() const = 0;
|
||||
};
|
||||
|
||||
struct InputScheme
|
||||
{
|
||||
virtual ~InputScheme() { }
|
||||
|
||||
virtual std::unique_ptr<Input> inputFromURL(const ParsedURL & url) = 0;
|
||||
|
||||
virtual std::unique_ptr<Input> inputFromAttrs(const Attrs & attrs) = 0;
|
||||
};
|
||||
|
||||
std::unique_ptr<Input> inputFromURL(const ParsedURL & url);
|
||||
|
||||
std::unique_ptr<Input> inputFromURL(const std::string & url);
|
||||
|
||||
std::unique_ptr<Input> inputFromAttrs(const Attrs & attrs);
|
||||
|
||||
void registerInputScheme(std::unique_ptr<InputScheme> && fetcher);
|
||||
|
||||
struct DownloadFileResult
|
||||
{
|
||||
StorePath storePath;
|
||||
std::string etag;
|
||||
std::string effectiveUrl;
|
||||
};
|
||||
|
||||
DownloadFileResult downloadFile(
|
||||
ref<Store> store,
|
||||
const std::string & url,
|
||||
const std::string & name,
|
||||
bool immutable);
|
||||
|
||||
Tree downloadTarball(
|
||||
ref<Store> store,
|
||||
const std::string & url,
|
||||
const std::string & name,
|
||||
bool immutable);
|
||||
|
||||
}
|
441
src/libfetchers/git.cc
Normal file
@ -0,0 +1,441 @@
|
|||
#include "fetchers.hh"
|
||||
#include "cache.hh"
|
||||
#include "globals.hh"
|
||||
#include "tarfile.hh"
|
||||
#include "store-api.hh"
|
||||
|
||||
#include <sys/time.h>
|
||||
|
||||
using namespace std::string_literals;
|
||||
|
||||
namespace nix::fetchers {
|
||||
|
||||
static std::string readHead(const Path & path)
|
||||
{
|
||||
return chomp(runProgram("git", true, { "-C", path, "rev-parse", "--abbrev-ref", "HEAD" }));
|
||||
}
|
||||
|
||||
static bool isNotDotGitDirectory(const Path & path)
|
||||
{
|
||||
static const std::regex gitDirRegex("^(?:.*/)?\\.git$");
|
||||
|
||||
return not std::regex_match(path, gitDirRegex);
|
||||
}
|
||||
|
||||
struct GitInput : Input
|
||||
{
|
||||
ParsedURL url;
|
||||
std::optional<std::string> ref;
|
||||
std::optional<Hash> rev;
|
||||
bool shallow = false;
|
||||
bool submodules = false;
|
||||
|
||||
GitInput(const ParsedURL & url) : url(url)
|
||||
{ }
|
||||
|
||||
std::string type() const override { return "git"; }
|
||||
|
||||
bool operator ==(const Input & other) const override
|
||||
{
|
||||
auto other2 = dynamic_cast<const GitInput *>(&other);
|
||||
return
|
||||
other2
|
||||
&& url == other2->url
|
||||
&& rev == other2->rev
|
||||
&& ref == other2->ref;
|
||||
}
|
||||
|
||||
bool isImmutable() const override
|
||||
{
|
||||
return (bool) rev || narHash;
|
||||
}
|
||||
|
||||
std::optional<std::string> getRef() const override { return ref; }
|
||||
|
||||
std::optional<Hash> getRev() const override { return rev; }
|
||||
|
||||
ParsedURL toURL() const override
|
||||
{
|
||||
ParsedURL url2(url);
|
||||
if (url2.scheme != "git") url2.scheme = "git+" + url2.scheme;
|
||||
if (rev) url2.query.insert_or_assign("rev", rev->gitRev());
|
||||
if (ref) url2.query.insert_or_assign("ref", *ref);
|
||||
if (shallow) url2.query.insert_or_assign("shallow", "1");
|
||||
return url2;
|
||||
}
|
||||
|
||||
Attrs toAttrsInternal() const override
|
||||
{
|
||||
Attrs attrs;
|
||||
attrs.emplace("url", url.to_string());
|
||||
if (ref)
|
||||
attrs.emplace("ref", *ref);
|
||||
if (rev)
|
||||
attrs.emplace("rev", rev->gitRev());
|
||||
if (shallow)
|
||||
attrs.emplace("shallow", true);
|
||||
if (submodules)
|
||||
attrs.emplace("submodules", true);
|
||||
return attrs;
|
||||
}
|
||||
|
||||
std::pair<bool, std::string> getActualUrl() const
|
||||
{
|
||||
// Don't clone file:// URIs (but otherwise treat them the
|
||||
// same as remote URIs, i.e. don't use the working tree or
|
||||
// HEAD).
|
||||
static bool forceHttp = getEnv("_NIX_FORCE_HTTP") == "1"; // for testing
|
||||
bool isLocal = url.scheme == "file" && !forceHttp;
|
||||
return {isLocal, isLocal ? url.path : url.base};
|
||||
}
|
||||
|
||||
std::pair<Tree, std::shared_ptr<const Input>> fetchTreeInternal(nix::ref<Store> store) const override
|
||||
{
|
||||
auto name = "source";
|
||||
|
||||
auto input = std::make_shared<GitInput>(*this);
|
||||
|
||||
assert(!rev || rev->type == htSHA1);
|
||||
|
||||
std::string cacheType = "git";
|
||||
if (shallow) cacheType += "-shallow";
|
||||
if (submodules) cacheType += "-submodules";
|
||||
|
||||
auto getImmutableAttrs = [&]()
|
||||
{
|
||||
return Attrs({
|
||||
{"type", cacheType},
|
||||
{"name", name},
|
||||
{"rev", input->rev->gitRev()},
|
||||
});
|
||||
};
|
||||
|
||||
auto makeResult = [&](const Attrs & infoAttrs, StorePath && storePath)
|
||||
-> std::pair<Tree, std::shared_ptr<const Input>>
|
||||
{
|
||||
assert(input->rev);
|
||||
assert(!rev || rev == input->rev);
|
||||
return {
|
||||
Tree {
|
||||
.actualPath = store->toRealPath(storePath),
|
||||
.storePath = std::move(storePath),
|
||||
.info = TreeInfo {
|
||||
.revCount = shallow ? std::nullopt : std::optional(getIntAttr(infoAttrs, "revCount")),
|
||||
.lastModified = getIntAttr(infoAttrs, "lastModified"),
|
||||
},
|
||||
},
|
||||
input
|
||||
};
|
||||
};
|
||||
|
||||
if (rev) {
|
||||
if (auto res = getCache()->lookup(store, getImmutableAttrs()))
|
||||
return makeResult(res->first, std::move(res->second));
|
||||
}
|
||||
|
||||
auto [isLocal, actualUrl_] = getActualUrl();
|
||||
auto actualUrl = actualUrl_; // work around clang bug
|
||||
|
||||
// If this is a local directory and no ref or revision is
|
||||
// given, then allow the use of an unclean working tree.
|
||||
if (!input->ref && !input->rev && isLocal) {
|
||||
bool clean = false;
|
||||
|
||||
/* Check whether this repo has any commits. There are
|
||||
probably better ways to do this. */
|
||||
auto gitDir = actualUrl + "/.git";
|
||||
auto commonGitDir = chomp(runProgram(
|
||||
"git",
|
||||
true,
|
||||
{ "-C", actualUrl, "rev-parse", "--git-common-dir" }
|
||||
));
|
||||
if (commonGitDir != ".git")
|
||||
gitDir = commonGitDir;
|
||||
|
||||
bool haveCommits = !readDirectory(gitDir + "/refs/heads").empty();
|
||||
|
||||
try {
|
||||
if (haveCommits) {
|
||||
runProgram("git", true, { "-C", actualUrl, "diff-index", "--quiet", "HEAD", "--" });
|
||||
clean = true;
|
||||
}
|
||||
} catch (ExecError & e) {
|
||||
if (!WIFEXITED(e.status) || WEXITSTATUS(e.status) != 1) throw;
|
||||
}
|
||||
|
||||
if (!clean) {
|
||||
|
||||
/* This is an unclean working tree. So copy all tracked files. */
|
||||
|
||||
if (!settings.allowDirty)
|
||||
throw Error("Git tree '%s' is dirty", actualUrl);
|
||||
|
||||
if (settings.warnDirty)
|
||||
warn("Git tree '%s' is dirty", actualUrl);
|
||||
|
||||
auto gitOpts = Strings({ "-C", actualUrl, "ls-files", "-z" });
|
||||
if (submodules)
|
||||
gitOpts.emplace_back("--recurse-submodules");
|
||||
|
||||
auto files = tokenizeString<std::set<std::string>>(
|
||||
runProgram("git", true, gitOpts), "\0"s);
|
||||
|
||||
PathFilter filter = [&](const Path & p) -> bool {
|
||||
assert(hasPrefix(p, actualUrl));
|
||||
std::string file(p, actualUrl.size() + 1);
|
||||
|
||||
auto st = lstat(p);
|
||||
|
||||
if (S_ISDIR(st.st_mode)) {
|
||||
auto prefix = file + "/";
|
||||
auto i = files.lower_bound(prefix);
|
||||
return i != files.end() && hasPrefix(*i, prefix);
|
||||
}
|
||||
|
||||
return files.count(file);
|
||||
};
|
||||
|
||||
auto storePath = store->addToStore("source", actualUrl, FileIngestionMethod::Recursive, htSHA256, filter);
|
||||
|
||||
auto tree = Tree {
|
||||
.actualPath = store->printStorePath(storePath),
|
||||
.storePath = std::move(storePath),
|
||||
.info = TreeInfo {
|
||||
// FIXME: maybe we should use the timestamp of the last
|
||||
// modified dirty file?
|
||||
.lastModified = haveCommits ? std::stoull(runProgram("git", true, { "-C", actualUrl, "log", "-1", "--format=%ct", "HEAD" })) : 0,
|
||||
}
|
||||
};
|
||||
|
||||
return {std::move(tree), input};
|
||||
}
|
||||
}
|
||||
|
||||
if (!input->ref) input->ref = isLocal ? readHead(actualUrl) : "master";
|
||||
|
||||
Attrs mutableAttrs({
|
||||
{"type", cacheType},
|
||||
{"name", name},
|
||||
{"url", actualUrl},
|
||||
{"ref", *input->ref},
|
||||
});
|
||||
|
||||
Path repoDir;
|
||||
|
||||
if (isLocal) {
|
||||
|
||||
if (!input->rev)
|
||||
input->rev = Hash(chomp(runProgram("git", true, { "-C", actualUrl, "rev-parse", *input->ref })), htSHA1);
|
||||
|
||||
repoDir = actualUrl;
|
||||
|
||||
} else {
|
||||
|
||||
if (auto res = getCache()->lookup(store, mutableAttrs)) {
|
||||
auto rev2 = Hash(getStrAttr(res->first, "rev"), htSHA1);
|
||||
if (!rev || rev == rev2) {
|
||||
input->rev = rev2;
|
||||
return makeResult(res->first, std::move(res->second));
|
||||
}
|
||||
}
|
||||
|
||||
Path cacheDir = getCacheDir() + "/nix/gitv3/" + hashString(htSHA256, actualUrl).to_string(Base32, false);
|
||||
repoDir = cacheDir;
|
||||
|
||||
if (!pathExists(cacheDir)) {
|
||||
createDirs(dirOf(cacheDir));
|
||||
runProgram("git", true, { "init", "--bare", repoDir });
|
||||
}
|
||||
|
||||
Path localRefFile =
|
||||
input->ref->compare(0, 5, "refs/") == 0
|
||||
? cacheDir + "/" + *input->ref
|
||||
: cacheDir + "/refs/heads/" + *input->ref;
|
||||
|
||||
bool doFetch;
|
||||
time_t now = time(0);
|
||||
|
||||
/* If a rev was specified, we need to fetch if it's not in the
|
||||
repo. */
|
||||
if (input->rev) {
|
||||
try {
|
||||
runProgram("git", true, { "-C", repoDir, "cat-file", "-e", input->rev->gitRev() });
|
||||
doFetch = false;
|
||||
} catch (ExecError & e) {
|
||||
if (WIFEXITED(e.status)) {
|
||||
doFetch = true;
|
||||
} else {
|
||||
throw;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
/* If the local ref is older than ‘tarball-ttl’ seconds, do a
|
||||
git fetch to update the local ref to the remote ref. */
|
||||
struct stat st;
|
||||
doFetch = stat(localRefFile.c_str(), &st) != 0 ||
|
||||
(uint64_t) st.st_mtime + settings.tarballTtl <= (uint64_t) now;
|
||||
}
|
||||
|
||||
if (doFetch) {
|
||||
Activity act(*logger, lvlTalkative, actUnknown, fmt("fetching Git repository '%s'", actualUrl));
|
||||
|
||||
// FIXME: git stderr messes up our progress indicator, so
|
||||
// we're using --quiet for now. Should process its stderr.
|
||||
try {
|
||||
auto fetchRef = input->ref->compare(0, 5, "refs/") == 0
|
||||
? *input->ref
|
||||
: "refs/heads/" + *input->ref;
|
||||
runProgram("git", true, { "-C", repoDir, "fetch", "--quiet", "--force", "--", actualUrl, fmt("%s:%s", fetchRef, fetchRef) });
|
||||
} catch (Error & e) {
|
||||
if (!pathExists(localRefFile)) throw;
|
||||
warn("could not update local clone of Git repository '%s'; continuing with the most recent version", actualUrl);
|
||||
}
|
||||
|
||||
struct timeval times[2];
|
||||
times[0].tv_sec = now;
|
||||
times[0].tv_usec = 0;
|
||||
times[1].tv_sec = now;
|
||||
times[1].tv_usec = 0;
|
||||
|
||||
utimes(localRefFile.c_str(), times);
|
||||
}
|
||||
|
||||
if (!input->rev)
|
||||
input->rev = Hash(chomp(readFile(localRefFile)), htSHA1);
|
||||
}
|
||||
|
||||
bool isShallow = chomp(runProgram("git", true, { "-C", repoDir, "rev-parse", "--is-shallow-repository" })) == "true";
|
||||
|
||||
if (isShallow && !shallow)
|
||||
throw Error("'%s' is a shallow Git repository, but a non-shallow repository is needed", actualUrl);
|
||||
|
||||
// FIXME: check whether rev is an ancestor of ref.
|
||||
|
||||
printTalkative("using revision %s of repo '%s'", input->rev->gitRev(), actualUrl);
|
||||
|
||||
/* Now that we know the ref, check again whether we have it in
|
||||
the store. */
|
||||
if (auto res = getCache()->lookup(store, getImmutableAttrs()))
|
||||
return makeResult(res->first, std::move(res->second));
|
||||
|
||||
Path tmpDir = createTempDir();
|
||||
AutoDelete delTmpDir(tmpDir, true);
|
||||
PathFilter filter = defaultPathFilter;
|
||||
|
||||
if (submodules) {
|
||||
Path tmpGitDir = createTempDir();
|
||||
AutoDelete delTmpGitDir(tmpGitDir, true);
|
||||
|
||||
runProgram("git", true, { "init", tmpDir, "--separate-git-dir", tmpGitDir });
|
||||
// TODO: repoDir might lack the ref (it only checks if rev
|
||||
// exists, see FIXME above) so use a big hammer and fetch
|
||||
// everything to ensure we get the rev.
|
||||
runProgram("git", true, { "-C", tmpDir, "fetch", "--quiet", "--force",
|
||||
"--update-head-ok", "--", repoDir, "refs/*:refs/*" });
|
||||
|
||||
runProgram("git", true, { "-C", tmpDir, "checkout", "--quiet", input->rev->gitRev() });
|
||||
runProgram("git", true, { "-C", tmpDir, "remote", "add", "origin", actualUrl });
|
||||
runProgram("git", true, { "-C", tmpDir, "submodule", "--quiet", "update", "--init", "--recursive" });
|
||||
|
||||
filter = isNotDotGitDirectory;
|
||||
} else {
|
||||
// FIXME: should pipe this, or find some better way to extract a
|
||||
// revision.
|
||||
auto source = sinkToSource([&](Sink & sink) {
|
||||
RunOptions gitOptions("git", { "-C", repoDir, "archive", input->rev->gitRev() });
|
||||
gitOptions.standardOut = &sink;
|
||||
runProgram2(gitOptions);
|
||||
});
|
||||
|
||||
unpackTarfile(*source, tmpDir);
|
||||
}
|
||||
|
||||
auto storePath = store->addToStore(name, tmpDir, FileIngestionMethod::Recursive, htSHA256, filter);
|
||||
|
||||
auto lastModified = std::stoull(runProgram("git", true, { "-C", repoDir, "log", "-1", "--format=%ct", input->rev->gitRev() }));
|
||||
|
||||
Attrs infoAttrs({
|
||||
{"rev", input->rev->gitRev()},
|
||||
{"lastModified", lastModified},
|
||||
});
|
||||
|
||||
if (!shallow)
|
||||
infoAttrs.insert_or_assign("revCount",
|
||||
std::stoull(runProgram("git", true, { "-C", repoDir, "rev-list", "--count", input->rev->gitRev() })));
|
||||
|
||||
if (!this->rev)
|
||||
getCache()->add(
|
||||
store,
|
||||
mutableAttrs,
|
||||
infoAttrs,
|
||||
storePath,
|
||||
false);
|
||||
|
||||
getCache()->add(
|
||||
store,
|
||||
getImmutableAttrs(),
|
||||
infoAttrs,
|
||||
storePath,
|
||||
true);
|
||||
|
||||
return makeResult(infoAttrs, std::move(storePath));
|
||||
}
|
||||
};
|
||||
|
||||
struct GitInputScheme : InputScheme
|
||||
{
|
||||
std::unique_ptr<Input> inputFromURL(const ParsedURL & url) override
|
||||
{
|
||||
if (url.scheme != "git" &&
|
||||
url.scheme != "git+http" &&
|
||||
url.scheme != "git+https" &&
|
||||
url.scheme != "git+ssh" &&
|
||||
url.scheme != "git+file") return nullptr;
|
||||
|
||||
auto url2(url);
|
||||
if (hasPrefix(url2.scheme, "git+")) url2.scheme = std::string(url2.scheme, 4);
|
||||
url2.query.clear();
|
||||
|
||||
Attrs attrs;
|
||||
attrs.emplace("type", "git");
|
||||
|
||||
for (auto &[name, value] : url.query) {
|
||||
if (name == "rev" || name == "ref")
|
||||
attrs.emplace(name, value);
|
||||
else
|
||||
url2.query.emplace(name, value);
|
||||
}
|
||||
|
||||
attrs.emplace("url", url2.to_string());
|
||||
|
||||
return inputFromAttrs(attrs);
|
||||
}
|
||||
|
||||
std::unique_ptr<Input> inputFromAttrs(const Attrs & attrs) override
|
||||
{
|
||||
if (maybeGetStrAttr(attrs, "type") != "git") return {};
|
||||
|
||||
for (auto & [name, value] : attrs)
|
||||
if (name != "type" && name != "url" && name != "ref" && name != "rev" && name != "shallow" && name != "submodules")
|
||||
throw Error("unsupported Git input attribute '%s'", name);
|
||||
|
||||
auto input = std::make_unique<GitInput>(parseURL(getStrAttr(attrs, "url")));
|
||||
if (auto ref = maybeGetStrAttr(attrs, "ref")) {
|
||||
if (std::regex_search(*ref, badGitRefRegex))
|
||||
throw BadURL("invalid Git branch/tag name '%s'", *ref);
|
||||
input->ref = *ref;
|
||||
}
|
||||
if (auto rev = maybeGetStrAttr(attrs, "rev"))
|
||||
input->rev = Hash(*rev, htSHA1);
|
||||
|
||||
input->shallow = maybeGetBoolAttr(attrs, "shallow").value_or(false);
|
||||
|
||||
input->submodules = maybeGetBoolAttr(attrs, "submodules").value_or(false);
|
||||
|
||||
return input;
|
||||
}
|
||||
};
|
||||
|
||||
static auto r1 = OnStartup([] { registerInputScheme(std::make_unique<GitInputScheme>()); });
|
||||
|
||||
}
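
As a rough, self-contained restatement of the scheme and query handling in GitInputScheme::inputFromURL above (standard library only; SimpleUrl, its fields, and the hard-coded example values are illustrative stand-ins, not part of this patch): strip the git+ prefix from the scheme and move the rev/ref query parameters out of the URL into the fetcher attribute set.

// Minimal sketch of the scheme/query handling in GitInputScheme::inputFromURL.
// SimpleUrl is a stand-in for nix's ParsedURL; only the fields used here exist.
#include <iostream>
#include <map>
#include <string>

struct SimpleUrl {
    std::string scheme;
    std::string rest;                          // everything after "scheme:"
    std::map<std::string, std::string> query;  // already-parsed query params
};

int main()
{
    SimpleUrl url{"git+https", "//example.org/repo.git",
        {{"ref", "master"}, {"rev", "0123456789abcdef0123456789abcdef01234567"}}};

    // Drop the "git+" prefix, as the real code does with std::string(scheme, 4).
    std::string scheme = url.scheme.rfind("git+", 0) == 0 ? url.scheme.substr(4) : url.scheme;

    // rev/ref become fetcher attributes; everything else stays in the URL.
    std::map<std::string, std::string> attrs{{"type", "git"}};
    std::map<std::string, std::string> remainingQuery;
    for (auto & [name, value] : url.query) {
        if (name == "rev" || name == "ref")
            attrs.emplace(name, value);
        else
            remainingQuery.emplace(name, value);
    }
    attrs.emplace("url", scheme + ":" + url.rest);

    for (auto & [k, v] : attrs)
        std::cout << k << " = " << v << "\n";
}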
|
195
src/libfetchers/github.cc
Normal file
|
@ -0,0 +1,195 @@
|
|||
#include "filetransfer.hh"
|
||||
#include "cache.hh"
|
||||
#include "fetchers.hh"
|
||||
#include "globals.hh"
|
||||
#include "store-api.hh"
|
||||
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
namespace nix::fetchers {
|
||||
|
||||
std::regex ownerRegex("[a-zA-Z][a-zA-Z0-9_-]*", std::regex::ECMAScript);
|
||||
std::regex repoRegex("[a-zA-Z][a-zA-Z0-9_-]*", std::regex::ECMAScript);
|
||||
|
||||
struct GitHubInput : Input
|
||||
{
|
||||
std::string owner;
|
||||
std::string repo;
|
||||
std::optional<std::string> ref;
|
||||
std::optional<Hash> rev;
|
||||
|
||||
std::string type() const override { return "github"; }
|
||||
|
||||
bool operator ==(const Input & other) const override
|
||||
{
|
||||
auto other2 = dynamic_cast<const GitHubInput *>(&other);
|
||||
return
|
||||
other2
|
||||
&& owner == other2->owner
|
||||
&& repo == other2->repo
|
||||
&& rev == other2->rev
|
||||
&& ref == other2->ref;
|
||||
}
|
||||
|
||||
bool isImmutable() const override
|
||||
{
|
||||
return (bool) rev || narHash;
|
||||
}
|
||||
|
||||
std::optional<std::string> getRef() const override { return ref; }
|
||||
|
||||
std::optional<Hash> getRev() const override { return rev; }
|
||||
|
||||
ParsedURL toURL() const override
|
||||
{
|
||||
auto path = owner + "/" + repo;
|
||||
assert(!(ref && rev));
|
||||
if (ref) path += "/" + *ref;
|
||||
if (rev) path += "/" + rev->to_string(Base16, false);
|
||||
return ParsedURL {
|
||||
.scheme = "github",
|
||||
.path = path,
|
||||
};
|
||||
}
|
||||
|
||||
Attrs toAttrsInternal() const override
|
||||
{
|
||||
Attrs attrs;
|
||||
attrs.emplace("owner", owner);
|
||||
attrs.emplace("repo", repo);
|
||||
if (ref)
|
||||
attrs.emplace("ref", *ref);
|
||||
if (rev)
|
||||
attrs.emplace("rev", rev->gitRev());
|
||||
return attrs;
|
||||
}
|
||||
|
||||
std::pair<Tree, std::shared_ptr<const Input>> fetchTreeInternal(nix::ref<Store> store) const override
|
||||
{
|
||||
auto rev = this->rev;
|
||||
auto ref = this->ref.value_or("master");
|
||||
|
||||
if (!rev) {
|
||||
auto url = fmt("https://api.github.com/repos/%s/%s/commits/%s",
|
||||
owner, repo, ref);
|
||||
auto json = nlohmann::json::parse(
|
||||
readFile(
|
||||
store->toRealPath(
|
||||
downloadFile(store, url, "source", false).storePath)));
|
||||
rev = Hash(json["sha"], htSHA1);
|
||||
debug("HEAD revision for '%s' is %s", url, rev->gitRev());
|
||||
}
|
||||
|
||||
auto input = std::make_shared<GitHubInput>(*this);
|
||||
input->ref = {};
|
||||
input->rev = *rev;
|
||||
|
||||
Attrs immutableAttrs({
|
||||
{"type", "git-tarball"},
|
||||
{"rev", rev->gitRev()},
|
||||
});
|
||||
|
||||
if (auto res = getCache()->lookup(store, immutableAttrs)) {
|
||||
return {
|
||||
Tree{
|
||||
.actualPath = store->toRealPath(res->second),
|
||||
.storePath = std::move(res->second),
|
||||
.info = TreeInfo {
|
||||
.lastModified = getIntAttr(res->first, "lastModified"),
|
||||
},
|
||||
},
|
||||
input
|
||||
};
|
||||
}
|
||||
|
||||
// FIXME: use regular /archive URLs instead? api.github.com
|
||||
// might have stricter rate limits.
|
||||
|
||||
auto url = fmt("https://api.github.com/repos/%s/%s/tarball/%s",
|
||||
owner, repo, rev->to_string(Base16, false));
|
||||
|
||||
std::string accessToken = settings.githubAccessToken.get();
|
||||
if (accessToken != "")
|
||||
url += "?access_token=" + accessToken;
|
||||
|
||||
auto tree = downloadTarball(store, url, "source", true);
|
||||
|
||||
getCache()->add(
|
||||
store,
|
||||
immutableAttrs,
|
||||
{
|
||||
{"rev", rev->gitRev()},
|
||||
{"lastModified", *tree.info.lastModified}
|
||||
},
|
||||
tree.storePath,
|
||||
true);
|
||||
|
||||
return {std::move(tree), input};
|
||||
}
|
||||
};
|
||||
|
||||
struct GitHubInputScheme : InputScheme
|
||||
{
|
||||
std::unique_ptr<Input> inputFromURL(const ParsedURL & url) override
|
||||
{
|
||||
if (url.scheme != "github") return nullptr;
|
||||
|
||||
auto path = tokenizeString<std::vector<std::string>>(url.path, "/");
|
||||
auto input = std::make_unique<GitHubInput>();
|
||||
|
||||
if (path.size() == 2) {
|
||||
} else if (path.size() == 3) {
|
||||
if (std::regex_match(path[2], revRegex))
|
||||
input->rev = Hash(path[2], htSHA1);
|
||||
else if (std::regex_match(path[2], refRegex))
|
||||
input->ref = path[2];
|
||||
else
|
||||
throw BadURL("in GitHub URL '%s', '%s' is not a commit hash or branch/tag name", url.url, path[2]);
|
||||
} else
|
||||
throw BadURL("GitHub URL '%s' is invalid", url.url);
|
||||
|
||||
for (auto &[name, value] : url.query) {
|
||||
if (name == "rev") {
|
||||
if (input->rev)
|
||||
throw BadURL("GitHub URL '%s' contains multiple commit hashes", url.url);
|
||||
input->rev = Hash(value, htSHA1);
|
||||
}
|
||||
else if (name == "ref") {
|
||||
if (!std::regex_match(value, refRegex))
|
||||
throw BadURL("GitHub URL '%s' contains an invalid branch/tag name", url.url);
|
||||
if (input->ref)
|
||||
throw BadURL("GitHub URL '%s' contains multiple branch/tag names", url.url);
|
||||
input->ref = value;
|
||||
}
|
||||
}
|
||||
|
||||
if (input->ref && input->rev)
|
||||
throw BadURL("GitHub URL '%s' contains both a commit hash and a branch/tag name", url.url);
|
||||
|
||||
input->owner = path[0];
|
||||
input->repo = path[1];
|
||||
|
||||
return input;
|
||||
}
|
||||
|
||||
std::unique_ptr<Input> inputFromAttrs(const Attrs & attrs) override
|
||||
{
|
||||
if (maybeGetStrAttr(attrs, "type") != "github") return {};
|
||||
|
||||
for (auto & [name, value] : attrs)
|
||||
if (name != "type" && name != "owner" && name != "repo" && name != "ref" && name != "rev")
|
||||
throw Error("unsupported GitHub input attribute '%s'", name);
|
||||
|
||||
auto input = std::make_unique<GitHubInput>();
|
||||
input->owner = getStrAttr(attrs, "owner");
|
||||
input->repo = getStrAttr(attrs, "repo");
|
||||
input->ref = maybeGetStrAttr(attrs, "ref");
|
||||
if (auto rev = maybeGetStrAttr(attrs, "rev"))
|
||||
input->rev = Hash(*rev, htSHA1);
|
||||
return input;
|
||||
}
|
||||
};
|
||||
|
||||
static auto r1 = OnStartup([] { registerInputScheme(std::make_unique<GitHubInputScheme>()); });
|
||||
|
||||
}
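
To illustrate the ref-to-revision resolution step above: the scheme asks the GitHub commits API for the head of a branch or tag and reads the "sha" field out of the JSON reply. The sketch below skips the actual transfer (done through downloadFile in the real code) and parses a canned response with nlohmann::json; the response body and commit hash are assumed placeholders, not real data.

// Sketch of resolving a branch/tag to a commit hash from a GitHub API reply.
// The HTTP request is omitted; 'response' stands in for the downloaded body.
#include <iostream>
#include <string>
#include <nlohmann/json.hpp>

int main()
{
    std::string owner = "NixOS", repo = "nix", ref = "master";
    std::string url = "https://api.github.com/repos/" + owner + "/" + repo + "/commits/" + ref;

    // Assumed, abbreviated example body; the real reply contains many more fields.
    std::string response = R"({"sha": "0123456789abcdef0123456789abcdef01234567"})";

    auto json = nlohmann::json::parse(response);
    auto rev = json["sha"].get<std::string>();

    std::cout << "HEAD revision for " << url << " is " << rev << "\n";
}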
|
11
src/libfetchers/local.mk
Normal file
|
@ -0,0 +1,11 @@
libraries += libfetchers

libfetchers_NAME = libnixfetchers

libfetchers_DIR := $(d)

libfetchers_SOURCES := $(wildcard $(d)/*.cc)

libfetchers_CXXFLAGS += -I src/libutil -I src/libstore

libfetchers_LIBS = libutil libstore libnixrust
303
src/libfetchers/mercurial.cc
Normal file
|
@ -0,0 +1,303 @@
|
|||
#include "fetchers.hh"
|
||||
#include "cache.hh"
|
||||
#include "globals.hh"
|
||||
#include "tarfile.hh"
|
||||
#include "store-api.hh"
|
||||
|
||||
#include <sys/time.h>
|
||||
|
||||
using namespace std::string_literals;
|
||||
|
||||
namespace nix::fetchers {
|
||||
|
||||
struct MercurialInput : Input
|
||||
{
|
||||
ParsedURL url;
|
||||
std::optional<std::string> ref;
|
||||
std::optional<Hash> rev;
|
||||
|
||||
MercurialInput(const ParsedURL & url) : url(url)
|
||||
{ }
|
||||
|
||||
std::string type() const override { return "hg"; }
|
||||
|
||||
bool operator ==(const Input & other) const override
|
||||
{
|
||||
auto other2 = dynamic_cast<const MercurialInput *>(&other);
|
||||
return
|
||||
other2
|
||||
&& url == other2->url
|
||||
&& rev == other2->rev
|
||||
&& ref == other2->ref;
|
||||
}
|
||||
|
||||
bool isImmutable() const override
|
||||
{
|
||||
return (bool) rev || narHash;
|
||||
}
|
||||
|
||||
std::optional<std::string> getRef() const override { return ref; }
|
||||
|
||||
std::optional<Hash> getRev() const override { return rev; }
|
||||
|
||||
ParsedURL toURL() const override
|
||||
{
|
||||
ParsedURL url2(url);
|
||||
url2.scheme = "hg+" + url2.scheme;
|
||||
if (rev) url2.query.insert_or_assign("rev", rev->gitRev());
|
||||
if (ref) url2.query.insert_or_assign("ref", *ref);
|
||||
return url;
|
||||
}
|
||||
|
||||
Attrs toAttrsInternal() const override
|
||||
{
|
||||
Attrs attrs;
|
||||
attrs.emplace("url", url.to_string());
|
||||
if (ref)
|
||||
attrs.emplace("ref", *ref);
|
||||
if (rev)
|
||||
attrs.emplace("rev", rev->gitRev());
|
||||
return attrs;
|
||||
}
|
||||
|
||||
std::pair<bool, std::string> getActualUrl() const
|
||||
{
|
||||
bool isLocal = url.scheme == "file";
|
||||
return {isLocal, isLocal ? url.path : url.base};
|
||||
}
|
||||
|
||||
std::pair<Tree, std::shared_ptr<const Input>> fetchTreeInternal(nix::ref<Store> store) const override
|
||||
{
|
||||
auto name = "source";
|
||||
|
||||
auto input = std::make_shared<MercurialInput>(*this);
|
||||
|
||||
auto [isLocal, actualUrl_] = getActualUrl();
|
||||
auto actualUrl = actualUrl_; // work around clang bug
|
||||
|
||||
// FIXME: return lastModified.
|
||||
|
||||
// FIXME: don't clone local repositories.
|
||||
|
||||
if (!input->ref && !input->rev && isLocal && pathExists(actualUrl + "/.hg")) {
|
||||
|
||||
bool clean = runProgram("hg", true, { "status", "-R", actualUrl, "--modified", "--added", "--removed" }) == "";
|
||||
|
||||
if (!clean) {
|
||||
|
||||
/* This is an unclean working tree. So copy all tracked
|
||||
files. */
|
||||
|
||||
if (!settings.allowDirty)
|
||||
throw Error("Mercurial tree '%s' is unclean", actualUrl);
|
||||
|
||||
if (settings.warnDirty)
|
||||
warn("Mercurial tree '%s' is unclean", actualUrl);
|
||||
|
||||
input->ref = chomp(runProgram("hg", true, { "branch", "-R", actualUrl }));
|
||||
|
||||
auto files = tokenizeString<std::set<std::string>>(
|
||||
runProgram("hg", true, { "status", "-R", actualUrl, "--clean", "--modified", "--added", "--no-status", "--print0" }), "\0"s);
|
||||
|
||||
PathFilter filter = [&](const Path & p) -> bool {
|
||||
assert(hasPrefix(p, actualUrl));
|
||||
std::string file(p, actualUrl.size() + 1);
|
||||
|
||||
auto st = lstat(p);
|
||||
|
||||
if (S_ISDIR(st.st_mode)) {
|
||||
auto prefix = file + "/";
|
||||
auto i = files.lower_bound(prefix);
|
||||
return i != files.end() && hasPrefix(*i, prefix);
|
||||
}
|
||||
|
||||
return files.count(file);
|
||||
};
|
||||
|
||||
auto storePath = store->addToStore("source", actualUrl, FileIngestionMethod::Recursive, htSHA256, filter);
|
||||
|
||||
return {Tree {
|
||||
.actualPath = store->printStorePath(storePath),
|
||||
.storePath = std::move(storePath),
|
||||
}, input};
|
||||
}
|
||||
}
|
||||
|
||||
if (!input->ref) input->ref = "default";
|
||||
|
||||
auto getImmutableAttrs = [&]()
|
||||
{
|
||||
return Attrs({
|
||||
{"type", "hg"},
|
||||
{"name", name},
|
||||
{"rev", input->rev->gitRev()},
|
||||
});
|
||||
};
|
||||
|
||||
auto makeResult = [&](const Attrs & infoAttrs, StorePath && storePath)
|
||||
-> std::pair<Tree, std::shared_ptr<const Input>>
|
||||
{
|
||||
assert(input->rev);
|
||||
assert(!rev || rev == input->rev);
|
||||
return {
|
||||
Tree{
|
||||
.actualPath = store->toRealPath(storePath),
|
||||
.storePath = std::move(storePath),
|
||||
.info = TreeInfo {
|
||||
.revCount = getIntAttr(infoAttrs, "revCount"),
|
||||
},
|
||||
},
|
||||
input
|
||||
};
|
||||
};
|
||||
|
||||
if (input->rev) {
|
||||
if (auto res = getCache()->lookup(store, getImmutableAttrs()))
|
||||
return makeResult(res->first, std::move(res->second));
|
||||
}
|
||||
|
||||
assert(input->rev || input->ref);
|
||||
auto revOrRef = input->rev ? input->rev->gitRev() : *input->ref;
|
||||
|
||||
Attrs mutableAttrs({
|
||||
{"type", "hg"},
|
||||
{"name", name},
|
||||
{"url", actualUrl},
|
||||
{"ref", *input->ref},
|
||||
});
|
||||
|
||||
if (auto res = getCache()->lookup(store, mutableAttrs)) {
|
||||
auto rev2 = Hash(getStrAttr(res->first, "rev"), htSHA1);
|
||||
if (!rev || rev == rev2) {
|
||||
input->rev = rev2;
|
||||
return makeResult(res->first, std::move(res->second));
|
||||
}
|
||||
}
|
||||
|
||||
Path cacheDir = fmt("%s/nix/hg/%s", getCacheDir(), hashString(htSHA256, actualUrl).to_string(Base32, false));
|
||||
|
||||
/* If this is a commit hash that we already have, we don't
|
||||
have to pull again. */
|
||||
if (!(input->rev
|
||||
&& pathExists(cacheDir)
|
||||
&& runProgram(
|
||||
RunOptions("hg", { "log", "-R", cacheDir, "-r", input->rev->gitRev(), "--template", "1" })
|
||||
.killStderr(true)).second == "1"))
|
||||
{
|
||||
Activity act(*logger, lvlTalkative, actUnknown, fmt("fetching Mercurial repository '%s'", actualUrl));
|
||||
|
||||
if (pathExists(cacheDir)) {
|
||||
try {
|
||||
runProgram("hg", true, { "pull", "-R", cacheDir, "--", actualUrl });
|
||||
}
|
||||
catch (ExecError & e) {
|
||||
string transJournal = cacheDir + "/.hg/store/journal";
|
||||
/* hg throws "abandoned transaction" error only if this file exists */
|
||||
if (pathExists(transJournal)) {
|
||||
runProgram("hg", true, { "recover", "-R", cacheDir });
|
||||
runProgram("hg", true, { "pull", "-R", cacheDir, "--", actualUrl });
|
||||
} else {
|
||||
throw ExecError(e.status, fmt("'hg pull' %s", statusToString(e.status)));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
createDirs(dirOf(cacheDir));
|
||||
runProgram("hg", true, { "clone", "--noupdate", "--", actualUrl, cacheDir });
|
||||
}
|
||||
}
|
||||
|
||||
auto tokens = tokenizeString<std::vector<std::string>>(
|
||||
runProgram("hg", true, { "log", "-R", cacheDir, "-r", revOrRef, "--template", "{node} {rev} {branch}" }));
|
||||
assert(tokens.size() == 3);
|
||||
|
||||
input->rev = Hash(tokens[0], htSHA1);
|
||||
auto revCount = std::stoull(tokens[1]);
|
||||
input->ref = tokens[2];
|
||||
|
||||
if (auto res = getCache()->lookup(store, getImmutableAttrs()))
|
||||
return makeResult(res->first, std::move(res->second));
|
||||
|
||||
Path tmpDir = createTempDir();
|
||||
AutoDelete delTmpDir(tmpDir, true);
|
||||
|
||||
runProgram("hg", true, { "archive", "-R", cacheDir, "-r", input->rev->gitRev(), tmpDir });
|
||||
|
||||
deletePath(tmpDir + "/.hg_archival.txt");
|
||||
|
||||
auto storePath = store->addToStore(name, tmpDir);
|
||||
|
||||
Attrs infoAttrs({
|
||||
{"rev", input->rev->gitRev()},
|
||||
{"revCount", (int64_t) revCount},
|
||||
});
|
||||
|
||||
if (!this->rev)
|
||||
getCache()->add(
|
||||
store,
|
||||
mutableAttrs,
|
||||
infoAttrs,
|
||||
storePath,
|
||||
false);
|
||||
|
||||
getCache()->add(
|
||||
store,
|
||||
getImmutableAttrs(),
|
||||
infoAttrs,
|
||||
storePath,
|
||||
true);
|
||||
|
||||
return makeResult(infoAttrs, std::move(storePath));
|
||||
}
|
||||
};
|
||||
|
||||
struct MercurialInputScheme : InputScheme
|
||||
{
|
||||
std::unique_ptr<Input> inputFromURL(const ParsedURL & url) override
|
||||
{
|
||||
if (url.scheme != "hg+http" &&
|
||||
url.scheme != "hg+https" &&
|
||||
url.scheme != "hg+ssh" &&
|
||||
url.scheme != "hg+file") return nullptr;
|
||||
|
||||
auto url2(url);
|
||||
url2.scheme = std::string(url2.scheme, 3);
|
||||
url2.query.clear();
|
||||
|
||||
Attrs attrs;
|
||||
attrs.emplace("type", "hg");
|
||||
|
||||
for (auto &[name, value] : url.query) {
|
||||
if (name == "rev" || name == "ref")
|
||||
attrs.emplace(name, value);
|
||||
else
|
||||
url2.query.emplace(name, value);
|
||||
}
|
||||
|
||||
attrs.emplace("url", url2.to_string());
|
||||
|
||||
return inputFromAttrs(attrs);
|
||||
}
|
||||
|
||||
std::unique_ptr<Input> inputFromAttrs(const Attrs & attrs) override
|
||||
{
|
||||
if (maybeGetStrAttr(attrs, "type") != "hg") return {};
|
||||
|
||||
for (auto & [name, value] : attrs)
|
||||
if (name != "type" && name != "url" && name != "ref" && name != "rev")
|
||||
throw Error("unsupported Mercurial input attribute '%s'", name);
|
||||
|
||||
auto input = std::make_unique<MercurialInput>(parseURL(getStrAttr(attrs, "url")));
|
||||
if (auto ref = maybeGetStrAttr(attrs, "ref")) {
|
||||
if (!std::regex_match(*ref, refRegex))
|
||||
throw BadURL("invalid Mercurial branch/tag name '%s'", *ref);
|
||||
input->ref = *ref;
|
||||
}
|
||||
if (auto rev = maybeGetStrAttr(attrs, "rev"))
|
||||
input->rev = Hash(*rev, htSHA1);
|
||||
return input;
|
||||
}
|
||||
};
|
||||
|
||||
static auto r1 = OnStartup([] { registerInputScheme(std::make_unique<MercurialInputScheme>()); });
|
||||
|
||||
}
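
A quick standalone illustration of how the output of hg log with the template "{node} {rev} {branch}" above is split into the revision hash, the revision count, and the branch name. The sample output string is a made-up placeholder, and std::istringstream stands in for nix's tokenizeString.

// Sketch of splitting hg's "{node} {rev} {branch}" template output.
#include <cassert>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

int main()
{
    // Placeholder output; real values come from `hg log -r <revOrRef> --template ...`.
    std::string output = "0123456789abcdef0123456789abcdef01234567 42 default";

    std::vector<std::string> tokens;
    std::istringstream in(output);
    for (std::string tok; in >> tok; )
        tokens.push_back(tok);
    assert(tokens.size() == 3);

    std::string rev = tokens[0];                            // 40-char hex node id
    unsigned long long revCount = std::stoull(tokens[1]);   // sequential revision number
    std::string ref = tokens[2];                            // branch name

    std::cout << rev << " " << revCount << " " << ref << "\n";
}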
|
148
src/libfetchers/path.cc
Normal file
|
@ -0,0 +1,148 @@
|
|||
#include "fetchers.hh"
|
||||
#include "store-api.hh"
|
||||
|
||||
namespace nix::fetchers {
|
||||
|
||||
struct PathInput : Input
|
||||
{
|
||||
Path path;
|
||||
|
||||
/* Allow the user to pass in "fake" tree info attributes. This is
|
||||
useful for making a pinned tree work the same as the repository
|
||||
from which it is exported
|
||||
(e.g. path:/nix/store/...-source?lastModified=1585388205&rev=b0c285...). */
|
||||
std::optional<Hash> rev;
|
||||
std::optional<uint64_t> revCount;
|
||||
std::optional<time_t> lastModified;
|
||||
|
||||
std::string type() const override { return "path"; }
|
||||
|
||||
std::optional<Hash> getRev() const override { return rev; }
|
||||
|
||||
bool operator ==(const Input & other) const override
|
||||
{
|
||||
auto other2 = dynamic_cast<const PathInput *>(&other);
|
||||
return
|
||||
other2
|
||||
&& path == other2->path
|
||||
&& rev == other2->rev
|
||||
&& revCount == other2->revCount
|
||||
&& lastModified == other2->lastModified;
|
||||
}
|
||||
|
||||
bool isImmutable() const override
|
||||
{
|
||||
return (bool) narHash;
|
||||
}
|
||||
|
||||
ParsedURL toURL() const override
|
||||
{
|
||||
auto query = attrsToQuery(toAttrsInternal());
|
||||
query.erase("path");
|
||||
return ParsedURL {
|
||||
.scheme = "path",
|
||||
.path = path,
|
||||
.query = query,
|
||||
};
|
||||
}
|
||||
|
||||
Attrs toAttrsInternal() const override
|
||||
{
|
||||
Attrs attrs;
|
||||
attrs.emplace("path", path);
|
||||
if (rev)
|
||||
attrs.emplace("rev", rev->gitRev());
|
||||
if (revCount)
|
||||
attrs.emplace("revCount", *revCount);
|
||||
if (lastModified)
|
||||
attrs.emplace("lastModified", *lastModified);
|
||||
return attrs;
|
||||
}
|
||||
|
||||
std::pair<Tree, std::shared_ptr<const Input>> fetchTreeInternal(nix::ref<Store> store) const override
|
||||
{
|
||||
auto input = std::make_shared<PathInput>(*this);
|
||||
|
||||
// FIXME: check whether access to 'path' is allowed.
|
||||
|
||||
auto storePath = store->maybeParseStorePath(path);
|
||||
|
||||
if (storePath)
|
||||
store->addTempRoot(*storePath);
|
||||
|
||||
if (!storePath || storePath->name() != "source" || !store->isValidPath(*storePath))
|
||||
// FIXME: try to substitute storePath.
|
||||
storePath = store->addToStore("source", path);
|
||||
|
||||
return
|
||||
{
|
||||
Tree {
|
||||
.actualPath = store->toRealPath(*storePath),
|
||||
.storePath = std::move(*storePath),
|
||||
.info = TreeInfo {
|
||||
.revCount = revCount,
|
||||
.lastModified = lastModified
|
||||
}
|
||||
},
|
||||
input
|
||||
};
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
struct PathInputScheme : InputScheme
|
||||
{
|
||||
std::unique_ptr<Input> inputFromURL(const ParsedURL & url) override
|
||||
{
|
||||
if (url.scheme != "path") return nullptr;
|
||||
|
||||
auto input = std::make_unique<PathInput>();
|
||||
input->path = url.path;
|
||||
|
||||
for (auto & [name, value] : url.query)
|
||||
if (name == "rev")
|
||||
input->rev = Hash(value, htSHA1);
|
||||
else if (name == "revCount") {
|
||||
uint64_t revCount;
|
||||
if (!string2Int(value, revCount))
|
||||
throw Error("path URL '%s' has invalid parameter '%s'", url.to_string(), name);
|
||||
input->revCount = revCount;
|
||||
}
|
||||
else if (name == "lastModified") {
|
||||
time_t lastModified;
|
||||
if (!string2Int(value, lastModified))
|
||||
throw Error("path URL '%s' has invalid parameter '%s'", url.to_string(), name);
|
||||
input->lastModified = lastModified;
|
||||
}
|
||||
else
|
||||
throw Error("path URL '%s' has unsupported parameter '%s'", url.to_string(), name);
|
||||
|
||||
return input;
|
||||
}
|
||||
|
||||
std::unique_ptr<Input> inputFromAttrs(const Attrs & attrs) override
|
||||
{
|
||||
if (maybeGetStrAttr(attrs, "type") != "path") return {};
|
||||
|
||||
auto input = std::make_unique<PathInput>();
|
||||
input->path = getStrAttr(attrs, "path");
|
||||
|
||||
for (auto & [name, value] : attrs)
|
||||
if (name == "rev")
|
||||
input->rev = Hash(getStrAttr(attrs, "rev"), htSHA1);
|
||||
else if (name == "revCount")
|
||||
input->revCount = getIntAttr(attrs, "revCount");
|
||||
else if (name == "lastModified")
|
||||
input->lastModified = getIntAttr(attrs, "lastModified");
|
||||
else if (name == "type" || name == "path")
|
||||
;
|
||||
else
|
||||
throw Error("unsupported path input attribute '%s'", name);
|
||||
|
||||
return input;
|
||||
}
|
||||
};
|
||||
|
||||
static auto r1 = OnStartup([] { registerInputScheme(std::make_unique<PathInputScheme>()); });
|
||||
|
||||
}
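
The query handling in PathInputScheme::inputFromURL boils down to: accept rev, revCount, and lastModified, validate the integers, and reject anything else. Below is a small self-contained sketch of that validation, using std::from_chars in place of nix's string2Int; the parameter values are invented for the example.

// Sketch of validating path: URL query parameters (revCount, lastModified).
#include <charconv>
#include <cstdint>
#include <iostream>
#include <map>
#include <stdexcept>
#include <string>

// Rough stand-in for nix's string2Int: succeeds only if the whole string parses.
static bool parseUInt(const std::string & s, uint64_t & out)
{
    auto [ptr, ec] = std::from_chars(s.data(), s.data() + s.size(), out);
    return ec == std::errc() && ptr == s.data() + s.size();
}

int main()
{
    std::map<std::string, std::string> query{
        {"revCount", "57"}, {"lastModified", "1585388205"}};

    uint64_t revCount = 0, lastModified = 0;
    for (auto & [name, value] : query) {
        bool ok = name == "revCount"     ? parseUInt(value, revCount)
                : name == "lastModified" ? parseUInt(value, lastModified)
                : false;
        if (!ok)
            throw std::runtime_error("path URL has invalid or unsupported parameter '" + name + "'");
    }

    std::cout << "revCount=" << revCount << " lastModified=" << lastModified << "\n";
}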
|
276
src/libfetchers/tarball.cc
Normal file
|
@ -0,0 +1,276 @@
|
|||
#include "fetchers.hh"
|
||||
#include "cache.hh"
|
||||
#include "filetransfer.hh"
|
||||
#include "globals.hh"
|
||||
#include "store-api.hh"
|
||||
#include "archive.hh"
|
||||
#include "tarfile.hh"
|
||||
|
||||
namespace nix::fetchers {
|
||||
|
||||
DownloadFileResult downloadFile(
|
||||
ref<Store> store,
|
||||
const std::string & url,
|
||||
const std::string & name,
|
||||
bool immutable)
|
||||
{
|
||||
// FIXME: check store
|
||||
|
||||
Attrs inAttrs({
|
||||
{"type", "file"},
|
||||
{"url", url},
|
||||
{"name", name},
|
||||
});
|
||||
|
||||
auto cached = getCache()->lookupExpired(store, inAttrs);
|
||||
|
||||
auto useCached = [&]() -> DownloadFileResult
|
||||
{
|
||||
return {
|
||||
.storePath = std::move(cached->storePath),
|
||||
.etag = getStrAttr(cached->infoAttrs, "etag"),
|
||||
.effectiveUrl = getStrAttr(cached->infoAttrs, "url")
|
||||
};
|
||||
};
|
||||
|
||||
if (cached && !cached->expired)
|
||||
return useCached();
|
||||
|
||||
FileTransferRequest request(url);
|
||||
if (cached)
|
||||
request.expectedETag = getStrAttr(cached->infoAttrs, "etag");
|
||||
FileTransferResult res;
|
||||
try {
|
||||
res = getFileTransfer()->download(request);
|
||||
} catch (FileTransferError & e) {
|
||||
if (cached) {
|
||||
warn("%s; using cached version", e.msg());
|
||||
return useCached();
|
||||
} else
|
||||
throw;
|
||||
}
|
||||
|
||||
// FIXME: write to temporary file.
|
||||
|
||||
Attrs infoAttrs({
|
||||
{"etag", res.etag},
|
||||
{"url", res.effectiveUri},
|
||||
});
|
||||
|
||||
std::optional<StorePath> storePath;
|
||||
|
||||
if (res.cached) {
|
||||
assert(cached);
|
||||
assert(request.expectedETag == res.etag);
|
||||
storePath = std::move(cached->storePath);
|
||||
} else {
|
||||
StringSink sink;
|
||||
dumpString(*res.data, sink);
|
||||
auto hash = hashString(htSHA256, *res.data);
|
||||
ValidPathInfo info(store->makeFixedOutputPath(FileIngestionMethod::Flat, hash, name));
|
||||
info.narHash = hashString(htSHA256, *sink.s);
|
||||
info.narSize = sink.s->size();
|
||||
info.ca = makeFixedOutputCA(FileIngestionMethod::Flat, hash);
|
||||
auto source = StringSource { *sink.s };
|
||||
store->addToStore(info, source, NoRepair, NoCheckSigs);
|
||||
storePath = std::move(info.path);
|
||||
}
|
||||
|
||||
getCache()->add(
|
||||
store,
|
||||
inAttrs,
|
||||
infoAttrs,
|
||||
*storePath,
|
||||
immutable);
|
||||
|
||||
if (url != res.effectiveUri)
|
||||
getCache()->add(
|
||||
store,
|
||||
{
|
||||
{"type", "file"},
|
||||
{"url", res.effectiveUri},
|
||||
{"name", name},
|
||||
},
|
||||
infoAttrs,
|
||||
*storePath,
|
||||
immutable);
|
||||
|
||||
return {
|
||||
.storePath = std::move(*storePath),
|
||||
.etag = res.etag,
|
||||
.effectiveUrl = res.effectiveUri,
|
||||
};
|
||||
}
|
||||
|
||||
Tree downloadTarball(
|
||||
ref<Store> store,
|
||||
const std::string & url,
|
||||
const std::string & name,
|
||||
bool immutable)
|
||||
{
|
||||
Attrs inAttrs({
|
||||
{"type", "tarball"},
|
||||
{"url", url},
|
||||
{"name", name},
|
||||
});
|
||||
|
||||
auto cached = getCache()->lookupExpired(store, inAttrs);
|
||||
|
||||
if (cached && !cached->expired)
|
||||
return Tree {
|
||||
.actualPath = store->toRealPath(cached->storePath),
|
||||
.storePath = std::move(cached->storePath),
|
||||
.info = TreeInfo {
|
||||
.lastModified = getIntAttr(cached->infoAttrs, "lastModified"),
|
||||
},
|
||||
};
|
||||
|
||||
auto res = downloadFile(store, url, name, immutable);
|
||||
|
||||
std::optional<StorePath> unpackedStorePath;
|
||||
time_t lastModified;
|
||||
|
||||
if (cached && res.etag != "" && getStrAttr(cached->infoAttrs, "etag") == res.etag) {
|
||||
unpackedStorePath = std::move(cached->storePath);
|
||||
lastModified = getIntAttr(cached->infoAttrs, "lastModified");
|
||||
} else {
|
||||
Path tmpDir = createTempDir();
|
||||
AutoDelete autoDelete(tmpDir, true);
|
||||
unpackTarfile(store->toRealPath(res.storePath), tmpDir);
|
||||
auto members = readDirectory(tmpDir);
|
||||
if (members.size() != 1)
|
||||
throw nix::Error("tarball '%s' contains an unexpected number of top-level files", url);
|
||||
auto topDir = tmpDir + "/" + members.begin()->name;
|
||||
lastModified = lstat(topDir).st_mtime;
|
||||
unpackedStorePath = store->addToStore(name, topDir, FileIngestionMethod::Recursive, htSHA256, defaultPathFilter, NoRepair);
|
||||
}
|
||||
|
||||
Attrs infoAttrs({
|
||||
{"lastModified", lastModified},
|
||||
{"etag", res.etag},
|
||||
});
|
||||
|
||||
getCache()->add(
|
||||
store,
|
||||
inAttrs,
|
||||
infoAttrs,
|
||||
*unpackedStorePath,
|
||||
immutable);
|
||||
|
||||
return Tree {
|
||||
.actualPath = store->toRealPath(*unpackedStorePath),
|
||||
.storePath = std::move(*unpackedStorePath),
|
||||
.info = TreeInfo {
|
||||
.lastModified = lastModified,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
struct TarballInput : Input
|
||||
{
|
||||
ParsedURL url;
|
||||
std::optional<Hash> hash;
|
||||
|
||||
TarballInput(const ParsedURL & url) : url(url)
|
||||
{ }
|
||||
|
||||
std::string type() const override { return "tarball"; }
|
||||
|
||||
bool operator ==(const Input & other) const override
|
||||
{
|
||||
auto other2 = dynamic_cast<const TarballInput *>(&other);
|
||||
return
|
||||
other2
|
||||
&& to_string() == other2->to_string()
|
||||
&& hash == other2->hash;
|
||||
}
|
||||
|
||||
bool isImmutable() const override
|
||||
{
|
||||
return hash || narHash;
|
||||
}
|
||||
|
||||
ParsedURL toURL() const override
|
||||
{
|
||||
auto url2(url);
|
||||
// NAR hashes are preferred over file hashes since tar/zip files
|
||||
// don't have a canonical representation.
|
||||
if (narHash)
|
||||
url2.query.insert_or_assign("narHash", narHash->to_string(SRI));
|
||||
else if (hash)
|
||||
url2.query.insert_or_assign("hash", hash->to_string(SRI));
|
||||
return url2;
|
||||
}
|
||||
|
||||
Attrs toAttrsInternal() const override
|
||||
{
|
||||
Attrs attrs;
|
||||
attrs.emplace("url", url.to_string());
|
||||
if (hash)
|
||||
attrs.emplace("hash", hash->to_string(SRI));
|
||||
return attrs;
|
||||
}
|
||||
|
||||
std::pair<Tree, std::shared_ptr<const Input>> fetchTreeInternal(nix::ref<Store> store) const override
|
||||
{
|
||||
auto tree = downloadTarball(store, url.to_string(), "source", false);
|
||||
|
||||
auto input = std::make_shared<TarballInput>(*this);
|
||||
input->narHash = store->queryPathInfo(tree.storePath)->narHash;
|
||||
|
||||
return {std::move(tree), input};
|
||||
}
|
||||
};
|
||||
|
||||
struct TarballInputScheme : InputScheme
|
||||
{
|
||||
std::unique_ptr<Input> inputFromURL(const ParsedURL & url) override
|
||||
{
|
||||
if (url.scheme != "file" && url.scheme != "http" && url.scheme != "https") return nullptr;
|
||||
|
||||
if (!hasSuffix(url.path, ".zip")
|
||||
&& !hasSuffix(url.path, ".tar")
|
||||
&& !hasSuffix(url.path, ".tar.gz")
|
||||
&& !hasSuffix(url.path, ".tar.xz")
|
||||
&& !hasSuffix(url.path, ".tar.bz2"))
|
||||
return nullptr;
|
||||
|
||||
auto input = std::make_unique<TarballInput>(url);
|
||||
|
||||
auto hash = input->url.query.find("hash");
|
||||
if (hash != input->url.query.end()) {
|
||||
// FIXME: require SRI hash.
|
||||
input->hash = Hash(hash->second);
|
||||
input->url.query.erase(hash);
|
||||
}
|
||||
|
||||
auto narHash = input->url.query.find("narHash");
|
||||
if (narHash != input->url.query.end()) {
|
||||
// FIXME: require SRI hash.
|
||||
input->narHash = Hash(narHash->second);
|
||||
input->url.query.erase(narHash);
|
||||
}
|
||||
|
||||
return input;
|
||||
}
|
||||
|
||||
std::unique_ptr<Input> inputFromAttrs(const Attrs & attrs) override
|
||||
{
|
||||
if (maybeGetStrAttr(attrs, "type") != "tarball") return {};
|
||||
|
||||
for (auto & [name, value] : attrs)
|
||||
if (name != "type" && name != "url" && name != "hash")
|
||||
throw Error("unsupported tarball input attribute '%s'", name);
|
||||
|
||||
auto input = std::make_unique<TarballInput>(parseURL(getStrAttr(attrs, "url")));
|
||||
if (auto hash = maybeGetStrAttr(attrs, "hash"))
|
||||
// FIXME: require SRI hash.
|
||||
input->hash = Hash(*hash);
|
||||
|
||||
return input;
|
||||
}
|
||||
};
|
||||
|
||||
static auto r1 = OnStartup([] { registerInputScheme(std::make_unique<TarballInputScheme>()); });
|
||||
|
||||
}
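
TarballInputScheme above only claims a plain file/http/https URL when the path looks like an archive. A minimal stand-alone version of that suffix test (with a local hasSuffix helper in place of nix's, and invented sample paths) might look like this:

// Sketch of the extension check TarballInputScheme uses to claim a URL.
#include <iostream>
#include <string>
#include <vector>

static bool hasSuffix(const std::string & s, const std::string & suffix)
{
    return s.size() >= suffix.size()
        && s.compare(s.size() - suffix.size(), suffix.size(), suffix) == 0;
}

static bool looksLikeTarball(const std::string & path)
{
    // Same suffix list as the inputFromURL override above.
    static const std::vector<std::string> suffixes{
        ".zip", ".tar", ".tar.gz", ".tar.xz", ".tar.bz2"};
    for (auto & suffix : suffixes)
        if (hasSuffix(path, suffix)) return true;
    return false;
}

int main()
{
    for (std::string path : {"/dist/source.tar.gz", "/dist/readme.txt"})
        std::cout << path << " -> "
                  << (looksLikeTarball(path) ? "tarball input" : "not claimed") << "\n";
}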
|
14
src/libfetchers/tree-info.cc
Normal file
|
@ -0,0 +1,14 @@
#include "tree-info.hh"
#include "store-api.hh"

#include <nlohmann/json.hpp>

namespace nix::fetchers {

StorePath TreeInfo::computeStorePath(Store & store) const
{
    assert(narHash);
    return store.makeFixedOutputPath(FileIngestionMethod::Recursive, narHash, "source");
}

}
|
29
src/libfetchers/tree-info.hh
Normal file
|
@ -0,0 +1,29 @@
#pragma once

#include "path.hh"
#include "hash.hh"

#include <nlohmann/json_fwd.hpp>

namespace nix { class Store; }

namespace nix::fetchers {

struct TreeInfo
{
    Hash narHash;
    std::optional<uint64_t> revCount;
    std::optional<time_t> lastModified;

    bool operator ==(const TreeInfo & other) const
    {
        return
            narHash == other.narHash
            && revCount == other.revCount
            && lastModified == other.lastModified;
    }

    StorePath computeStorePath(Store & store) const;
};

}
|
|
@ -6,43 +6,47 @@ namespace nix {
|
|||
MixCommonArgs::MixCommonArgs(const string & programName)
|
||||
: programName(programName)
|
||||
{
|
||||
mkFlag()
|
||||
.longName("verbose")
|
||||
.shortName('v')
|
||||
.description("increase verbosity level")
|
||||
.handler([]() { verbosity = (Verbosity) (verbosity + 1); });
|
||||
addFlag({
|
||||
.longName = "verbose",
|
||||
.shortName = 'v',
|
||||
.description = "increase verbosity level",
|
||||
.handler = {[]() { verbosity = (Verbosity) (verbosity + 1); }},
|
||||
});
|
||||
|
||||
mkFlag()
|
||||
.longName("quiet")
|
||||
.description("decrease verbosity level")
|
||||
.handler([]() { verbosity = verbosity > lvlError ? (Verbosity) (verbosity - 1) : lvlError; });
|
||||
addFlag({
|
||||
.longName = "quiet",
|
||||
.description = "decrease verbosity level",
|
||||
.handler = {[]() { verbosity = verbosity > lvlError ? (Verbosity) (verbosity - 1) : lvlError; }},
|
||||
});
|
||||
|
||||
mkFlag()
|
||||
.longName("debug")
|
||||
.description("enable debug output")
|
||||
.handler([]() { verbosity = lvlDebug; });
|
||||
addFlag({
|
||||
.longName = "debug",
|
||||
.description = "enable debug output",
|
||||
.handler = {[]() { verbosity = lvlDebug; }},
|
||||
});
|
||||
|
||||
mkFlag()
|
||||
.longName("option")
|
||||
.labels({"name", "value"})
|
||||
.description("set a Nix configuration option (overriding nix.conf)")
|
||||
.arity(2)
|
||||
.handler([](std::vector<std::string> ss) {
|
||||
addFlag({
|
||||
.longName = "option",
|
||||
.description = "set a Nix configuration option (overriding nix.conf)",
|
||||
.labels = {"name", "value"},
|
||||
.handler = {[](std::string name, std::string value) {
|
||||
try {
|
||||
globalConfig.set(ss[0], ss[1]);
|
||||
globalConfig.set(name, value);
|
||||
} catch (UsageError & e) {
|
||||
warn(e.what());
|
||||
}
|
||||
});
|
||||
}},
|
||||
});
|
||||
|
||||
mkFlag()
|
||||
.longName("max-jobs")
|
||||
.shortName('j')
|
||||
.label("jobs")
|
||||
.description("maximum number of parallel builds")
|
||||
.handler([=](std::string s) {
|
||||
addFlag({
|
||||
.longName = "max-jobs",
|
||||
.shortName = 'j',
|
||||
.description = "maximum number of parallel builds",
|
||||
.labels = Strings{"jobs"},
|
||||
.handler = {[=](std::string s) {
|
||||
settings.set("max-jobs", s);
|
||||
});
|
||||
}}
|
||||
});
|
||||
|
||||
std::string cat = "config";
|
||||
globalConfig.convertToArgs(*this, cat);
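
The hunk above converts the old fluent mkFlag() builder calls into addFlag() calls taking a designated-initializer struct. As a hedged, standalone sketch of the style (Flag and addFlag here are simplified stand-ins defined locally, not nix's actual Args API, and the flag shown mirrors the --verbose definition above):

// Standalone mimic of the struct-initializer flag style used above.
// 'Flag' is a simplified stand-in for nix's Args::Flag, not the real class.
#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct Flag {
    std::string longName;
    char shortName = 0;
    std::string description;
    std::function<void()> handler;
};

static std::vector<Flag> flags;

static void addFlag(Flag && flag) { flags.push_back(std::move(flag)); }

int main()
{
    int verbosity = 0;

    // Designated initializers (C++20, or earlier with GCC/Clang extensions)
    // make the flag definition read like a small config block.
    addFlag({
        .longName = "verbose",
        .shortName = 'v',
        .description = "increase verbosity level",
        .handler = {[&]() { verbosity++; }},
    });

    for (auto & flag : flags) {
        flag.handler();
        std::cout << "--" << flag.longName << ": " << flag.description << "\n";
    }
    std::cout << "verbosity = " << verbosity << "\n";
}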
|
||||
|
|
|
@ -6,6 +6,8 @@ libmain_DIR := $(d)

libmain_SOURCES := $(wildcard $(d)/*.cc)

libmain_CXXFLAGS += -I src/libutil -I src/libstore

libmain_LDFLAGS = $(OPENSSL_LIBS)

libmain_LIBS = libstore libutil
|
||||
|
|
|
@ -155,7 +155,7 @@ void initNix()
       sshd). This breaks build users because they don't have access
       to the TMPDIR, in particular in ‘nix-store --serve’. */
#if __APPLE__
    if (getuid() == 0 && hasPrefix(getEnv("TMPDIR").value_or("/tmp"), "/var/folders/"))
    if (hasPrefix(getEnv("TMPDIR").value_or("/tmp"), "/var/folders/"))
        unsetenv("TMPDIR");
#endif
}
|
||||
|
@ -165,28 +165,32 @@ LegacyArgs::LegacyArgs(const std::string & programName,
|
|||
std::function<bool(Strings::iterator & arg, const Strings::iterator & end)> parseArg)
|
||||
: MixCommonArgs(programName), parseArg(parseArg)
|
||||
{
|
||||
mkFlag()
|
||||
.longName("no-build-output")
|
||||
.shortName('Q')
|
||||
.description("do not show build output")
|
||||
.set(&settings.verboseBuild, false);
|
||||
addFlag({
|
||||
.longName = "no-build-output",
|
||||
.shortName = 'Q',
|
||||
.description = "do not show build output",
|
||||
.handler = {&settings.verboseBuild, false},
|
||||
});
|
||||
|
||||
mkFlag()
|
||||
.longName("keep-failed")
|
||||
.shortName('K')
|
||||
.description("keep temporary directories of failed builds")
|
||||
.set(&(bool&) settings.keepFailed, true);
|
||||
addFlag({
|
||||
.longName = "keep-failed",
|
||||
.shortName ='K',
|
||||
.description = "keep temporary directories of failed builds",
|
||||
.handler = {&(bool&) settings.keepFailed, true},
|
||||
});
|
||||
|
||||
mkFlag()
|
||||
.longName("keep-going")
|
||||
.shortName('k')
|
||||
.description("keep going after a build fails")
|
||||
.set(&(bool&) settings.keepGoing, true);
|
||||
addFlag({
|
||||
.longName = "keep-going",
|
||||
.shortName ='k',
|
||||
.description = "keep going after a build fails",
|
||||
.handler = {&(bool&) settings.keepGoing, true},
|
||||
});
|
||||
|
||||
mkFlag()
|
||||
.longName("fallback")
|
||||
.description("build from source if substitution fails")
|
||||
.set(&(bool&) settings.tryFallback, true);
|
||||
addFlag({
|
||||
.longName = "fallback",
|
||||
.description = "build from source if substitution fails",
|
||||
.handler = {&(bool&) settings.tryFallback, true},
|
||||
});
|
||||
|
||||
auto intSettingAlias = [&](char shortName, const std::string & longName,
|
||||
const std::string & description, const std::string & dest) {
|
||||
|
@ -205,11 +209,12 @@ LegacyArgs::LegacyArgs(const std::string & programName,
|
|||
mkFlag(0, "no-gc-warning", "disable warning about not using '--add-root'",
|
||||
&gcWarning, false);
|
||||
|
||||
mkFlag()
|
||||
.longName("store")
|
||||
.label("store-uri")
|
||||
.description("URI of the Nix store to use")
|
||||
.dest(&(std::string&) settings.storeUri);
|
||||
addFlag({
|
||||
.longName = "store",
|
||||
.description = "URI of the Nix store to use",
|
||||
.labels = {"store-uri"},
|
||||
.handler = {&(std::string&) settings.storeUri},
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
|
@ -260,7 +265,10 @@ void printVersion(const string & programName)
|
|||
cfg.push_back("signed-caches");
|
||||
#endif
|
||||
std::cout << "Features: " << concatStringsSep(", ", cfg) << "\n";
|
||||
std::cout << "Configuration file: " << settings.nixConfDir + "/nix.conf" << "\n";
|
||||
std::cout << "System configuration file: " << settings.nixConfDir + "/nix.conf" << "\n";
|
||||
std::cout << "User configuration files: " <<
|
||||
concatStringsSep(":", settings.nixUserConfFiles)
|
||||
<< "\n";
|
||||
std::cout << "Store directory: " << settings.nixStore << "\n";
|
||||
std::cout << "State directory: " << settings.nixStateDir << "\n";
|
||||
}
|
||||
|
|
|
@ -113,9 +113,12 @@ void BinaryCacheStore::writeNarInfo(ref<NarInfo> narInfo)
|
|||
diskCache->upsertNarInfo(getUri(), hashPart, std::shared_ptr<NarInfo>(narInfo));
|
||||
}
|
||||
|
||||
void BinaryCacheStore::addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
|
||||
void BinaryCacheStore::addToStore(const ValidPathInfo & info, Source & narSource,
|
||||
RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr<FSAccessor> accessor)
|
||||
{
|
||||
// FIXME: See if we can use the original source to reduce memory usage.
|
||||
auto nar = make_ref<std::string>(narSource.drain());
|
||||
|
||||
if (!repair && isValidPath(info.path)) return;
|
||||
|
||||
/* Verify that all references are valid. This may do some .narinfo
|
||||
|
@ -327,7 +330,7 @@ void BinaryCacheStore::queryPathInfoUncached(const StorePath & storePath,
|
|||
}
|
||||
|
||||
StorePath BinaryCacheStore::addToStore(const string & name, const Path & srcPath,
|
||||
bool recursive, HashType hashAlgo, PathFilter & filter, RepairFlag repair)
|
||||
FileIngestionMethod method, HashType hashAlgo, PathFilter & filter, RepairFlag repair)
|
||||
{
|
||||
// FIXME: some cut&paste from LocalStore::addToStore().
|
||||
|
||||
|
@ -336,7 +339,7 @@ StorePath BinaryCacheStore::addToStore(const string & name, const Path & srcPath
|
|||
small files. */
|
||||
StringSink sink;
|
||||
Hash h;
|
||||
if (recursive) {
|
||||
if (method == FileIngestionMethod::Recursive) {
|
||||
dumpPath(srcPath, sink, filter);
|
||||
h = hashString(hashAlgo, *sink.s);
|
||||
} else {
|
||||
|
@ -345,9 +348,10 @@ StorePath BinaryCacheStore::addToStore(const string & name, const Path & srcPath
|
|||
h = hashString(hashAlgo, s);
|
||||
}
|
||||
|
||||
ValidPathInfo info(makeFixedOutputPath(recursive, h, name));
|
||||
ValidPathInfo info(makeFixedOutputPath(method, h, name));
|
||||
|
||||
addToStore(info, sink.s, repair, CheckSigs, nullptr);
|
||||
auto source = StringSource { *sink.s };
|
||||
addToStore(info, source, repair, CheckSigs, nullptr);
|
||||
|
||||
return std::move(info.path);
|
||||
}
|
||||
|
@ -361,7 +365,8 @@ StorePath BinaryCacheStore::addTextToStore(const string & name, const string & s
|
|||
if (repair || !isValidPath(info.path)) {
|
||||
StringSink sink;
|
||||
dumpString(s, sink);
|
||||
addToStore(info, sink.s, repair, CheckSigs, nullptr);
|
||||
auto source = StringSource { *sink.s };
|
||||
addToStore(info, source, repair, CheckSigs, nullptr);
|
||||
}
|
||||
|
||||
return std::move(info.path);
|
||||
|
|
|
@ -74,12 +74,12 @@ public:
|
|||
std::optional<StorePath> queryPathFromHashPart(const std::string & hashPart) override
|
||||
{ unsupported("queryPathFromHashPart"); }
|
||||
|
||||
void addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
|
||||
void addToStore(const ValidPathInfo & info, Source & narSource,
|
||||
RepairFlag repair, CheckSigsFlag checkSigs,
|
||||
std::shared_ptr<FSAccessor> accessor) override;
|
||||
|
||||
StorePath addToStore(const string & name, const Path & srcPath,
|
||||
bool recursive, HashType hashAlgo,
|
||||
FileIngestionMethod method, HashType hashAlgo,
|
||||
PathFilter & filter, RepairFlag repair) override;
|
||||
|
||||
StorePath addTextToStore(const string & name, const string & s,
|
||||
|
|
|
@ -6,7 +6,8 @@
|
|||
#include "archive.hh"
|
||||
#include "affinity.hh"
|
||||
#include "builtins.hh"
|
||||
#include "download.hh"
|
||||
#include "builtins/buildenv.hh"
|
||||
#include "filetransfer.hh"
|
||||
#include "finally.hh"
|
||||
#include "compression.hh"
|
||||
#include "json.hh"
|
||||
|
@ -32,7 +33,6 @@
|
|||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/utsname.h>
|
||||
#include <sys/select.h>
|
||||
#include <sys/resource.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/un.h>
|
||||
|
@ -42,6 +42,7 @@
|
|||
#include <errno.h>
|
||||
#include <cstring>
|
||||
#include <termios.h>
|
||||
#include <poll.h>
|
||||
|
||||
#include <pwd.h>
|
||||
#include <grp.h>
|
||||
|
@ -360,7 +361,7 @@ public:
|
|||
{
|
||||
actDerivations.progress(doneBuilds, expectedBuilds + doneBuilds, runningBuilds, failedBuilds);
|
||||
actSubstitutions.progress(doneSubstitutions, expectedSubstitutions + doneSubstitutions, runningSubstitutions, failedSubstitutions);
|
||||
act.setExpected(actDownload, expectedDownloadSize + doneDownloadSize);
|
||||
act.setExpected(actFileTransfer, expectedDownloadSize + doneDownloadSize);
|
||||
act.setExpected(actCopyPath, expectedNarSize + doneNarSize);
|
||||
}
|
||||
};
|
||||
|
@ -506,9 +507,10 @@ private:
|
|||
Path fnUserLock;
|
||||
AutoCloseFD fdUserLock;
|
||||
|
||||
bool isEnabled = false;
|
||||
string user;
|
||||
uid_t uid;
|
||||
gid_t gid;
|
||||
uid_t uid = 0;
|
||||
gid_t gid = 0;
|
||||
std::vector<gid_t> supplementaryGIDs;
|
||||
|
||||
public:
|
||||
|
@ -521,7 +523,9 @@ public:
|
|||
uid_t getGID() { assert(gid); return gid; }
|
||||
std::vector<gid_t> getSupplementaryGIDs() { return supplementaryGIDs; }
|
||||
|
||||
bool enabled() { return uid != 0; }
|
||||
bool findFreeUser();
|
||||
|
||||
bool enabled() { return isEnabled; }
|
||||
|
||||
};
|
||||
|
||||
|
@ -529,6 +533,11 @@ public:
|
|||
UserLock::UserLock()
|
||||
{
|
||||
assert(settings.buildUsersGroup != "");
|
||||
createDirs(settings.nixStateDir + "/userpool");
|
||||
}
|
||||
|
||||
bool UserLock::findFreeUser() {
|
||||
if (enabled()) return true;
|
||||
|
||||
/* Get the members of the build-users-group. */
|
||||
struct group * gr = getgrnam(settings.buildUsersGroup.get().c_str());
|
||||
|
@ -558,7 +567,6 @@ UserLock::UserLock()
|
|||
throw Error(format("the user '%1%' in the group '%2%' does not exist")
|
||||
% i % settings.buildUsersGroup);
|
||||
|
||||
createDirs(settings.nixStateDir + "/userpool");
|
||||
|
||||
fnUserLock = (format("%1%/userpool/%2%") % settings.nixStateDir % pw->pw_uid).str();
|
||||
|
||||
|
@ -589,16 +597,13 @@ UserLock::UserLock()
|
|||
supplementaryGIDs.resize(ngroups);
|
||||
#endif
|
||||
|
||||
return;
|
||||
isEnabled = true;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
throw Error(format("all build users are currently in use; "
|
||||
"consider creating additional users and adding them to the '%1%' group")
|
||||
% settings.buildUsersGroup);
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
void UserLock::kill()
|
||||
{
|
||||
killUser(uid);
|
||||
|
@ -927,6 +932,7 @@ private:
|
|||
void closureRepaired();
|
||||
void inputsRealised();
|
||||
void tryToBuild();
|
||||
void tryLocalBuild();
|
||||
void buildDone();
|
||||
|
||||
/* Is the build hook willing to perform the build? */
|
||||
|
@ -998,6 +1004,8 @@ private:
|
|||
Goal::amDone(result);
|
||||
}
|
||||
|
||||
void started();
|
||||
|
||||
void done(BuildResult::Status status, const string & msg = "");
|
||||
|
||||
StorePathSet exportReferences(const StorePathSet & storePaths);
|
||||
|
@ -1385,6 +1393,19 @@ void DerivationGoal::inputsRealised()
|
|||
result = BuildResult();
|
||||
}
|
||||
|
||||
void DerivationGoal::started() {
|
||||
auto msg = fmt(
|
||||
buildMode == bmRepair ? "repairing outputs of '%s'" :
|
||||
buildMode == bmCheck ? "checking outputs of '%s'" :
|
||||
nrRounds > 1 ? "building '%s' (round %d/%d)" :
|
||||
"building '%s'", worker.store.printStorePath(drvPath), curRound, nrRounds);
|
||||
fmt("building '%s'", worker.store.printStorePath(drvPath));
|
||||
if (hook) msg += fmt(" on '%s'", machineName);
|
||||
act = std::make_unique<Activity>(*logger, lvlInfo, actBuild, msg,
|
||||
Logger::Fields{worker.store.printStorePath(drvPath), hook ? machineName : "", curRound, nrRounds});
|
||||
mcRunningBuilds = std::make_unique<MaintainCount<uint64_t>>(worker.runningBuilds);
|
||||
worker.updateProgress();
|
||||
}
|
||||
|
||||
void DerivationGoal::tryToBuild()
|
||||
{
|
||||
|
@ -1397,7 +1418,7 @@ void DerivationGoal::tryToBuild()
|
|||
few seconds and then retry this goal. */
|
||||
PathSet lockFiles;
|
||||
for (auto & outPath : drv->outputPaths())
|
||||
lockFiles.insert(worker.store.toRealPath(worker.store.printStorePath(outPath)));
|
||||
lockFiles.insert(worker.store.Store::toRealPath(outPath));
|
||||
|
||||
if (!outputLocks.lockPaths(lockFiles, "", false)) {
|
||||
worker.waitForAWhile(shared_from_this());
|
||||
|
@ -1428,7 +1449,7 @@ void DerivationGoal::tryToBuild()
|
|||
for (auto & i : drv->outputs) {
|
||||
if (worker.store.isValidPath(i.second.path)) continue;
|
||||
debug("removing invalid path '%s'", worker.store.printStorePath(i.second.path));
|
||||
deletePath(worker.store.toRealPath(worker.store.printStorePath(i.second.path)));
|
||||
deletePath(worker.store.Store::toRealPath(i.second.path));
|
||||
}
|
||||
|
||||
/* Don't do a remote build if the derivation has the attribute
|
||||
|
@ -1436,20 +1457,6 @@ void DerivationGoal::tryToBuild()
|
|||
supported for local builds. */
|
||||
bool buildLocally = buildMode != bmNormal || parsedDrv->willBuildLocally();
|
||||
|
||||
auto started = [&]() {
|
||||
auto msg = fmt(
|
||||
buildMode == bmRepair ? "repairing outputs of '%s'" :
|
||||
buildMode == bmCheck ? "checking outputs of '%s'" :
|
||||
nrRounds > 1 ? "building '%s' (round %d/%d)" :
|
||||
"building '%s'", worker.store.printStorePath(drvPath), curRound, nrRounds);
|
||||
fmt("building '%s'", worker.store.printStorePath(drvPath));
|
||||
if (hook) msg += fmt(" on '%s'", machineName);
|
||||
act = std::make_unique<Activity>(*logger, lvlInfo, actBuild, msg,
|
||||
Logger::Fields{worker.store.printStorePath(drvPath), hook ? machineName : "", curRound, nrRounds});
|
||||
mcRunningBuilds = std::make_unique<MaintainCount<uint64_t>>(worker.runningBuilds);
|
||||
worker.updateProgress();
|
||||
};
|
||||
|
||||
/* Is the build hook willing to accept this job? */
|
||||
if (!buildLocally) {
|
||||
switch (tryBuildHook()) {
|
||||
|
@ -1482,6 +1489,34 @@ void DerivationGoal::tryToBuild()
|
|||
return;
|
||||
}
|
||||
|
||||
state = &DerivationGoal::tryLocalBuild;
|
||||
worker.wakeUp(shared_from_this());
|
||||
}
|
||||
|
||||
void DerivationGoal::tryLocalBuild() {
|
||||
|
||||
/* If `build-users-group' is not empty, then we have to build as
|
||||
one of the members of that group. */
|
||||
if (settings.buildUsersGroup != "" && getuid() == 0) {
|
||||
#if defined(__linux__) || defined(__APPLE__)
|
||||
if (!buildUser) buildUser = std::make_unique<UserLock>();
|
||||
|
||||
if (buildUser->findFreeUser()) {
|
||||
/* Make sure that no other processes are executing under this
|
||||
uid. */
|
||||
buildUser->kill();
|
||||
} else {
|
||||
debug("waiting for build users");
|
||||
worker.waitForAWhile(shared_from_this());
|
||||
return;
|
||||
}
|
||||
#else
|
||||
/* Don't know how to block the creation of setuid/setgid
|
||||
binaries on this platform. */
|
||||
throw Error("build users are not supported on this platform for security reasons");
|
||||
#endif
|
||||
}
|
||||
|
||||
try {
|
||||
|
||||
/* Okay, we have to build. */
|
||||
|
@ -1679,13 +1714,14 @@ void DerivationGoal::buildDone()
|
|||
}
|
||||
|
||||
if (buildMode == bmCheck) {
|
||||
deleteTmpDir(true);
|
||||
done(BuildResult::Built);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Delete unused redirected outputs (when doing hash rewriting). */
|
||||
for (auto & i : redirectedOutputs)
|
||||
deletePath(worker.store.toRealPath(worker.store.printStorePath(i.second)));
|
||||
deletePath(worker.store.Store::toRealPath(i.second));
|
||||
|
||||
/* Delete the chroot (if we were using one). */
|
||||
autoDelChroot.reset(); /* this runs the destructor */
|
||||
|
@ -1904,7 +1940,7 @@ void DerivationGoal::startBuilder()
|
|||
concatStringsSep(", ", parsedDrv->getRequiredSystemFeatures()),
|
||||
worker.store.printStorePath(drvPath),
|
||||
settings.thisSystem,
|
||||
concatStringsSep(", ", settings.systemFeatures));
|
||||
concatStringsSep<StringSet>(", ", settings.systemFeatures));
|
||||
|
||||
if (drv->isBuiltin())
|
||||
preloadNSS();
|
||||
|
@ -1941,22 +1977,6 @@ void DerivationGoal::startBuilder()
|
|||
#endif
|
||||
}
|
||||
|
||||
/* If `build-users-group' is not empty, then we have to build as
|
||||
one of the members of that group. */
|
||||
if (settings.buildUsersGroup != "" && getuid() == 0) {
|
||||
#if defined(__linux__) || defined(__APPLE__)
|
||||
buildUser = std::make_unique<UserLock>();
|
||||
|
||||
/* Make sure that no other processes are executing under this
|
||||
uid. */
|
||||
buildUser->kill();
|
||||
#else
|
||||
/* Don't know how to block the creation of setuid/setgid
|
||||
binaries on this platform. */
|
||||
throw Error("build users are not supported on this platform for security reasons");
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Create a temporary directory where the build will take
|
||||
place. */
|
||||
tmpDir = createTempDir("", "nix-build-" + std::string(drvPath.name()), false, false, 0700);
|
||||
|
@ -2071,7 +2091,7 @@ void DerivationGoal::startBuilder()
|
|||
environment using bind-mounts. We put it in the Nix store
|
||||
to ensure that we can create hard-links to non-directory
|
||||
inputs in the fake Nix store in the chroot (see below). */
|
||||
chrootRootDir = worker.store.toRealPath(worker.store.printStorePath(drvPath)) + ".chroot";
|
||||
chrootRootDir = worker.store.Store::toRealPath(drvPath) + ".chroot";
|
||||
deletePath(chrootRootDir);
|
||||
|
||||
/* Clean up the chroot directory automatically. */
|
||||
|
@ -2160,7 +2180,7 @@ void DerivationGoal::startBuilder()
|
|||
if (needsHashRewrite()) {
|
||||
|
||||
if (pathExists(homeDir))
|
||||
throw Error(format("directory '%1%' exists; please remove it") % homeDir);
|
||||
throw Error(format("home directory '%1%' exists; please remove it to assure purity of builds without sandboxing") % homeDir);
|
||||
|
||||
/* We're not doing a chroot build, but we have some valid
|
||||
output paths. Since we can't just overwrite or delete
|
||||
|
@ -2248,10 +2268,13 @@ void DerivationGoal::startBuilder()
|
|||
|
||||
if (chown(slaveName.c_str(), buildUser->getUID(), 0))
|
||||
throw SysError("changing owner of pseudoterminal slave");
|
||||
} else {
|
||||
}
|
||||
#if __APPLE__
|
||||
else {
|
||||
if (grantpt(builderOut.readSide.get()))
|
||||
throw SysError("granting access to pseudoterminal slave");
|
||||
}
|
||||
#endif
|
||||
|
||||
#if 0
|
||||
// Mount the pt in the sandbox so that the "tty" command works.
|
||||
|
@ -2464,7 +2487,7 @@ void DerivationGoal::initTmpDir() {
|
|||
auto hash = hashString(htSHA256, i.first);
|
||||
string fn = ".attr-" + hash.to_string(Base32, false);
|
||||
Path p = tmpDir + "/" + fn;
|
||||
writeFile(p, i.second);
|
||||
writeFile(p, rewriteStrings(i.second, inputRewrites));
|
||||
chownToBuilder(p);
|
||||
env[i.first + "Path"] = tmpDirInSandbox + "/" + fn;
|
||||
}
|
||||
|
@ -2550,7 +2573,7 @@ static std::regex shVarName("[A-Za-z_][A-Za-z0-9_]*");
|
|||
|
||||
void DerivationGoal::writeStructuredAttrs()
|
||||
{
|
||||
auto & structuredAttrs = parsedDrv->getStructuredAttrs();
|
||||
auto structuredAttrs = parsedDrv->getStructuredAttrs();
|
||||
if (!structuredAttrs) return;
|
||||
|
||||
auto json = *structuredAttrs;
|
||||
|
@ -2711,7 +2734,7 @@ struct RestrictedStore : public LocalFSStore
|
|||
{ throw Error("queryPathFromHashPart"); }
|
||||
|
||||
StorePath addToStore(const string & name, const Path & srcPath,
|
||||
bool recursive = true, HashType hashAlgo = htSHA256,
|
||||
FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256,
|
||||
PathFilter & filter = defaultPathFilter, RepairFlag repair = NoRepair) override
|
||||
{ throw Error("addToStore"); }
|
||||
|
||||
|
@ -2724,9 +2747,9 @@ struct RestrictedStore : public LocalFSStore
|
|||
}
|
||||
|
||||
StorePath addToStoreFromDump(const string & dump, const string & name,
|
||||
bool recursive = true, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair) override
|
||||
FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair) override
|
||||
{
|
||||
auto path = next->addToStoreFromDump(dump, name, recursive, hashAlgo, repair);
|
||||
auto path = next->addToStoreFromDump(dump, name, method, hashAlgo, repair);
|
||||
goal.addDependency(path);
|
||||
return path;
|
||||
}
|
||||
|
@ -2916,7 +2939,7 @@ void DerivationGoal::addDependency(const StorePath & path)
|
|||
|
||||
#if __linux__
|
||||
|
||||
Path source = worker.store.toRealPath(worker.store.printStorePath(path));
|
||||
Path source = worker.store.Store::toRealPath(path);
|
||||
Path target = chrootRootDir + worker.store.printStorePath(path);
|
||||
debug("bind-mounting %s -> %s", target, source);
|
||||
|
||||
|
@ -3150,7 +3173,7 @@ void DerivationGoal::runChild()
|
|||
// Only use nss functions to resolve hosts and
|
||||
// services. Don’t use it for anything else that may
|
||||
// be configured for this system. This limits the
|
||||
// potential impurities introduced in fixed outputs.
|
||||
// potential impurities introduced in fixed-outputs.
|
||||
writeFile(chrootRootDir + "/etc/nsswitch.conf", "hosts: files dns\nservices: files\n");
|
||||
|
||||
ss.push_back("/etc/services");
|
||||
|
@ -3535,6 +3558,29 @@ StorePathSet parseReferenceSpecifiers(Store & store, const BasicDerivation & drv
|
|||
}
|
||||
|
||||
|
||||
static void moveCheckToStore(const Path & src, const Path & dst)
|
||||
{
|
||||
/* For the rename of directory to succeed, we must be running as root or
|
||||
the directory must be made temporarily writable (to update the
|
||||
directory's parent link ".."). */
|
||||
struct stat st;
|
||||
if (lstat(src.c_str(), &st) == -1) {
|
||||
throw SysError(format("getting attributes of path '%1%'") % src);
|
||||
}
|
||||
|
||||
bool changePerm = (geteuid() && S_ISDIR(st.st_mode) && !(st.st_mode & S_IWUSR));
|
||||
|
||||
if (changePerm)
|
||||
chmod_(src, st.st_mode | S_IWUSR);
|
||||
|
||||
if (rename(src.c_str(), dst.c_str()))
|
||||
throw SysError(format("renaming '%1%' to '%2%'") % src % dst);
|
||||
|
||||
if (changePerm)
|
||||
chmod_(dst, st.st_mode);
|
||||
}
|
||||
|
||||
|
||||
void DerivationGoal::registerOutputs()
|
||||
{
|
||||
/* When using a build hook, the build hook can register the output
|
||||
|
@ -3578,7 +3624,7 @@ void DerivationGoal::registerOutputs()
|
|||
if (needsHashRewrite()) {
|
||||
auto r = redirectedOutputs.find(i.second.path);
|
||||
if (r != redirectedOutputs.end()) {
|
||||
auto redirected = worker.store.toRealPath(worker.store.printStorePath(r->second));
|
||||
auto redirected = worker.store.Store::toRealPath(r->second);
|
||||
if (buildMode == bmRepair
|
||||
&& redirectedBadOutputs.count(i.second.path)
|
||||
&& pathExists(redirected))
|
||||
|
@@ -3646,21 +3692,24 @@ void DerivationGoal::registerOutputs()
if (i.second.hashAlgo != "") {
bool recursive; HashType ht;
i.second.parseHashType(recursive, ht);
FileIngestionMethod outputHashMode; HashType ht;
i.second.parseHashType(outputHashMode, ht);
if (!recursive) {
if (outputHashMode == FileIngestionMethod::Flat) {
/* The output path should be a regular file without execute permission. */
if (!S_ISREG(st.st_mode) || (st.st_mode & S_IXUSR) != 0)
throw BuildError(
format("output path '%1%' should be a non-executable regular file") % path);
format("output path '%1%' should be a non-executable regular file "
"since recursive hashing is not enabled (outputHashMode=flat)") % path);
}
/* Check the hash. In hash mode, move the path produced by
the derivation to its content-addressed location. */
Hash h2 = recursive ? hashPath(ht, actualPath).first : hashFile(ht, actualPath);
Hash h2 = outputHashMode == FileIngestionMethod::Recursive
? hashPath(ht, actualPath).first
: hashFile(ht, actualPath);
auto dest = worker.store.makeFixedOutputPath(recursive, h2, i.second.path.name());
auto dest = worker.store.makeFixedOutputPath(outputHashMode, h2, i.second.path.name());
// true if either floating CA, or incorrect fixed hash.
bool needsMove = true;
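The hunk above replaces the raw `recursive` bool with the `FileIngestionMethod` enum. The enum's definition is not part of this diff; a plausible sketch, consistent with the `FileIngestionMethod { recursive }` conversion used elsewhere in this change, is the following (assumption, not quoted from the patch):

    // Sketch only: the real definition lives outside the hunks shown here.
    // A bool-backed scoped enum keeps the legacy bool/wire semantics:
    // false -> flat file hashing, true -> recursive (NAR) hashing.
    enum struct FileIngestionMethod : bool {
        Flat = false,
        Recursive = true,
    };

    // Converting a legacy bool, as done in the daemon hunk further below:
    // FileIngestionMethod method { recursive };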
@@ -3682,7 +3731,7 @@ void DerivationGoal::registerOutputs()
}
if (needsMove) {
Path actualDest = worker.store.toRealPath(worker.store.printStorePath(dest));
Path actualDest = worker.store.Store::toRealPath(dest);
if (worker.store.isValidPath(dest))
std::rethrow_exception(delayedException);
@@ -3700,7 +3749,7 @@ void DerivationGoal::registerOutputs()
else
assert(worker.store.parseStorePath(path) == dest);
ca = makeFixedOutputCA(recursive, h2);
ca = makeFixedOutputCA(outputHashMode, h2);
}
/* Get rid of all weird permissions. This also checks that
@@ -3724,8 +3773,7 @@ void DerivationGoal::registerOutputs()
if (settings.runDiffHook || settings.keepFailed) {
Path dst = worker.store.toRealPath(path + checkSuffix);
deletePath(dst);
if (rename(actualPath.c_str(), dst.c_str()))
throw SysError(format("renaming '%1%' to '%2%'") % actualPath % dst);
moveCheckToStore(actualPath, dst);
handleDiffHook(
buildUser ? buildUser->getUID() : getuid(),
@@ -3733,10 +3781,10 @@ void DerivationGoal::registerOutputs()
path, dst, worker.store.printStorePath(drvPath), tmpDir);
throw NotDeterministic("derivation '%s' may not be deterministic: output '%s' differs from '%s'",
worker.store.printStorePath(drvPath), path, dst);
worker.store.printStorePath(drvPath), worker.store.toRealPath(path), dst);
} else
throw NotDeterministic("derivation '%s' may not be deterministic: output '%s' differs",
worker.store.printStorePath(drvPath), path);
worker.store.printStorePath(drvPath), worker.store.toRealPath(path));
}
/* Since we verified the build, it's now ultimately trusted. */
@@ -3922,7 +3970,9 @@ void DerivationGoal::checkOutputs(const std::map<Path, ValidPathInfo> & outputs)
auto spec = parseReferenceSpecifiers(worker.store, *drv, *value);
auto used = recursive ? cloneStorePathSet(getClosure(info.path).first) : cloneStorePathSet(info.references);
auto used = recursive
? cloneStorePathSet(getClosure(info.path).first)
: cloneStorePathSet(info.references);
if (recursive && checks.ignoreSelfRefs)
used.erase(info.path);
@@ -4776,8 +4826,7 @@ void Worker::waitForInput()
terminated. */
bool useTimeout = false;
struct timeval timeout;
timeout.tv_usec = 0;
long timeout = 0;
auto before = steady_time_point::clock::now();
/* If we're monitoring for silence on stdout/stderr, or if there
@@ -4795,7 +4844,7 @@ void Worker::waitForInput()
nearest = std::min(nearest, i.timeStarted + std::chrono::seconds(settings.buildTimeout));
}
if (nearest != steady_time_point::max()) {
timeout.tv_sec = std::max(1L, (long) std::chrono::duration_cast<std::chrono::seconds>(nearest - before).count());
timeout = std::max(1L, (long) std::chrono::duration_cast<std::chrono::seconds>(nearest - before).count());
useTimeout = true;
}
@@ -4804,32 +4853,30 @@ void Worker::waitForInput()
if (!waitingForAWhile.empty()) {
useTimeout = true;
if (lastWokenUp == steady_time_point::min())
printError("waiting for locks or build slots...");
printError("waiting for locks, build slots or build users...");
if (lastWokenUp == steady_time_point::min() || lastWokenUp > before) lastWokenUp = before;
timeout.tv_sec = std::max(1L,
timeout = std::max(1L,
(long) std::chrono::duration_cast<std::chrono::seconds>(
lastWokenUp + std::chrono::seconds(settings.pollInterval) - before).count());
} else lastWokenUp = steady_time_point::min();
if (useTimeout)
vomit("sleeping %d seconds", timeout.tv_sec);
vomit("sleeping %d seconds", timeout);
/* Use select() to wait for the input side of any logger pipe to
become `available'. Note that `available' (i.e., non-blocking)
includes EOF. */
fd_set fds;
FD_ZERO(&fds);
int fdMax = 0;
std::vector<struct pollfd> pollStatus;
std::map <int, int> fdToPollStatus;
for (auto & i : children) {
for (auto & j : i.fds) {
if (j >= FD_SETSIZE)
throw Error("reached FD_SETSIZE limit");
FD_SET(j, &fds);
if (j >= fdMax) fdMax = j + 1;
pollStatus.push_back((struct pollfd) { .fd = j, .events = POLLIN });
fdToPollStatus[j] = pollStatus.size() - 1;
}
}
if (select(fdMax, &fds, 0, 0, useTimeout ? &timeout : 0) == -1) {
if (poll(pollStatus.data(), pollStatus.size(),
useTimeout ? timeout * 1000 : -1) == -1) {
if (errno == EINTR) return;
throw SysError("waiting for input");
}
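The hunk above swaps select() for poll(), which removes the FD_SETSIZE limit and changes the timeout from a struct timeval to milliseconds. A minimal self-contained sketch of the same pattern (not the actual Worker code; the helper name and parameters are illustrative):

    #include <poll.h>
    #include <vector>

    // Wait for readability on a set of fds, mirroring the poll() call above.
    // poll() takes milliseconds; -1 means "block indefinitely", matching the
    // `useTimeout ? timeout * 1000 : -1` expression in the diff.
    int waitForFds(const std::vector<int> & fds, long timeoutSeconds)
    {
        std::vector<struct pollfd> pfds;
        for (int fd : fds) {
            struct pollfd p{};
            p.fd = fd;
            p.events = POLLIN;
            pfds.push_back(p);
        }
        return poll(pfds.data(), pfds.size(),
            timeoutSeconds >= 0 ? timeoutSeconds * 1000 : -1);
    }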
@@ -4850,7 +4897,7 @@ void Worker::waitForInput()
set<int> fds2(j->fds);
std::vector<unsigned char> buffer(4096);
for (auto & k : fds2) {
if (FD_ISSET(k, &fds)) {
if (pollStatus.at(fdToPollStatus.at(k)).revents) {
ssize_t rd = read(k, buffer.data(), buffer.size());
// FIXME: is there a cleaner way to handle pt close
// than EIO? Is this even standard?
@@ -6,7 +6,6 @@ namespace nix {
// TODO: make pluggable.
void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData);
void builtinBuildenv(const BasicDerivation & drv);
void builtinUnpackChannel(const BasicDerivation & drv);
}
@@ -1,4 +1,4 @@
#include "builtins.hh"
#include "buildenv.hh"
#include <sys/stat.h>
#include <sys/types.h>
@@ -7,16 +7,14 @@
namespace nix {
typedef std::map<Path,int> Priorities;
// FIXME: change into local variables.
static Priorities priorities;
static unsigned long symlinks;
struct State
{
std::map<Path, int> priorities;
unsigned long symlinks = 0;
};
/* For each activated package, create symlinks */
static void createLinks(const Path & srcDir, const Path & dstDir, int priority)
static void createLinks(State & state, const Path & srcDir, const Path & dstDir, int priority)
{
DirEntries srcFiles;
@@ -67,7 +65,7 @@ static void createLinks(const Path & srcDir, const Path & dstDir, int priority)
auto res = lstat(dstFile.c_str(), &dstSt);
if (res == 0) {
if (S_ISDIR(dstSt.st_mode)) {
createLinks(srcFile, dstFile, priority);
createLinks(state, srcFile, dstFile, priority);
continue;
} else if (S_ISLNK(dstSt.st_mode)) {
auto target = canonPath(dstFile, true);
@@ -77,8 +75,8 @@ static void createLinks(const Path & srcDir, const Path & dstDir, int priority)
throw SysError(format("unlinking '%1%'") % dstFile);
if (mkdir(dstFile.c_str(), 0755) == -1)
throw SysError(format("creating directory '%1%'"));
createLinks(target, dstFile, priorities[dstFile]);
createLinks(srcFile, dstFile, priority);
createLinks(state, target, dstFile, state.priorities[dstFile]);
createLinks(state, srcFile, dstFile, priority);
continue;
}
} else if (errno != ENOENT)
@@ -90,7 +88,7 @@ static void createLinks(const Path & srcDir, const Path & dstDir, int priority)
auto res = lstat(dstFile.c_str(), &dstSt);
if (res == 0) {
if (S_ISLNK(dstSt.st_mode)) {
auto prevPriority = priorities[dstFile];
auto prevPriority = state.priorities[dstFile];
if (prevPriority == priority)
throw Error(
"packages '%1%' and '%2%' have the same priority %3%; "
@@ -109,67 +107,30 @@ static void createLinks(const Path & srcDir, const Path & dstDir, int priority)
}
createSymlink(srcFile, dstFile);
priorities[dstFile] = priority;
symlinks++;
state.priorities[dstFile] = priority;
state.symlinks++;
}
}
typedef std::set<Path> FileProp;
static FileProp done;
static FileProp postponed = FileProp{};
static Path out;
static void addPkg(const Path & pkgDir, int priority)
void buildProfile(const Path & out, Packages && pkgs)
{
if (!done.insert(pkgDir).second) return;
createLinks(pkgDir, out, priority);
State state;
try {
for (const auto & p : tokenizeString<std::vector<string>>(
readFile(pkgDir + "/nix-support/propagated-user-env-packages"), " \n"))
if (!done.count(p))
postponed.insert(p);
} catch (SysError & e) {
if (e.errNo != ENOENT && e.errNo != ENOTDIR) throw;
}
}
std::set<Path> done, postponed;
struct Package {
Path path;
bool active;
int priority;
Package(Path path, bool active, int priority) : path{path}, active{active}, priority{priority} {}
};
auto addPkg = [&](const Path & pkgDir, int priority) {
if (!done.insert(pkgDir).second) return;
createLinks(state, pkgDir, out, priority);
typedef std::vector<Package> Packages;
void builtinBuildenv(const BasicDerivation & drv)
{
auto getAttr = [&](const string & name) {
auto i = drv.env.find(name);
if (i == drv.env.end()) throw Error("attribute '%s' missing", name);
return i->second;
};
out = getAttr("out");
createDirs(out);
/* Convert the stuff we get from the environment back into a
* coherent data type. */
Packages pkgs;
auto derivations = tokenizeString<Strings>(getAttr("derivations"));
while (!derivations.empty()) {
/* !!! We're trusting the caller to structure derivations env var correctly */
auto active = derivations.front(); derivations.pop_front();
auto priority = stoi(derivations.front()); derivations.pop_front();
auto outputs = stoi(derivations.front()); derivations.pop_front();
for (auto n = 0; n < outputs; n++) {
auto path = derivations.front(); derivations.pop_front();
pkgs.emplace_back(path, active != "false", priority);
try {
for (const auto & p : tokenizeString<std::vector<string>>(
readFile(pkgDir + "/nix-support/propagated-user-env-packages"), " \n"))
if (!done.count(p))
postponed.insert(p);
} catch (SysError & e) {
if (e.errNo != ENOENT && e.errNo != ENOTDIR) throw;
}
}
};
/* Symlink to the packages that have been installed explicitly by the
* user. Process in priority order to reduce unnecessary
@@ -189,13 +150,42 @@ void builtinBuildenv(const BasicDerivation & drv)
*/
auto priorityCounter = 1000;
while (!postponed.empty()) {
auto pkgDirs = postponed;
postponed = FileProp{};
std::set<Path> pkgDirs;
postponed.swap(pkgDirs);
for (const auto & pkgDir : pkgDirs)
addPkg(pkgDir, priorityCounter++);
}
printError("created %d symlinks in user environment", symlinks);
debug("created %d symlinks in user environment", state.symlinks);
}
void builtinBuildenv(const BasicDerivation & drv)
{
auto getAttr = [&](const string & name) {
auto i = drv.env.find(name);
if (i == drv.env.end()) throw Error("attribute '%s' missing", name);
return i->second;
};
Path out = getAttr("out");
createDirs(out);
/* Convert the stuff we get from the environment back into a
* coherent data type. */
Packages pkgs;
auto derivations = tokenizeString<Strings>(getAttr("derivations"));
while (!derivations.empty()) {
/* !!! We're trusting the caller to structure derivations env var correctly */
auto active = derivations.front(); derivations.pop_front();
auto priority = stoi(derivations.front()); derivations.pop_front();
auto outputs = stoi(derivations.front()); derivations.pop_front();
for (auto n = 0; n < outputs; n++) {
auto path = derivations.front(); derivations.pop_front();
pkgs.emplace_back(path, active != "false", priority);
}
}
buildProfile(out, std::move(pkgs));
createSymlink(getAttr("manifest"), out + "/manifest.nix");
}
21
src/libstore/builtins/buildenv.hh
Normal file

@@ -0,0 +1,21 @@
#pragma once
#include "derivations.hh"
#include "store-api.hh"
namespace nix {
struct Package {
Path path;
bool active;
int priority;
Package(Path path, bool active, int priority) : path{path}, active{active}, priority{priority} {}
};
typedef std::vector<Package> Packages;
void buildProfile(const Path & out, Packages && pkgs);
void builtinBuildenv(const BasicDerivation & drv);
}
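The new header above exposes Package, Packages, and buildProfile for reuse outside the builder. A hypothetical caller might look like the following sketch; the store paths and priorities are illustrative only and do not appear in this change:

    #include "buildenv.hh"

    using namespace nix;

    void exampleProfile()
    {
        Packages pkgs;
        // path, active flag, priority (lower number wins on collisions)
        pkgs.emplace_back("/nix/store/...-hello-2.10", true, 5);
        pkgs.emplace_back("/nix/store/...-cowsay-3.04", true, 10);

        // Populates the output directory with symlinks, resolving
        // conflicts the same way createLinks() does above.
        buildProfile("/nix/store/...-user-environment", std::move(pkgs));
    }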
@@ -1,5 +1,5 @@
#include "builtins.hh"
#include "download.hh"
#include "filetransfer.hh"
#include "store-api.hh"
#include "archive.hh"
#include "compression.hh"

@@ -26,9 +26,9 @@ void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData)
auto mainUrl = getAttr("url");
bool unpack = get(drv.env, "unpack").value_or("") == "1";
/* Note: have to use a fresh downloader here because we're in
/* Note: have to use a fresh fileTransfer here because we're in
a forked process. */
auto downloader = makeDownloader();
auto fileTransfer = makeFileTransfer();
auto fetch = [&](const std::string & url) {

@@ -36,13 +36,13 @@ void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData)
/* No need to do TLS verification, because we check the hash of
the result anyway. */
DownloadRequest request(url);
FileTransferRequest request(url);
request.verifyTLS = false;
request.decompress = false;
auto decompressor = makeDecompressionSink(
unpack && hasSuffix(mainUrl, ".xz") ? "xz" : "none", sink);
downloader->download(std::move(request), *decompressor);
fileTransfer->download(std::move(request), *decompressor);
decompressor->finish();
});
@@ -355,20 +355,24 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
}
case wopAddToStore: {
bool fixed, recursive;
std::string s, baseName;
from >> baseName >> fixed /* obsolete */ >> recursive >> s;
/* Compatibility hack. */
if (!fixed) {
s = "sha256";
recursive = true;
FileIngestionMethod method;
{
bool fixed, recursive;
from >> baseName >> fixed /* obsolete */ >> recursive >> s;
method = FileIngestionMethod { recursive };
/* Compatibility hack. */
if (!fixed) {
s = "sha256";
method = FileIngestionMethod::Recursive;
}
}
HashType hashAlgo = parseHashType(s);
TeeSource savedNAR(from);
RetrieveRegularNARSink savedRegular;
if (recursive) {
if (method == FileIngestionMethod::Recursive) {
/* Get the entire NAR dump from the client and save it to
a string so that we can pass it to
addToStoreFromDump(). */

@@ -380,7 +384,11 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
logger->startWork();
if (!savedRegular.regular) throw Error("regular file expected");
auto path = store->addToStoreFromDump(recursive ? *savedNAR.data : savedRegular.s, baseName, recursive, hashAlgo);
auto path = store->addToStoreFromDump(
method == FileIngestionMethod::Recursive ? *savedNAR.data : savedRegular.s,
baseName,
method,
hashAlgo);
logger->stopWork();
to << store->printStorePath(path);
@@ -13,13 +13,13 @@ HashType parseHashAlgo(const string & s) {
return parseHashType(s);
}
void DerivationOutput::parseHashType(bool & recursive, HashType & hashType) const
void DerivationOutput::parseHashType(FileIngestionMethod & recursive, HashType & hashType) const
{
recursive = false;
recursive = FileIngestionMethod::Flat;
string algo = hashAlgo;
if (string(algo, 0, 2) == "r:") {
recursive = true;
recursive = FileIngestionMethod::Recursive;
algo = string(algo, 2);
}

@@ -29,7 +29,7 @@ void DerivationOutput::parseHashType(bool & recursive, HashType & hashType) cons
hashType = hashType_loc;
}
void DerivationOutput::parseHashInfo(bool & recursive, Hash & hash) const
void DerivationOutput::parseHashInfo(FileIngestionMethod & recursive, Hash & hash) const
{
HashType hashType;
parseHashType(recursive, hashType);

@@ -75,7 +75,7 @@ bool BasicDerivation::isBuiltin() const
StorePath writeDerivation(ref<Store> store,
const Derivation & drv, const string & name, RepairFlag repair)
const Derivation & drv, std::string_view name, RepairFlag repair)
{
auto references = cloneStorePathSet(drv.inputSrcs);
for (auto & i : drv.inputDrvs)

@@ -83,8 +83,8 @@ StorePath writeDerivation(ref<Store> store,
/* Note that the outputs of a derivation are *not* references
(that can be missing (of course) and should not necessarily be
held during a garbage collection). */
string suffix = name + drvExtension;
string contents = drv.unparse(*store, false);
auto suffix = std::string(name) + drvExtension;
auto contents = drv.unparse(*store, false);
return settings.readOnlyMode
? store->computeStorePathForText(suffix, contents, references)
: store->addTextToStore(suffix, contents, references, repair);

@@ -409,7 +409,7 @@ Hash hashDerivationModulo(Store & store, const Derivation & drv, bool maskOutput
if (h == drvHashes.end()) {
assert(store.isValidPath(i.first));
h = drvHashes.insert_or_assign(i.first.clone(), hashDerivationModulo(store,
readDerivation(store, store.toRealPath(store.printStorePath(i.first))), false)).first;
readDerivation(store, store.toRealPath(i.first)), false)).first;
}
inputs2.insert_or_assign(h->second.to_string(Base16, false), i.second);
}

@@ -22,8 +22,8 @@ struct DerivationOutput
, hashAlgo(std::move(hashAlgo))
, hash(std::move(hash))
{ }
void parseHashType(bool & recursive, HashType & hashType) const;
void parseHashInfo(bool & recursive, Hash & hash) const;
void parseHashType(FileIngestionMethod & recursive, HashType & hashType) const;
void parseHashInfo(FileIngestionMethod & recursive, Hash & hash) const;
};
typedef std::map<string, DerivationOutput> DerivationOutputs;

@@ -95,7 +95,7 @@ class Store;
/* Write a derivation to the Nix store, and return its path. */
StorePath writeDerivation(ref<Store> store,
const Derivation & drv, const string & name, RepairFlag repair = NoRepair);
const Derivation & drv, std::string_view name, RepairFlag repair = NoRepair);
/* Read a derivation from a file. */
Derivation readDerivation(const Store & store, const Path & drvPath);
@@ -1,3 +1,4 @@
#include "serialise.hh"
#include "store-api.hh"
#include "archive.hh"
#include "worker-protocol.hh"

@@ -100,7 +101,9 @@ StorePaths Store::importPaths(Source & source, std::shared_ptr<FSAccessor> acces
if (readInt(source) == 1)
readString(source);
addToStore(info, tee.source.data, NoRepair, checkSigs, accessor);
// Can't use underlying source, which would have been exhausted
auto source = StringSource { *tee.source.data };
addToStore(info, source, NoRepair, checkSigs, accessor);
res.push_back(info.path.clone());
}
@@ -1,14 +1,10 @@
#include "download.hh"
#include "filetransfer.hh"
#include "util.hh"
#include "globals.hh"
#include "hash.hh"
#include "store-api.hh"
#include "archive.hh"
#include "s3.hh"
#include "compression.hh"
#include "pathlocks.hh"
#include "finally.hh"
#include "tarfile.hh"
#ifdef ENABLE_S3
#include <aws/core/client/ClientConfiguration.h>

@@ -31,13 +27,9 @@ using namespace std::string_literals;
namespace nix {
DownloadSettings downloadSettings;
FileTransferSettings fileTransferSettings;
static GlobalConfig::Register r1(&downloadSettings);
CachedDownloadRequest::CachedDownloadRequest(const std::string & uri)
: uri(uri), ttl(settings.tarballTtl)
{ }
static GlobalConfig::Register r1(&fileTransferSettings);
std::string resolveUri(const std::string & uri)
{
@ -47,21 +39,21 @@ std::string resolveUri(const std::string & uri)
|
|||
return uri;
|
||||
}
|
||||
|
||||
struct CurlDownloader : public Downloader
|
||||
struct curlFileTransfer : public FileTransfer
|
||||
{
|
||||
CURLM * curlm = 0;
|
||||
|
||||
std::random_device rd;
|
||||
std::mt19937 mt19937;
|
||||
|
||||
struct DownloadItem : public std::enable_shared_from_this<DownloadItem>
|
||||
struct TransferItem : public std::enable_shared_from_this<TransferItem>
|
||||
{
|
||||
CurlDownloader & downloader;
|
||||
DownloadRequest request;
|
||||
DownloadResult result;
|
||||
curlFileTransfer & fileTransfer;
|
||||
FileTransferRequest request;
|
||||
FileTransferResult result;
|
||||
Activity act;
|
||||
bool done = false; // whether either the success or failure function has been called
|
||||
Callback<DownloadResult> callback;
|
||||
Callback<FileTransferResult> callback;
|
||||
CURL * req = 0;
|
||||
bool active = false; // whether the handle has been added to the multi object
|
||||
std::string status;
|
||||
|
@ -80,19 +72,26 @@ struct CurlDownloader : public Downloader
|
|||
|
||||
curl_off_t writtenToSink = 0;
|
||||
|
||||
DownloadItem(CurlDownloader & downloader,
|
||||
const DownloadRequest & request,
|
||||
Callback<DownloadResult> && callback)
|
||||
: downloader(downloader)
|
||||
TransferItem(curlFileTransfer & fileTransfer,
|
||||
const FileTransferRequest & request,
|
||||
Callback<FileTransferResult> && callback)
|
||||
: fileTransfer(fileTransfer)
|
||||
, request(request)
|
||||
, act(*logger, lvlTalkative, actDownload,
|
||||
, act(*logger, lvlTalkative, actFileTransfer,
|
||||
fmt(request.data ? "uploading '%s'" : "downloading '%s'", request.uri),
|
||||
{request.uri}, request.parentAct)
|
||||
, callback(std::move(callback))
|
||||
, finalSink([this](const unsigned char * data, size_t len) {
|
||||
if (this->request.dataCallback) {
|
||||
writtenToSink += len;
|
||||
this->request.dataCallback((char *) data, len);
|
||||
long httpStatus = 0;
|
||||
curl_easy_getinfo(req, CURLINFO_RESPONSE_CODE, &httpStatus);
|
||||
|
||||
/* Only write data to the sink if this is a
|
||||
successful response. */
|
||||
if (httpStatus == 0 || httpStatus == 200 || httpStatus == 201 || httpStatus == 206) {
|
||||
writtenToSink += len;
|
||||
this->request.dataCallback((char *) data, len);
|
||||
}
|
||||
} else
|
||||
this->result.data->append((char *) data, len);
|
||||
})
|
||||
|
@ -103,17 +102,17 @@ struct CurlDownloader : public Downloader
|
|||
requestHeaders = curl_slist_append(requestHeaders, ("Content-Type: " + request.mimeType).c_str());
|
||||
}
|
||||
|
||||
~DownloadItem()
|
||||
~TransferItem()
|
||||
{
|
||||
if (req) {
|
||||
if (active)
|
||||
curl_multi_remove_handle(downloader.curlm, req);
|
||||
curl_multi_remove_handle(fileTransfer.curlm, req);
|
||||
curl_easy_cleanup(req);
|
||||
}
|
||||
if (requestHeaders) curl_slist_free_all(requestHeaders);
|
||||
try {
|
||||
if (!done)
|
||||
fail(DownloadError(Interrupted, format("download of '%s' was interrupted") % request.uri));
|
||||
fail(FileTransferError(Interrupted, format("download of '%s' was interrupted") % request.uri));
|
||||
} catch (...) {
|
||||
ignoreException();
|
||||
}
|
||||
|
@ -157,7 +156,7 @@ struct CurlDownloader : public Downloader
|
|||
|
||||
static size_t writeCallbackWrapper(void * contents, size_t size, size_t nmemb, void * userp)
|
||||
{
|
||||
return ((DownloadItem *) userp)->writeCallback(contents, size, nmemb);
|
||||
return ((TransferItem *) userp)->writeCallback(contents, size, nmemb);
|
||||
}
|
||||
|
||||
size_t headerCallback(void * contents, size_t size, size_t nmemb)
|
||||
|
@ -199,7 +198,7 @@ struct CurlDownloader : public Downloader
|
|||
|
||||
static size_t headerCallbackWrapper(void * contents, size_t size, size_t nmemb, void * userp)
|
||||
{
|
||||
return ((DownloadItem *) userp)->headerCallback(contents, size, nmemb);
|
||||
return ((TransferItem *) userp)->headerCallback(contents, size, nmemb);
|
||||
}
|
||||
|
||||
int progressCallback(double dltotal, double dlnow)
|
||||
|
@ -214,7 +213,7 @@ struct CurlDownloader : public Downloader
|
|||
|
||||
static int progressCallbackWrapper(void * userp, double dltotal, double dlnow, double ultotal, double ulnow)
|
||||
{
|
||||
return ((DownloadItem *) userp)->progressCallback(dltotal, dlnow);
|
||||
return ((TransferItem *) userp)->progressCallback(dltotal, dlnow);
|
||||
}
|
||||
|
||||
static int debugCallback(CURL * handle, curl_infotype type, char * data, size_t size, void * userptr)
|
||||
|
@ -238,7 +237,7 @@ struct CurlDownloader : public Downloader
|
|||
|
||||
static size_t readCallbackWrapper(char *buffer, size_t size, size_t nitems, void * userp)
|
||||
{
|
||||
return ((DownloadItem *) userp)->readCallback(buffer, size, nitems);
|
||||
return ((TransferItem *) userp)->readCallback(buffer, size, nitems);
|
||||
}
|
||||
|
||||
void init()
|
||||
|
@ -249,7 +248,7 @@ struct CurlDownloader : public Downloader
|
|||
|
||||
if (verbosity >= lvlVomit) {
|
||||
curl_easy_setopt(req, CURLOPT_VERBOSE, 1);
|
||||
curl_easy_setopt(req, CURLOPT_DEBUGFUNCTION, DownloadItem::debugCallback);
|
||||
curl_easy_setopt(req, CURLOPT_DEBUGFUNCTION, TransferItem::debugCallback);
|
||||
}
|
||||
|
||||
curl_easy_setopt(req, CURLOPT_URL, request.uri.c_str());
|
||||
|
@ -258,19 +257,19 @@ struct CurlDownloader : public Downloader
|
|||
curl_easy_setopt(req, CURLOPT_NOSIGNAL, 1);
|
||||
curl_easy_setopt(req, CURLOPT_USERAGENT,
|
||||
("curl/" LIBCURL_VERSION " Nix/" + nixVersion +
|
||||
(downloadSettings.userAgentSuffix != "" ? " " + downloadSettings.userAgentSuffix.get() : "")).c_str());
|
||||
(fileTransferSettings.userAgentSuffix != "" ? " " + fileTransferSettings.userAgentSuffix.get() : "")).c_str());
|
||||
#if LIBCURL_VERSION_NUM >= 0x072b00
|
||||
curl_easy_setopt(req, CURLOPT_PIPEWAIT, 1);
|
||||
#endif
|
||||
#if LIBCURL_VERSION_NUM >= 0x072f00
|
||||
if (downloadSettings.enableHttp2)
|
||||
if (fileTransferSettings.enableHttp2)
|
||||
curl_easy_setopt(req, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2TLS);
|
||||
else
|
||||
curl_easy_setopt(req, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_1);
|
||||
#endif
|
||||
curl_easy_setopt(req, CURLOPT_WRITEFUNCTION, DownloadItem::writeCallbackWrapper);
|
||||
curl_easy_setopt(req, CURLOPT_WRITEFUNCTION, TransferItem::writeCallbackWrapper);
|
||||
curl_easy_setopt(req, CURLOPT_WRITEDATA, this);
|
||||
curl_easy_setopt(req, CURLOPT_HEADERFUNCTION, DownloadItem::headerCallbackWrapper);
|
||||
curl_easy_setopt(req, CURLOPT_HEADERFUNCTION, TransferItem::headerCallbackWrapper);
|
||||
curl_easy_setopt(req, CURLOPT_HEADERDATA, this);
|
||||
|
||||
curl_easy_setopt(req, CURLOPT_PROGRESSFUNCTION, progressCallbackWrapper);
|
||||
|
@ -298,10 +297,10 @@ struct CurlDownloader : public Downloader
|
|||
curl_easy_setopt(req, CURLOPT_SSL_VERIFYHOST, 0);
|
||||
}
|
||||
|
||||
curl_easy_setopt(req, CURLOPT_CONNECTTIMEOUT, downloadSettings.connectTimeout.get());
|
||||
curl_easy_setopt(req, CURLOPT_CONNECTTIMEOUT, fileTransferSettings.connectTimeout.get());
|
||||
|
||||
curl_easy_setopt(req, CURLOPT_LOW_SPEED_LIMIT, 1L);
|
||||
curl_easy_setopt(req, CURLOPT_LOW_SPEED_TIME, downloadSettings.stalledDownloadTimeout.get());
|
||||
curl_easy_setopt(req, CURLOPT_LOW_SPEED_TIME, fileTransferSettings.stalledDownloadTimeout.get());
|
||||
|
||||
/* If no file exist in the specified path, curl continues to work
|
||||
anyway as if netrc support was disabled. */
|
||||
|
@ -390,6 +389,7 @@ struct CurlDownloader : public Downloader
|
|||
case CURLE_SSL_CACERT_BADFILE:
|
||||
case CURLE_TOO_MANY_REDIRECTS:
|
||||
case CURLE_WRITE_ERROR:
|
||||
case CURLE_UNSUPPORTED_PROTOCOL:
|
||||
err = Misc;
|
||||
break;
|
||||
default: // Shut up warnings
|
||||
|
@ -401,14 +401,14 @@ struct CurlDownloader : public Downloader
|
|||
|
||||
auto exc =
|
||||
code == CURLE_ABORTED_BY_CALLBACK && _isInterrupted
|
||||
? DownloadError(Interrupted, fmt("%s of '%s' was interrupted", request.verb(), request.uri))
|
||||
? FileTransferError(Interrupted, fmt("%s of '%s' was interrupted", request.verb(), request.uri))
|
||||
: httpStatus != 0
|
||||
? DownloadError(err,
|
||||
? FileTransferError(err,
|
||||
fmt("unable to %s '%s': HTTP error %d",
|
||||
request.verb(), request.uri, httpStatus)
|
||||
+ (code == CURLE_OK ? "" : fmt(" (curl error: %s)", curl_easy_strerror(code)))
|
||||
)
|
||||
: DownloadError(err,
|
||||
: FileTransferError(err,
|
||||
fmt("unable to %s '%s': %s (%d)",
|
||||
request.verb(), request.uri, curl_easy_strerror(code), code));
|
||||
|
||||
|
@ -422,13 +422,13 @@ struct CurlDownloader : public Downloader
|
|||
|| writtenToSink == 0
|
||||
|| (acceptRanges && encoding.empty())))
|
||||
{
|
||||
int ms = request.baseRetryTimeMs * std::pow(2.0f, attempt - 1 + std::uniform_real_distribution<>(0.0, 0.5)(downloader.mt19937));
|
||||
int ms = request.baseRetryTimeMs * std::pow(2.0f, attempt - 1 + std::uniform_real_distribution<>(0.0, 0.5)(fileTransfer.mt19937));
|
||||
if (writtenToSink)
|
||||
warn("%s; retrying from offset %d in %d ms", exc.what(), writtenToSink, ms);
|
||||
else
|
||||
warn("%s; retrying in %d ms", exc.what(), ms);
|
||||
embargo = std::chrono::steady_clock::now() + std::chrono::milliseconds(ms);
|
||||
downloader.enqueueItem(shared_from_this());
|
||||
fileTransfer.enqueueItem(shared_from_this());
|
||||
}
|
||||
else
|
||||
fail(exc);
|
||||
|
@ -439,12 +439,12 @@ struct CurlDownloader : public Downloader
|
|||
struct State
|
||||
{
|
||||
struct EmbargoComparator {
|
||||
bool operator() (const std::shared_ptr<DownloadItem> & i1, const std::shared_ptr<DownloadItem> & i2) {
|
||||
bool operator() (const std::shared_ptr<TransferItem> & i1, const std::shared_ptr<TransferItem> & i2) {
|
||||
return i1->embargo > i2->embargo;
|
||||
}
|
||||
};
|
||||
bool quit = false;
|
||||
std::priority_queue<std::shared_ptr<DownloadItem>, std::vector<std::shared_ptr<DownloadItem>>, EmbargoComparator> incoming;
|
||||
std::priority_queue<std::shared_ptr<TransferItem>, std::vector<std::shared_ptr<TransferItem>>, EmbargoComparator> incoming;
|
||||
};
|
||||
|
||||
Sync<State> state_;
|
||||
|
@ -456,7 +456,7 @@ struct CurlDownloader : public Downloader
|
|||
|
||||
std::thread workerThread;
|
||||
|
||||
CurlDownloader()
|
||||
curlFileTransfer()
|
||||
: mt19937(rd())
|
||||
{
|
||||
static std::once_flag globalInit;
|
||||
|
@ -469,7 +469,7 @@ struct CurlDownloader : public Downloader
|
|||
#endif
|
||||
#if LIBCURL_VERSION_NUM >= 0x071e00 // Max connections requires >= 7.30.0
|
||||
curl_multi_setopt(curlm, CURLMOPT_MAX_TOTAL_CONNECTIONS,
|
||||
downloadSettings.httpConnections.get());
|
||||
fileTransferSettings.httpConnections.get());
|
||||
#endif
|
||||
|
||||
wakeupPipe.create();
|
||||
|
@ -478,7 +478,7 @@ struct CurlDownloader : public Downloader
|
|||
workerThread = std::thread([&]() { workerThreadEntry(); });
|
||||
}
|
||||
|
||||
~CurlDownloader()
|
||||
~curlFileTransfer()
|
||||
{
|
||||
stopWorkerThread();
|
||||
|
||||
|
@ -504,7 +504,7 @@ struct CurlDownloader : public Downloader
|
|||
stopWorkerThread();
|
||||
});
|
||||
|
||||
std::map<CURL *, std::shared_ptr<DownloadItem>> items;
|
||||
std::map<CURL *, std::shared_ptr<TransferItem>> items;
|
||||
|
||||
bool quit = false;
|
||||
|
||||
|
@ -561,7 +561,7 @@ struct CurlDownloader : public Downloader
|
|||
throw SysError("reading curl wakeup socket");
|
||||
}
|
||||
|
||||
std::vector<std::shared_ptr<DownloadItem>> incoming;
|
||||
std::vector<std::shared_ptr<TransferItem>> incoming;
|
||||
auto now = std::chrono::steady_clock::now();
|
||||
|
||||
{
|
||||
|
@ -609,7 +609,7 @@ struct CurlDownloader : public Downloader
|
|||
}
|
||||
}
|
||||
|
||||
void enqueueItem(std::shared_ptr<DownloadItem> item)
|
||||
void enqueueItem(std::shared_ptr<TransferItem> item)
|
||||
{
|
||||
if (item->request.data
|
||||
&& !hasPrefix(item->request.uri, "http://")
|
||||
|
@ -641,8 +641,8 @@ struct CurlDownloader : public Downloader
|
|||
}
|
||||
#endif
|
||||
|
||||
void enqueueDownload(const DownloadRequest & request,
|
||||
Callback<DownloadResult> callback) override
|
||||
void enqueueFileTransfer(const FileTransferRequest & request,
|
||||
Callback<FileTransferResult> callback) override
|
||||
{
|
||||
/* Ugly hack to support s3:// URIs. */
|
||||
if (hasPrefix(request.uri, "s3://")) {
|
||||
|
@ -660,9 +660,9 @@ struct CurlDownloader : public Downloader
|
|||
|
||||
// FIXME: implement ETag
|
||||
auto s3Res = s3Helper.getObject(bucketName, key);
|
||||
DownloadResult res;
|
||||
FileTransferResult res;
|
||||
if (!s3Res.data)
|
||||
throw DownloadError(NotFound, fmt("S3 object '%s' does not exist", request.uri));
|
||||
throw FileTransferError(NotFound, fmt("S3 object '%s' does not exist", request.uri));
|
||||
res.data = s3Res.data;
|
||||
callback(std::move(res));
|
||||
#else
|
||||
|
@ -672,26 +672,26 @@ struct CurlDownloader : public Downloader
|
|||
return;
|
||||
}
|
||||
|
||||
enqueueItem(std::make_shared<DownloadItem>(*this, request, std::move(callback)));
|
||||
enqueueItem(std::make_shared<TransferItem>(*this, request, std::move(callback)));
|
||||
}
|
||||
};
|
||||
|
||||
ref<Downloader> getDownloader()
|
||||
ref<FileTransfer> getFileTransfer()
|
||||
{
|
||||
static ref<Downloader> downloader = makeDownloader();
|
||||
return downloader;
|
||||
static ref<FileTransfer> fileTransfer = makeFileTransfer();
|
||||
return fileTransfer;
|
||||
}
|
||||
|
||||
ref<Downloader> makeDownloader()
|
||||
ref<FileTransfer> makeFileTransfer()
|
||||
{
|
||||
return make_ref<CurlDownloader>();
|
||||
return make_ref<curlFileTransfer>();
|
||||
}
|
||||
|
||||
std::future<DownloadResult> Downloader::enqueueDownload(const DownloadRequest & request)
|
||||
std::future<FileTransferResult> FileTransfer::enqueueFileTransfer(const FileTransferRequest & request)
|
||||
{
|
||||
auto promise = std::make_shared<std::promise<DownloadResult>>();
|
||||
enqueueDownload(request,
|
||||
{[promise](std::future<DownloadResult> fut) {
|
||||
auto promise = std::make_shared<std::promise<FileTransferResult>>();
|
||||
enqueueFileTransfer(request,
|
||||
{[promise](std::future<FileTransferResult> fut) {
|
||||
try {
|
||||
promise->set_value(fut.get());
|
||||
} catch (...) {
|
||||
|
@ -701,15 +701,21 @@ std::future<DownloadResult> Downloader::enqueueDownload(const DownloadRequest &
|
|||
return promise->get_future();
|
||||
}
|
||||
|
||||
DownloadResult Downloader::download(const DownloadRequest & request)
|
||||
FileTransferResult FileTransfer::download(const FileTransferRequest & request)
|
||||
{
|
||||
return enqueueDownload(request).get();
|
||||
return enqueueFileTransfer(request).get();
|
||||
}
|
||||
|
||||
void Downloader::download(DownloadRequest && request, Sink & sink)
|
||||
FileTransferResult FileTransfer::upload(const FileTransferRequest & request)
|
||||
{
|
||||
/* Note: this method is the same as download, but helps in readability */
|
||||
return enqueueFileTransfer(request).get();
|
||||
}
|
||||
|
||||
void FileTransfer::download(FileTransferRequest && request, Sink & sink)
|
||||
{
|
||||
/* Note: we can't call 'sink' via request.dataCallback, because
|
||||
that would cause the sink to execute on the downloader
|
||||
that would cause the sink to execute on the fileTransfer
|
||||
thread. If 'sink' is a coroutine, this will fail. Also, if the
|
||||
sink is expensive (e.g. one that does decompression and writing
|
||||
to the Nix store), it would stall the download thread too much.
|
||||
|
@ -755,8 +761,8 @@ void Downloader::download(DownloadRequest && request, Sink & sink)
|
|||
state->avail.notify_one();
|
||||
};
|
||||
|
||||
enqueueDownload(request,
|
||||
{[_state](std::future<DownloadResult> fut) {
|
||||
enqueueFileTransfer(request,
|
||||
{[_state](std::future<FileTransferResult> fut) {
|
||||
auto state(_state->lock());
|
||||
state->quit = true;
|
||||
try {
|
||||
|
@ -801,140 +807,6 @@ void Downloader::download(DownloadRequest && request, Sink & sink)
|
|||
}
|
||||
}
|
||||
|
||||
CachedDownloadResult Downloader::downloadCached(
|
||||
ref<Store> store, const CachedDownloadRequest & request)
|
||||
{
|
||||
auto url = resolveUri(request.uri);
|
||||
|
||||
auto name = request.name;
|
||||
if (name == "") {
|
||||
auto p = url.rfind('/');
|
||||
if (p != string::npos) name = string(url, p + 1);
|
||||
}
|
||||
|
||||
std::optional<StorePath> expectedStorePath;
|
||||
if (request.expectedHash) {
|
||||
expectedStorePath = store->makeFixedOutputPath(request.unpack, request.expectedHash, name);
|
||||
if (store->isValidPath(*expectedStorePath)) {
|
||||
CachedDownloadResult result;
|
||||
result.storePath = store->printStorePath(*expectedStorePath);
|
||||
result.path = store->toRealPath(result.storePath);
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
Path cacheDir = getCacheDir() + "/nix/tarballs";
|
||||
createDirs(cacheDir);
|
||||
|
||||
string urlHash = hashString(htSHA256, name + std::string("\0"s) + url).to_string(Base32, false);
|
||||
|
||||
Path dataFile = cacheDir + "/" + urlHash + ".info";
|
||||
Path fileLink = cacheDir + "/" + urlHash + "-file";
|
||||
|
||||
PathLocks lock({fileLink}, fmt("waiting for lock on '%1%'...", fileLink));
|
||||
|
||||
std::optional<StorePath> storePath;
|
||||
|
||||
string expectedETag;
|
||||
|
||||
bool skip = false;
|
||||
|
||||
CachedDownloadResult result;
|
||||
|
||||
if (pathExists(fileLink) && pathExists(dataFile)) {
|
||||
storePath = store->parseStorePath(readLink(fileLink));
|
||||
// FIXME
|
||||
store->addTempRoot(*storePath);
|
||||
if (store->isValidPath(*storePath)) {
|
||||
auto ss = tokenizeString<vector<string>>(readFile(dataFile), "\n");
|
||||
if (ss.size() >= 3 && ss[0] == url) {
|
||||
time_t lastChecked;
|
||||
if (string2Int(ss[2], lastChecked) && (uint64_t) lastChecked + request.ttl >= (uint64_t) time(0)) {
|
||||
skip = true;
|
||||
result.effectiveUri = request.uri;
|
||||
result.etag = ss[1];
|
||||
} else if (!ss[1].empty()) {
|
||||
debug(format("verifying previous ETag '%1%'") % ss[1]);
|
||||
expectedETag = ss[1];
|
||||
}
|
||||
}
|
||||
} else
|
||||
storePath.reset();
|
||||
}
|
||||
|
||||
if (!skip) {
|
||||
|
||||
try {
|
||||
DownloadRequest request2(url);
|
||||
request2.expectedETag = expectedETag;
|
||||
auto res = download(request2);
|
||||
result.effectiveUri = res.effectiveUri;
|
||||
result.etag = res.etag;
|
||||
|
||||
if (!res.cached) {
|
||||
StringSink sink;
|
||||
dumpString(*res.data, sink);
|
||||
Hash hash = hashString(request.expectedHash ? request.expectedHash.type : htSHA256, *res.data);
|
||||
ValidPathInfo info(store->makeFixedOutputPath(false, hash, name));
|
||||
info.narHash = hashString(htSHA256, *sink.s);
|
||||
info.narSize = sink.s->size();
|
||||
info.ca = makeFixedOutputCA(false, hash);
|
||||
store->addToStore(info, sink.s, NoRepair, NoCheckSigs);
|
||||
storePath = info.path.clone();
|
||||
}
|
||||
|
||||
assert(storePath);
|
||||
replaceSymlink(store->printStorePath(*storePath), fileLink);
|
||||
|
||||
writeFile(dataFile, url + "\n" + res.etag + "\n" + std::to_string(time(0)) + "\n");
|
||||
} catch (DownloadError & e) {
|
||||
if (!storePath) throw;
|
||||
warn("warning: %s; using cached result", e.msg());
|
||||
result.etag = expectedETag;
|
||||
}
|
||||
}
|
||||
|
||||
if (request.unpack) {
|
||||
Path unpackedLink = cacheDir + "/" + ((std::string) storePath->to_string()) + "-unpacked";
|
||||
PathLocks lock2({unpackedLink}, fmt("waiting for lock on '%1%'...", unpackedLink));
|
||||
std::optional<StorePath> unpackedStorePath;
|
||||
if (pathExists(unpackedLink)) {
|
||||
unpackedStorePath = store->parseStorePath(readLink(unpackedLink));
|
||||
// FIXME
|
||||
store->addTempRoot(*unpackedStorePath);
|
||||
if (!store->isValidPath(*unpackedStorePath))
|
||||
unpackedStorePath.reset();
|
||||
}
|
||||
if (!unpackedStorePath) {
|
||||
printInfo("unpacking '%s'...", url);
|
||||
Path tmpDir = createTempDir();
|
||||
AutoDelete autoDelete(tmpDir, true);
|
||||
unpackTarfile(store->toRealPath(store->printStorePath(*storePath)), tmpDir);
|
||||
auto members = readDirectory(tmpDir);
|
||||
if (members.size() != 1)
|
||||
throw nix::Error("tarball '%s' contains an unexpected number of top-level files", url);
|
||||
auto topDir = tmpDir + "/" + members.begin()->name;
|
||||
unpackedStorePath = store->addToStore(name, topDir, true, htSHA256, defaultPathFilter, NoRepair);
|
||||
}
|
||||
replaceSymlink(store->printStorePath(*unpackedStorePath), unpackedLink);
|
||||
storePath = std::move(*unpackedStorePath);
|
||||
}
|
||||
|
||||
if (expectedStorePath && *storePath != *expectedStorePath) {
|
||||
unsigned int statusCode = 102;
|
||||
Hash gotHash = request.unpack
|
||||
? hashPath(request.expectedHash.type, store->toRealPath(store->printStorePath(*storePath))).first
|
||||
: hashFile(request.expectedHash.type, store->toRealPath(store->printStorePath(*storePath)));
|
||||
throw nix::Error(statusCode, "hash mismatch in file downloaded from '%s':\n wanted: %s\n got: %s",
|
||||
url, request.expectedHash.to_string(), gotHash.to_string());
|
||||
}
|
||||
|
||||
result.storePath = store->printStorePath(*storePath);
|
||||
result.path = store->toRealPath(result.storePath);
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
bool isUri(const string & s)
|
||||
{
|
||||
if (s.compare(0, 8, "channel:") == 0) return true;
|
|
@@ -9,7 +9,7 @@
namespace nix {
struct DownloadSettings : Config
struct FileTransferSettings : Config
{
Setting<bool> enableHttp2{this, true, "http2",
"Whether to enable HTTP/2 support."};

@@ -31,15 +31,15 @@ struct DownloadSettings
"How often Nix will attempt to download a file before giving up."};
};
extern DownloadSettings downloadSettings;
extern FileTransferSettings fileTransferSettings;
struct DownloadRequest
struct FileTransferRequest
{
std::string uri;
std::string expectedETag;
bool verifyTLS = true;
bool head = false;
size_t tries = downloadSettings.tries;
size_t tries = fileTransferSettings.tries;
unsigned int baseRetryTimeMs = 250;
ActivityId parentAct;
bool decompress = true;

@@ -47,7 +47,7 @@ struct DownloadRequest
std::string mimeType;
std::function<void(char *, size_t)> dataCallback;
DownloadRequest(const std::string & uri)
FileTransferRequest(const std::string & uri)
: uri(uri), parentAct(getCurActivity()) { }
std::string verb()

@@ -56,7 +56,7 @@ struct DownloadRequest
}
};
struct DownloadResult
struct FileTransferResult
{
bool cached = false;
std::string etag;

@@ -65,74 +65,52 @@ struct DownloadResult
uint64_t bodySize = 0;
};
struct CachedDownloadRequest
{
std::string uri;
bool unpack = false;
std::string name;
Hash expectedHash;
unsigned int ttl;
CachedDownloadRequest(const std::string & uri);
CachedDownloadRequest() = delete;
};
struct CachedDownloadResult
{
// Note: 'storePath' may be different from 'path' when using a
// chroot store.
Path storePath;
Path path;
std::optional<std::string> etag;
std::string effectiveUri;
};
class Store;
struct Downloader
struct FileTransfer
{
virtual ~Downloader() { }
virtual ~FileTransfer() { }
/* Enqueue a download request, returning a future to the result of
the download. The future may throw a DownloadError
/* Enqueue a data transfer request, returning a future to the result of
the download. The future may throw a FileTransferError
exception. */
virtual void enqueueDownload(const DownloadRequest & request,
Callback<DownloadResult> callback) = 0;
virtual void enqueueFileTransfer(const FileTransferRequest & request,
Callback<FileTransferResult> callback) = 0;
std::future<DownloadResult> enqueueDownload(const DownloadRequest & request);
std::future<FileTransferResult> enqueueFileTransfer(const FileTransferRequest & request);
/* Synchronously download a file. */
DownloadResult download(const DownloadRequest & request);
FileTransferResult download(const FileTransferRequest & request);
/* Synchronously upload a file. */
FileTransferResult upload(const FileTransferRequest & request);
/* Download a file, writing its data to a sink. The sink will be
invoked on the thread of the caller. */
void download(DownloadRequest && request, Sink & sink);
/* Check if the specified file is already in ~/.cache/nix/tarballs
and is more recent than ‘tarball-ttl’ seconds. Otherwise,
use the recorded ETag to verify if the server has a more
recent version, and if so, download it to the Nix store. */
CachedDownloadResult downloadCached(ref<Store> store, const CachedDownloadRequest & request);
void download(FileTransferRequest && request, Sink & sink);
enum Error { NotFound, Forbidden, Misc, Transient, Interrupted };
};
/* Return a shared Downloader object. Using this object is preferred
/* Return a shared FileTransfer object. Using this object is preferred
because it enables connection reuse and HTTP/2 multiplexing. */
ref<Downloader> getDownloader();
ref<FileTransfer> getFileTransfer();
/* Return a new Downloader object. */
ref<Downloader> makeDownloader();
/* Return a new FileTransfer object. */
ref<FileTransfer> makeFileTransfer();
class DownloadError : public Error
class FileTransferError : public Error
{
public:
Downloader::Error error;
DownloadError(Downloader::Error error, const FormatOrString & fs)
FileTransfer::Error error;
FileTransferError(FileTransfer::Error error, const FormatOrString & fs)
: Error(fs), error(error)
{ }
};
bool isUri(const string & s);
/* Resolve deprecated 'channel:<foo>' URLs. */
std::string resolveUri(const std::string & uri);
}
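The renamed header above is the new FileTransfer API. A hedged sketch of a synchronous download against it, based only on the declarations shown in this diff (the helper name is made up, and the shared-pointer `data` field on FileTransferResult is inferred from its usage elsewhere in the patch):

    #include "filetransfer.hh"

    using namespace nix;

    // Minimal sketch: fetch a URL and return the body, or "" on 404.
    std::string fetchBody(const std::string & url)
    {
        FileTransferRequest request(url);
        request.tries = 3;

        try {
            FileTransferResult result = getFileTransfer()->download(request);
            return result.data ? *result.data : std::string();
        } catch (FileTransferError & e) {
            if (e.error == FileTransfer::NotFound)
                return std::string();
            throw;
        }
    }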
@@ -202,6 +202,11 @@ void LocalStore::findTempRoots(FDs & fds, Roots & tempRoots, bool censor)
/* Read the `temproots' directory for per-process temporary root
files. */
for (auto & i : readDirectory(tempRootsDir)) {
if (i.name[0] == '.') {
// Ignore hidden files. Some package managers (notably portage) create
// those to keep the directory alive.
continue;
}
Path path = tempRootsDir + "/" + i.name;
pid_t pid = std::stoi(i.name);

@@ -414,7 +419,7 @@ void LocalStore::findRuntimeRoots(Roots & roots, bool censor)
try {
auto mapFile = fmt("/proc/%s/maps", ent->d_name);
auto mapLines = tokenizeString<std::vector<string>>(readFile(mapFile, true), "\n");
auto mapLines = tokenizeString<std::vector<string>>(readFile(mapFile), "\n");
for (const auto & line : mapLines) {
auto match = std::smatch{};
if (std::regex_match(line, match, mapRegex))

@@ -422,7 +427,7 @@ void LocalStore::findRuntimeRoots(Roots & roots, bool censor)
}
auto envFile = fmt("/proc/%s/environ", ent->d_name);
auto envString = readFile(envFile, true);
auto envString = readFile(envFile);
auto env_end = std::sregex_iterator{};
for (auto i = std::sregex_iterator{envString.begin(), envString.end(), storePathRegex}; i != env_end; ++i)
unchecked[i->str()].emplace(envFile);

@@ -884,7 +889,7 @@ void LocalStore::autoGC(bool sync)
if (statvfs(realStoreDir.c_str(), &st))
throw SysError("getting filesystem info about '%s'", realStoreDir);
return (uint64_t) st.f_bavail * st.f_bsize;
return (uint64_t) st.f_bavail * st.f_frsize;
};
std::shared_future<void> future;
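The autoGC hunk above changes the free-space calculation from f_bsize to f_frsize: POSIX defines f_bavail as a count of f_frsize fragments, so multiplying by f_frsize gives the correct byte count. A small standalone sketch of the corrected computation (the function name and error handling are illustrative):

    #include <sys/statvfs.h>
    #include <cstdint>
    #include <stdexcept>
    #include <string>

    // Free space available to unprivileged users, in bytes.
    uint64_t availableBytes(const std::string & path)
    {
        struct statvfs st;
        if (statvfs(path.c_str(), &st))
            throw std::runtime_error("statvfs failed for " + path);
        // f_bavail is measured in units of f_frsize, not f_bsize.
        return (uint64_t) st.f_bavail * st.f_frsize;
    }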
@@ -20,13 +20,6 @@ namespace nix {
must be deleted and recreated on startup.) */
#define DEFAULT_SOCKET_PATH "/daemon-socket/socket"
/* chroot-like behavior from Apple's sandbox */
#if __APPLE__
#define DEFAULT_ALLOWED_IMPURE_PREFIXES "/System/Library /usr/lib /dev /bin/sh"
#else
#define DEFAULT_ALLOWED_IMPURE_PREFIXES ""
#endif
Settings settings;
static GlobalConfig::Register r1(&settings);

@@ -38,6 +31,7 @@ Settings::Settings()
, nixLogDir(canonPath(getEnv("NIX_LOG_DIR").value_or(NIX_LOG_DIR)))
, nixStateDir(canonPath(getEnv("NIX_STATE_DIR").value_or(NIX_STATE_DIR)))
, nixConfDir(canonPath(getEnv("NIX_CONF_DIR").value_or(NIX_CONF_DIR)))
, nixUserConfFiles(getUserConfigFiles())
, nixLibexecDir(canonPath(getEnv("NIX_LIBEXEC_DIR").value_or(NIX_LIBEXEC_DIR)))
, nixBinDir(canonPath(getEnv("NIX_BIN_DIR").value_or(NIX_BIN_DIR)))
, nixManDir(canonPath(NIX_MAN_DIR))

@@ -68,7 +62,12 @@ Settings::Settings()
sandboxPaths = tokenizeString<StringSet>("/bin/sh=" SANDBOX_SHELL);
#endif
allowedImpureHostPrefixes = tokenizeString<StringSet>(DEFAULT_ALLOWED_IMPURE_PREFIXES);
/* chroot-like behavior from Apple's sandbox */
#if __APPLE__
sandboxPaths = tokenizeString<StringSet>("/System/Library/Frameworks /System/Library/PrivateFrameworks /bin/sh /bin/bash /private/tmp /private/var/tmp /usr/lib");
allowedImpureHostPrefixes = tokenizeString<StringSet>("/System/Library /usr/lib /dev /bin/sh");
#endif
}
void loadConfFile()
@@ -79,13 +78,29 @@ void loadConfFile()
~/.nix/nix.conf or the command line. */
globalConfig.resetOverriden();
auto dirs = getConfigDirs();
// Iterate over them in reverse so that the ones appearing first in the path take priority
for (auto dir = dirs.rbegin(); dir != dirs.rend(); dir++) {
globalConfig.applyConfigFile(*dir + "/nix/nix.conf");
auto files = settings.nixUserConfFiles;
for (auto file = files.rbegin(); file != files.rend(); file++) {
globalConfig.applyConfigFile(*file);
}
}
std::vector<Path> getUserConfigFiles()
{
// Use the paths specified in NIX_USER_CONF_FILES if it has been defined
auto nixConfFiles = getEnv("NIX_USER_CONF_FILES");
if (nixConfFiles.has_value()) {
return tokenizeString<std::vector<string>>(nixConfFiles.value(), ":");
}
// Use the paths specified by the XDG spec
std::vector<Path> files;
auto dirs = getConfigDirs();
for (auto & dir : dirs) {
files.insert(files.end(), dir + "/nix/nix.conf");
}
return files;
}
unsigned int Settings::getDefaultCores()
{
return std::max(1U, std::thread::hardware_concurrency());
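getUserConfigFiles() above prefers a colon-separated NIX_USER_CONF_FILES list over the XDG config directories. The sketch below restates that parsing with only the C++ standard library, so the behaviour is easy to follow in isolation; the Nix helpers getEnv and tokenizeString are replaced by standard-library equivalents, and the function name is made up:

    #include <cstdlib>
    #include <sstream>
    #include <string>
    #include <vector>

    // Split NIX_USER_CONF_FILES on ':'; an unset variable means
    // "fall back to the XDG config directories".
    std::vector<std::string> userConfigFilesFromEnv()
    {
        std::vector<std::string> files;
        const char * env = std::getenv("NIX_USER_CONF_FILES");
        if (!env) return files;

        std::stringstream ss(env);
        std::string item;
        while (std::getline(ss, item, ':'))
            if (!item.empty()) files.push_back(item);
        return files;
    }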
@@ -115,7 +130,7 @@ bool Settings::isExperimentalFeatureEnabled(const std::string & name)
void Settings::requireExperimentalFeature(const std::string & name)
{
if (!isExperimentalFeatureEnabled(name))
throw Error("experimental Nix feature '%s' is disabled", name);
throw Error("experimental Nix feature '%1%' is disabled; use '--experimental-features %1%' to override", name);
}
bool Settings::isWSL1()

@@ -152,21 +167,24 @@ template<> void BaseSetting<SandboxMode>::toJSON(JSONPlaceholder & out)
template<> void BaseSetting<SandboxMode>::convertToArg(Args & args, const std::string & category)
{
args.mkFlag()
.longName(name)
.description("Enable sandboxing.")
.handler([=](std::vector<std::string> ss) { override(smEnabled); })
.category(category);
args.mkFlag()
.longName("no-" + name)
.description("Disable sandboxing.")
.handler([=](std::vector<std::string> ss) { override(smDisabled); })
.category(category);
args.mkFlag()
.longName("relaxed-" + name)
.description("Enable sandboxing, but allow builds to disable it.")
.handler([=](std::vector<std::string> ss) { override(smRelaxed); })
.category(category);
args.addFlag({
.longName = name,
.description = "Enable sandboxing.",
.category = category,
.handler = {[=]() { override(smEnabled); }}
});
args.addFlag({
.longName = "no-" + name,
.description = "Disable sandboxing.",
.category = category,
.handler = {[=]() { override(smDisabled); }}
});
args.addFlag({
.longName = "relaxed-" + name,
.description = "Enable sandboxing, but allow builds to disable it.",
.category = category,
.handler = {[=]() { override(smRelaxed); }}
});
}
void MaxBuildJobsSetting::set(const std::string & str)
@ -53,9 +53,12 @@ public:
|
|||
/* The directory where state is stored. */
|
||||
Path nixStateDir;
|
||||
|
||||
/* The directory where configuration files are stored. */
|
||||
/* The directory where system configuration files are stored. */
|
||||
Path nixConfDir;
|
||||
|
||||
/* A list of user configuration files to load. */
|
||||
std::vector<Path> nixUserConfFiles;
|
||||
|
||||
/* The directory where internal helper programs are stored. */
|
||||
Path nixLibexecDir;
|
||||
|
||||
|
@ -311,12 +314,7 @@ public:
|
|||
Setting<bool> printMissing{this, true, "print-missing",
|
||||
"Whether to print what paths need to be built or downloaded."};
|
||||
|
||||
Setting<std::string> preBuildHook{this,
|
||||
#if __APPLE__
|
||||
nixLibexecDir + "/nix/resolve-system-dependencies",
|
||||
#else
|
||||
"",
|
||||
#endif
|
||||
Setting<std::string> preBuildHook{this, "",
|
||||
"pre-build-hook",
|
||||
"A program to run just before a build to set derivation-specific build settings."};
|
||||
|
||||
|
@@ -356,12 +354,21 @@ public:
     Setting<Paths> pluginFiles{this, {}, "plugin-files",
         "Plugins to dynamically load at nix initialization time."};

+    Setting<std::string> githubAccessToken{this, "", "github-access-token",
+        "GitHub access token to get access to GitHub data through the GitHub API for github:<..> flakes."};
+
+    Setting<Strings> experimentalFeatures{this, {}, "experimental-features",
+        "Experimental Nix features to enable."};
+
+    bool isExperimentalFeatureEnabled(const std::string & name);
+
+    void requireExperimentalFeature(const std::string & name);
+
     Setting<bool> allowDirty{this, true, "allow-dirty",
         "Whether to allow dirty Git/Mercurial trees."};

     Setting<bool> warnDirty{this, true, "warn-dirty",
         "Whether to warn about dirty Git/Mercurial trees."};
 };

@@ -374,6 +381,9 @@ void initPlugins();

 void loadConfFile();

+// Used by the Settings constructor
+std::vector<Path> getUserConfigFiles();
+
 extern const string nixVersion;

 }
@@ -1,5 +1,5 @@
 #include "binary-cache-store.hh"
-#include "download.hh"
+#include "filetransfer.hh"
 #include "globals.hh"
 #include "nar-info-disk-cache.hh"

@@ -85,14 +85,14 @@ protected:
         checkEnabled();

         try {
-            DownloadRequest request(cacheUri + "/" + path);
+            FileTransferRequest request(cacheUri + "/" + path);
             request.head = true;
-            getDownloader()->download(request);
+            getFileTransfer()->download(request);
             return true;
-        } catch (DownloadError & e) {
+        } catch (FileTransferError & e) {
             /* S3 buckets return 403 if a file doesn't exist and the
                bucket is unlistable, so treat 403 as 404. */
-            if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
+            if (e.error == FileTransfer::NotFound || e.error == FileTransfer::Forbidden)
                 return false;
             maybeDisable();
             throw;
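
As the comment in this hunk notes, S3-backed caches answer 403 rather than 404 for missing objects when the bucket is unlistable, so both statuses are treated as "not found". A tiny standalone sketch of that classification; the enum and function names here are illustrative, not Nix's.

```cpp
// Illustrative only: classify "forbidden" responses as "missing" for existence
// checks, mirroring the 403-as-404 handling in the hunk above.
#include <iostream>

enum class TransferError { NotFound, Forbidden, Other };

// S3 buckets may return Forbidden (403) for objects that simply do not exist,
// so treat it the same as NotFound (404).
bool meansMissing(TransferError e)
{
    return e == TransferError::NotFound || e == TransferError::Forbidden;
}

int main()
{
    std::cout << std::boolalpha
              << meansMissing(TransferError::Forbidden) << '\n'  // true
              << meansMissing(TransferError::Other) << '\n';     // false
}
```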
@@ -103,19 +103,19 @@ protected:
         const std::string & data,
         const std::string & mimeType) override
     {
-        auto req = DownloadRequest(cacheUri + "/" + path);
+        auto req = FileTransferRequest(cacheUri + "/" + path);
         req.data = std::make_shared<string>(data); // FIXME: inefficient
         req.mimeType = mimeType;
         try {
-            getDownloader()->download(req);
-        } catch (DownloadError & e) {
+            getFileTransfer()->upload(req);
+        } catch (FileTransferError & e) {
             throw UploadToHTTP("while uploading to HTTP binary cache at '%s': %s", cacheUri, e.msg());
         }
     }

-    DownloadRequest makeRequest(const std::string & path)
+    FileTransferRequest makeRequest(const std::string & path)
     {
-        DownloadRequest request(cacheUri + "/" + path);
+        FileTransferRequest request(cacheUri + "/" + path);
         return request;
     }

@@ -124,9 +124,9 @@ protected:
         checkEnabled();
         auto request(makeRequest(path));
         try {
-            getDownloader()->download(std::move(request), sink);
-        } catch (DownloadError & e) {
-            if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
+            getFileTransfer()->download(std::move(request), sink);
+        } catch (FileTransferError & e) {
+            if (e.error == FileTransfer::NotFound || e.error == FileTransfer::Forbidden)
                 throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache '%s'", path, getUri());
             maybeDisable();
             throw;
@@ -142,12 +142,12 @@ protected:

         auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));

-        getDownloader()->enqueueDownload(request,
-            {[callbackPtr, this](std::future<DownloadResult> result) {
+        getFileTransfer()->enqueueFileTransfer(request,
+            {[callbackPtr, this](std::future<FileTransferResult> result) {
                 try {
                     (*callbackPtr)(result.get().data);
-                } catch (DownloadError & e) {
-                    if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
+                } catch (FileTransferError & e) {
+                    if (e.error == FileTransfer::NotFound || e.error == FileTransfer::Forbidden)
                         return (*callbackPtr)(std::shared_ptr<std::string>());
                     maybeDisable();
                     callbackPtr->rethrow();
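
One detail worth noting in this hunk: the completion callback is moved into a std::shared_ptr so that the asynchronous lambda, which must stay copyable for std::function, can own it and invoke or rethrow it later. A reduced standalone sketch of that pattern, with a synchronous stand-in for the transfer queue (names and URI below are illustrative):

```cpp
// Sketch of wrapping a callback in shared_ptr so a copyable std::function can
// capture it; the "queue" here just runs jobs immediately.
#include <functional>
#include <future>
#include <iostream>
#include <memory>
#include <string>

void enqueueDemoTransfer(const std::string & uri,
    std::function<void(std::future<std::string>)> handler)
{
    std::promise<std::string> promise;
    promise.set_value("data from " + uri);   // pretend the transfer succeeded
    handler(promise.get_future());
}

int main()
{
    // Imagine this callback were expensive to copy or move-only; sharing it
    // via shared_ptr lets the copyable lambda below hold on to it.
    auto callback = [](const std::string & data) { std::cout << data << '\n'; };
    auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));

    enqueueDemoTransfer("https://cache.example/nar/xyz",
        [callbackPtr](std::future<std::string> result) {
            try {
                (*callbackPtr)(result.get());
            } catch (...) {
                std::cerr << "transfer failed\n";
            }
        });
}
```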
@@ -163,14 +163,14 @@ static RegisterStoreImplementation regStore(
     const std::string & uri, const Store::Params & params)
     -> std::shared_ptr<Store>
 {
+    static bool forceHttp = getEnv("_NIX_FORCE_HTTP") == "1";
     if (std::string(uri, 0, 7) != "http://" &&
         std::string(uri, 0, 8) != "https://" &&
-        (getEnv("_NIX_FORCE_HTTP_BINARY_CACHE_STORE") != "1" || std::string(uri, 0, 7) != "file://")
-        ) return 0;
+        (!forceHttp || std::string(uri, 0, 7) != "file://"))
+        return 0;
     auto store = std::make_shared<HttpBinaryCacheStore>(params, uri);
     store->init();
     return store;
 });

 }

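
The registration hunk above also caches the _NIX_FORCE_HTTP check in a function-local static so the environment is consulted only once. A standalone sketch of that idiom, with the URI handling trimmed away; only the caching is the point, and the function name is mine.

```cpp
// Sketch: read an environment override once and cache it in a local static.
#include <cstdlib>
#include <iostream>
#include <string>

static bool forceHttpEnabled()
{
    // Initialised on first call only; later calls reuse the cached value.
    static bool forceHttp = []() {
        const char * v = std::getenv("_NIX_FORCE_HTTP");
        return v != nullptr && std::string(v) == "1";
    }();
    return forceHttp;
}

int main()
{
    std::cout << (forceHttpEnabled() ? "file:// URIs allowed here\n"
                                     : "http(s):// URIs only\n");
}
```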
@@ -195,7 +195,7 @@ struct LegacySSHStore : public Store
     { unsupported("queryPathFromHashPart"); }

     StorePath addToStore(const string & name, const Path & srcPath,
-        bool recursive, HashType hashAlgo,
+        FileIngestionMethod method, HashType hashAlgo,
         PathFilter & filter, RepairFlag repair) override
     { unsupported("addToStore"); }

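
Several hunks in this diff replace a bare `bool recursive` parameter with a `FileIngestionMethod` value. A rough standalone illustration of why a scoped enum reads better at call sites than a boolean: the enumerator `Recursive` appears in the diff itself, while `Flat` and everything else below is my assumption, so treat it as a placeholder rather than Nix's actual definition.

```cpp
// Illustration of the bool -> scoped-enum refactor; the enum body is a guess
// apart from Recursive, which the diff uses explicitly.
#include <iostream>
#include <string>

enum class FileIngestionMethod { Flat, Recursive }; // 'Flat' is assumed

std::string describeIngestion(FileIngestionMethod method)
{
    // The call site now says what it means instead of passing 'true'/'false'.
    if (method == FileIngestionMethod::Recursive)
        return "serialise the whole tree and ingest that";
    return "ingest the file contents as-is";
}

int main()
{
    std::cout << describeIngestion(FileIngestionMethod::Recursive) << '\n';
    std::cout << describeIngestion(FileIngestionMethod::Flat) << '\n';
}
```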
@@ -298,9 +298,7 @@ void LocalStore::openDB(State & state, bool create)
     /* Open the Nix database. */
     string dbPath = dbDir + "/db.sqlite";
-    auto & db(state.db);
-    if (sqlite3_open_v2(dbPath.c_str(), &db.db,
-            SQLITE_OPEN_READWRITE | (create ? SQLITE_OPEN_CREATE : 0), 0) != SQLITE_OK)
-        throw Error(format("cannot open Nix database '%1%'") % dbPath);
+    state.db = SQLite(dbPath, create);

 #ifdef __CYGWIN__
     /* The cygwin version of sqlite3 has a patch which calls
@@ -312,11 +310,6 @@ void LocalStore::openDB(State & state, bool create)
             SetDllDirectoryW(L"");
 #endif

-    if (sqlite3_busy_timeout(db, 60 * 60 * 1000) != SQLITE_OK)
-        throwSQLiteError(db, "setting timeout");
-
-    db.exec("pragma foreign_keys = 1");
-
     /* !!! check whether sqlite has been built with foreign key
        support */

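
The two openDB hunks above drop the explicit sqlite3_open_v2 and sqlite3_busy_timeout calls in favour of constructing a SQLite wrapper object, which suggests open-plus-timeout now happens inside that wrapper. A hedged standalone sketch of such an RAII wrapper using the plain SQLite C API; this is not Nix's SQLite class, just the general shape.

```cpp
// Sketch of an RAII SQLite handle that opens the database and sets the busy
// timeout in its constructor; compile with -lsqlite3. Not Nix's actual class.
#include <sqlite3.h>
#include <stdexcept>
#include <string>

class SQLiteHandle
{
    sqlite3 * db = nullptr;

public:
    SQLiteHandle(const std::string & path, bool create)
    {
        int flags = SQLITE_OPEN_READWRITE | (create ? SQLITE_OPEN_CREATE : 0);
        if (sqlite3_open_v2(path.c_str(), &db, flags, nullptr) != SQLITE_OK) {
            sqlite3_close(db);   // sqlite may allocate a handle even on failure
            throw std::runtime_error("cannot open database '" + path + "'");
        }
        // Same timeout the removed call used: one hour, in milliseconds.
        if (sqlite3_busy_timeout(db, 60 * 60 * 1000) != SQLITE_OK) {
            sqlite3_close(db);
            throw std::runtime_error("cannot set busy timeout on '" + path + "'");
        }
    }

    ~SQLiteHandle() { if (db) sqlite3_close(db); }

    SQLiteHandle(const SQLiteHandle &) = delete;
    SQLiteHandle & operator=(const SQLiteHandle &) = delete;

    sqlite3 * get() const { return db; }
};

int main()
{
    SQLiteHandle db("/tmp/demo.sqlite", true); // callers no longer repeat open/timeout
    return db.get() ? 0 : 1;
}
```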
@@ -350,7 +343,7 @@ void LocalStore::openDB(State & state, bool create)

     /* Initialise the database schema, if necessary. */
     if (create) {
-        const char * schema =
+        static const char schema[] =
 #include "schema.sql.gen.hh"
             ;
         db.exec(schema);
@@ -577,7 +570,7 @@ void LocalStore::checkDerivationOutputs(const StorePath & drvPath, const Derivat
         if (i.second.hash == "") {
             throw Error("Fixed output derivation needs hash");
         }
-        bool recursive; Hash h;
+        FileIngestionMethod recursive; Hash h;
         i.second.parseHashInfo(recursive, h);
         StorePath path = makeFixedOutputPath(recursive, h, drvName);
         check(path, i.second.path, i.first);
@@ -1058,11 +1051,11 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,


 StorePath LocalStore::addToStoreFromDump(const string & dump, const string & name,
-    bool recursive, HashType hashAlgo, RepairFlag repair)
+    FileIngestionMethod method, HashType hashAlgo, RepairFlag repair)
 {
     Hash h = hashString(hashAlgo, dump);

-    auto dstPath = makeFixedOutputPath(recursive, h, name);
+    auto dstPath = makeFixedOutputPath(method, h, name);

     addTempRoot(dstPath);

@@ -1082,7 +1075,7 @@ StorePath LocalStore::addToStoreFromDump(const string & dump, const string & nam

             autoGC();

-            if (recursive) {
+            if (method == FileIngestionMethod::Recursive) {
                 StringSource source(dump);
                 restorePath(realPath, source);
             } else
@@ -1095,7 +1088,7 @@ StorePath LocalStore::addToStoreFromDump(const string & dump, const string & nam
                above (if called with recursive == true and hashAlgo ==
                sha256); otherwise, compute it here. */
             HashResult hash;
-            if (recursive) {
+            if (method == FileIngestionMethod::Recursive) {
                 hash.first = hashAlgo == htSHA256 ? h : hashString(htSHA256, dump);
                 hash.second = dump.size();
             } else
@@ -1106,7 +1099,7 @@ StorePath LocalStore::addToStoreFromDump(const string & dump, const string & nam
             ValidPathInfo info(dstPath.clone());
             info.narHash = hash.first;
             info.narSize = hash.second;
-            info.ca = makeFixedOutputCA(recursive, h);
+            info.ca = makeFixedOutputCA(method, h);
             registerValidPath(info);
         }

@@ -1118,7 +1111,7 @@ StorePath LocalStore::addToStoreFromDump(const string & dump, const string & nam


 StorePath LocalStore::addToStore(const string & name, const Path & _srcPath,
-    bool recursive, HashType hashAlgo, PathFilter & filter, RepairFlag repair)
+    FileIngestionMethod method, HashType hashAlgo, PathFilter & filter, RepairFlag repair)
 {
     Path srcPath(absPath(_srcPath));

@@ -1126,12 +1119,12 @@ StorePath LocalStore::addToStore(const string & name, const Path & _srcPath,
        method for very large paths, but `copyPath' is mainly used for
        small files. */
     StringSink sink;
-    if (recursive)
+    if (method == FileIngestionMethod::Recursive)
         dumpPath(srcPath, sink, filter);
     else
         sink.s = make_ref<std::string>(readFile(srcPath));

-    return addToStoreFromDump(*sink.s, name, recursive, hashAlgo, repair);
+    return addToStoreFromDump(*sink.s, name, method, hashAlgo, repair);
 }


@@ -1283,7 +1276,7 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair)
                 else
                     hashSink = std::make_unique<HashModuloSink>(info->narHash.type, storePathToHash(printStorePath(info->path)));

-                dumpPath(toRealPath(printStorePath(i)), *hashSink);
+                dumpPath(Store::toRealPath(i), *hashSink);
                 auto current = hashSink->finish();

                 if (info->narHash != nullHash && info->narHash != current.first) {
@@ -149,7 +149,7 @@ public:
         std::shared_ptr<FSAccessor> accessor) override;

     StorePath addToStore(const string & name, const Path & srcPath,
-        bool recursive, HashType hashAlgo,
+        FileIngestionMethod method, HashType hashAlgo,
         PathFilter & filter, RepairFlag repair) override;

     /* Like addToStore(), but the contents of the path are contained
@@ -157,7 +157,7 @@ public:
        true) or simply the contents of a regular file (if recursive ==
        false). */
     StorePath addToStoreFromDump(const string & dump, const string & name,
-        bool recursive = true, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair) override;
+        FileIngestionMethod method = FileIngestionMethod::Recursive, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair) override;

     StorePath addTextToStore(const string & name, const string & s,
         const StorePathSet & references, RepairFlag repair) override;

@@ -31,7 +31,8 @@ ifeq ($(HAVE_SECCOMP), 1)
  libstore_LDFLAGS += -lseccomp
 endif

-libstore_CXXFLAGS = \
+libstore_CXXFLAGS += \
+ -I src/libutil -I src/libstore \
  -DNIX_PREFIX=\"$(prefix)\" \
  -DNIX_STORE_DIR=\"$(storedir)\" \
  -DNIX_DATA_DIR=\"$(datadir)\" \
@@ -78,12 +78,7 @@ public:

         state->db = SQLite(dbPath);

-        if (sqlite3_busy_timeout(state->db, 60 * 60 * 1000) != SQLITE_OK)
-            throwSQLiteError(state->db, "setting timeout");
-
-        // We can always reproduce the cache.
-        state->db.exec("pragma synchronous = off");
-        state->db.exec("pragma main.journal_mode = truncate");
+        state->db.isCache();

         state->db.exec(schema);

@@ -10,7 +10,7 @@ class NarInfoDiskCache
 public:
     typedef enum { oValid, oInvalid, oUnknown } Outcome;

-    virtual ~NarInfoDiskCache() { };
+    virtual ~NarInfoDiskCache() { }

     virtual void createCache(const std::string & uri, const Path & storeDir,
         bool wantMassQuery, int priority) = 0;
@@ -1,5 +1,7 @@
 #include "parsed-derivations.hh"
+
+#include <nlohmann/json.hpp>

 namespace nix {

 ParsedDerivation::ParsedDerivation(StorePath && drvPath, BasicDerivation & drv)
@@ -9,13 +11,15 @@ ParsedDerivation::ParsedDerivation(StorePath && drvPath, BasicDerivation & drv)
     auto jsonAttr = drv.env.find("__json");
     if (jsonAttr != drv.env.end()) {
         try {
-            structuredAttrs = nlohmann::json::parse(jsonAttr->second);
+            structuredAttrs = std::make_unique<nlohmann::json>(nlohmann::json::parse(jsonAttr->second));
         } catch (std::exception & e) {
             throw Error("cannot process __json attribute of '%s': %s", drvPath.to_string(), e.what());
         }
     }
 }

+ParsedDerivation::~ParsedDerivation() { }
+
 std::optional<std::string> ParsedDerivation::getStringAttr(const std::string & name) const
 {
     if (structuredAttrs) {
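
The last hunk switches structuredAttrs to a std::unique_ptr<nlohmann::json> filled by parsing the __json attribute, presumably so the header can hold it by pointer without pulling in the JSON header. A small standalone sketch of that parse-into-unique_ptr pattern; the error text, attribute handling, and sample data below are illustrative, and the only external dependency is the header-only nlohmann/json library.

```cpp
// Sketch: parse optional JSON into a unique_ptr, leaving it null when absent.
// Requires https://github.com/nlohmann/json (header-only).
#include <nlohmann/json.hpp>

#include <iostream>
#include <map>
#include <memory>
#include <stdexcept>
#include <string>

std::unique_ptr<nlohmann::json> parseStructuredAttrs(
    const std::map<std::string, std::string> & env)
{
    auto it = env.find("__json");
    if (it == env.end())
        return nullptr; // attribute not present: behave as "no structured attrs"

    try {
        return std::make_unique<nlohmann::json>(nlohmann::json::parse(it->second));
    } catch (std::exception & e) {
        throw std::runtime_error(std::string("cannot process __json attribute: ") + e.what());
    }
}

int main()
{
    std::map<std::string, std::string> env{{"__json", R"({"outputs": ["out"]})"}};
    auto attrs = parseStructuredAttrs(env);
    if (attrs)
        std::cout << attrs->dump(2) << '\n';
}
```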
Some files were not shown because too many files have changed in this diff.