diff --git a/aterm-gc.supp b/aterm-gc.supp
index dcd6371fe..21b9a2372 100644
--- a/aterm-gc.supp
+++ b/aterm-gc.supp
@@ -115,3 +115,35 @@
fun:*
fun:AT_collect
}
+
+{
+ ATerm library conservatively scans for GC roots
+ Memcheck:Value4
+ fun:*
+ fun:*
+ fun:mark_phase
+}
+
+{
+ ATerm library conservatively scans for GC roots
+ Memcheck:Cond
+ fun:*
+ fun:*
+ fun:mark_phase
+}
+
+{
+ ATerm library conservatively scans for GC roots
+ Memcheck:Value4
+ fun:*
+ fun:*
+ fun:mark_phase_young
+}
+
+{
+ ATerm library conservatively scans for GC roots
+ Memcheck:Cond
+ fun:*
+ fun:*
+ fun:mark_phase_young
+}
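+
+# Usage sketch (the binary and its flags are illustrative, not part of
+# this change):
+#   valgrind --suppressions=aterm-gc.supp nix-instantiate --eval-only foo.nix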
diff --git a/bootstrap.sh b/bootstrap.sh
index f007c713b..2547f5dc8 100755
--- a/bootstrap.sh
+++ b/bootstrap.sh
@@ -1,4 +1,5 @@
#! /bin/sh -e
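+# Remove any previously generated aclocal.m4 so that aclocal rebuilds it.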
+rm -f aclocal.m4
mkdir -p config
libtoolize --copy
aclocal
diff --git a/configure.ac b/configure.ac
index f108c53be..e34e4ba3a 100644
--- a/configure.ac
+++ b/configure.ac
@@ -50,39 +50,24 @@ AC_DEFINE_UNQUOTED(SYSTEM, ["$system"], [platform identifier (`cpu-os')])
test "$localstatedir" = '${prefix}/var' && localstatedir=/nix/var
-# Whether to produce a statically linked binary. On Cygwin, this is
-# the default: dynamically linking against the ATerm DLL does work,
-# except that it requires the ATerm "lib" directory to be in $PATH, as
-# Windows doesn't have anything like an RPATH embedded in executable.
-# Since this is kind of annoying, we use static libraries for now.
-
-AC_ARG_ENABLE(static-nix, AC_HELP_STRING([--enable-static-nix],
- [produce statically linked binaries]),
- static_nix=$enableval, static_nix=no)
-
-if test "$sys_name" = cygwin; then
- static_nix=yes
-fi
-
-if test "$static_nix" = yes; then
+# Windows-specific stuff. On Cygwin, dynamically linking against the
+# ATerm DLL works, except that it requires the ATerm "lib" directory
+# to be in $PATH, as Windows doesn't have anything like an RPATH
+# embedded in executables. Since this is kind of annoying, we use
+# static libraries for now.
+if test "$sys_name" = "cygwin"; then
AC_DISABLE_SHARED
AC_ENABLE_STATIC
fi
-# Windows-specific stuff.
-if test "$sys_name" = "cygwin"; then
- # We cannot delete open files.
- AC_DEFINE(CANNOT_DELETE_OPEN_FILES, 1, [Whether it is impossible to delete open files.])
-fi
-
# Solaris-specific stuff.
if test "$sys_name" = "sunos"; then
# Solaris requires -lsocket -lnsl for network functions
- ADDITIONAL_NETWORK_LIBS="-lsocket -lnsl"
- AC_SUBST(ADDITIONAL_NETWORK_LIBS)
+ LIBS="-lsocket -lnsl $LIBS"
fi
+
AC_PROG_CC
AC_PROG_CXX
@@ -101,6 +86,13 @@ AC_DISABLE_STATIC
AC_ENABLE_SHARED
AC_PROG_LIBTOOL
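+# Propagate the shared/static choice to the configure scripts of
+# bundled sub-packages (see the SQLite rules in externals/Makefile.am).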
+if test "$enable_shared" = yes; then
+ SUB_CONFIGURE_FLAGS="--enable-shared --disable-static"
+else
+ SUB_CONFIGURE_FLAGS="--enable-static --disable-shared"
+fi
+AC_SUBST(SUB_CONFIGURE_FLAGS)
+
# Use 64-bit file system calls so that we can support files > 2 GiB.
AC_SYS_LARGEFILE
@@ -229,6 +221,8 @@ AC_ARG_WITH(bzip2, AC_HELP_STRING([--with-bzip2=PATH],
[prefix of bzip2]),
bzip2=$withval, bzip2=)
AM_CONDITIONAL(HAVE_BZIP2, test -n "$bzip2")
+ATERM_VERSION=2.5
+AC_SUBST(ATERM_VERSION)
if test -z "$bzip2"; then
# Headers and libraries will be used from the temporary installation
# in externals/inst-bzip2.
@@ -249,6 +243,24 @@ AC_SUBST(bzip2_include)
AC_SUBST(bzip2_bin)
AC_SUBST(bzip2_bin_test)
+AC_ARG_WITH(sqlite, AC_HELP_STRING([--with-sqlite=PATH],
+ [prefix of SQLite]),
+ sqlite=$withval, sqlite=)
+AM_CONDITIONAL(HAVE_SQLITE, test -n "$sqlite")
+SQLITE_VERSION=3070500
+AC_SUBST(SQLITE_VERSION)
+if test -z "$sqlite"; then
+ sqlite_lib='${top_builddir}/externals/sqlite-autoconf-$(SQLITE_VERSION)/libsqlite3.la'
+ sqlite_include='-I${top_builddir}/externals/sqlite-autoconf-$(SQLITE_VERSION)'
+ sqlite_bin='${top_builddir}/externals/sqlite-autoconf-$(SQLITE_VERSION)'
+else
+ sqlite_lib="-L$sqlite/lib -lsqlite3"
+ sqlite_include="-I$sqlite/include"
+ sqlite_bin="$sqlite/bin"
+fi
+AC_SUBST(sqlite_lib)
+AC_SUBST(sqlite_include)
+AC_SUBST(sqlite_bin)
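+
+# With both libraries installed system-wide, a typical invocation is
+# (illustrative paths):
+#   ./configure --with-bzip2=/usr --with-sqlite=/usr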
# Whether to use the Boehm garbage collector.
AC_ARG_ENABLE(gc, AC_HELP_STRING([--enable-gc],
@@ -274,8 +286,7 @@ AC_CHECK_FUNCS([setresuid setreuid lchown])
# Nice to have, but not essential.
-AC_CHECK_FUNCS([strsignal])
-AC_CHECK_FUNCS([posix_fallocate])
+AC_CHECK_FUNCS([strsignal posix_fallocate nanosleep])
# This is needed if ATerm or bzip2 are static libraries,
@@ -285,14 +296,6 @@ if test "$(uname)" = "Darwin"; then
fi
-if test "$static_nix" = yes; then
- # `-all-static' has to be added at the end of configure, because
- # the C compiler doesn't know about -all-static (it's filtered out
- # by libtool, but configure doesn't use libtool).
- LDFLAGS="-all-static $LDFLAGS"
-fi
-
-
AM_CONFIG_HEADER([config.h])
AC_CONFIG_FILES([Makefile
externals/Makefile
diff --git a/corepkgs/buildenv/default.nix b/corepkgs/buildenv/default.nix
index 36dd9d0c6..d76f52740 100644
--- a/corepkgs/buildenv/default.nix
+++ b/corepkgs/buildenv/default.nix
@@ -11,4 +11,8 @@ derivation {
paths = derivations;
active = map (x: if x ? meta && x.meta ? active then x.meta.active else "true") derivations;
priority = map (x: if x ? meta && x.meta ? priority then x.meta.priority else "5") derivations;
+
+ # Building user environments remotely just causes huge amounts of
+ # network traffic, so don't do that.
+ preferLocalBuild = true;
}
diff --git a/corepkgs/nar/nar.sh.in b/corepkgs/nar/nar.sh.in
index 67933ac67..1369d3a21 100644
--- a/corepkgs/nar/nar.sh.in
+++ b/corepkgs/nar/nar.sh.in
@@ -7,8 +7,6 @@ dst=$out/tmp.nar.bz2
@bzip2@ < tmp > $dst
-@bindir@/nix-hash -vvvvv --flat --type $hashAlgo --base32 tmp > $out/nar-hash
-
@bindir@/nix-hash --flat --type $hashAlgo --base32 $dst > $out/narbz2-hash
@coreutils@/mv $out/tmp.nar.bz2 $out/$(@coreutils@/cat $out/narbz2-hash).nar.bz2
diff --git a/doc/manual/conf-file.xml b/doc/manual/conf-file.xml
index ec64e8b11..cb47b9941 100644
--- a/doc/manual/conf-file.xml
+++ b/doc/manual/conf-file.xml
@@ -260,7 +260,7 @@ build-use-chroot = /dev /proc /bin
Nix store metadata (in /nix/var/nix/db) are
synchronously flushed to disk. This improves robustness in case
of system crashes, but reduces performance. The default is
- false.
+ true.
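+
+ For example, a site that favours performance over crash robustness
+ can opt out in the Nix configuration file with a line like:
+
+fsync-metadata = false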
diff --git a/doc/manual/nix-store.xml b/doc/manual/nix-store.xml
index 10bb3eda5..a32559c03 100644
--- a/doc/manual/nix-store.xml
+++ b/doc/manual/nix-store.xml
@@ -404,6 +404,7 @@ error: cannot delete path `/nix/store/zq0h41l75vlb4z45kzgjjmsjxvcv1qk7-mesa-6.4'
name
+--size
@@ -587,9 +588,21 @@ query is applied to the target of the symlink.
Prints the SHA-256 hash of the contents of the
- store paths paths. Since the hash is
- stored in the Nix database, this is a fast
- operation.
+ store paths paths (that is, the hash of
+ the output of nix-store --dump on the given
+ paths). Since the hash is stored in the Nix database, this is a
+ fast operation.
+
+
+
+--size
+
+ Prints the size in bytes of the contents of the
+ store paths paths — to be precise, the
+ size of the output of nix-store --dump on the
+ given paths. Note that the actual disk space required by the
+ store paths may be higher, especially on filesystems with large
+ cluster sizes.
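+
+ For example (reusing the store path from the garbage-collector
+ message quoted above):
+
+$ nix-store --query --size /nix/store/zq0h41l75vlb4z45kzgjjmsjxvcv1qk7-mesa-6.4
+
+ One size is printed per given path.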
diff --git a/doc/manual/quick-start.xml b/doc/manual/quick-start.xml
index 6d96cb5ac..d2431151f 100644
--- a/doc/manual/quick-start.xml
+++ b/doc/manual/quick-start.xml
@@ -60,7 +60,7 @@ available remotely.
in the channel:
-$ nix-env -qa ’*’ (mind the quotes!)
+$ nix-env -qa \*
docbook-xml-4.2
firefox-1.0pre-PR-0.10.1
hello-2.1.1
diff --git a/externals/Makefile.am b/externals/Makefile.am
index 63150b1f5..884d87bf1 100644
--- a/externals/Makefile.am
+++ b/externals/Makefile.am
@@ -12,30 +12,56 @@ $(BZIP2).tar.gz:
$(BZIP2): $(BZIP2).tar.gz
gunzip < $(srcdir)/$(BZIP2).tar.gz | tar xvf -
-have-bzip2:
- $(MAKE) $(BZIP2)
- touch have-bzip2
-
if HAVE_BZIP2
build-bzip2:
else
-build-bzip2: have-bzip2
- (pfx=`pwd` && \
- cd $(BZIP2) && \
- $(MAKE) && \
- $(MAKE) install PREFIX=$$pfx/inst-bzip2)
+build-bzip2: $(BZIP2)
+ (cd $(BZIP2) && \
+ $(MAKE) CC="$(CC)" && \
+ $(MAKE) install PREFIX=$(abs_builddir)/inst-bzip2)
touch build-bzip2
-install:
+install-exec-local:: build-bzip2
mkdir -p $(DESTDIR)${bzip2_bin}
$(INSTALL_PROGRAM) $(bzip2_bin_test)/bzip2 $(bzip2_bin_test)/bunzip2 $(DESTDIR)${bzip2_bin}
endif
-all: build-bzip2
+# SQLite
-EXTRA_DIST = $(BZIP2).tar.gz
+SQLITE = sqlite-autoconf-$(SQLITE_VERSION)
+SQLITE_TAR = sqlite-autoconf-$(SQLITE_VERSION).tar.gz
-ext-clean:
- $(RM) -f have-bzip2 build-bzip2
- $(RM) -rf $(BZIP2)
+$(SQLITE_TAR):
+ @echo "Nix requires the SQLite library to build."
+ @echo "Please download version $(SQLITE_VERSION) from"
+ @echo " http://www.sqlite.org/$(SQLITE_TAR)"
+ @echo "and place it in the externals/ directory."
+ false
+
+$(SQLITE): $(SQLITE_TAR)
+ gzip -d < $(srcdir)/$(SQLITE_TAR) | tar xvf -
+
+if HAVE_SQLITE
+build-sqlite:
+else
+build-sqlite: $(SQLITE)
+ (cd $(SQLITE) && \
+ CC="$(CC)" CFLAGS="-DSQLITE_ENABLE_COLUMN_METADATA=1" ./configure --disable-static --prefix=$(pkglibdir)/dummy --libdir=${pkglibdir} $(SUB_CONFIGURE_FLAGS) && \
+ $(MAKE) )
+ touch build-sqlite
+
+install-exec-local:: build-sqlite
+ cd $(SQLITE) && $(MAKE) install
+ rm -rf "$(DESTDIR)/$(pkglibdir)/dummy"
+endif
+
+
+all: build-bzip2 build-sqlite
+
+EXTRA_DIST = $(BZIP2).tar.gz $(SQLITE_TAR)
+
+clean:
+ $(RM) -f build-bzip2 build-sqlite
+ $(RM) -rf $(BZIP2) $(SQLITE)
+ $(RM) -rf inst-bzip2
diff --git a/release.nix b/release.nix
index c89d79a7d..1fc9405bd 100644
--- a/release.nix
+++ b/release.nix
@@ -33,6 +33,9 @@ let
stripHash ${bzip2.src}
cp -pv ${bzip2.src} externals/$strippedName
+ stripHash ${sqlite.src}
+ cp -pv ${sqlite.src} externals/$strippedName
+
# TeX needs a writable font cache.
export VARTEXFONTS=$TMPDIR/texfonts
'';
@@ -71,7 +74,7 @@ let
configureFlags = ''
--disable-init-state
- --with-bzip2=${bzip2}
+ --with-bzip2=${bzip2} --with-sqlite=${sqlite}
--enable-gc
'';
};
@@ -92,10 +95,10 @@ let
configureFlags = ''
--disable-init-state --disable-shared
- --with-bzip2=${bzip2}
+ --with-bzip2=${bzip2} --with-sqlite=${sqlite}
'';
- lcovFilter = ["*/boost/*" "*-tab.*"];
+ lcovFilter = [ "*/boost/*" "*-tab.*" ];
# We call `dot', and even though we just use it to
# syntax-check generated dot files, it still requires some
@@ -144,11 +147,11 @@ let
with import nixpkgs { inherit system; };
releaseTools.rpmBuild rec {
- name = "nix-rpm";
+ name = "nix-rpm-${diskImage.name}";
src = jobs.tarball;
diskImage = diskImageFun vmTools.diskImages;
memSize = 1024;
- meta = { schedulingPriority = prio; };
+ meta.schedulingPriority = prio;
};
@@ -165,7 +168,7 @@ let
src = jobs.tarball;
diskImage = diskImageFun vmTools.diskImages;
memSize = 1024;
- meta = { schedulingPriority = prio; };
+ meta.schedulingPriority = prio;
configureFlags = "--sysconfdir=/etc";
debRequires = [ "curl" ];
};
diff --git a/scripts/GeneratePatches.pm.in b/scripts/GeneratePatches.pm.in
new file mode 100755
index 000000000..2d2653255
--- /dev/null
+++ b/scripts/GeneratePatches.pm.in
@@ -0,0 +1,334 @@
+#! @perl@ -w -I@libexecdir@/nix
+
+use strict;
+use File::Temp qw(tempdir);
+
+
+# Some patch generation options.
+
+# Max size of NAR archives to generate patches for.
+my $maxNarSize = $ENV{"NIX_MAX_NAR_SIZE"};
+$maxNarSize = 160 * 1024 * 1024 if !defined $maxNarSize;
+
+# If patch is bigger than this fraction of full archive, reject.
+my $maxPatchFraction = $ENV{"NIX_PATCH_FRACTION"};
+$maxPatchFraction = 0.60 if !defined $maxPatchFraction;
+
+my $timeLimit = $ENV{"NIX_BSDIFF_TIME_LIMIT"};
+$timeLimit = 180 if !defined $timeLimit;
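+
+# All three limits above can be overridden from the caller's
+# environment, e.g. (illustrative):
+#   NIX_MAX_NAR_SIZE=$((200 * 1024 * 1024)) nix-generate-patches ...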
+
+my $hashAlgo = "sha256";
+
+
+sub findOutputPaths {
+ my $narFiles = shift;
+
+ my %outPaths;
+
+ foreach my $p (keys %{$narFiles}) {
+
+ # Ignore derivations.
+ next if ($p =~ /\.drv$/);
+
+ # Ignore builders (too much ambiguity -- they're all called
+ # `builder.sh').
+ next if ($p =~ /\.sh$/);
+ next if ($p =~ /\.patch$/);
+
+ # Don't bother including tar files etc.
+ next if ($p =~ /\.tar$/ || $p =~ /\.tar\.(gz|bz2|Z|lzma|xz)$/ || $p =~ /\.zip$/ || $p =~ /\.bin$/ || $p =~ /\.tgz$/ || $p =~ /\.rpm$/ || $p =~ /cvs-export$/ || $p =~ /fetchhg$/);
+
+ $outPaths{$p} = 1;
+ }
+
+ return %outPaths;
+}
+
+
+sub getNameVersion {
+ my $p = shift;
+ $p =~ /\/[0-9a-z]+((?:-[a-zA-Z][^\/-]*)+)([^\/]*)$/;
+ my $name = $1;
+ my $version = $2;
+ return undef unless defined $name && defined $version;
+ $name =~ s/^-//;
+ $version =~ s/^-//;
+ return ($name, $version);
+}
+
+
+# A quick hack to get a measure of the `distance' between two
+# versions: it's just the position of the first character that differs
+# (or 999 if they are the same).
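+# For example, versionDiff("2.6.1", "2.6.2") returns 4 (the index of
+# the first differing character) and versionDiff("2.6", "3.0") returns
+# 0, so a larger result means a closer match.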
+sub versionDiff {
+ my $s = shift;
+ my $t = shift;
+ my $i;
+ return 999 if $s eq $t;
+ for ($i = 0; $i < length $s; $i++) {
+ return $i if $i >= length $t or
+ substr($s, $i, 1) ne substr($t, $i, 1);
+ }
+ return $i;
+}
+
+
+sub getNarBz2 {
+ my $narPath = shift;
+ my $narFiles = shift;
+ my $storePath = shift;
+
+ my $narFileList = $$narFiles{$storePath};
+ die "missing path $storePath" unless defined $narFileList;
+
+ my $narFile = @{$narFileList}[0];
+ die unless defined $narFile;
+
+ $narFile->{url} =~ /\/([^\/]+)$/;
+ die unless defined $1;
+ return "$narPath/$1";
+}
+
+
+sub containsPatch {
+ my $patches = shift;
+ my $storePath = shift;
+ my $basePath = shift;
+ my $patchList = $$patches{$storePath};
+ return 0 if !defined $patchList;
+ my $found = 0;
+ foreach my $patch (@{$patchList}) {
+ # !!! baseHash might differ
+ return 1 if $patch->{basePath} eq $basePath;
+ }
+ return 0;
+}
+
+
+sub generatePatches {
+ my ($srcNarFiles, $dstNarFiles, $srcPatches, $dstPatches, $narPath, $patchesPath, $patchesURL, $tmpDir) = @_;
+
+ my %srcOutPaths = findOutputPaths $srcNarFiles;
+ my %dstOutPaths = findOutputPaths $dstNarFiles;
+
+ # For each output path in the destination, see if we need to / can
+ # create a patch.
+
+ print STDERR "creating patches...\n";
+
+ foreach my $p (keys %dstOutPaths) {
+
+ # If exactly the same path already exists in the source, skip it.
+ next if defined $srcOutPaths{$p};
+
+ print " $p\n";
+
+ # If not, then we should find the paths in the source that are
+ # `most' likely to be present on a system that wants to
+ # install this path.
+
+ (my $name, my $version) = getNameVersion $p;
+ next unless defined $name && defined $version;
+
+ my @closest = ();
+ my $closestVersion;
+ my $minDist = -1; # actually, larger means closer
+
+ # Find all source paths with the same name.
+
+ foreach my $q (keys %srcOutPaths) {
+ (my $name2, my $version2) = getNameVersion $q;
+ next unless defined $name2 && defined $version2;
+
+ if ($name eq $name2) {
+
+ my $srcSystem = @{$$dstNarFiles{$p}}[0]->{system};
+ my $dstSystem = @{$$srcNarFiles{$q}}[0]->{system};
+ if (defined $srcSystem && defined $dstSystem && $srcSystem ne $dstSystem) {
+ print " SKIPPING $q due to different systems ($srcSystem vs. $dstSystem)\n";
+ next;
+ }
+
+ # If the sizes differ too much, then skip. This
+ # disambiguates between, e.g., a real component and a
+ # wrapper component (cf. Firefox in Nixpkgs).
+ my $srcSize = @{$$srcNarFiles{$q}}[0]->{size};
+ my $dstSize = @{$$dstNarFiles{$p}}[0]->{size};
+ my $ratio = $srcSize / $dstSize;
+ $ratio = 1 / $ratio if $ratio < 1;
+ # print " SIZE $srcSize $dstSize $ratio $q\n";
+
+ if ($ratio >= 3) {
+ print " SKIPPING $q due to size ratio $ratio ($srcSize vs. $dstSize)\n";
+ next;
+ }
+
+ # If there are multiple matching names, include the
+ # ones with the closest version numbers.
+ my $dist = versionDiff $version, $version2;
+ if ($dist > $minDist) {
+ $minDist = $dist;
+ @closest = ($q);
+ $closestVersion = $version2;
+ } elsif ($dist == $minDist) {
+ push @closest, $q;
+ }
+ }
+ }
+
+ if (scalar(@closest) == 0) {
+ print " NO BASE: $p\n";
+ next;
+ }
+
+ foreach my $closest (@closest) {
+
+ # Generate a patch between $closest and $p.
+ print STDERR " $p <- $closest\n";
+
+ # If the patch already exists, skip it.
+ if (containsPatch($srcPatches, $p, $closest) ||
+ containsPatch($dstPatches, $p, $closest))
+ {
+ print " skipping, already exists\n";
+ next;
+ }
+
+ my $srcNarBz2 = getNarBz2 $narPath, $srcNarFiles, $closest;
+ my $dstNarBz2 = getNarBz2 $narPath, $dstNarFiles, $p;
+
+ if (! -f $srcNarBz2) {
+ warn "patch source archive $srcNarBz2 is missing\n";
+ next;
+ }
+
+ system("@bunzip2@ < $srcNarBz2 > $tmpDir/A") == 0
+ or die "cannot unpack $srcNarBz2";
+
+ if ((stat "$tmpDir/A")[7] >= $maxNarSize) {
+ print " skipping, source is too large\n";
+ next;
+ }
+
+ system("@bunzip2@ < $dstNarBz2 > $tmpDir/B") == 0
+ or die "cannot unpack $dstNarBz2";
+
+ if ((stat "$tmpDir/B")[7] >= $maxNarSize) {
+ print " skipping, destination is too large\n";
+ next;
+ }
+
+ my $time1 = time();
+ my $res = system("ulimit -t $timeLimit; @libexecdir@/bsdiff $tmpDir/A $tmpDir/B $tmpDir/DIFF");
+ my $time2 = time();
+ if ($res) {
+ warn "binary diff computation aborted after ", $time2 - $time1, " seconds\n";
+ next;
+ }
+
+ my $baseHash = `@bindir@/nix-hash --flat --type $hashAlgo --base32 $tmpDir/A` or die;
+ chomp $baseHash;
+
+ my $narHash = `@bindir@/nix-hash --flat --type $hashAlgo --base32 $tmpDir/B` or die;
+ chomp $narHash;
+
+ my $narDiffHash = `@bindir@/nix-hash --flat --type $hashAlgo --base32 $tmpDir/DIFF` or die;
+ chomp $narDiffHash;
+
+ my $narDiffSize = (stat "$tmpDir/DIFF")[7];
+ my $dstNarBz2Size = (stat $dstNarBz2)[7];
+
+ print " size $narDiffSize; full size $dstNarBz2Size; ", $time2 - $time1, " seconds\n";
+
+ if ($narDiffSize >= $dstNarBz2Size) {
+ print " rejecting; patch bigger than full archive\n";
+ next;
+ }
+
+ if ($narDiffSize / $dstNarBz2Size >= $maxPatchFraction) {
+ print " rejecting; patch too large relative to full archive\n";
+ next;
+ }
+
+ my $finalName = "$narDiffHash.nar-bsdiff";
+
+ if (-e "$patchesPath/$finalName") {
+ print " not copying, already exists\n";
+ }
+
+ else {
+ system("cp '$tmpDir/DIFF' '$patchesPath/$finalName.tmp'") == 0
+ or die "cannot copy diff";
+ rename("$patchesPath/$finalName.tmp", "$patchesPath/$finalName")
+ or die "cannot rename $patchesPath/$finalName.tmp";
+ }
+
+ # Add the patch to the manifest.
+ addPatch $dstPatches, $p,
+ { url => "$patchesURL/$finalName", hash => "$hashAlgo:$narDiffHash"
+ , size => $narDiffSize, basePath => $closest, baseHash => "$hashAlgo:$baseHash"
+ , narHash => "$hashAlgo:$narHash", patchType => "nar-bsdiff"
+ };
+ }
+ }
+}
+
+
+# Propagate useful patches from $srcPatches to $dstPatches. A patch
+# is useful if it produces either paths in the $dstNarFiles or paths
+# that can be used as the base for other useful patches.
+sub propagatePatches {
+ my ($srcPatches, $dstNarFiles, $dstPatches) = @_;
+
+ print STDERR "propagating patches...\n";
+
+ my $changed;
+ do {
+ # !!! we repeat this to reach the transitive closure; inefficient
+ $changed = 0;
+
+ print STDERR "loop\n";
+
+ my %dstBasePaths;
+ foreach my $q (keys %{$dstPatches}) {
+ foreach my $patch (@{$$dstPatches{$q}}) {
+ $dstBasePaths{$patch->{basePath}} = 1;
+ }
+ }
+
+ foreach my $p (keys %{$srcPatches}) {
+ my $patchList = $$srcPatches{$p};
+
+ my $include = 0;
+
+ # Is path $p included in the destination? If so, include
+ # patches that produce it.
+ $include = 1 if defined $$dstNarFiles{$p};
+
+ # Is path $p a path that serves as a base for paths in the
+ # destination? If so, include patches that produce it.
+ # !!! check baseHash
+ $include = 1 if defined $dstBasePaths{$p};
+
+ if ($include) {
+ foreach my $patch (@{$patchList}) {
+ $changed = 1 if addPatch $dstPatches, $p, $patch;
+ }
+ }
+
+ }
+
+ } while $changed;
+}
+
+
+# Add all new patches in $srcPatches to $dstPatches.
+sub copyPatches {
+ my ($srcPatches, $dstPatches) = @_;
+ foreach my $p (keys %{$srcPatches}) {
+ addPatch $dstPatches, $p, $_ foreach @{$$srcPatches{$p}};
+ }
+}
+
+
+return 1;
diff --git a/scripts/Makefile.am b/scripts/Makefile.am
index aa5d6f78c..60bb0a9b8 100644
--- a/scripts/Makefile.am
+++ b/scripts/Makefile.am
@@ -1,23 +1,23 @@
bin_SCRIPTS = nix-collect-garbage \
nix-pull nix-push nix-prefetch-url \
nix-install-package nix-channel nix-build \
- nix-copy-closure
+ nix-copy-closure nix-generate-patches
-noinst_SCRIPTS = nix-profile.sh generate-patches.pl \
+noinst_SCRIPTS = nix-profile.sh GeneratePatches.pm \
find-runtime-roots.pl build-remote.pl nix-reduce-build \
copy-from-other-stores.pl nix-http-export.cgi
-nix-pull nix-push: readmanifest.pm readconfig.pm download-using-manifests.pl
+nix-pull nix-push: NixManifest.pm NixConfig.pm download-using-manifests.pl
-install-exec-local: readmanifest.pm download-using-manifests.pl copy-from-other-stores.pl find-runtime-roots.pl
+install-exec-local: NixManifest.pm GeneratePatches.pm download-using-manifests.pl copy-from-other-stores.pl find-runtime-roots.pl
$(INSTALL) -d $(DESTDIR)$(sysconfdir)/profile.d
$(INSTALL_PROGRAM) nix-profile.sh $(DESTDIR)$(sysconfdir)/profile.d/nix.sh
$(INSTALL) -d $(DESTDIR)$(libexecdir)/nix
- $(INSTALL_DATA) readmanifest.pm $(DESTDIR)$(libexecdir)/nix
- $(INSTALL_DATA) readconfig.pm $(DESTDIR)$(libexecdir)/nix
- $(INSTALL_DATA) ssh.pm $(DESTDIR)$(libexecdir)/nix
+ $(INSTALL_DATA) NixManifest.pm $(DESTDIR)$(libexecdir)/nix
+ $(INSTALL_DATA) NixConfig.pm $(DESTDIR)$(libexecdir)/nix
+ $(INSTALL_DATA) SSH.pm $(DESTDIR)$(libexecdir)/nix
+ $(INSTALL_DATA) GeneratePatches.pm $(DESTDIR)$(libexecdir)/nix
$(INSTALL_PROGRAM) find-runtime-roots.pl $(DESTDIR)$(libexecdir)/nix
- $(INSTALL_PROGRAM) generate-patches.pl $(DESTDIR)$(libexecdir)/nix
$(INSTALL_PROGRAM) build-remote.pl $(DESTDIR)$(libexecdir)/nix
$(INSTALL) -d $(DESTDIR)$(libexecdir)/nix/substituters
$(INSTALL_PROGRAM) download-using-manifests.pl $(DESTDIR)$(libexecdir)/nix/substituters
@@ -30,15 +30,16 @@ EXTRA_DIST = nix-collect-garbage.in \
nix-pull.in nix-push.in nix-profile.sh.in \
nix-prefetch-url.in nix-install-package.in \
nix-channel.in \
- readmanifest.pm.in \
- readconfig.pm.in \
- ssh.pm \
+ NixManifest.pm.in \
+ NixConfig.pm.in \
+ SSH.pm \
+ GeneratePatches.pm.in \
nix-build.in \
download-using-manifests.pl.in \
copy-from-other-stores.pl.in \
- generate-patches.pl.in \
nix-copy-closure.in \
find-runtime-roots.pl.in \
build-remote.pl.in \
nix-reduce-build.in \
- nix-http-export.cgi.in
+ nix-http-export.cgi.in \
+ nix-generate-patches.in
diff --git a/scripts/readconfig.pm.in b/scripts/NixConfig.pm.in
similarity index 100%
rename from scripts/readconfig.pm.in
rename to scripts/NixConfig.pm.in
diff --git a/scripts/readmanifest.pm.in b/scripts/NixManifest.pm.in
similarity index 82%
rename from scripts/readmanifest.pm.in
rename to scripts/NixManifest.pm.in
index 7244984ea..be0dda616 100644
--- a/scripts/readmanifest.pm.in
+++ b/scripts/NixManifest.pm.in
@@ -33,18 +33,8 @@ sub readManifest {
my $manifestVersion = 2;
- my $storePath;
- my $url;
- my $hash;
- my $size;
- my $basePath;
- my $baseHash;
- my $patchType;
- my $narHash;
- my $references;
- my $deriver;
- my $hashAlgo;
- my $copyFrom;
+ my ($storePath, $url, $hash, $size, $basePath, $baseHash, $patchType);
+ my ($narHash, $narSize, $references, $deriver, $hashAlgo, $copyFrom, $system);
while (<MANIFEST>) {
chomp;
@@ -62,9 +52,11 @@ sub readManifest {
undef $hash;
undef $size;
undef $narHash;
+ undef $narSize;
undef $basePath;
undef $baseHash;
undef $patchType;
+ undef $system;
$references = "";
$deriver = "";
$hashAlgo = "md5";
@@ -89,8 +81,10 @@ sub readManifest {
if (!$found) {
push @{$narFileList},
{ url => $url, hash => $hash, size => $size
- , narHash => $narHash, references => $references
+ , narHash => $narHash, narSize => $narSize
+ , references => $references
, deriver => $deriver, hashAlgo => $hashAlgo
+ , system => $system
};
}
@@ -100,8 +94,8 @@ sub readManifest {
addPatch $patches, $storePath,
{ url => $url, hash => $hash, size => $size
, basePath => $basePath, baseHash => $baseHash
- , narHash => $narHash, patchType => $patchType
- , hashAlgo => $hashAlgo
+ , narHash => $narHash, narSize => $narSize
+ , patchType => $patchType, hashAlgo => $hashAlgo
};
}
@@ -132,9 +126,11 @@ sub readManifest {
elsif (/^\s*BaseHash:\s*(\S+)\s*$/) { $baseHash = $1; }
elsif (/^\s*Type:\s*(\S+)\s*$/) { $patchType = $1; }
elsif (/^\s*NarHash:\s*(\S+)\s*$/) { $narHash = $1; }
+ elsif (/^\s*NarSize:\s*(\d+)\s*$/) { $narSize = $1; }
elsif (/^\s*References:\s*(.*)\s*$/) { $references = $1; }
elsif (/^\s*Deriver:\s*(\S+)\s*$/) { $deriver = $1; }
elsif (/^\s*ManifestVersion:\s*(\d+)\s*$/) { $manifestVersion = $1; }
+ elsif (/^\s*System:\s*(\S+)\s*$/) { $system = $1; }
# Compatibility;
elsif (/^\s*NarURL:\s*(\S+)\s*$/) { $url = $1; }
@@ -150,7 +146,7 @@ sub readManifest {
sub writeManifest {
- my ($manifest, $narFiles, $patches) = @_;
+ my ($manifest, $narFiles, $patches, $noCompress) = @_;
open MANIFEST, ">$manifest.tmp"; # !!! check exclusive
@@ -165,12 +161,14 @@ sub writeManifest {
print MANIFEST " StorePath: $storePath\n";
print MANIFEST " NarURL: $narFile->{url}\n";
print MANIFEST " Hash: $narFile->{hash}\n" if defined $narFile->{hash};
- print MANIFEST " NarHash: $narFile->{narHash}\n";
print MANIFEST " Size: $narFile->{size}\n" if defined $narFile->{size};
+ print MANIFEST " NarHash: $narFile->{narHash}\n";
+ print MANIFEST " NarSize: $narFile->{narSize}\n" if $narFile->{narSize};
print MANIFEST " References: $narFile->{references}\n"
if defined $narFile->{references} && $narFile->{references} ne "";
print MANIFEST " Deriver: $narFile->{deriver}\n"
if defined $narFile->{deriver} && $narFile->{deriver} ne "";
+ print MANIFEST " System: $narFile->{system}\n" if defined $narFile->{system};
print MANIFEST "}\n";
}
}
@@ -182,8 +180,9 @@ sub writeManifest {
print MANIFEST " StorePath: $storePath\n";
print MANIFEST " NarURL: $patch->{url}\n";
print MANIFEST " Hash: $patch->{hash}\n";
- print MANIFEST " NarHash: $patch->{narHash}\n";
print MANIFEST " Size: $patch->{size}\n";
+ print MANIFEST " NarHash: $patch->{narHash}\n";
+ print MANIFEST " NarSize: $patch->{narSize}\n" if $patch->{narSize};
print MANIFEST " BasePath: $patch->{basePath}\n";
print MANIFEST " BaseHash: $patch->{baseHash}\n";
print MANIFEST " Type: $patch->{patchType}\n";
@@ -199,11 +198,13 @@ sub writeManifest {
# Create a bzipped manifest.
- system("@bzip2@ < $manifest > $manifest.bz2.tmp") == 0
- or die "cannot compress manifest";
+ unless (defined $noCompress) {
+ system("@bzip2@ < $manifest > $manifest.bz2.tmp") == 0
+ or die "cannot compress manifest";
- rename("$manifest.bz2.tmp", "$manifest.bz2")
- or die "cannot rename $manifest.bz2.tmp: $!";
+ rename("$manifest.bz2.tmp", "$manifest.bz2")
+ or die "cannot rename $manifest.bz2.tmp: $!";
+ }
}
diff --git a/scripts/ssh.pm b/scripts/SSH.pm
similarity index 81%
rename from scripts/ssh.pm
rename to scripts/SSH.pm
index c6d667a65..68f4a628b 100644
--- a/scripts/ssh.pm
+++ b/scripts/SSH.pm
@@ -3,6 +3,8 @@ use File::Temp qw(tempdir);
our @sshOpts = split ' ', ($ENV{"NIX_SSHOPTS"} or "");
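+# "-x" disables X11 forwarding, which these remote commands never need.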
+push @sshOpts, "-x";
+
my $sshStarted = 0;
my $sshHost;
@@ -24,14 +26,17 @@ sub openSSHConnection {
# child continues to run if we are killed. So instead make SSH
# print "started" when it has established the connection, and wait
# until we see that.
- open SSH, "ssh $sshHost @sshOpts -M -N -o LocalCommand='echo started' -o PermitLocalCommand=yes |" or die;
- while (<SSH>) {
+ open SSHPIPE, "ssh $sshHost @sshOpts -M -N -o LocalCommand='echo started' -o PermitLocalCommand=yes |" or die;
+
+ while (<SSHPIPE>) {
chomp;
- last if /started/;
+ if ($_ eq "started") {
+ $sshStarted = 1;
+ return 1;
+ }
}
-
- $sshStarted = 1;
- return 1;
+
+ return 0;
}
# Tell the master SSH client to exit.
diff --git a/scripts/build-remote.pl.in b/scripts/build-remote.pl.in
index c440b6a0f..e943b0d9e 100755
--- a/scripts/build-remote.pl.in
+++ b/scripts/build-remote.pl.in
@@ -3,7 +3,8 @@
use Fcntl ':flock';
use English '-no_match_vars';
use IO::Handle;
-use ssh qw/sshOpts openSSHConnection/;
+use SSH qw/sshOpts openSSHConnection/;
+no warnings('once');
# General operation:
@@ -31,57 +32,22 @@ $ENV{"DISPLAY"} = "";
$ENV{"SSH_ASKPASS"} = "";
-my $loadIncreased = 0;
-
-my ($amWilling, $localSystem, $neededSystem, $drvPath, $maxSilentTime) = @ARGV;
-$maxSilentTime = 0 unless defined $maxSilentTime;
-
sub sendReply {
my $reply = shift;
print STDERR "# $reply\n";
}
-sub decline {
- sendReply "decline";
- exit 0;
-}
+sub all { $_ || return 0 for @_; 1 }
+
+
+# Initialisation.
+my $loadIncreased = 0;
+
+my ($localSystem, $maxSilentTime, $printBuildTrace) = @ARGV;
+$maxSilentTime = 0 unless defined $maxSilentTime;
my $currentLoad = $ENV{"NIX_CURRENT_LOAD"};
-decline unless defined $currentLoad;
-mkdir $currentLoad, 0777 or die unless -d $currentLoad;
-
my $conf = $ENV{"NIX_REMOTE_SYSTEMS"};
-decline if !defined $conf || ! -e $conf;
-
-my $canBuildLocally = $amWilling && ($localSystem eq $neededSystem);
-
-
-# Read the list of machines.
-my @machines;
-open CONF, "< $conf" or die;
-
-while (<CONF>) {
- chomp;
- s/\#.*$//g;
- next if /^\s*$/;
- /^\s*(\S+)\s+(\S+)\s+(\S+)\s+(\d+)(\s+([0-9\.]+))?\s*$/ or die;
- push @machines,
- { hostName => $1
- , systemTypes => [split(/,/, $2)]
- , sshKeys => $3
- , maxJobs => $4
- , speedFactor => 1.0 * ($6 || 1)
- , enabled => 1
- };
-}
-
-close CONF;
-
-
-# Acquire the exclusive lock on $currentLoad/main-lock.
-my $mainLock = "$currentLoad/main-lock";
-open MAINLOCK, ">>$mainLock" or die;
-flock(MAINLOCK, LOCK_EX) or die;
sub openSlotLock {
@@ -91,150 +57,213 @@ sub openSlotLock {
open $slotLock, ">>$slotLockFn" or die;
return $slotLock;
}
+
+
+# Read the list of machines.
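+# Each non-comment line has the form (inferred from the parsing below;
+# the last two fields are optional):
+#   hostname system1,system2 sshKeyFile maxJobs [speedFactor [feature1,...]]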
+my @machines;
+if (defined $conf && -e $conf) {
+ open CONF, "< $conf" or die;
+ while (<CONF>) {
+ chomp;
+ s/\#.*$//g;
+ next if /^\s*$/;
+ my @tokens = split /\s/, $_;
+ push @machines,
+ { hostName => $tokens[0]
+ , systemTypes => [ split(/,/, $tokens[1]) ]
+ , sshKeys => $tokens[2]
+ , maxJobs => int($tokens[3])
+ , speedFactor => 1.0 * (defined $tokens[4] ? int($tokens[4]) : 1)
+ , features => [ split(/,/, $tokens[5] || "") ]
+ , enabled => 1
+ };
+ }
+ close CONF;
+}
+
+
+
+# Wait for the calling process to ask us whether we can build some derivation.
+my ($drvPath, $hostName, $slotLock);
+
+REQ: while (1) {
+ $_ = <STDIN> || exit 0;
+ my ($amWilling, $neededSystem);
+ ($amWilling, $neededSystem, $drvPath, $requiredFeatures) = split;
+ my @requiredFeatures = split /,/, $requiredFeatures;
+
+ my $canBuildLocally = $amWilling && ($localSystem eq $neededSystem);
+
+ if (!defined $currentLoad) {
+ sendReply "decline";
+ next;
+ }
-
-my $hostName;
-my $slotLock;
-
-while (1) {
+ # Acquire the exclusive lock on $currentLoad/main-lock.
+ mkdir $currentLoad, 0777 or die unless -d $currentLoad;
+ my $mainLock = "$currentLoad/main-lock";
+ open MAINLOCK, ">>$mainLock" or die;
+ flock(MAINLOCK, LOCK_EX) or die;
- # Find all machine that can execute this build, i.e., that support
- # builds for the given platform and are not at their job limit.
- my $rightType = 0;
- my @available = ();
- LOOP: foreach my $cur (@machines) {
- if ($cur->{enabled} && grep { $neededSystem eq $_ } @{$cur->{systemTypes}}) {
- $rightType = 1;
+
+ while (1) {
+ # Find all machines that can execute this build, i.e., that
+ # support builds for the given platform and features, and are
+ # not at their job limit.
+ my $rightType = 0;
+ my @available = ();
+ LOOP: foreach my $cur (@machines) {
+ if ($cur->{enabled}
+ && (grep { $neededSystem eq $_ } @{$cur->{systemTypes}})
+ && all(map { my $f = $_; 0 != grep { $f eq $_ } @{$cur->{features}} } @requiredFeatures))
+ {
+ $rightType = 1;
- # We have a machine of the right type. Determine the load on
- # the machine.
- my $slot = 0;
- my $load = 0;
- my $free;
- while ($slot < $cur->{maxJobs}) {
- my $slotLock = openSlotLock($cur, $slot);
- if (flock($slotLock, LOCK_EX | LOCK_NB)) {
- $free = $slot unless defined $free;
- flock($slotLock, LOCK_UN) or die;
- } else {
- $load++;
+ # We have a machine of the right type. Determine the load on
+ # the machine.
+ my $slot = 0;
+ my $load = 0;
+ my $free;
+ while ($slot < $cur->{maxJobs}) {
+ my $slotLock = openSlotLock($cur, $slot);
+ if (flock($slotLock, LOCK_EX | LOCK_NB)) {
+ $free = $slot unless defined $free;
+ flock($slotLock, LOCK_UN) or die;
+ } else {
+ $load++;
+ }
+ close $slotLock;
+ $slot++;
}
- close $slotLock;
- $slot++;
+
+ push @available, { machine => $cur, load => $load, free => $free }
+ if $load < $cur->{maxJobs};
}
-
- push @available, { machine => $cur, load => $load, free => $free }
- if $load < $cur->{maxJobs};
}
- }
- if (defined $ENV{NIX_DEBUG_HOOK}) {
- print STDERR "load on " . $_->{machine}->{hostName} . " = " . $_->{load} . "\n"
- foreach @available;
- }
-
-
- # Didn't find any available machine? Then decline or postpone.
- if (scalar @available == 0) {
- # Postpone if we have a machine of the right type, except if the
- # local system can and wants to do the build.
- if ($rightType && !$canBuildLocally) {
- sendReply "postpone";
- exit 0;
- } else {
- decline;
+ if (defined $ENV{NIX_DEBUG_HOOK}) {
+ print STDERR "load on " . $_->{machine}->{hostName} . " = " . $_->{load} . "\n"
+ foreach @available;
}
- }
- # Prioritise the available machines as follows:
- # - First by load divided by speed factor, rounded to the nearest
- # integer. This causes fast machines to be preferred over slow
- # machines with similar loads.
- # - Then by speed factor.
- # - Finally by load.
- sub lf { my $x = shift; return int($x->{load} / $x->{machine}->{speedFactor} + 0.4999); }
- @available = sort
- { lf($a) <=> lf($b)
- || $b->{machine}->{speedFactor} <=> $a->{machine}->{speedFactor}
- || $a->{load} <=> $b->{load}
- } @available;
+ # Didn't find any available machine? Then decline or postpone.
+ if (scalar @available == 0) {
+ # Postpone if we have a machine of the right type, except
+ # if the local system can and wants to do the build.
+ if ($rightType && !$canBuildLocally) {
+ sendReply "postpone";
+ } else {
+ sendReply "decline";
+ }
+ close MAINLOCK;
+ next REQ;
+ }
- # Select the best available machine and lock a free slot.
- my $selected = $available[0];
- my $machine = $selected->{machine};
-
- $slotLock = openSlotLock($machine, $selected->{free});
- flock($slotLock, LOCK_EX | LOCK_NB) or die;
- utime undef, undef, $slotLock;
-
- close MAINLOCK;
+ # Prioritise the available machines as follows:
+ # - First by load divided by speed factor, rounded to the nearest
+ # integer. This causes fast machines to be preferred over slow
+ # machines with similar loads.
+ # - Then by speed factor.
+ # - Finally by load.
+ sub lf { my $x = shift; return int($x->{load} / $x->{machine}->{speedFactor} + 0.4999); }
+ @available = sort
+ { lf($a) <=> lf($b)
+ || $b->{machine}->{speedFactor} <=> $a->{machine}->{speedFactor}
+ || $a->{load} <=> $b->{load}
+ } @available;
- # Connect to the selected machine.
- @sshOpts = ("-i", $machine->{sshKeys}, "-x");
- $hostName = $machine->{hostName};
- last if openSSHConnection $hostName;
+ # Select the best available machine and lock a free slot.
+ my $selected = $available[0];
+ my $machine = $selected->{machine};
+
+ $slotLock = openSlotLock($machine, $selected->{free});
+ flock($slotLock, LOCK_EX | LOCK_NB) or die;
+ utime undef, undef, $slotLock;
+
+ close MAINLOCK;
+
+
+ # Connect to the selected machine.
+ @sshOpts = ("-i", $machine->{sshKeys}, "-x");
+ $hostName = $machine->{hostName};
+ last REQ if openSSHConnection $hostName;
- warn "unable to open SSH connection to $hostName, trying other available machines...\n";
- $machine->{enabled} = 0;
+ warn "unable to open SSH connection to $hostName, trying other available machines...\n";
+ $machine->{enabled} = 0;
+ }
}
# Tell Nix we've accepted the build.
sendReply "accept";
-my $x = <STDIN>;
-chomp $x;
-
-if ($x ne "okay") {
- exit 0;
-}
+my @inputs = split /\s/, readline(STDIN);
+my @outputs = split /\s/, readline(STDIN);
-# Do the actual build.
print STDERR "building `$drvPath' on `$hostName'\n";
+print STDERR "@ build-remote $drvPath $hostName\n" if $printBuildTrace;
-my $inputs = `cat inputs`; die if ($? != 0);
-$inputs =~ s/\n/ /g;
-
-my $outputs = `cat outputs`; die if ($? != 0);
-$outputs =~ s/\n/ /g;
-
-print "copying inputs...\n";
my $maybeSign = "";
$maybeSign = "--sign" if -e "/nix/etc/nix/signing-key.sec";
-system("NIX_SSHOPTS=\"@sshOpts\" @bindir@/nix-copy-closure $hostName $maybeSign $drvPath $inputs") == 0
+
+# Register the derivation as a temporary GC root. Note that $PPID is
+# the PID of the remote SSH process, which, due to the use of a
+# persistent SSH connection, should be the same across all remote
+# command invocations for this session.
+my $rootsDir = "@localstatedir@/nix/gcroots/tmp";
+system("ssh $hostName @sshOpts 'mkdir -m 1777 -p $rootsDir; ln -sfn $drvPath $rootsDir/\$PPID.drv'");
+
+sub removeRoots {
+ system("ssh $hostName @sshOpts 'rm -f $rootsDir/\$PPID.drv $rootsDir/\$PPID.out'");
+}
+
+
+# Copy the derivation and its dependencies to the build machine.
+system("NIX_SSHOPTS=\"@sshOpts\" @bindir@/nix-copy-closure $hostName $maybeSign $drvPath @inputs") == 0
or die "cannot copy inputs to $hostName: $?";
-print "building...\n";
-my $buildFlags = "--max-silent-time $maxSilentTime --fallback";
+# Perform the build.
+my $buildFlags = "--max-silent-time $maxSilentTime --fallback --add-root $rootsDir/\$PPID.out --option verbosity 0";
-# `-tt' forces allocation of a pseudo-terminal. This is required to
-# make the remote nix-store process receive a signal when the
-# connection dies. Without it, the remote process might continue to
-# run indefinitely (that is, until it next tries to write to
-# stdout/stderr).
-if (system("ssh $hostName @sshOpts -tt 'nix-store -r $drvPath $buildFlags > /dev/null'") != 0) {
- # If we couldn't run ssh or there was an ssh problem (indicated by
- # exit code 255), then we return exit code 1; otherwise we assume
- # that the builder failed, which we indicate to Nix using exit
- # code 100. It's important to distinguish between the two because
- # the first is a transient failure and the latter is permanent.
- my $res = $? == -1 || ($? >> 8) == 255 ? 1 : 100;
- print STDERR "build of `$drvPath' on `$hostName' failed with exit code $?\n";
+# We let the remote side kill its process group when the connection is
+# closed unexpectedly. This is necessary to ensure that no processes
+# are left running on the remote system if the local Nix process is
+# killed. (SSH itself doesn't kill child processes if the connection
+# is interrupted unless the `-tt' flag is used to force a pseudo-tty,
+# in which case every child receives SIGHUP; however, `-tt' doesn't
+# work on some platforms when connection sharing is used.)
+pipe STDIN, DUMMY; # make sure we have a readable STDIN
+if (system("ssh $hostName @sshOpts '(read; kill -INT -\$\$) <&0 & nix-store -r $drvPath $buildFlags > /dev/null' 2>&4") != 0) {
+ # Note that if we get exit code 100 from `nix-store -r', it
+ # denotes a permanent build failure (as opposed to an SSH problem
+ # or a temporary Nix problem). We propagate this to the caller to
+ # allow it to distinguish between transient and permanent
+ # failures.
+ my $res = $? >> 8;
+ print STDERR "build of `$drvPath' on `$hostName' failed with exit code $res\n";
+ removeRoots;
exit $res;
}
-print "build of `$drvPath' on `$hostName' succeeded\n";
+#print "build of `$drvPath' on `$hostName' succeeded\n";
-foreach my $output (split '\n', $outputs) {
+
+# Copy the output from the build machine.
+foreach my $output (@outputs) {
my $maybeSignRemote = "";
$maybeSignRemote = "--sign" if $UID != 0;
- system("ssh $hostName @sshOpts 'nix-store --export $maybeSignRemote $output' | @bindir@/nix-store --import > /dev/null") == 0
+ system("ssh $hostName @sshOpts 'nix-store --export $maybeSignRemote $output'" .
+ "| NIX_HELD_LOCKS=$output @bindir@/nix-store --import > /dev/null") == 0
or die "cannot copy $output from $hostName: $?";
}
+
+
+# Get rid of the temporary GC roots.
+removeRoots;
diff --git a/scripts/copy-from-other-stores.pl.in b/scripts/copy-from-other-stores.pl.in
index 8f0ff4ca8..10130c089 100644
--- a/scripts/copy-from-other-stores.pl.in
+++ b/scripts/copy-from-other-stores.pl.in
@@ -17,25 +17,19 @@ foreach my $dir (@remoteStoresAll) {
}
+$ENV{"NIX_REMOTE"} = "";
+
+
sub findStorePath {
my $storePath = shift;
-
- my $storePathName = basename $storePath;
-
foreach my $store (@remoteStores) {
- # Determine whether $storePath exists by looking for the
- # existence of the info file, and if so, get store path info
- # from that file. This rather breaks abstraction: we should
- # be using `nix-store' for that. But right now there is no
- # good way to tell nix-store to access a store mounted under a
- # different location (there's $NIX_STORE, but that only works
- # if the remote store is mounted under its "real" location).
- my $infoFile = "$store/var/nix/db/info/$storePathName";
- my $storePath2 = "$store/store/$storePathName";
- if (-f $infoFile && -e $storePath2) {
- return ($infoFile, $storePath2);
- }
+ my $sourcePath = "$store/store/" . basename $storePath;
+ next unless -e $sourcePath || -l $sourcePath;
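+ # Point nix-store at the other store's database for the validity check.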
+ $ENV{"NIX_DB_DIR"} = "$store/var/nix/db";
+ return ($store, $sourcePath) if
+ system("@bindir@/nix-store --check-validity $storePath") == 0;
}
+ return undef;
}
@@ -46,37 +40,38 @@ if ($ARGV[0] eq "--query") {
if ($cmd eq "have") {
my $storePath = <STDIN>; chomp $storePath;
- (my $infoFile) = findStorePath $storePath;
- print STDOUT ($infoFile ? "1\n" : "0\n");
+ print STDOUT (defined findStorePath($storePath) ? "1\n" : "0\n");
}
elsif ($cmd eq "info") {
my $storePath = <STDIN>; chomp $storePath;
- (my $infoFile) = findStorePath $storePath;
- if (!$infoFile) {
+ my ($store, $sourcePath) = findStorePath($storePath);
+ if (!defined $store) {
print "0\n";
next; # not an error
}
print "1\n";
- my $deriver = "";
- my @references = ();
+ $ENV{"NIX_DB_DIR"} = "$store/var/nix/db";
+
+ my $deriver = `@bindir@/nix-store --query --deriver $storePath`;
+ die "cannot query deriver of `$storePath'" if $? != 0;
+ chomp $deriver;
+ $deriver = "" if $deriver eq "unknown-deriver";
- open INFO, "<$infoFile" or die "cannot read info file $infoFile\n";
- while () {
- chomp;
- /^([\w-]+): (.*)$/ or die "bad info file";
- my $key = $1;
- my $value = $2;
- if ($key eq "Deriver") { $deriver = $value; }
- elsif ($key eq "References") { @references = split ' ', $value; }
- }
- close INFO;
+ my @references = split "\n",
+ `@bindir@/nix-store --query --references $storePath`;
+ die "cannot query references of `$storePath'" if $? != 0;
+
+ my $narSize = `@bindir@/nix-store --query --size $storePath`;
+ die "cannot query size of `$storePath'" if $? != 0;
+ chomp $narSize;
print "$deriver\n";
print scalar @references, "\n";
print "$_\n" foreach @references;
- print "0\n"; # !!! showing size not supported (yet)
+ print "$narSize\n";
+ print "$narSize\n";
}
else { die "unknown command `$cmd'"; }
@@ -87,8 +82,8 @@ if ($ARGV[0] eq "--query") {
elsif ($ARGV[0] eq "--substitute") {
die unless scalar @ARGV == 2;
my $storePath = $ARGV[1];
- (my $infoFile, my $sourcePath) = findStorePath $storePath;
- die unless $infoFile;
+ my ($store, $sourcePath) = findStorePath $storePath;
+ die unless $store;
print "\n*** Copying `$storePath' from `$sourcePath'\n\n";
system("$binDir/nix-store --dump $sourcePath | $binDir/nix-store --restore $storePath") == 0
or die "cannot copy `$sourcePath' to `$storePath'";
diff --git a/scripts/download-using-manifests.pl.in b/scripts/download-using-manifests.pl.in
index d48c7dd4b..fe80bc11f 100644
--- a/scripts/download-using-manifests.pl.in
+++ b/scripts/download-using-manifests.pl.in
@@ -1,7 +1,7 @@
#! @perl@ -w -I@libexecdir@/nix
use strict;
-use readmanifest;
+use NixManifest;
use POSIX qw(strftime);
use File::Temp qw(tempdir);
@@ -12,6 +12,10 @@ STDOUT->autoflush(1);
my $manifestDir = ($ENV{"NIX_MANIFESTS_DIR"} or "@localstatedir@/nix/manifests");
my $logFile = "@localstatedir@/log/nix/downloads";
+# For queries, skip expensive calls to nix-hash etc. We're just
+# estimating the expected download size.
+my $fast = 1;
+
# Load all manifests.
my %narFiles;
@@ -31,6 +35,151 @@ for my $manifest (glob "$manifestDir/*.nixmanifest") {
}
+sub isValidPath {
+ my $p = shift;
+ if ($fast) {
+ return -e $p;
+ } else {
+ return system("$binDir/nix-store --check-validity '$p' 2> /dev/null") == 0;
+ }
+}
+
+
+sub parseHash {
+ my $hash = shift;
+ if ($hash =~ /^(.+):(.+)$/) {
+ return ($1, $2);
+ } else {
+ return ("md5", $hash);
+ }
+}
+
+
+# Compute the most efficient sequence of downloads to produce the
+# given path.
+sub computeSmallestDownload {
+ my $targetPath = shift;
+
+ # Build a graph of all store paths that might contribute to the
+ # construction of $targetPath, and the special node "start". The
+ # edges are either patch operations, or downloads of full NAR
+ # files. The latter edges only occur between "start" and a store
+ # path.
+ my %graph;
+
+ $graph{"start"} = {d => 0, pred => undef, edges => []};
+
+ my @queue = ();
+ my $queueFront = 0;
+ my %done;
+
+ sub addNode {
+ my $graph = shift;
+ my $u = shift;
+ $$graph{$u} = {d => 999999999999, pred => undef, edges => []}
+ unless defined $$graph{$u};
+ }
+
+ sub addEdge {
+ my $graph = shift;
+ my $u = shift;
+ my $v = shift;
+ my $w = shift;
+ my $type = shift;
+ my $info = shift;
+ addNode $graph, $u;
+ push @{$$graph{$u}->{edges}},
+ {weight => $w, start => $u, end => $v, type => $type, info => $info};
+ my $n = scalar @{$$graph{$u}->{edges}};
+ }
+
+ push @queue, $targetPath;
+
+ while ($queueFront < scalar @queue) {
+ my $u = $queue[$queueFront++];
+ next if defined $done{$u};
+ $done{$u} = 1;
+
+ addNode \%graph, $u;
+
+ # If the path already exists, it has distance 0 from the
+ # "start" node.
+ if (isValidPath($u)) {
+ addEdge \%graph, "start", $u, 0, "present", undef;
+ }
+
+ else {
+
+ # Add patch edges.
+ my $patchList = $patches{$u};
+ foreach my $patch (@{$patchList}) {
+ if (isValidPath($patch->{basePath})) {
+ # !!! this should be cached
+ my ($baseHashAlgo, $baseHash) = parseHash $patch->{baseHash};
+ my $format = "--base32";
+ $format = "" if $baseHashAlgo eq "md5";
+ my $hash = $fast && $baseHashAlgo eq "sha256"
+ ? `$binDir/nix-store -q --hash "$patch->{basePath}"`
+ : `$binDir/nix-hash --type '$baseHashAlgo' $format "$patch->{basePath}"`;
+ chomp $hash;
+ $hash =~ s/.*://;
+ next if $hash ne $baseHash;
+ }
+ push @queue, $patch->{basePath};
+ addEdge \%graph, $patch->{basePath}, $u, $patch->{size}, "patch", $patch;
+ }
+
+ # Add NAR file edges to the start node.
+ my $narFileList = $narFiles{$u};
+ foreach my $narFile (@{$narFileList}) {
+ # !!! how to handle files whose size is not known in advance?
+ # For now, assume some arbitrary size (1 MB).
+ addEdge \%graph, "start", $u, ($narFile->{size} || 1000000), "narfile", $narFile;
+ }
+ }
+ }
+
+
+ # Run Dijkstra's shortest path algorithm to determine the shortest
+ # sequence of download and/or patch actions that will produce
+ # $targetPath.
+
+ my @todo = keys %graph;
+
+ while (scalar @todo > 0) {
+
+ # Remove the closest element from the todo list.
+ # !!! inefficient, use a priority queue
+ @todo = sort { -($graph{$a}->{d} <=> $graph{$b}->{d}) } @todo;
+ my $u = pop @todo;
+
+ my $u_ = $graph{$u};
+
+ foreach my $edge (@{$u_->{edges}}) {
+ my $v_ = $graph{$edge->{end}};
+ if ($v_->{d} > $u_->{d} + $edge->{weight}) {
+ $v_->{d} = $u_->{d} + $edge->{weight};
+ # Store the edge; edge->start is the predecessor on
+ # the shortest path.
+ $v_->{pred} = $edge;
+ }
+ }
+ }
+
+
+ # Retrieve the shortest path from "start" to $targetPath.
+ my @path = ();
+ my $cur = $targetPath;
+ return () unless defined $graph{$targetPath}->{pred};
+ while ($cur ne "start") {
+ push @path, $graph{$cur}->{pred};
+ $cur = $graph{$cur}->{pred}->{start};
+ }
+
+ return @path;
+}
+
+
# Parse the arguments.
if ($ARGV[0] eq "--query") {
@@ -46,6 +195,7 @@ if ($ARGV[0] eq "--query") {
elsif ($cmd eq "info") {
my $storePath = <STDIN>; chomp $storePath;
+
my $info;
if (defined $narFiles{$storePath}) {
$info = @{$narFiles{$storePath}}[0];
@@ -57,13 +207,32 @@ if ($ARGV[0] eq "--query") {
print "0\n";
next; # not an error
}
+
print "1\n";
print "$info->{deriver}\n";
my @references = split " ", $info->{references};
print scalar @references, "\n";
print "$_\n" foreach @references;
- my $size = $info->{size} || 0;
- print "$size\n";
+
+ my @path = computeSmallestDownload $storePath;
+
+ my $downloadSize = 0;
+ while (scalar @path > 0) {
+ my $edge = pop @path;
+ my $u = $edge->{start};
+ my $v = $edge->{end};
+ if ($edge->{type} eq "patch") {
+ $downloadSize += $edge->{info}->{size} || 0;
+ }
+ elsif ($edge->{type} eq "narfile") {
+ $downloadSize += $edge->{info}->{size} || 0;
+ }
+ }
+
+ print "$downloadSize\n";
+
+ my $narSize = $info->{narSize} || 0;
+ print "$narSize\n";
}
else { die "unknown command `$cmd'"; }
@@ -79,6 +248,7 @@ elsif ($ARGV[0] ne "--substitute") {
die unless scalar @ARGV == 2;
my $targetPath = $ARGV[1];
+$fast = 0;
# Create a temporary directory.
@@ -110,148 +280,9 @@ foreach my $localPath (@{$localPathList}) {
}
-# Build a graph of all store paths that might contribute to the
-# construction of $targetPath, and the special node "start". The
-# edges are either patch operations, or downloads of full NAR files.
-# The latter edges only occur between "start" and a store path.
-
-my %graph;
-
-$graph{"start"} = {d => 0, pred => undef, edges => []};
-
-my @queue = ();
-my $queueFront = 0;
-my %done;
-
-sub addToQueue {
- my $v = shift;
- return if defined $done{$v};
- $done{$v} = 1;
- push @queue, $v;
-}
-
-sub addNode {
- my $u = shift;
- $graph{$u} = {d => 999999999999, pred => undef, edges => []}
- unless defined $graph{$u};
-}
-
-sub addEdge {
- my $u = shift;
- my $v = shift;
- my $w = shift;
- my $type = shift;
- my $info = shift;
- addNode $u;
- push @{$graph{$u}->{edges}},
- {weight => $w, start => $u, end => $v, type => $type, info => $info};
- my $n = scalar @{$graph{$u}->{edges}};
-}
-
-addToQueue $targetPath;
-
-sub isValidPath {
- my $p = shift;
- return system("$binDir/nix-store --check-validity '$p' 2> /dev/null") == 0;
-}
-
-sub parseHash {
- my $hash = shift;
- if ($hash =~ /^(.+):(.+)$/) {
- return ($1, $2);
- } else {
- return ("md5", $hash);
- }
-}
-
-while ($queueFront < scalar @queue) {
- my $u = $queue[$queueFront++];
-# print "$u\n";
-
- addNode $u;
-
- # If the path already exists, it has distance 0 from the "start"
- # node.
- if (isValidPath($u)) {
- addEdge "start", $u, 0, "present", undef;
- }
-
- else {
-
- # Add patch edges.
- my $patchList = $patches{$u};
- foreach my $patch (@{$patchList}) {
- if (isValidPath($patch->{basePath})) {
- # !!! this should be cached
- my ($baseHashAlgo, $baseHash) = parseHash $patch->{baseHash};
- my $format = "--base32";
- $format = "" if $baseHashAlgo eq "md5";
- my $hash = `$binDir/nix-hash --type '$baseHashAlgo' $format "$patch->{basePath}"`;
- chomp $hash;
- if ($hash ne $baseHash) {
- print LOGFILE "$$ rejecting $patch->{basePath}\n";
- next;
- }
- }
- addToQueue $patch->{basePath};
- addEdge $patch->{basePath}, $u, $patch->{size}, "patch", $patch;
- }
-
- # Add NAR file edges to the start node.
- my $narFileList = $narFiles{$u};
- foreach my $narFile (@{$narFileList}) {
- # !!! how to handle files whose size is not known in advance?
- # For now, assume some arbitrary size (1 MB).
- addEdge "start", $u, ($narFile->{size} || 1000000), "narfile", $narFile;
- if ($u eq $targetPath) {
- my $size = $narFile->{size} || -1;
- print LOGFILE "$$ full-download-would-be $size\n";
- }
- }
-
- }
-}
-
-
-# Run Dijkstra's shortest path algorithm to determine the shortest
-# sequence of download and/or patch actions that will produce
-# $targetPath.
-
-sub byDistance { # sort by distance, reversed
- return -($graph{$a}->{d} <=> $graph{$b}->{d});
-}
-
-my @todo = keys %graph;
-
-while (scalar @todo > 0) {
-
- # Remove the closest element from the todo list.
- @todo = sort byDistance @todo;
- my $u = pop @todo;
-
- my $u_ = $graph{$u};
-
- foreach my $edge (@{$u_->{edges}}) {
- my $v_ = $graph{$edge->{end}};
- if ($v_->{d} > $u_->{d} + $edge->{weight}) {
- $v_->{d} = $u_->{d} + $edge->{weight};
- # Store the edge; to edge->start is actually the
- # predecessor.
- $v_->{pred} = $edge;
- }
- }
-}
-
-
-# Retrieve the shortest path from "start" to $targetPath.
-my @path = ();
-my $cur = $targetPath;
-die "don't know how to produce $targetPath\n"
- unless defined $graph{$targetPath}->{pred};
-while ($cur ne "start") {
- push @path, $graph{$cur}->{pred};
- $cur = $graph{$cur}->{pred}->{start};
-}
+# Compute the shortest path.
+my @path = computeSmallestDownload $targetPath;
+die "don't know how to produce $targetPath\n" if scalar @path == 0;
# Traverse the shortest path, perform the actions described by the
diff --git a/scripts/generate-patches.pl.in b/scripts/generate-patches.pl.in
deleted file mode 100755
index dba647350..000000000
--- a/scripts/generate-patches.pl.in
+++ /dev/null
@@ -1,416 +0,0 @@
-#! @perl@ -w -I@libexecdir@/nix
-
-use strict;
-use File::Temp qw(tempdir);
-use readmanifest;
-
-
-# Some patch generations options.
-
-# Max size of NAR archives to generate patches for.
-my $maxNarSize = $ENV{"NIX_MAX_NAR_SIZE"};
-$maxNarSize = 100 * 1024 * 1024 if !defined $maxNarSize;
-
-# If patch is bigger than this fraction of full archive, reject.
-my $maxPatchFraction = $ENV{"NIX_PATCH_FRACTION"};
-$maxPatchFraction = 0.60 if !defined $maxPatchFraction;
-
-
-die unless scalar @ARGV == 5;
-
-my $hashAlgo = "sha256";
-
-my $narDir = $ARGV[0];
-my $patchesDir = $ARGV[1];
-my $patchesURL = $ARGV[2];
-my $srcManifest = $ARGV[3];
-my $dstManifest = $ARGV[4];
-
-my $tmpDir = tempdir("nix-generate-patches.XXXXXX", CLEANUP => 1, TMPDIR => 1)
- or die "cannot create a temporary directory";
-
-print "TEMP = $tmpDir\n";
-
-#END { rmdir $tmpDir; }
-
-my %srcNarFiles;
-my %srcLocalPaths;
-my %srcPatches;
-
-my %dstNarFiles;
-my %dstLocalPaths;
-my %dstPatches;
-
-readManifest "$srcManifest",
- \%srcNarFiles, \%srcLocalPaths, \%srcPatches;
-
-readManifest "$dstManifest",
- \%dstNarFiles, \%dstLocalPaths, \%dstPatches;
-
-
-sub findOutputPaths {
- my $narFiles = shift;
-
- my %outPaths;
-
- foreach my $p (keys %{$narFiles}) {
-
- # Ignore derivations.
- next if ($p =~ /\.drv$/);
-
- # Ignore builders (too much ambiguity -- they're all called
- # `builder.sh').
- next if ($p =~ /\.sh$/);
- next if ($p =~ /\.patch$/);
-
- # Don't bother including tar files etc.
- next if ($p =~ /\.tar$/ || $p =~ /\.tar\.(gz|bz2|Z|lzma|xz)$/ || $p =~ /\.zip$/ || $p =~ /\.bin$/ || $p =~ /\.tgz$/ || $p =~ /\.rpm$/ || $p =~ /cvs-export$/ || $p =~ /fetchhg$/);
-
- $outPaths{$p} = 1;
- }
-
- return %outPaths;
-}
-
-print "finding src output paths...\n";
-my %srcOutPaths = findOutputPaths \%srcNarFiles;
-
-print "finding dst output paths...\n";
-my %dstOutPaths = findOutputPaths \%dstNarFiles;
-
-
-sub getNameVersion {
- my $p = shift;
- $p =~ /\/[0-9a-z]+((?:-[a-zA-Z][^\/-]*)+)([^\/]*)$/;
- my $name = $1;
- my $version = $2;
- return undef unless defined $name && defined $version;
- $name =~ s/^-//;
- $version =~ s/^-//;
- return ($name, $version);
-}
-
-
-# A quick hack to get a measure of the `distance' between two
-# versions: it's just the position of the first character that differs
-# (or 999 if they are the same).
-sub versionDiff {
- my $s = shift;
- my $t = shift;
- my $i;
- return 999 if $s eq $t;
- for ($i = 0; $i < length $s; $i++) {
- return $i if $i >= length $t or
- substr($s, $i, 1) ne substr($t, $i, 1);
- }
- return $i;
-}
-
-
-sub getNarBz2 {
- my $narFiles = shift;
- my $storePath = shift;
-
- my $narFileList = $$narFiles{$storePath};
- die "missing path $storePath" unless defined $narFileList;
-
- my $narFile = @{$narFileList}[0];
- die unless defined $narFile;
-
- $narFile->{url} =~ /\/([^\/]+)$/;
- die unless defined $1;
- return "$narDir/$1";
-}
-
-
-sub containsPatch {
- my $patches = shift;
- my $storePath = shift;
- my $basePath = shift;
- my $patchList = $$patches{$storePath};
- return 0 if !defined $patchList;
- my $found = 0;
- foreach my $patch (@{$patchList}) {
- # !!! baseHash might differ
- return 1 if $patch->{basePath} eq $basePath;
- }
- return 0;
-}
-
-
-# Compute the "weighted" number of uses of a path in the build graph.
-sub computeUses {
- my $narFiles = shift;
- my $path = shift;
-
- # Find the deriver of $path.
- return 1 unless defined $$narFiles{$path};
- my $deriver = @{$$narFiles{$path}}[0]->{deriver};
- return 1 unless defined $deriver && $deriver ne "";
-
-# print " DERIVER $deriver\n";
-
- # Optimisation: build the referrers graph from the references
- # graph.
- my %referrers;
- foreach my $q (keys %{$narFiles}) {
- my @refs = split " ", @{$$narFiles{$q}}[0]->{references};
- foreach my $r (@refs) {
- $referrers{$r} = [] unless defined $referrers{$r};
- push @{$referrers{$r}}, $q;
- }
- }
-
- # Determine the shortest path from $deriver to all other reachable
- # paths in the `referrers' graph.
-
- my %dist;
- $dist{$deriver} = 0;
-
- my @queue = ($deriver);
- my $pos = 0;
-
- while ($pos < scalar @queue) {
- my $p = $queue[$pos];
- $pos++;
-
- foreach my $q (@{$referrers{$p}}) {
- if (!defined $dist{$q}) {
- $dist{$q} = $dist{$p} + 1;
-# print " $q $dist{$q}\n";
- push @queue, $q;
- }
- }
- }
-
- my $wuse = 1.0;
- foreach my $user (keys %dist) {
- next if $user eq $deriver;
-# print " $user $dist{$user}\n";
- $wuse += 1.0 / 2.0**$dist{$user};
- }
-
-# print " XXX $path $wuse\n";
-
- return $wuse;
-}
-
-
-# For each output path in the destination, see if we need to / can
-# create a patch.
-
-print "creating patches...\n";
-
-foreach my $p (keys %dstOutPaths) {
-
- # If exactly the same path already exists in the source, skip it.
- next if defined $srcOutPaths{$p};
-
- print " $p\n";
-
- # If not, then we should find the paths in the source that are
- # `most' likely to be present on a system that wants to install
- # this path.
-
- (my $name, my $version) = getNameVersion $p;
- next unless defined $name && defined $version;
-
- my @closest = ();
- my $closestVersion;
- my $minDist = -1; # actually, larger means closer
-
- # Find all source paths with the same name.
-
- foreach my $q (keys %srcOutPaths) {
- (my $name2, my $version2) = getNameVersion $q;
- next unless defined $name2 && defined $version2;
-
- if ($name eq $name2) {
-
- # If the sizes differ too much, then skip. This
- # disambiguates between, e.g., a real component and a
- # wrapper component (cf. Firefox in Nixpkgs).
- my $srcSize = @{$srcNarFiles{$q}}[0]->{size};
- my $dstSize = @{$dstNarFiles{$p}}[0]->{size};
- my $ratio = $srcSize / $dstSize;
- $ratio = 1 / $ratio if $ratio < 1;
-# print " SIZE $srcSize $dstSize $ratio $q\n";
-
- if ($ratio >= 3) {
- print " SKIPPING $q due to size ratio $ratio ($srcSize $dstSize)\n";
- next;
- }
-
- # If the numbers of weighted uses differ too much, then
- # skip. This disambiguates between, e.g., the bootstrap
- # GCC and the final GCC in Nixpkgs.
-# my $srcUses = computeUses \%srcNarFiles, $q;
-# my $dstUses = computeUses \%dstNarFiles, $p;
-# $ratio = $srcUses / $dstUses;
-# $ratio = 1 / $ratio if $ratio < 1;
-# print " USE $srcUses $dstUses $ratio $q\n";
-
-# if ($ratio >= 2) {
-# print " SKIPPING $q due to use ratio $ratio ($srcUses $dstUses)\n";
-# next;
-# }
-
- # If there are multiple matching names, include the ones
- # with the closest version numbers.
- my $dist = versionDiff $version, $version2;
- if ($dist > $minDist) {
- $minDist = $dist;
- @closest = ($q);
- $closestVersion = $version2;
- } elsif ($dist == $minDist) {
- push @closest, $q;
- }
- }
- }
-
- if (scalar(@closest) == 0) {
- print " NO BASE: $p\n";
- next;
- }
-
- foreach my $closest (@closest) {
-
- # Generate a patch between $closest and $p.
- print " $p <- $closest\n";
-
- # If the patch already exists, skip it.
- if (containsPatch(\%srcPatches, $p, $closest) ||
- containsPatch(\%dstPatches, $p, $closest))
- {
- print " skipping, already exists\n";
- next;
- }
-
-# next;
-
- my $srcNarBz2 = getNarBz2 \%srcNarFiles, $closest;
- my $dstNarBz2 = getNarBz2 \%dstNarFiles, $p;
-
- if (! -f $srcNarBz2) {
- warn "patch source archive $srcNarBz2 is missing\n";
- next;
- }
-
- system("@bunzip2@ < $srcNarBz2 > $tmpDir/A") == 0
- or die "cannot unpack $srcNarBz2";
-
- if ((stat "$tmpDir/A")[7] >= $maxNarSize) {
- print " skipping, source is too large\n";
- next;
- }
-
- system("@bunzip2@ < $dstNarBz2 > $tmpDir/B") == 0
- or die "cannot unpack $dstNarBz2";
-
- if ((stat "$tmpDir/B")[7] >= $maxNarSize) {
- print " skipping, destination is too large\n";
- next;
- }
-
- system("@libexecdir@/bsdiff $tmpDir/A $tmpDir/B $tmpDir/DIFF") == 0
- or die "cannot compute binary diff";
-
- my $baseHash = `@bindir@/nix-hash --flat --type $hashAlgo --base32 $tmpDir/A` or die;
- chomp $baseHash;
-
- my $narHash = `@bindir@/nix-hash --flat --type $hashAlgo --base32 $tmpDir/B` or die;
- chomp $narHash;
-
- my $narDiffHash = `@bindir@/nix-hash --flat --type $hashAlgo --base32 $tmpDir/DIFF` or die;
- chomp $narDiffHash;
-
- my $narDiffSize = (stat "$tmpDir/DIFF")[7];
- my $dstNarBz2Size = (stat $dstNarBz2)[7];
-
- print " size $narDiffSize; full size $dstNarBz2Size\n";
-
- if ($narDiffSize >= $dstNarBz2Size) {
- print " rejecting; patch bigger than full archive\n";
- next;
- }
-
- if ($narDiffSize / $dstNarBz2Size >= $maxPatchFraction) {
- print " rejecting; patch too large relative to full archive\n";
- next;
- }
-
- my $finalName =
- "$narDiffHash.nar-bsdiff";
-
- if (-e "$patchesDir/$finalName") {
- print " not copying, already exists\n";
- }
-
- else {
-
- system("cp '$tmpDir/DIFF' '$patchesDir/$finalName.tmp'") == 0
- or die "cannot copy diff";
-
- rename("$patchesDir/$finalName.tmp", "$patchesDir/$finalName")
- or die "cannot rename $patchesDir/$finalName.tmp";
-
- }
-
- # Add the patch to the manifest.
- addPatch \%dstPatches, $p,
- { url => "$patchesURL/$finalName", hash => "$hashAlgo:$narDiffHash"
- , size => $narDiffSize, basePath => $closest, baseHash => "$hashAlgo:$baseHash"
- , narHash => "$hashAlgo:$narHash", patchType => "nar-bsdiff"
- }, 0;
- }
-}
-
-
-# Add in any potentially useful patches in the source (namely, those
-# patches that produce either paths in the destination or paths that
-# can be used as the base for other useful patches).
-
-print "propagating patches...\n";
-
-my $changed;
-do {
- # !!! we repeat this to reach the transitive closure; inefficient
- $changed = 0;
-
- print "loop\n";
-
- my %dstBasePaths;
- foreach my $q (keys %dstPatches) {
- foreach my $patch (@{$dstPatches{$q}}) {
- $dstBasePaths{$patch->{basePath}} = 1;
- }
- }
-
- foreach my $p (keys %srcPatches) {
- my $patchList = $srcPatches{$p};
-
- my $include = 0;
-
- # Is path $p included in the destination? If so, include
- # patches that produce it.
- $include = 1 if defined $dstNarFiles{$p};
-
- # Is path $p a path that serves as a base for paths in the
- # destination? If so, include patches that produce it.
- # !!! check baseHash
- $include = 1 if defined $dstBasePaths{$p};
-
- if ($include) {
- foreach my $patch (@{$patchList}) {
- $changed = 1 if addPatch \%dstPatches, $p, $patch;
- }
- }
-
- }
-
-} while $changed;
-
-
-# Rewrite the manifest of the destination (with the new patches).
-writeManifest "${dstManifest}",
- \%dstNarFiles, \%dstPatches;
diff --git a/scripts/nix-build.in b/scripts/nix-build.in
index ed85d5712..f9d81b36c 100644
--- a/scripts/nix-build.in
+++ b/scripts/nix-build.in
@@ -123,6 +123,11 @@ EOF
$verbose = 1;
}
+ elsif ($arg eq "--quiet") {
+ push @buildArgs, $arg;
+ push @instArgs, $arg;
+ }
+
elsif (substr($arg, 0, 1) eq "-") {
push @buildArgs, $arg;
}
@@ -165,7 +170,7 @@ foreach my $expr (@exprs) {
# Build.
my @outPaths;
- $pid = open(OUTPATHS, "-|") || exec "$binDir/nix-store", "--add-root", $outLink, "--indirect", "-rv",
+ $pid = open(OUTPATHS, "-|") || exec "$binDir/nix-store", "--add-root", $outLink, "--indirect", "-r",
@buildArgs, @drvPaths;
 while (<OUTPATHS>) {chomp; push @outPaths, $_;}
if (!close OUTPATHS) {
diff --git a/scripts/nix-copy-closure.in b/scripts/nix-copy-closure.in
index a477cc01a..c037f003f 100644
--- a/scripts/nix-copy-closure.in
+++ b/scripts/nix-copy-closure.in
@@ -1,6 +1,6 @@
#! @perl@ -w -I@libexecdir@/nix
-use ssh;
+use SSH;
my $binDir = $ENV{"NIX_BIN_DIR"} || "@bindir@";
@@ -61,7 +61,7 @@ if ($toMode) { # Copy TO the remote machine.
my @allStorePaths;
# Get the closure of this path.
- my $pid = open(READ, "$binDir/nix-store --query --requisites @storePaths|") or die;
+ my $pid = open(READ, "set -f; $binDir/nix-store --query --requisites @storePaths|") or die;
 while (<READ>) {
chomp;
@@ -73,7 +73,7 @@ if ($toMode) { # Copy TO the remote machine.
# Ask the remote host which paths are invalid.
- open(READ, "ssh $sshHost @sshOpts nix-store --check-validity --print-invalid @allStorePaths|");
+ open(READ, "set -f; ssh $sshHost @sshOpts nix-store --check-validity --print-invalid @allStorePaths|");
my @missing = ();
 while (<READ>) {
chomp;
@@ -88,7 +88,7 @@ if ($toMode) { # Copy TO the remote machine.
print STDERR " $_\n" foreach @missing;
my $extraOpts = "";
$extraOpts .= "--sign" if $sign == 1;
- system("nix-store --export $extraOpts @missing $compressor | ssh $sshHost @sshOpts '$decompressor nix-store --import'") == 0
+ system("set -f; nix-store --export $extraOpts @missing $compressor | ssh $sshHost @sshOpts '$decompressor nix-store --import'") == 0
or die "copying store paths to remote machine `$sshHost' failed: $?";
}
@@ -101,7 +101,7 @@ else { # Copy FROM the remote machine.
# machine. Paths are assumed to be store paths; there is no
# resolution (following of symlinks).
my $pid = open(READ,
- "ssh @sshOpts $sshHost nix-store --query --requisites @storePaths|") or die;
+ "set -f; ssh @sshOpts $sshHost nix-store --query --requisites @storePaths|") or die;
my @allStorePaths;
@@ -115,7 +115,7 @@ else { # Copy FROM the remote machine.
# What paths are already valid locally?
- open(READ, "@bindir@/nix-store --check-validity --print-invalid @allStorePaths|");
+ open(READ, "set -f; @bindir@/nix-store --check-validity --print-invalid @allStorePaths|");
my @missing = ();
 while (<READ>) {
chomp;
@@ -130,7 +130,7 @@ else { # Copy FROM the remote machine.
print STDERR " $_\n" foreach @missing;
my $extraOpts = "";
$extraOpts .= "--sign" if $sign == 1;
- system("ssh $sshHost @sshOpts 'nix-store --export $extraOpts @missing $compressor' | $decompressor @bindir@/nix-store --import") == 0
+ system("set -f; ssh $sshHost @sshOpts 'nix-store --export $extraOpts @missing $compressor' | $decompressor @bindir@/nix-store --import") == 0
or die "copying store paths from remote machine `$sshHost' failed: $?";
}
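The `set -f' prefixes added above disable pathname globbing in the shell that runs each pipeline: store names may legally contain `?', which is a glob metacharacter, so interpolating @storePaths unprotected lets the shell rewrite an argument whenever something happens to match. A minimal Perl illustration (the store path is hypothetical):

    # `?' is valid in a store name but is also a shell glob character.
    my $path = "/nix/store/x7ab2qmlrws1pfjy1c4kjvbrvpss2d4i-ncurses-5.7?pre";

    system("ls $path");           # shell may substitute a matching file name
    system("set -f; ls $path");   # globbing off: ls sees the literal path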
diff --git a/scripts/nix-generate-patches.in b/scripts/nix-generate-patches.in
new file mode 100644
index 000000000..c96cc704a
--- /dev/null
+++ b/scripts/nix-generate-patches.in
@@ -0,0 +1,42 @@
+#! @perl@ -w -I@libexecdir@/nix
+
+use strict;
+use File::Temp qw(tempdir);
+use NixManifest;
+use GeneratePatches;
+
+if (scalar @ARGV != 5) {
+    print STDERR <<EOF;
+Usage: nix-generate-patches NAR-DIR PATCH-DIR PATCH-URI OLD-MANIFEST NEW-MANIFEST
+EOF
+    exit 1;
+}
+
+my $narPath = $ARGV[0];
+my $patchesPath = $ARGV[1];
+my $patchesURL = $ARGV[2];
+my $srcManifest = $ARGV[3];
+my $dstManifest = $ARGV[4];
+
+my (%srcNarFiles, %srcLocalPaths, %srcPatches);
+readManifest $srcManifest, \%srcNarFiles, \%srcLocalPaths, \%srcPatches;
+
+my (%dstNarFiles, %dstLocalPaths, %dstPatches);
+readManifest $dstManifest, \%dstNarFiles, \%dstLocalPaths, \%dstPatches;
+
+my $tmpDir = tempdir("nix-generate-patches.XXXXXX", CLEANUP => 1, TMPDIR => 1)
+ or die "cannot create a temporary directory";
+
+generatePatches \%srcNarFiles, \%dstNarFiles, \%srcPatches, \%dstPatches,
+ $narPath, $patchesPath, $patchesURL, $tmpDir;
+
+propagatePatches \%srcPatches, \%dstNarFiles, \%dstPatches;
+
+writeManifest $dstManifest, \%dstNarFiles, \%dstPatches;
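The rewritten script is a thin wrapper around the NixManifest and GeneratePatches modules and takes exactly five arguments. A hypothetical invocation (all paths and the URL are made up):

    # Calling nix-generate-patches from another Perl script:
    system("nix-generate-patches",
        "/data/releases/nars",          # NAR-DIR: where the .nar.bz2 files live
        "/data/releases/patches",       # PATCH-DIR: where patches are written
        "http://example.org/patches",   # PATCH-URI: URL prefix for the manifest
        "/data/releases/old/MANIFEST",  # OLD-MANIFEST
        "/data/releases/new/MANIFEST")  # NEW-MANIFEST: patches are added here
        == 0 or die "nix-generate-patches failed";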
diff --git a/scripts/nix-pull.in b/scripts/nix-pull.in
index e2a0cc1fa..f3b533ff7 100644
--- a/scripts/nix-pull.in
+++ b/scripts/nix-pull.in
@@ -2,7 +2,7 @@
use strict;
use File::Temp qw(tempdir);
-use readmanifest;
+use NixManifest;
my $tmpDir = tempdir("nix-pull.XXXXXX", CLEANUP => 1, TMPDIR => 1)
or die "cannot create a temporary directory";
diff --git a/scripts/nix-push.in b/scripts/nix-push.in
index 38097f740..1d8ba86a8 100644
--- a/scripts/nix-push.in
+++ b/scripts/nix-push.in
@@ -2,7 +2,7 @@
use strict;
use File::Temp qw(tempdir);
-use readmanifest;
+use NixManifest;
my $hashAlgo = "sha256";
@@ -172,12 +172,6 @@ for (my $n = 0; $n < scalar @storePaths; $n++) {
$narbz2Hash =~ /^[0-9a-z]+$/ or die "invalid hash";
close HASH;
- open HASH, "$narDir/nar-hash" or die "cannot open nar-hash";
- my $narHash = <HASH>;
- chomp $narHash;
- $narHash =~ /^[0-9a-z]+$/ or die "invalid hash";
- close HASH;
-
my $narName = "$narbz2Hash.nar.bz2";
my $narFile = "$narDir/$narName";
@@ -195,6 +189,14 @@ for (my $n = 0; $n < scalar @storePaths; $n++) {
chomp $deriver;
$deriver = "" if $deriver eq "unknown-deriver";
+ my $narHash = `$binDir/nix-store --query --hash '$storePath'`;
+ die "cannot query hash for `$storePath'" if $? != 0;
+ chomp $narHash;
+
+ my $narSize = `$binDir/nix-store --query --size '$storePath'`;
+ die "cannot query size for `$storePath'" if $? != 0;
+ chomp $narSize;
+
my $url;
if ($localCopy) {
$url = "$targetArchivesUrl/$narName";
@@ -205,7 +207,8 @@ for (my $n = 0; $n < scalar @storePaths; $n++) {
{ url => $url
, hash => "$hashAlgo:$narbz2Hash"
, size => $narbz2Size
- , narHash => "$hashAlgo:$narHash"
+ , narHash => "$narHash"
+ , narSize => $narSize
, references => $references
, deriver => $deriver
}
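nix-push no longer re-reads a `nar-hash' file; it records the hash and size of the uncompressed NAR straight from the Nix database. Note that `nix-store --query --hash' already returns a prefixed hash ("sha256:..."), which is why narHash is interpolated bare while the bzip2 hash still gets "$hashAlgo:" prepended; narSize is what enables the "MiB unpacked" figure added to shared.cc below. A sketch of the resulting manifest entry (all values are made-up placeholders):

    my $narFile =
        { url        => "http://example.org/nars/0kq3...-hello-2.6.nar.bz2"
        , hash       => "sha256:0kq3..."     # hash of the compressed archive
        , size       => 48271                # size of the .nar.bz2
        , narHash    => "sha256:19ml..."     # uncompressed NAR, already prefixed
        , narSize    => 163840               # bytes when unpacked
        , references => "/nix/store/aaa... /nix/store/bbb..."
        , deriver    => "/nix/store/ccc...-hello-2.6.drv"
        };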
diff --git a/src/bin2c/bin2c.c b/src/bin2c/bin2c.c
index 18bf81d69..5ed8a5708 100644
--- a/src/bin2c/bin2c.c
+++ b/src/bin2c/bin2c.c
@@ -14,10 +14,10 @@ int main(int argc, char * * argv)
{
int c;
if (argc != 2) abort();
- print("static unsigned char %s[] = {", argv[1]);
+ print("static unsigned char %s[] = { ", argv[1]);
while ((c = getchar()) != EOF) {
print("0x%02x, ", (unsigned char) c);
}
- print("};\n");
+ print("0 };\n");
return 0;
}
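With the space after `{' and the trailing `0', bin2c now emits an initialiser that is valid even for empty input and, more importantly, NUL-terminated, so the generated header can be used directly as a C string; the new Makefile.am rule below relies on this to embed schema.sql. A rough Perl equivalent of the patched generator, for illustration only:

    use strict;

    my $name = shift @ARGV or die "usage: bin2c NAME < input\n";
    local $/;                        # slurp the whole input
    my $data = <STDIN>;
    $data = "" unless defined $data;
    print "static unsigned char ${name}[] = { ",
        (map { sprintf "0x%02x, ", ord $_ } split //, $data),
        "0 };\n";                    # trailing 0 NUL-terminates the blob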
diff --git a/src/bsdiff-4.3/bsdiff.c b/src/bsdiff-4.3/bsdiff.c
index 150a7f79c..374ed038f 100644
--- a/src/bsdiff-4.3/bsdiff.c
+++ b/src/bsdiff-4.3/bsdiff.c
@@ -277,6 +277,7 @@ int main(int argc,char *argv[])
 for(scsc=scan+=len;scan<newsize;scan++) {
 len=search(I,old,oldsize,new+scan,newsize-scan,
 0,oldsize,&pos);
+ if(len > 64 * 1024) break;
 for(;scsc<scan+len;scsc++)
diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc
--- a/src/libexpr/primops.cc
+++ b/src/libexpr/primops.cc
@@ ... @@
- mkString(v, string(s, start, len), context);
+ mkString(v, (unsigned int) start >= s.size() ? "" : string(s, start, len), context);
 }
diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc
index 29fc13e33..68f145820 100644
--- a/src/libmain/shared.cc
+++ b/src/libmain/shared.cc
@@ -54,25 +54,26 @@ void printGCWarning()
void printMissing(const PathSet & paths)
{
- unsigned long long downloadSize;
+ unsigned long long downloadSize, narSize;
PathSet willBuild, willSubstitute, unknown;
- queryMissing(paths, willBuild, willSubstitute, unknown, downloadSize);
+ queryMissing(paths, willBuild, willSubstitute, unknown, downloadSize, narSize);
if (!willBuild.empty()) {
- printMsg(lvlInfo, format("the following derivations will be built:"));
+ printMsg(lvlInfo, format("these derivations will be built:"));
foreach (PathSet::iterator, i, willBuild)
printMsg(lvlInfo, format(" %1%") % *i);
}
if (!willSubstitute.empty()) {
- printMsg(lvlInfo, format("the following paths will be downloaded/copied (%.2f MiB):") %
- (downloadSize / (1024.0 * 1024.0)));
+ printMsg(lvlInfo, format("these paths will be downloaded/copied (%.2f MiB download, %.2f MiB unpacked):")
+ % (downloadSize / (1024.0 * 1024.0))
+ % (narSize / (1024.0 * 1024.0)));
foreach (PathSet::iterator, i, willSubstitute)
printMsg(lvlInfo, format(" %1%") % *i);
}
if (!unknown.empty()) {
- printMsg(lvlInfo, format("don't know how to build the following paths%1%:")
+ printMsg(lvlInfo, format("don't know how to build these paths%1%:")
% (readOnlyMode ? " (may be caused by read-only store access)" : ""));
foreach (PathSet::iterator, i, unknown)
printMsg(lvlInfo, format(" %1%") % *i);
@@ -200,17 +201,16 @@ static void initAndRun(int argc, char * * argv)
remaining.clear();
/* Process default options. */
+ int verbosityDelta = 0;
for (Strings::iterator i = args.begin(); i != args.end(); ++i) {
string arg = *i;
- if (arg == "--verbose" || arg == "-v")
- verbosity = (Verbosity) ((int) verbosity + 1);
+ if (arg == "--verbose" || arg == "-v") verbosityDelta++;
+ else if (arg == "--quiet") verbosityDelta--;
else if (arg == "--log-type") {
++i;
if (i == args.end()) throw UsageError("`--log-type' requires an argument");
setLogType(*i);
}
- else if (arg == "--build-output" || arg == "-B")
- ; /* !!! obsolete - remove eventually */
else if (arg == "--no-build-output" || arg == "-Q")
buildVerbosity = lvlVomit;
else if (arg == "--print-build-trace")
@@ -251,6 +251,9 @@ static void initAndRun(int argc, char * * argv)
else remaining.push_back(arg);
}
+ verbosityDelta += queryIntSetting("verbosity", lvlInfo);
+ verbosity = (Verbosity) (verbosityDelta < 0 ? 0 : verbosityDelta);
+
/* Automatically clean up the temporary roots file when we
exit. */
RemoveTempRoots removeTempRoots __attribute__((unused));
@@ -390,7 +393,7 @@ int main(int argc, char * * argv)
printMsg(lvlError, format("error: %1%%2%") % (showTrace ? e.prefix() : "") % e.msg());
if (e.prefix() != "" && !showTrace)
printMsg(lvlError, "(use `--show-trace' to show detailed location information)");
- return 1;
+ return e.status;
} catch (std::exception & e) {
printMsg(lvlError, format("error: %1%") % e.what());
return 1;
diff --git a/src/libmain/shared.hh b/src/libmain/shared.hh
index f70f6893b..c99810c78 100644
--- a/src/libmain/shared.hh
+++ b/src/libmain/shared.hh
@@ -1,7 +1,7 @@
#ifndef __SHARED_H
#define __SHARED_H
-#include "types.hh"
+#include "util.hh"
 #include <signal.h>
diff --git a/src/libstore/Makefile.am b/src/libstore/Makefile.am
index 9accc3005..e19256b92 100644
--- a/src/libstore/Makefile.am
+++ b/src/libstore/Makefile.am
@@ -10,7 +10,14 @@ pkginclude_HEADERS = \
globals.hh references.hh pathlocks.hh \
worker-protocol.hh
-libstore_la_LIBADD = ../libutil/libutil.la ../boost/format/libformat.la @ADDITIONAL_NETWORK_LIBS@
+libstore_la_LIBADD = ../libutil/libutil.la ../boost/format/libformat.la ${aterm_lib} ${sqlite_lib}
+
+EXTRA_DIST = schema.sql
AM_CXXFLAGS = -Wall \
- -I$(srcdir)/.. -I$(srcdir)/../libutil
+ ${sqlite_include} -I$(srcdir)/.. -I$(srcdir)/../libutil
+
+local-store.lo: schema.sql.hh
+
+%.sql.hh: %.sql
+ ../bin2c/bin2c schema < $< > $@ || (rm $@ && exit 1)
diff --git a/src/libstore/build.cc b/src/libstore/build.cc
index ef2f7adf3..83bd6754a 100644
--- a/src/libstore/build.cc
+++ b/src/libstore/build.cc
@@ -64,6 +64,7 @@ static const uid_t rootUserId = 0;
/* Forward definition. */
class Worker;
+class HookInstance;
/* A pointer to a goal. */
@@ -213,8 +214,14 @@ public:
bool cacheFailure;
+ /* Set if at least one derivation had a BuildError (i.e. permanent
+ failure). */
+ bool permanentFailure;
+
LocalStore & store;
+ boost::shared_ptr<HookInstance> hook;
+
Worker(LocalStore & store);
~Worker();
@@ -263,7 +270,8 @@ public:
/* Wait for input to become available. */
void waitForInput();
-
+
+ unsigned int exitStatus();
};
@@ -615,6 +623,107 @@ void deletePathWrapped(const Path & path)
//////////////////////////////////////////////////////////////////////
+struct HookInstance
+{
+ /* Pipes for talking to the build hook. */
+ Pipe toHook;
+
+ /* Pipe for the hook's standard output/error. */
+ Pipe fromHook;
+
+ /* Pipe for the builder's standard output/error. */
+ Pipe builderOut;
+
+ /* The process ID of the hook. */
+ Pid pid;
+
+ HookInstance();
+
+ ~HookInstance();
+};
+
+
+HookInstance::HookInstance()
+{
+ debug("starting build hook");
+
+ Path buildHook = absPath(getEnv("NIX_BUILD_HOOK"));
+
+ /* Create a pipe to get the output of the child. */
+ fromHook.create();
+
+ /* Create the communication pipes. */
+ toHook.create();
+
+ /* Create a pipe to get the output of the builder. */
+ builderOut.create();
+
+ /* Fork the hook. */
+ pid = fork();
+ switch (pid) {
+
+ case -1:
+ throw SysError("unable to fork");
+
+ case 0:
+ try { /* child */
+
+ commonChildInit(fromHook);
+
+ if (chdir("/") == -1) throw SysError("changing into `/");
+
+ /* Dup the communication pipes. */
+ toHook.writeSide.close();
+ if (dup2(toHook.readSide, STDIN_FILENO) == -1)
+ throw SysError("dupping to-hook read side");
+
+ /* Use fd 4 for the builder's stdout/stderr. */
+ builderOut.readSide.close();
+ if (dup2(builderOut.writeSide, 4) == -1)
+ throw SysError("dupping builder's stdout/stderr");
+
+ execl(buildHook.c_str(), buildHook.c_str(), thisSystem.c_str(),
+ (format("%1%") % maxSilentTime).str().c_str(),
+ (format("%1%") % printBuildTrace).str().c_str(),
+ NULL);
+
+ throw SysError(format("executing `%1%'") % buildHook);
+
+ } catch (std::exception & e) {
+ std::cerr << format("build hook error: %1%") % e.what() << std::endl;
+ }
+ quickExit(1);
+ }
+
+ /* parent */
+ pid.setSeparatePG(true);
+ pid.setKillSignal(SIGTERM);
+ fromHook.writeSide.close();
+ toHook.readSide.close();
+}
+
+
+HookInstance::~HookInstance()
+{
+ try {
+ /* Cleanly shut down the hook by closing its stdin if it's not
+ already building. Otherwise pid's destructor will kill
+ it. */
+ if (pid != -1 && toHook.writeSide != -1) {
+ toHook.writeSide.close();
+ pid.wait(true);
+ }
+ } catch (...) {
+ ignoreException();
+ }
+}
+
+
+//////////////////////////////////////////////////////////////////////
+
+
+typedef enum {rpAccept, rpDecline, rpPostpone} HookReply;
+
class DerivationGoal : public Goal
{
private:
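A HookInstance thus owns three channels: toHook carries requests on the hook's stdin, fromHook carries replies and log chatter from the hook's stdout/stderr, and builderOut is handed to the hook as file descriptor 4 for the remote builder's output. From the hook's side, fd 4 can be opened like this (a sketch; the daemon side is the dup2(builderOut.writeSide, 4) above):

    # Hook-side sketch: write the remote builder's log to fd 4.
    open(my $builderOut, ">&=", 4) or die "cannot open fd 4: $!";
    select((select($builderOut), $| = 1)[0]);   # autoflush the build log
    print $builderOut "remote builder output goes here\n";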
@@ -649,14 +758,11 @@ private:
AutoCloseFD fdLogFile;
/* Pipe for the builder's standard output/error. */
- Pipe logPipe;
-
- /* Whether we're building using a build hook. */
- bool usingBuildHook;
-
- /* Pipes for talking to the build hook (if any). */
- Pipe toHook;
+ Pipe builderOut;
+ /* The build hook. */
+ boost::shared_ptr<HookInstance> hook;
+
/* Whether we're currently doing a chroot build. */
bool useChroot;
@@ -694,12 +800,8 @@ private:
void buildDone();
/* Is the build hook willing to perform the build? */
- typedef enum {rpAccept, rpDecline, rpPostpone} HookReply;
HookReply tryBuildHook();
- /* Synchronously wait for a build hook to finish. */
- void terminateBuildHook(bool kill = false);
-
/* Start building a derivation. */
void startBuilder();
@@ -711,10 +813,6 @@ private:
/* Open a log file and a pipe to it. */
Path openLogFile();
- /* Common initialisation to be performed in child processes (i.e.,
- both in builders and in build hooks). */
- void initChild();
-
/* Delete the temporary directory, if we have one. */
void deleteTmpDir(bool force);
@@ -742,6 +840,7 @@ DerivationGoal::DerivationGoal(const Path & drvPath, Worker & worker)
trace("created");
}
+
DerivationGoal::~DerivationGoal()
{
/* Careful: we should never ever throw an exception from a
@@ -754,6 +853,7 @@ DerivationGoal::~DerivationGoal()
}
}
+
void DerivationGoal::killChild()
{
if (pid != -1) {
@@ -778,6 +878,8 @@ void DerivationGoal::killChild()
assert(pid == -1);
}
+
+ hook.reset();
}
@@ -887,7 +989,10 @@ void DerivationGoal::outputsSubstituted()
foreach (PathSet::iterator, i, drv.inputSrcs)
addWaitee(worker.makeSubstitutionGoal(*i));
- state = &DerivationGoal::inputsRealised;
+ if (waitees.empty()) /* to prevent hang (no wake-up event) */
+ inputsRealised();
+ else
+ state = &DerivationGoal::inputsRealised;
}
@@ -961,6 +1066,16 @@ PathSet outputPaths(const DerivationOutputs & outputs)
}
+static bool canBuildLocally(const string & platform)
+{
+ return platform == thisSystem
+#ifdef CAN_DO_LINUX32_BUILDS
+ || (platform == "i686-linux" && thisSystem == "x86_64-linux")
+#endif
+ ;
+}
+
+
void DerivationGoal::tryToBuild()
{
trace("trying to build");
@@ -1028,28 +1143,36 @@ void DerivationGoal::tryToBuild()
foreach (DerivationOutputs::iterator, i, drv.outputs)
if (pathFailed(i->second.path)) return;
+ /* Don't do a remote build if the derivation has the attribute
+ `preferLocalBuild' set. */
+ bool preferLocalBuild =
+ drv.env["preferLocalBuild"] == "1" && canBuildLocally(drv.platform);
+
/* Is the build hook willing to accept this job? */
- usingBuildHook = true;
- switch (tryBuildHook()) {
- case rpAccept:
- /* Yes, it has started doing so. Wait until we get EOF
- from the hook. */
- state = &DerivationGoal::buildDone;
- return;
- case rpPostpone:
- /* Not now; wait until at least one child finishes. */
- worker.waitForAWhile(shared_from_this());
- outputLocks.unlock();
- return;
- case rpDecline:
- /* We should do it ourselves. */
- break;
+ if (!preferLocalBuild) {
+ switch (tryBuildHook()) {
+ case rpAccept:
+ /* Yes, it has started doing so. Wait until we get
+ EOF from the hook. */
+ state = &DerivationGoal::buildDone;
+ return;
+ case rpPostpone:
+ /* Not now; wait until at least one child finishes or
+ the wake-up timeout expires. */
+ worker.waitForAWhile(shared_from_this());
+ outputLocks.unlock();
+ return;
+ case rpDecline:
+ /* We should do it ourselves. */
+ break;
+ }
}
-
- usingBuildHook = false;
-
- /* Make sure that we are allowed to start a build. */
- if (worker.getNrLocalBuilds() >= maxBuildJobs) {
+
+ /* Make sure that we are allowed to start a build. If this
+ derivation prefers to be done locally, do it even if
+ maxBuildJobs is 0. */
+ unsigned int curBuilds = worker.getNrLocalBuilds();
+ if (curBuilds >= maxBuildJobs && !(preferLocalBuild && curBuilds == 0)) {
worker.waitForBuildSlot(shared_from_this());
outputLocks.unlock();
return;
@@ -1067,6 +1190,7 @@ void DerivationGoal::tryToBuild()
if (printBuildTrace)
printMsg(lvlError, format("@ build-failed %1% %2% %3% %4%")
% drvPath % drv.outputs["out"].path % 0 % e.msg());
+ worker.permanentFailure = true;
amDone(ecFailed);
return;
}
@@ -1085,18 +1209,29 @@ void DerivationGoal::buildDone()
to have terminated. In fact, the builder could also have
simply have closed its end of the pipe --- just don't do that
:-) */
- /* !!! this could block! security problem! solution: kill the
- child */
- pid_t savedPid = pid;
- int status = pid.wait(true);
+ int status;
+ pid_t savedPid;
+ if (hook) {
+ savedPid = hook->pid;
+ status = hook->pid.wait(true);
+ } else {
+ /* !!! this could block! security problem! solution: kill the
+ child */
+ savedPid = pid;
+ status = pid.wait(true);
+ }
debug(format("builder process for `%1%' finished") % drvPath);
/* So the child is gone now. */
worker.childTerminated(savedPid);
-
+
/* Close the read side of the logger pipe. */
- logPipe.readSide.close();
+ if (hook) {
+ hook->builderOut.readSide.close();
+ hook->fromHook.readSide.close();
+ }
+ else builderOut.readSide.close();
/* Close the log file. */
fdLogFile.close();
@@ -1169,11 +1304,11 @@ void DerivationGoal::buildDone()
/* When using a build hook, the hook will return a remote
build failure using exit code 100. Anything else is a hook
problem. */
- bool hookError = usingBuildHook &&
+ bool hookError = hook &&
(!WIFEXITED(status) || WEXITSTATUS(status) != 100);
if (printBuildTrace) {
- if (usingBuildHook && hookError)
+ if (hook && hookError)
printMsg(lvlError, format("@ hook-failed %1% %2% %3% %4%")
% drvPath % drv.outputs["out"].path % status % e.msg());
else
@@ -1192,6 +1327,7 @@ void DerivationGoal::buildDone()
foreach (DerivationOutputs::iterator, i, drv.outputs)
worker.store.registerFailedPath(i->second.path);
+ worker.permanentFailure = !hookError && !fixedOutput;
amDone(ecFailed);
return;
}
@@ -1208,162 +1344,85 @@ void DerivationGoal::buildDone()
}
-DerivationGoal::HookReply DerivationGoal::tryBuildHook()
+HookReply DerivationGoal::tryBuildHook()
{
- if (!useBuildHook) return rpDecline;
- Path buildHook = getEnv("NIX_BUILD_HOOK");
- if (buildHook == "") return rpDecline;
- buildHook = absPath(buildHook);
+ if (!useBuildHook || getEnv("NIX_BUILD_HOOK") == "") return rpDecline;
- /* Create a directory where we will store files used for
- communication between us and the build hook. */
- tmpDir = createTempDir();
-
- /* Create the log file and pipe. */
- Path logFile = openLogFile();
+ if (!worker.hook)
+ worker.hook = boost::shared_ptr<HookInstance>(new HookInstance);
- /* Create the communication pipes. */
- toHook.create();
+ /* Tell the hook about system features (beyond the system type)
+ required from the build machine. (The hook could parse the
+ drv file itself, but this is easier.) */
+ Strings features = tokenizeString(drv.env["requiredSystemFeatures"]);
+ foreach (Strings::iterator, i, features) checkStoreName(*i); /* !!! abuse */
- /* Fork the hook. */
- pid = fork();
- switch (pid) {
-
- case -1:
- throw SysError("unable to fork");
-
- case 0:
- try { /* child */
-
- initChild();
-
- string s;
- foreach (DerivationOutputs::const_iterator, i, drv.outputs)
- s += i->second.path + " ";
- if (setenv("NIX_HELD_LOCKS", s.c_str(), 1))
- throw SysError("setting an environment variable");
-
- execl(buildHook.c_str(), buildHook.c_str(),
- (worker.getNrLocalBuilds() < maxBuildJobs ? (string) "1" : "0").c_str(),
- thisSystem.c_str(),
- drv.platform.c_str(),
- drvPath.c_str(),
- (format("%1%") % maxSilentTime).str().c_str(),
- NULL);
-
- throw SysError(format("executing `%1%'") % buildHook);
-
- } catch (std::exception & e) {
- std::cerr << format("build hook error: %1%") % e.what() << std::endl;
- }
- quickExit(1);
- }
-
- /* parent */
- pid.setSeparatePG(true);
- pid.setKillSignal(SIGTERM);
- logPipe.writeSide.close();
- worker.childStarted(shared_from_this(),
- pid, singleton<set<int> >(logPipe.readSide), false, false);
-
- toHook.readSide.close();
+ /* Send the request to the hook. */
+ writeLine(worker.hook->toHook.writeSide, (format("%1% %2% %3% %4%")
+ % (worker.getNrLocalBuilds() < maxBuildJobs ? "1" : "0")
+ % drv.platform % drvPath % concatStringsSep(",", features)).str());
/* Read the first line of input, which should be a word indicating
whether the hook wishes to perform the build. */
string reply;
- try {
- while (true) {
- string s = readLine(logPipe.readSide);
- if (string(s, 0, 2) == "# ") {
- reply = string(s, 2);
- break;
- }
- handleChildOutput(logPipe.readSide, s + "\n");
+ while (true) {
+ string s = readLine(worker.hook->fromHook.readSide);
+ if (string(s, 0, 2) == "# ") {
+ reply = string(s, 2);
+ break;
}
- } catch (Error & e) {
- terminateBuildHook(true);
- throw;
+ s += "\n";
+ writeToStderr((unsigned char *) s.c_str(), s.size());
}
debug(format("hook reply is `%1%'") % reply);
- if (reply == "decline" || reply == "postpone") {
- /* Clean up the child. !!! hacky / should verify */
- terminateBuildHook();
+ if (reply == "decline" || reply == "postpone")
return reply == "decline" ? rpDecline : rpPostpone;
- }
+ else if (reply != "accept")
+ throw Error(format("bad hook reply `%1%'") % reply);
- else if (reply == "accept") {
+ printMsg(lvlTalkative, format("using hook to build path(s) %1%")
+ % showPaths(outputPaths(drv.outputs)));
- printMsg(lvlInfo, format("using hook to build path(s) %1%")
- % showPaths(outputPaths(drv.outputs)));
+ hook = worker.hook;
+ worker.hook.reset();
- /* Write the information that the hook needs to perform the
- build, i.e., the set of input paths, the set of output
- paths, and the references (pointer graph) in the input
- paths. */
+ /* Tell the hook all the inputs that have to be copied to the
+ remote system. This unfortunately has to contain the entire
+ derivation closure to ensure that the validity invariant holds
+ on the remote system. (I.e., it's unfortunate that we have to
+ list it since the remote system *probably* already has it.) */
+ PathSet allInputs;
+ allInputs.insert(inputPaths.begin(), inputPaths.end());
+ computeFSClosure(drvPath, allInputs);
- Path inputListFN = tmpDir + "/inputs";
- Path outputListFN = tmpDir + "/outputs";
- Path referencesFN = tmpDir + "/references";
-
- /* The `inputs' file lists all inputs that have to be copied
- to the remote system. This unfortunately has to contain
- the entire derivation closure to ensure that the validity
- invariant holds on the remote system. (I.e., it's
- unfortunate that we have to list it since the remote system
- *probably* already has it.) */
- PathSet allInputs;
- allInputs.insert(inputPaths.begin(), inputPaths.end());
- computeFSClosure(drvPath, allInputs);
+ string s;
+ foreach (PathSet::iterator, i, allInputs) s += *i + " ";
+ writeLine(hook->toHook.writeSide, s);
- string s;
- foreach (PathSet::iterator, i, allInputs) s += *i + "\n";
+ /* Tell the hooks the outputs that have to be copied back from the
+ remote system. */
+ s = "";
+ foreach (DerivationOutputs::iterator, i, drv.outputs)
+ s += i->second.path + " ";
+ writeLine(hook->toHook.writeSide, s);
+
+ hook->toHook.writeSide.close();
+
+ /* Create the log file and pipe. */
+ Path logFile = openLogFile();
+
+ set<int> fds;
+ fds.insert(hook->fromHook.readSide);
+ fds.insert(hook->builderOut.readSide);
+ worker.childStarted(shared_from_this(), hook->pid, fds, false, false);
+
+ if (printBuildTrace)
+ printMsg(lvlError, format("@ build-started %1% %2% %3% %4%")
+ % drvPath % drv.outputs["out"].path % drv.platform % logFile);
- writeFile(inputListFN, s);
-
- /* The `outputs' file lists all outputs that have to be copied
- from the remote system. */
- s = "";
- foreach (DerivationOutputs::iterator, i, drv.outputs)
- s += i->second.path + "\n";
- writeFile(outputListFN, s);
-
- /* The `references' file has exactly the format accepted by
- `nix-store --register-validity'. */
- writeFile(referencesFN,
- makeValidityRegistration(allInputs, true, false));
-
- /* Tell the hook to proceed. */
- writeLine(toHook.writeSide, "okay");
- toHook.writeSide.close();
-
- if (printBuildTrace)
- printMsg(lvlError, format("@ build-started %1% %2% %3% %4%")
- % drvPath % drv.outputs["out"].path % drv.platform % logFile);
-
- return rpAccept;
- }
-
- else throw Error(format("bad hook reply `%1%'") % reply);
-}
-
-
-void DerivationGoal::terminateBuildHook(bool kill)
-{
- debug("terminating build hook");
- pid_t savedPid = pid;
- if (kill)
- pid.kill();
- else
- pid.wait(true);
- /* `false' means don't wake up waiting goals, since we want to
- keep this build slot ourselves. */
- worker.childTerminated(savedPid, false);
- toHook.writeSide.close();
- fdLogFile.close();
- logPipe.readSide.close();
- deleteTmpDir(true); /* get rid of the hook's temporary directory */
+ return rpAccept;
}
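This fixes the wire protocol of the rewritten hook: the daemon starts one hook process, passing the local system type, the max silent time and the build-trace flag as arguments; for each job it writes a single request line to the hook's stdin and treats the first output line starting with `# ' as the reply (accept, decline or postpone), passing everything else through to stderr; on accept it then sends the input and output path lists and closes the hook's stdin. A minimal hook that declines every job, sketched in Perl (the stock client for this protocol is the build-remote script):

    #! /usr/bin/perl -w
    use strict;

    my ($thisSystem, $maxSilentTime, $printBuildTrace) = @ARGV;

    while (<STDIN>) {
        # Request: "<can-build-locally> <platform> <drv-path> <features,...>"
        my ($amWilling, $neededSystem, $drvPath, $features) = split;
        print STDERR "# decline\n";   # or "# accept" / "# postpone"
    }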
@@ -1380,11 +1439,7 @@ void DerivationGoal::startBuilder()
format("building path(s) %1%") % showPaths(outputPaths(drv.outputs)))
/* Right platform? */
- if (drv.platform != thisSystem
-#ifdef CAN_DO_LINUX32_BUILDS
- && !(drv.platform == "i686-linux" && thisSystem == "x86_64-linux")
-#endif
- )
+ if (!canBuildLocally(drv.platform))
throw Error(
format("a `%1%' is required to build `%3%', but I am a `%2%'")
% drv.platform % thisSystem % drvPath);
@@ -1499,7 +1554,7 @@ void DerivationGoal::startBuilder()
/* Write closure info to `fileName'. */
writeFile(tmpDir + "/" + fileName,
- makeValidityRegistration(paths, false, false));
+ worker.store.makeValidityRegistration(paths, false, false));
}
@@ -1549,6 +1604,9 @@ void DerivationGoal::startBuilder()
if (fixedOutput) useChroot = false;
+ /* Hack to allow derivations to disable chroot builds. */
+ if (drv.env["__noChroot"] == "1") useChroot = false;
+
if (useChroot) {
#if CHROOT_ENABLED
/* Create a temporary directory in which we set up the chroot
@@ -1572,7 +1630,7 @@ void DerivationGoal::startBuilder()
/* Create a /etc/passwd with entries for the build user and the
nobody account. The latter is kind of a hack to support
- Samba-in-QEMU. */
+ Samba-in-QEMU. */
createDirs(chrootRootDir + "/etc");
writeFile(chrootRootDir + "/etc/passwd",
@@ -1580,13 +1638,13 @@ void DerivationGoal::startBuilder()
"nixbld:x:%1%:%2%:Nix build user:/:/noshell\n"
"nobody:x:65534:65534:Nobody:/:/noshell\n")
% (buildUser.enabled() ? buildUser.getUID() : getuid())
- % (buildUser.enabled() ? buildUser.getGID() : getgid())).str());
+ % (buildUser.enabled() ? buildUser.getGID() : getgid())).str());
/* Declare the build user's group so that programs get a consistent
- view of the system (e.g., "id -gn"). */
- writeFile(chrootRootDir + "/etc/group",
- (format("nixbld:!:%1%:\n")
- % (buildUser.enabled() ? buildUser.getGID() : getgid())).str());
+ view of the system (e.g., "id -gn"). */
+ writeFile(chrootRootDir + "/etc/group",
+ (format("nixbld:!:%1%:\n")
+ % (buildUser.enabled() ? buildUser.getGID() : getgid())).str());
/* Bind-mount a user-configurable set of directories from the
host file system. The `/dev/pts' directory must be mounted
@@ -1645,9 +1703,12 @@ void DerivationGoal::startBuilder()
printMsg(lvlChatty, format("executing builder `%1%'") %
drv.builder);
- /* Create the log file and pipe. */
+ /* Create the log file. */
Path logFile = openLogFile();
+ /* Create a pipe to get the output of the builder. */
+ builderOut.create();
+
/* Fork a child to build the package. Note that while we
currently use forks to run and wait for the children, it
shouldn't be hard to use threads for this on systems where
@@ -1661,7 +1722,7 @@ void DerivationGoal::startBuilder()
case 0:
/* Warning: in the child we should absolutely not make any
- Berkeley DB calls! */
+ SQLite calls! */
try { /* child */
@@ -1688,18 +1749,23 @@ void DerivationGoal::startBuilder()
throw SysError(format("bind mount from `%1%' to `%2%' failed") % source % target);
}
- /* Do the chroot(). initChild() will do a chdir() to
- the temporary build directory to make sure the
- current directory is in the chroot. (Actually the
- order doesn't matter, since due to the bind mount
- tmpDir and tmpRootDit/tmpDir are the same
- directories.) */
+ /* Do the chroot(). Below we do a chdir() to the
+ temporary build directory to make sure the current
+ directory is in the chroot. (Actually the order
+ doesn't matter, since due to the bind mount tmpDir
+ and tmpRootDir/tmpDir are the same directories.) */
if (chroot(chrootRootDir.c_str()) == -1)
throw SysError(format("cannot change root directory to `%1%'") % chrootRootDir);
}
#endif
- initChild();
+ commonChildInit(builderOut);
+
+ if (chdir(tmpDir.c_str()) == -1)
+ throw SysError(format("changing into `%1%'") % tmpDir);
+
+ /* Close all other file descriptors. */
+ closeMostFDs(set<int>());
#ifdef CAN_DO_LINUX32_BUILDS
if (drv.platform == "i686-linux" && thisSystem == "x86_64-linux") {
@@ -1720,10 +1786,10 @@ void DerivationGoal::startBuilder()
/* If we are running in `build-users' mode, then switch to
the user we allocated above. Make sure that we drop
- all root privileges. Note that initChild() above has
- closed all file descriptors except std*, so that's
- safe. Also note that setuid() when run as root sets
- the real, effective and saved UIDs. */
+ all root privileges. Note that above we have closed
+ all file descriptors except std*, so that's safe. Also
+ note that setuid() when run as root sets the real,
+ effective and saved UIDs. */
if (buildUser.enabled()) {
printMsg(lvlChatty, format("switching to user `%1%'") % buildUser.getUser());
@@ -1777,9 +1843,9 @@ void DerivationGoal::startBuilder()
/* parent */
pid.setSeparatePG(true);
- logPipe.writeSide.close();
+ builderOut.writeSide.close();
worker.childStarted(shared_from_this(), pid,
- singleton<set<int> >(logPipe.readSide), true, true);
+ singleton<set<int> >(builderOut.readSide), true, true);
if (printBuildTrace) {
printMsg(lvlError, format("@ build-started %1% %2% %3% %4%")
@@ -1811,12 +1877,12 @@ PathSet parseReferenceSpecifiers(const Derivation & drv, string attr)
void DerivationGoal::computeClosure()
{
 map<Path, PathSet> allReferences;
- map<Path, Hash> contentHashes;
+ map<Path, HashResult> contentHashes;
/* When using a build hook, the build hook can register the output
as valid (by doing `nix-store --import'). If so we don't have
to do anything here. */
- if (usingBuildHook) {
+ if (hook) {
bool allValid = true;
foreach (DerivationOutputs::iterator, i, drv.outputs)
if (!worker.store.isValidPath(i->second.path)) allValid = false;
@@ -1868,7 +1934,7 @@ void DerivationGoal::computeClosure()
if (ht == htUnknown)
throw BuildError(format("unknown hash algorithm `%1%'") % algo);
Hash h = parseHash(ht, i->second.hash);
- Hash h2 = recursive ? hashPath(ht, path) : hashFile(ht, path);
+ Hash h2 = recursive ? hashPath(ht, path).first : hashFile(ht, path);
if (h != h2)
throw BuildError(
format("output path `%1%' should have %2% hash `%3%', instead has `%4%'")
@@ -1882,7 +1948,7 @@ void DerivationGoal::computeClosure()
contained in it. Compute the SHA-256 NAR hash at the same
time. The hash is stored in the database so that we can
verify later on whether nobody has messed with the store. */
- Hash hash;
+ HashResult hash;
PathSet references = scanForReferences(path, allPaths, hash);
contentHashes[path] = hash;
@@ -1911,14 +1977,18 @@ void DerivationGoal::computeClosure()
}
/* Register each output path as valid, and register the sets of
- paths referenced by each of them. !!! this should be
- atomic so that either all paths are registered as valid, or
- none are. */
- foreach (DerivationOutputs::iterator, i, drv.outputs)
- worker.store.registerValidPath(i->second.path,
- contentHashes[i->second.path],
- allReferences[i->second.path],
- drvPath);
+ paths referenced by each of them. */
+ ValidPathInfos infos;
+ foreach (DerivationOutputs::iterator, i, drv.outputs) {
+ ValidPathInfo info;
+ info.path = i->second.path;
+ info.hash = contentHashes[i->second.path].first;
+ info.narSize = contentHashes[i->second.path].second;
+ info.references = allReferences[i->second.path];
+ info.deriver = drvPath;
+ infos.push_back(info);
+ }
+ worker.store.registerValidPaths(infos);
/* It is now safe to delete the lock files, since all future
lockers will see that the output paths are valid; they will not
@@ -1944,32 +2014,10 @@ Path DerivationGoal::openLogFile()
if (fdLogFile == -1)
throw SysError(format("creating log file `%1%'") % logFileName);
- /* Create a pipe to get the output of the child. */
- logPipe.create();
-
return logFileName;
}
-void DerivationGoal::initChild()
-{
- commonChildInit(logPipe);
-
- if (chdir(tmpDir.c_str()) == -1)
- throw SysError(format("changing into `%1%'") % tmpDir);
-
- /* When running a hook, dup the communication pipes. */
- if (usingBuildHook) {
- toHook.writeSide.close();
- if (dup2(toHook.readSide, STDIN_FILENO) == -1)
- throw SysError("dupping to-hook read side");
- }
-
- /* Close all other file descriptors. */
- closeMostFDs(set());
-}
-
-
void DerivationGoal::deleteTmpDir(bool force)
{
if (tmpDir != "") {
@@ -1989,19 +2037,22 @@ void DerivationGoal::deleteTmpDir(bool force)
void DerivationGoal::handleChildOutput(int fd, const string & data)
{
- if (fd == logPipe.readSide) {
+ if ((hook && fd == hook->builderOut.readSide) ||
+ (!hook && fd == builderOut.readSide))
+ {
if (verbosity >= buildVerbosity)
writeToStderr((unsigned char *) data.c_str(), data.size());
writeFull(fdLogFile, (unsigned char *) data.c_str(), data.size());
}
- else abort();
+ if (hook && fd == hook->fromHook.readSide)
+ writeToStderr((unsigned char *) data.c_str(), data.size());
}
void DerivationGoal::handleEOF(int fd)
{
- if (fd == logPipe.readSide) worker.wakeUp(shared_from_this());
+ worker.wakeUp(shared_from_this());
}
@@ -2345,10 +2396,15 @@ void SubstitutionGoal::finished()
canonicalisePathMetaData(storePath);
- Hash contentHash = hashPath(htSHA256, storePath);
-
- worker.store.registerValidPath(storePath, contentHash,
- info.references, info.deriver);
+ HashResult hash = hashPath(htSHA256, storePath);
+
+ ValidPathInfo info2;
+ info2.path = storePath;
+ info2.hash = hash.first;
+ info2.narSize = hash.second;
+ info2.references = info.references;
+ info2.deriver = info.deriver;
+ worker.store.registerValidPath(info2);
outputLock->setDeletion(true);
@@ -2395,6 +2451,7 @@ Worker::Worker(LocalStore & store)
nrLocalBuilds = 0;
lastWokenUp = 0;
cacheFailure = queryBoolSetting("build-cache-failure", false);
+ permanentFailure = false;
}
@@ -2721,6 +2778,11 @@ void Worker::waitForInput()
}
+unsigned int Worker::exitStatus()
+{
+ return permanentFailure ? 100 : 1;
+}
+
//////////////////////////////////////////////////////////////////////
@@ -2747,7 +2809,7 @@ void LocalStore::buildDerivations(const PathSet & drvPaths)
}
if (!failed.empty())
- throw Error(format("build of %1% failed") % showPaths(failed));
+ throw Error(format("build of %1% failed") % showPaths(failed), worker.exitStatus());
}
@@ -2763,7 +2825,7 @@ void LocalStore::ensurePath(const Path & path)
worker.run(goals);
if (goal->getExitCode() != Goal::ecSuccess)
- throw Error(format("path `%1%' does not exist and cannot be created") % path);
+ throw Error(format("path `%1%' does not exist and cannot be created") % path, worker.exitStatus());
}
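Errors from failed builds now carry a status code: buildDerivations and ensurePath throw with Worker::exitStatus(), and main() returns e.status, so the process exits 100 when at least one derivation failed permanently and 1 for other failures (e.g. a hook problem). Callers can use this to decide whether a retry is worthwhile; a sketch (the .drv path is a placeholder):

    my $drvPath = "/nix/store/ccc...-example.drv";   # placeholder
    system("nix-store", "-r", $drvPath);
    my $rc = $? >> 8;
    if ($rc == 100) {
        print STDERR "permanent failure: the build itself failed\n";
    } elsif ($rc != 0) {
        print STDERR "possibly transient failure: may be worth retrying\n";
    }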
diff --git a/src/libstore/derivations.hh b/src/libstore/derivations.hh
index 95e49d42c..c14be48af 100644
--- a/src/libstore/derivations.hh
+++ b/src/libstore/derivations.hh
@@ -1,10 +1,10 @@
#ifndef __DERIVATIONS_H
#define __DERIVATIONS_H
-#include "hash.hh"
-
 #include <map>