Remove manifest support

Manifests have been superseded by binary caches for years. This also
gets rid of nix-pull, nix-generate-patches and bsdiff/bspatch.
Eelco Dolstra 2016-04-11 14:16:56 +02:00
parent af4fb6ef61
commit 867967265b
37 changed files with 20 additions and 2143 deletions

.gitignore

@ -34,7 +34,6 @@ Makefile.config
# /scripts/ # /scripts/
/scripts/nix-profile.sh /scripts/nix-profile.sh
/scripts/nix-pull
/scripts/nix-push /scripts/nix-push
/scripts/nix-switch /scripts/nix-switch
/scripts/nix-collect-garbage /scripts/nix-collect-garbage
@ -43,11 +42,8 @@ Makefile.config
/scripts/nix-channel /scripts/nix-channel
/scripts/nix-build /scripts/nix-build
/scripts/nix-copy-closure /scripts/nix-copy-closure
/scripts/nix-generate-patches
/scripts/NixConfig.pm /scripts/NixConfig.pm
/scripts/NixManifest.pm /scripts/NixManifest.pm
/scripts/GeneratePatches.pm
/scripts/download-using-manifests.pl
/scripts/copy-from-other-stores.pl /scripts/copy-from-other-stores.pl
/scripts/download-from-binary-cache.pl /scripts/download-from-binary-cache.pl
/scripts/find-runtime-roots.pl /scripts/find-runtime-roots.pl
@ -55,10 +51,6 @@ Makefile.config
/scripts/nix-reduce-build /scripts/nix-reduce-build
/scripts/nix-http-export.cgi /scripts/nix-http-export.cgi
# /src/bsdiff-4.3/
/src/bsdiff-4.3/bsdiff
/src/bsdiff-4.3/bspatch
# /src/libexpr/ # /src/libexpr/
/src/libexpr/lexer-tab.cc /src/libexpr/lexer-tab.cc
/src/libexpr/lexer-tab.hh /src/libexpr/lexer-tab.hh


@@ -14,7 +14,6 @@ makefiles = \
 src/download-via-ssh/local.mk \
 src/nix-log2xml/local.mk \
 src/nix-prefetch-url/local.mk \
-src/bsdiff-4.3/local.mk \
 perl/local.mk \
 scripts/local.mk \
 corepkgs/local.mk \


@@ -435,18 +435,6 @@ flag, e.g. <literal>--option gc-keep-outputs false</literal>.</para>
 </varlistentry>
-<varlistentry><term><literal>force-manifest</literal></term>
-<listitem><para>If this option is set to <literal>false</literal>
-(default) and a Nix channel provides both a manifest and a binary
-cache, only the binary cache will be used. If set to
-<literal>true</literal>, the manifest will be fetched as well.
-This is useful if you want to use binary patches (which are
-currently not supported by binary caches).</para></listitem>
-</varlistentry>
 <varlistentry><term><literal>system</literal></term>
 <listitem><para>This option specifies the canonical Nix system
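For reference, the removed setting was an ordinary nix.conf option; a minimal
sketch of how it used to be enabled before this commit (illustrative only):

  force-manifest = true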


@@ -73,11 +73,10 @@ condition="manual">See also <xref linkend="sec-channels"
 <listitem><para>Downloads the Nix expressions of all subscribed
 channels (or only those included in
-<replaceable>names</replaceable> if specified), makes them the
+<replaceable>names</replaceable> if specified) and makes them the
 default for <command>nix-env</command> operations (by symlinking
-them from the directory <filename>~/.nix-defexpr</filename>), and
-performs a <command>nix-pull</command> on the manifests of all
-channels to make pre-built binaries available.</para></listitem>
+them from the directory
+<filename>~/.nix-defexpr</filename>).</para></listitem>
 </varlistentry>
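An illustrative invocation of the behaviour described above, in the manual's
own screen-example style (the channel name is just an example):

  $ nix-channel --update nixpkgs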
@@ -187,16 +186,6 @@ following files:</para>
 </varlistentry>
-<varlistentry><term><filename>MANIFEST.bz2</filename></term>
-<listitem><para>(Deprecated in favour of binary caches.) A
-manifest as created by <command>nix-push</command>. Only used if
-<filename>binary-cache-url</filename> is not present or if the
-<filename>nix.conf</filename> option
-<option>force-manifest</option> is set.</para></listitem>
-</varlistentry>
 </variablelist>
 </refsection>


@@ -1,44 +0,0 @@
<refentry xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-nix-generate-patches">
<refmeta>
<refentrytitle>nix-generate-patches</refentrytitle>
<manvolnum>1</manvolnum>
<refmiscinfo class="source">Nix</refmiscinfo>
<refmiscinfo class="version"><xi:include href="../version.txt" parse="text"/></refmiscinfo>
</refmeta>
<refnamediv>
<refname>nix-generate-patches</refname>
<refpurpose>generates binary patches between NAR files</refpurpose>
</refnamediv>
<refsynopsisdiv>
<cmdsynopsis>
<command>nix-generate-patches</command>
<arg choice='plain'><replaceable>NAR-DIR</replaceable></arg>
<arg choice='plain'><replaceable>PATCH-DIR</replaceable></arg>
<arg choice='plain'><replaceable>PATCH-URI</replaceable></arg>
<arg choice='plain'><replaceable>OLD-MANIFEST</replaceable></arg>
<arg choice='plain'><replaceable>NEW-MANIFEST</replaceable></arg>
</cmdsynopsis>
</refsynopsisdiv>
<refsection><title>Description</title>
<para>The command <command>nix-generate-patches</command> generates
binary patches between NAR files listed in OLD-MANIFEST and NEW-MANIFEST.
The patches are written to the directory PATCH-DIR, and the prefix
PATCH-URI is used to generate URIs for the patches. The patches are
added to NEW-MANIFEST. All NARs are required to exist in NAR-DIR.
Patches are generated between succeeding versions of packages with
the same name.</para>
</refsection>
</refentry>
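For reference, a hypothetical invocation of the removed command (directories,
URL and manifest names are made up for illustration):

  $ nix-generate-patches /data/nars /data/patches http://example.org/patches MANIFEST.old MANIFEST.new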


@@ -146,9 +146,7 @@ The elements are as follows:
 <varlistentry><term><replaceable>manifestURL</replaceable></term>
-<listitem><para>The manifest to be pulled by
-<command>nix-pull</command>. The manifest must contain
-<replaceable>outPath</replaceable>.</para></listitem>
+<listitem><para>Obsolete.</para></listitem>
 </varlistentry>


@@ -1,54 +0,0 @@
<refentry xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
version="5.0"
xml:id="sec-nix-pull">
<refmeta>
<refentrytitle>nix-pull</refentrytitle>
<manvolnum>1</manvolnum>
<refmiscinfo class="source">Nix</refmiscinfo>
<refmiscinfo class="version"><xi:include href="../version.txt" parse="text"/></refmiscinfo>
</refmeta>
<refnamediv>
<refname>nix-pull</refname>
<refpurpose>register availability of pre-built binaries (deprecated)</refpurpose>
</refnamediv>
<refsynopsisdiv>
<cmdsynopsis>
<command>nix-pull</command>
<arg choice='plain'><replaceable>url</replaceable></arg>
</cmdsynopsis>
</refsynopsisdiv>
<refsection><title>Description</title>
<note><para>This command and the use of manifests is deprecated. It is
better to use binary caches.</para></note>
<para>The command <command>nix-pull</command> obtains a list of
pre-built store paths from the URL <replaceable>url</replaceable>, and
for each of these store paths, registers a substitute derivation that
downloads and unpacks it into the Nix store. This is used to speed up
installations: if you attempt to install something that has already
been built and stored into the network cache, Nix can transparently
re-use the pre-built store paths.</para>
<para>The file at <replaceable>url</replaceable> must be compatible
with the files created by <replaceable>nix-push</replaceable>.</para>
</refsection>
<refsection><title>Examples</title>
<screen>
$ nix-pull https://nixos.org/releases/nixpkgs/nixpkgs-15.05pre54468.69858d7/MANIFEST</screen>
</refsection>
</refentry>


@@ -73,8 +73,7 @@ automatically.</para>
 <listitem><para>Optionally, a single <emphasis>manifest</emphasis>
 file is created that contains the same metadata as the
 <filename>.narinfo</filename> files. This is for compatibility with
-Nix versions prior to 1.2 (see <command>nix-pull</command> for
-details).</para></listitem>
+Nix versions prior to 1.2.</para></listitem>
 <listitem><para>A file named <option>nix-cache-info</option> is
 placed in the destination directory. The existence of this file
@@ -135,7 +134,7 @@ automatically.</para>
 <varlistentry><term><option>--manifest</option></term>
 <listitem><para>Force the generation of a manifest suitable for
-use by <command>nix-pull</command>. The manifest is stored as
+use by old versions of Nix. The manifest is stored as
 <filename><replaceable>dest-dir</replaceable>/MANIFEST</filename>.</para></listitem>
 </varlistentry>
@@ -203,20 +202,6 @@ $ nix-push --dest /tmp/cache $(nix-instantiate -A thunderbird)
 </para>
-<para>To generate a manifest suitable for <command>nix-pull</command>:
-<screen>
-$ nix-push --dest /tmp/cache $(nix-build -A thunderbird) --manifest
-</screen>
-On another machine you can then do:
-<screen>
-$ nix-pull http://example.org/cache
-</screen>
-to cause the binaries to be used by subsequent Nix operations.</para>
 <para>To generate a signed binary cache, you must first generate a key
 pair, in this example called <literal>cache.example.org-1</literal>,
 storing the secret key in <filename>./sk</filename> and the public key


@@ -13,14 +13,10 @@ work with Nix.</para>
 <xi:include href="nix-collect-garbage.xml" />
 <xi:include href="nix-copy-closure.xml" />
 <xi:include href="nix-daemon.xml" />
-<!--
-<xi:include href="nix-generate-patches.xml" />
--->
 <xi:include href="nix-hash.xml" />
 <xi:include href="nix-install-package.xml" />
 <xi:include href="nix-instantiate.xml" />
 <xi:include href="nix-prefetch-url.xml" />
-<xi:include href="nix-pull.xml" />
 <xi:include href="nix-push.xml" />
 </chapter>


@@ -39,7 +39,7 @@ dist-files += $(d)/manual.xmli $(d)/version.txt $(d)/manual.is-valid
 # Generate man pages.
 man-pages := $(foreach n, \
 nix-env.1 nix-build.1 nix-shell.1 nix-store.1 nix-instantiate.1 \
-nix-collect-garbage.1 nix-push.1 nix-pull.1 \
+nix-collect-garbage.1 nix-push.1 \
 nix-prefetch-url.1 nix-channel.1 \
 nix-install-package.1 nix-hash.1 nix-copy-closure.1 \
 nix.conf.5 nix-daemon.8, \


@@ -7,7 +7,6 @@ $version = "@PACKAGE_VERSION@";
 $binDir = $ENV{"NIX_BIN_DIR"} || "@bindir@";
 $libexecDir = $ENV{"NIX_LIBEXEC_DIR"} || "@libexecdir@";
 $stateDir = $ENV{"NIX_STATE_DIR"} || "@localstatedir@/nix";
-$manifestDir = $ENV{"NIX_MANIFESTS_DIR"} || "@localstatedir@/nix/manifests";
 $logDir = $ENV{"NIX_LOG_DIR"} || "@localstatedir@/log/nix";
 $confDir = $ENV{"NIX_CONF_DIR"} || "@sysconfdir@/nix";
 $storeDir = $ENV{"NIX_STORE_DIR"} || "@storedir@";


@@ -1,340 +0,0 @@
package Nix::GeneratePatches;
use strict;
use File::Temp qw(tempdir);
use File::stat;
use Nix::Config;
use Nix::Manifest;
our @ISA = qw(Exporter);
our @EXPORT = qw(generatePatches propagatePatches copyPatches);
# Some patch generations options.
# Max size of NAR archives to generate patches for.
my $maxNarSize = $ENV{"NIX_MAX_NAR_SIZE"};
$maxNarSize = 160 * 1024 * 1024 if !defined $maxNarSize;
# If patch is bigger than this fraction of full archive, reject.
my $maxPatchFraction = $ENV{"NIX_PATCH_FRACTION"};
$maxPatchFraction = 0.60 if !defined $maxPatchFraction;
my $timeLimit = $ENV{"NIX_BSDIFF_TIME_LIMIT"};
$timeLimit = 180 if !defined $timeLimit;
my $hashAlgo = "sha256";
sub findOutputPaths {
my $narFiles = shift;
my %outPaths;
foreach my $p (keys %{$narFiles}) {
# Ignore derivations.
next if ($p =~ /\.drv$/);
# Ignore builders (too much ambiguity -- they're all called
# `builder.sh').
next if ($p =~ /\.sh$/);
next if ($p =~ /\.patch$/);
# Don't bother including tar files etc.
next if ($p =~ /\.tar$/ || $p =~ /\.tar\.(gz|bz2|Z|lzma|xz)$/ || $p =~ /\.zip$/ || $p =~ /\.bin$/ || $p =~ /\.tgz$/ || $p =~ /\.rpm$/ || $p =~ /cvs-export$/ || $p =~ /fetchhg$/);
$outPaths{$p} = 1;
}
return %outPaths;
}
sub getNameVersion {
my $p = shift;
$p =~ /\/[0-9a-z]+((?:-[a-zA-Z][^\/-]*)+)([^\/]*)$/;
my $name = $1;
my $version = $2;
return undef unless defined $name && defined $version;
$name =~ s/^-//;
$version =~ s/^-//;
return ($name, $version);
}
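# Illustration (editor's note, hypothetical hash): the regex above splits a
# store path into name and version, e.g.
#   getNameVersion("/nix/store/abcd1234-firefox-45.0") yields ("firefox", "45.0"),
#   getNameVersion("/nix/store/abcd1234-gnome-terminal-3.18") yields ("gnome-terminal", "3.18"),
# i.e. the name may itself contain dashes as long as each dash is followed by a letter.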
# A quick hack to get a measure of the `distance' between two
# versions: it's just the position of the first character that differs
# (or 999 if they are the same).
sub versionDiff {
my $s = shift;
my $t = shift;
my $i;
return 999 if $s eq $t;
for ($i = 0; $i < length $s; $i++) {
return $i if $i >= length $t or
substr($s, $i, 1) ne substr($t, $i, 1);
}
return $i;
}
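# Worked example (editor's note): versionDiff("1.2.3", "1.2.4") returns 4 and
# versionDiff("1.2.3", "1.3.0") returns 2, so "1.2.4" counts as closer to
# "1.2.3" than "1.3.0" does; identical versions return 999 (closest possible).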
sub getNarBz2 {
my $narPath = shift;
my $narFiles = shift;
my $storePath = shift;
my $narFileList = $$narFiles{$storePath};
die "missing path $storePath" unless defined $narFileList;
my $narFile = @{$narFileList}[0];
die unless defined $narFile;
$narFile->{url} =~ /\/([^\/]+)$/;
die unless defined $1;
return "$narPath/$1";
}
sub containsPatch {
my $patches = shift;
my $storePath = shift;
my $basePath = shift;
my $patchList = $$patches{$storePath};
return 0 if !defined $patchList;
my $found = 0;
foreach my $patch (@{$patchList}) {
# !!! baseHash might differ
return 1 if $patch->{basePath} eq $basePath;
}
return 0;
}
sub generatePatches {
my ($srcNarFiles, $dstNarFiles, $srcPatches, $dstPatches, $narPath, $patchesPath, $patchesURL, $tmpDir) = @_;
my %srcOutPaths = findOutputPaths $srcNarFiles;
my %dstOutPaths = findOutputPaths $dstNarFiles;
# For each output path in the destination, see if we need to / can
# create a patch.
print STDERR "creating patches...\n";
foreach my $p (keys %dstOutPaths) {
# If exactly the same path already exists in the source, skip it.
next if defined $srcOutPaths{$p};
print " $p\n";
# If not, then we should find the paths in the source that are
# `most' likely to be present on a system that wants to
# install this path.
(my $name, my $version) = getNameVersion $p;
next unless defined $name && defined $version;
my @closest = ();
my $closestVersion;
my $minDist = -1; # actually, larger means closer
# Find all source paths with the same name.
foreach my $q (keys %srcOutPaths) {
(my $name2, my $version2) = getNameVersion $q;
next unless defined $name2 && defined $version2;
if ($name eq $name2) {
my $srcSystem = @{$$dstNarFiles{$p}}[0]->{system};
my $dstSystem = @{$$srcNarFiles{$q}}[0]->{system};
if (defined $srcSystem && defined $dstSystem && $srcSystem ne $dstSystem) {
print " SKIPPING $q due to different systems ($srcSystem vs. $dstSystem)\n";
next;
}
# If the sizes differ too much, then skip. This
# disambiguates between, e.g., a real component and a
# wrapper component (cf. Firefox in Nixpkgs).
my $srcSize = @{$$srcNarFiles{$q}}[0]->{size};
my $dstSize = @{$$dstNarFiles{$p}}[0]->{size};
my $ratio = $srcSize / $dstSize;
$ratio = 1 / $ratio if $ratio < 1;
# print " SIZE $srcSize $dstSize $ratio $q\n";
if ($ratio >= 3) {
print " SKIPPING $q due to size ratio $ratio ($srcSize vs. $dstSize)\n";
next;
}
# If there are multiple matching names, include the
# ones with the closest version numbers.
my $dist = versionDiff $version, $version2;
if ($dist > $minDist) {
$minDist = $dist;
@closest = ($q);
$closestVersion = $version2;
} elsif ($dist == $minDist) {
push @closest, $q;
}
}
}
if (scalar(@closest) == 0) {
print " NO BASE: $p\n";
next;
}
foreach my $closest (@closest) {
# Generate a patch between $closest and $p.
print STDERR " $p <- $closest\n";
# If the patch already exists, skip it.
if (containsPatch($srcPatches, $p, $closest) ||
containsPatch($dstPatches, $p, $closest))
{
print " skipping, already exists\n";
next;
}
my $srcNarBz2 = getNarBz2 $narPath, $srcNarFiles, $closest;
my $dstNarBz2 = getNarBz2 $narPath, $dstNarFiles, $p;
if (! -f $srcNarBz2) {
warn "patch source archive $srcNarBz2 is missing\n";
next;
}
system("$Nix::Config::bzip2 -d < $srcNarBz2 > $tmpDir/A") == 0
or die "cannot unpack $srcNarBz2";
if (stat("$tmpDir/A")->size >= $maxNarSize) {
print " skipping, source is too large\n";
next;
}
system("$Nix::Config::bzip2 -d < $dstNarBz2 > $tmpDir/B") == 0
or die "cannot unpack $dstNarBz2";
if (stat("$tmpDir/B")->size >= $maxNarSize) {
print " skipping, destination is too large\n";
next;
}
my $time1 = time();
my $res = system("ulimit -t $timeLimit; $Nix::Config::libexecDir/nix/bsdiff $tmpDir/A $tmpDir/B $tmpDir/DIFF");
my $time2 = time();
if ($res) {
warn "binary diff computation aborted after ", $time2 - $time1, " seconds\n";
next;
}
my $baseHash = `$Nix::Config::binDir/nix-hash --flat --type $hashAlgo --base32 $tmpDir/A` or die;
chomp $baseHash;
my $narHash = `$Nix::Config::binDir/nix-hash --flat --type $hashAlgo --base32 $tmpDir/B` or die;
chomp $narHash;
my $narDiffHash = `$Nix::Config::binDir/nix-hash --flat --type $hashAlgo --base32 $tmpDir/DIFF` or die;
chomp $narDiffHash;
my $narDiffSize = stat("$tmpDir/DIFF")->size;
my $dstNarBz2Size = stat($dstNarBz2)->size;
print " size $narDiffSize; full size $dstNarBz2Size; ", $time2 - $time1, " seconds\n";
if ($narDiffSize >= $dstNarBz2Size) {
print " rejecting; patch bigger than full archive\n";
next;
}
if ($narDiffSize / $dstNarBz2Size >= $maxPatchFraction) {
print " rejecting; patch too large relative to full archive\n";
next;
}
my $finalName = "$narDiffHash.nar-bsdiff";
if (-e "$patchesPath/$finalName") {
print " not copying, already exists\n";
}
else {
system("cp '$tmpDir/DIFF' '$patchesPath/$finalName.tmp'") == 0
or die "cannot copy diff";
rename("$patchesPath/$finalName.tmp", "$patchesPath/$finalName")
or die "cannot rename $patchesPath/$finalName.tmp";
}
# Add the patch to the manifest.
addPatch $dstPatches, $p,
{ url => "$patchesURL/$finalName", hash => "$hashAlgo:$narDiffHash"
, size => $narDiffSize, basePath => $closest, baseHash => "$hashAlgo:$baseHash"
, narHash => "$hashAlgo:$narHash", patchType => "nar-bsdiff"
};
}
}
}
# Propagate useful patches from $srcPatches to $dstPatches. A patch
# is useful if it produces either paths in the $dstNarFiles or paths
# that can be used as the base for other useful patches.
sub propagatePatches {
my ($srcPatches, $dstNarFiles, $dstPatches) = @_;
print STDERR "propagating patches...\n";
my $changed;
do {
# !!! we repeat this to reach the transitive closure; inefficient
$changed = 0;
print STDERR "loop\n";
my %dstBasePaths;
foreach my $q (keys %{$dstPatches}) {
foreach my $patch (@{$$dstPatches{$q}}) {
$dstBasePaths{$patch->{basePath}} = 1;
}
}
foreach my $p (keys %{$srcPatches}) {
my $patchList = $$srcPatches{$p};
my $include = 0;
# Is path $p included in the destination? If so, include
# patches that produce it.
$include = 1 if defined $$dstNarFiles{$p};
# Is path $p a path that serves as a base for paths in the
# destination? If so, include patches that produce it.
# !!! check baseHash
$include = 1 if defined $dstBasePaths{$p};
if ($include) {
foreach my $patch (@{$patchList}) {
$changed = 1 if addPatch $dstPatches, $p, $patch;
}
}
}
} while $changed;
}
# Add all new patches in $srcPatches to $dstPatches.
sub copyPatches {
my ($srcPatches, $dstPatches) = @_;
foreach my $p (keys %{$srcPatches}) {
addPatch $dstPatches, $p, $_ foreach @{$$srcPatches{$p}};
}
}
return 1;


@@ -13,7 +13,7 @@ use Nix::Config;
 use Nix::Store;
 our @ISA = qw(Exporter);
-our @EXPORT = qw(readManifest writeManifest updateManifestDB addPatch deleteOldManifests parseNARInfo fingerprintPath);
+our @EXPORT = qw(readManifest writeManifest addPatch parseNARInfo fingerprintPath);
 sub addNAR {
@@ -228,172 +228,6 @@ sub writeManifest {
 }
sub updateManifestDB {
my $manifestDir = $Nix::Config::manifestDir;
my @manifests = glob "$manifestDir/*.nixmanifest";
return undef if scalar @manifests == 0;
mkpath($manifestDir);
unlink "$manifestDir/cache.sqlite"; # remove obsolete cache
my $dbPath = "$manifestDir/cache-v2.sqlite";
# Open/create the database.
our $dbh = DBI->connect("dbi:SQLite:dbname=$dbPath", "", "")
or die "cannot open database $dbPath";
$dbh->{RaiseError} = 1;
$dbh->{PrintError} = 0;
$dbh->do("pragma foreign_keys = on");
$dbh->do("pragma synchronous = off"); # we can always reproduce the cache
$dbh->do("pragma journal_mode = truncate");
# Initialise the database schema, if necessary.
$dbh->do(<<EOF);
create table if not exists Manifests (
id integer primary key autoincrement not null,
path text unique not null,
timestamp integer not null
);
EOF
$dbh->do(<<EOF);
create table if not exists NARs (
id integer primary key autoincrement not null,
manifest integer not null,
storePath text not null,
url text not null,
compressionType text not null,
hash text,
size integer,
narHash text,
narSize integer,
refs text,
deriver text,
system text,
foreign key (manifest) references Manifests(id) on delete cascade
);
EOF
$dbh->do("create index if not exists NARs_storePath on NARs(storePath)");
$dbh->do(<<EOF);
create table if not exists Patches (
id integer primary key autoincrement not null,
manifest integer not null,
storePath text not null,
basePath text not null,
baseHash text not null,
url text not null,
hash text,
size integer,
narHash text,
narSize integer,
patchType text not null,
foreign key (manifest) references Manifests(id) on delete cascade
);
EOF
$dbh->do("create index if not exists Patches_storePath on Patches(storePath)");
# Acquire an exclusive lock to ensure that only one process
# updates the DB at the same time. This isn't really necessary,
# but it prevents work duplication and lock contention in SQLite.
my $lockFile = "$manifestDir/cache.lock";
open MAINLOCK, ">>$lockFile" or die "unable to acquire lock $lockFile: $!\n";
flock(MAINLOCK, LOCK_EX) or die;
our $insertNAR = $dbh->prepare(
"insert into NARs(manifest, storePath, url, compressionType, hash, size, narHash, " .
"narSize, refs, deriver, system) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") or die;
our $insertPatch = $dbh->prepare(
"insert into Patches(manifest, storePath, basePath, baseHash, url, hash, " .
"size, narHash, narSize, patchType) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)");
$dbh->begin_work;
# Read each manifest in $manifestDir and add it to the database,
# unless we've already done so on a previous run.
my %seen;
for my $manifestLink (@manifests) {
my $manifest = Cwd::abs_path($manifestLink);
next unless -f $manifest;
my $timestamp = lstat($manifest)->mtime;
$seen{$manifest} = 1;
next if scalar @{$dbh->selectcol_arrayref(
"select 1 from Manifests where path = ? and timestamp = ?",
{}, $manifest, $timestamp)} == 1;
print STDERR "caching $manifest...\n";
$dbh->do("delete from Manifests where path = ?", {}, $manifest);
$dbh->do("insert into Manifests(path, timestamp) values (?, ?)",
{}, $manifest, $timestamp);
our $id = $dbh->last_insert_id("", "", "", "");
sub addNARToDB {
my ($storePath, $narFile) = @_;
$insertNAR->execute(
$id, $storePath, $narFile->{url}, $narFile->{compressionType}, $narFile->{hash},
$narFile->{size}, $narFile->{narHash}, $narFile->{narSize}, $narFile->{references},
$narFile->{deriver}, $narFile->{system});
};
sub addPatchToDB {
my ($storePath, $patch) = @_;
$insertPatch->execute(
$id, $storePath, $patch->{basePath}, $patch->{baseHash}, $patch->{url},
$patch->{hash}, $patch->{size}, $patch->{narHash}, $patch->{narSize},
$patch->{patchType});
};
my $version = readManifest_($manifest, \&addNARToDB, \&addPatchToDB);
if ($version < 3) {
die "you have an old-style or corrupt manifest $manifestLink; please delete it\n";
}
if ($version >= 10) {
die "manifest $manifestLink is too new; please delete it or upgrade Nix\n";
}
}
# Removed cached information for removed manifests from the DB.
foreach my $manifest (@{$dbh->selectcol_arrayref("select path from Manifests")}) {
next if defined $seen{$manifest};
$dbh->do("delete from Manifests where path = ?", {}, $manifest);
}
$dbh->commit;
close MAINLOCK;
return $dbh;
}
# Delete all old manifests downloaded from a given URL.
sub deleteOldManifests {
my ($url, $curUrlFile) = @_;
for my $urlFile (glob "$Nix::Config::manifestDir/*.url") {
next if defined $curUrlFile && $urlFile eq $curUrlFile;
open URL, "<$urlFile" or die;
my $url2 = <URL>;
chomp $url2;
close URL;
next unless $url eq $url2;
my $base = $urlFile; $base =~ s/.url$//;
unlink "${base}.url";
unlink "${base}.nixmanifest";
}
}
 # Return a fingerprint of a store path to be used in binary cache
 # signatures. It contains the store path, the base-32 SHA-256 hash of
 # the contents of the path, and the references.


@@ -1,7 +1,6 @@
 nix_perl_sources := \
 $(d)/lib/Nix/Store.pm \
 $(d)/lib/Nix/Manifest.pm \
-$(d)/lib/Nix/GeneratePatches.pm \
 $(d)/lib/Nix/SSH.pm \
 $(d)/lib/Nix/CopyClosure.pm \
 $(d)/lib/Nix/Config.pm.in \


@@ -1,376 +0,0 @@
#! @perl@ -w @perlFlags@
use utf8;
use strict;
use Nix::Config;
use Nix::Manifest;
use Nix::Store;
use Nix::Utils;
use POSIX qw(strftime);
STDOUT->autoflush(1);
binmode STDERR, ":encoding(utf8)";
my $logFile = "$Nix::Config::logDir/downloads";
# For queries, skip expensive calls to nix-hash etc. We're just
# estimating the expected download size.
my $fast = 1;
my $curl = "$Nix::Config::curl --fail --location";
# Open the manifest cache and update it if necessary.
my $dbh = updateManifestDB();
exit 0 unless defined $dbh; # exit if there are no manifests
print "\n";
# $hashCache->{$algo}->{$path} yields the $algo-hash of $path.
my $hashCache;
sub parseHash {
my $hash = shift;
if ($hash =~ /^(.+):(.+)$/) {
return ($1, $2);
} else {
return ("md5", $hash);
}
}
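# Illustration (editor's note, hypothetical hash): parseHash("sha256:1a2b...")
# returns ("sha256", "1a2b..."); a bare hash without an "algo:" prefix is
# assumed to be MD5 and comes back as ("md5", $hash).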
# Compute the most efficient sequence of downloads to produce the
# given path.
sub computeSmallestDownload {
my $targetPath = shift;
# Build a graph of all store paths that might contribute to the
# construction of $targetPath, and the special node "start". The
# edges are either patch operations, or downloads of full NAR
# files. The latter edges only occur between "start" and a store
# path.
my %graph;
$graph{"start"} = {d => 0, pred => undef, edges => []};
my @queue = ();
my $queueFront = 0;
my %done;
sub addNode {
my $graph = shift;
my $u = shift;
$$graph{$u} = {d => 999999999999, pred => undef, edges => []}
unless defined $$graph{$u};
}
sub addEdge {
my $graph = shift;
my $u = shift;
my $v = shift;
my $w = shift;
my $type = shift;
my $info = shift;
addNode $graph, $u;
push @{$$graph{$u}->{edges}},
{weight => $w, start => $u, end => $v, type => $type, info => $info};
my $n = scalar @{$$graph{$u}->{edges}};
}
push @queue, $targetPath;
while ($queueFront < scalar @queue) {
my $u = $queue[$queueFront++];
next if defined $done{$u};
$done{$u} = 1;
addNode \%graph, $u;
# If the path already exists, it has distance 0 from the
# "start" node.
if (isValidPath($u)) {
addEdge \%graph, "start", $u, 0, "present", undef;
}
else {
# Add patch edges.
my $patchList = $dbh->selectall_arrayref(
"select * from Patches where storePath = ?",
{ Slice => {} }, $u);
foreach my $patch (@{$patchList}) {
if (isValidPath($patch->{basePath})) {
my ($baseHashAlgo, $baseHash) = parseHash $patch->{baseHash};
my $hash = $hashCache->{$baseHashAlgo}->{$patch->{basePath}};
if (!defined $hash) {
$hash = $fast && $baseHashAlgo eq "sha256"
? queryPathHash($patch->{basePath})
: hashPath($baseHashAlgo, $baseHashAlgo ne "md5", $patch->{basePath});
$hash =~ s/.*://;
$hashCache->{$baseHashAlgo}->{$patch->{basePath}} = $hash;
}
next if $hash ne $baseHash;
}
push @queue, $patch->{basePath};
addEdge \%graph, $patch->{basePath}, $u, $patch->{size}, "patch", $patch;
}
# Add NAR file edges to the start node.
my $narFileList = $dbh->selectall_arrayref(
"select * from NARs where storePath = ?",
{ Slice => {} }, $u);
foreach my $narFile (@{$narFileList}) {
# !!! how to handle files whose size is not known in advance?
# For now, assume some arbitrary size (1 GB).
# This has the side-effect of preferring non-Hydra downloads.
addEdge \%graph, "start", $u, ($narFile->{size} || 1000000000), "narfile", $narFile;
}
}
}
# Run Dijkstra's shortest path algorithm to determine the shortest
# sequence of download and/or patch actions that will produce
# $targetPath.
my @todo = keys %graph;
while (scalar @todo > 0) {
# Remove the closest element from the todo list.
# !!! inefficient, use a priority queue
@todo = sort { -($graph{$a}->{d} <=> $graph{$b}->{d}) } @todo;
my $u = pop @todo;
my $u_ = $graph{$u};
foreach my $edge (@{$u_->{edges}}) {
my $v_ = $graph{$edge->{end}};
if ($v_->{d} > $u_->{d} + $edge->{weight}) {
$v_->{d} = $u_->{d} + $edge->{weight};
# Store the edge; to edge->start is actually the
# predecessor.
$v_->{pred} = $edge;
}
}
}
# Retrieve the shortest path from "start" to $targetPath.
my @path = ();
my $cur = $targetPath;
return () unless defined $graph{$targetPath}->{pred};
while ($cur ne "start") {
push @path, $graph{$cur}->{pred};
$cur = $graph{$cur}->{pred}->{start};
}
return @path;
}
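# Note (editor's annotation): the edges are collected from $targetPath back
# towards "start", so the callers below pop @path to replay the route in
# forward order: first a full NAR download (or an already-present base path),
# then zero or more patch applications.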
# Parse the arguments.
if ($ARGV[0] eq "--query") {
while (<STDIN>) {
chomp;
my ($cmd, @args) = split " ", $_;
if ($cmd eq "have") {
foreach my $storePath (@args) {
print "$storePath\n" if scalar @{$dbh->selectcol_arrayref("select 1 from NARs where storePath = ?", {}, $storePath)} > 0;
}
print "\n";
}
elsif ($cmd eq "info") {
foreach my $storePath (@args) {
my $infos = $dbh->selectall_arrayref(
"select * from NARs where storePath = ?",
{ Slice => {} }, $storePath);
next unless scalar @{$infos} > 0;
my $info = @{$infos}[0];
print "$storePath\n";
print "$info->{deriver}\n";
my @references = split " ", $info->{refs};
print scalar @references, "\n";
print "$_\n" foreach @references;
my @path = computeSmallestDownload $storePath;
my $downloadSize = 0;
while (scalar @path > 0) {
my $edge = pop @path;
my $u = $edge->{start};
my $v = $edge->{end};
if ($edge->{type} eq "patch") {
$downloadSize += $edge->{info}->{size} || 0;
}
elsif ($edge->{type} eq "narfile") {
$downloadSize += $edge->{info}->{size} || 0;
}
}
print "$downloadSize\n";
my $narSize = $info->{narSize} || 0;
print "$narSize\n";
}
print "\n";
}
else { die "unknown command $cmd"; }
}
exit 0;
}
elsif ($ARGV[0] ne "--substitute") {
die;
}
die unless scalar @ARGV == 3;
my $targetPath = $ARGV[1];
my $destPath = $ARGV[2];
$fast = 0;
# Create a temporary directory.
my $tmpDir = mkTempDir("nix-download");
my $tmpNar = "$tmpDir/nar";
my $tmpNar2 = "$tmpDir/nar2";
open LOGFILE, ">>$logFile" or die "cannot open log file $logFile";
my $date = strftime ("%F %H:%M:%S UTC", gmtime (time));
print LOGFILE "$$ get $targetPath $date\n";
print STDERR "\n*** Trying to download/patch $targetPath\n";
# Compute the shortest path.
my @path = computeSmallestDownload $targetPath;
die "don't know how to produce $targetPath\n" if scalar @path == 0;
# We don't need the manifest anymore, so close it as an optimisation:
# if we still have SQLite locks blocking other processes (we
# shouldn't), this gets rid of them.
$dbh->disconnect;
# Traverse the shortest path, perform the actions described by the
# edges.
my $curStep = 1;
my $maxStep = scalar @path;
my $finalNarHash;
while (scalar @path > 0) {
my $edge = pop @path;
my $u = $edge->{start};
my $v = $edge->{end};
print STDERR "\n*** Step $curStep/$maxStep: ";
if ($edge->{type} eq "present") {
print STDERR "using already present path $v\n";
print LOGFILE "$$ present $v\n";
if ($curStep < $maxStep) {
# Since this is not the last step, the path will be used
# as a base to one or more patches. So turn the base path
# into a NAR archive, to which we can apply the patch.
print STDERR " packing base path...\n";
system("$Nix::Config::binDir/nix-store --dump $v > $tmpNar") == 0
or die "cannot dump $v";
}
}
elsif ($edge->{type} eq "patch") {
my $patch = $edge->{info};
print STDERR "applying patch $patch->{url} to $u to create $v\n";
print LOGFILE "$$ patch $patch->{url} $patch->{size} $patch->{baseHash} $u $v\n";
# Download the patch.
print STDERR " downloading patch...\n";
my $patchPath = "$tmpDir/patch";
checkURL $patch->{url};
system("$curl '$patch->{url}' -o $patchPath") == 0
or die "cannot download patch $patch->{url}\n";
# Apply the patch to the NAR archive produced in step 1 (for
# the already present path) or a later step (for patch sequences).
print STDERR " applying patch...\n";
system("$Nix::Config::libexecDir/nix/bspatch $tmpNar $tmpNar2 $patchPath") == 0
or die "cannot apply patch $patchPath to $tmpNar\n";
if ($curStep < $maxStep) {
# The archive will be used as the base of the next patch.
rename "$tmpNar2", "$tmpNar" or die "cannot rename NAR archive: $!";
} else {
# This was the last patch. Unpack the final NAR archive
# into the target path.
print STDERR " unpacking patched archive...\n";
system("$Nix::Config::binDir/nix-store --restore $destPath < $tmpNar2") == 0
or die "cannot unpack $tmpNar2 to $v\n";
}
$finalNarHash = $patch->{narHash};
}
elsif ($edge->{type} eq "narfile") {
my $narFile = $edge->{info};
print STDERR "downloading $narFile->{url} to $v\n";
my $size = $narFile->{size} || -1;
print LOGFILE "$$ narfile $narFile->{url} $size $v\n";
checkURL $narFile->{url};
my $decompressor =
$narFile->{compressionType} eq "bzip2" ? "| $Nix::Config::bzip2 -d" :
$narFile->{compressionType} eq "xz" ? "| $Nix::Config::xz -d" :
$narFile->{compressionType} eq "none" ? "" :
die "unknown compression type $narFile->{compressionType}";
if ($curStep < $maxStep) {
# The archive will be used a base to a patch.
system("$curl '$narFile->{url}' $decompressor > $tmpNar") == 0
or die "cannot download and unpack $narFile->{url} to $v\n";
} else {
# Unpack the archive to the target path.
system("$curl '$narFile->{url}' $decompressor | $Nix::Config::binDir/nix-store --restore '$destPath'") == 0
or die "cannot download and unpack $narFile->{url} to $v\n";
}
$finalNarHash = $narFile->{narHash};
}
$curStep++;
}
# Tell Nix about the expected hash so it can verify it.
die "cannot check integrity of the downloaded path since its hash is not known\n"
unless defined $finalNarHash;
print "$finalNarHash\n";
print STDERR "\n";
print LOGFILE "$$ success\n";
close LOGFILE;


@@ -2,17 +2,14 @@ nix_bin_scripts := \
 $(d)/nix-build \
 $(d)/nix-channel \
 $(d)/nix-copy-closure \
-$(d)/nix-generate-patches \
 $(d)/nix-install-package \
-$(d)/nix-pull \
 $(d)/nix-push
 bin-scripts += $(nix_bin_scripts)
 nix_substituters := \
 $(d)/copy-from-other-stores.pl \
-$(d)/download-from-binary-cache.pl \
-$(d)/download-using-manifests.pl
+$(d)/download-from-binary-cache.pl
 nix_noinst_scripts := \
 $(d)/build-remote.pl \


@@ -12,8 +12,6 @@ binmode STDERR, ":encoding(utf8)";
 Nix::Config::readConfig;
-my $manifestDir = $Nix::Config::manifestDir;
 # Turn on caching in nix-prefetch-url.
 my $channelCache = "$Nix::Config::stateDir/channel-cache";
@@ -75,7 +73,6 @@ sub removeChannel {
 my ($name) = @_;
 readChannels;
 my $url = $channels{$name};
-deleteOldManifests($url . "/MANIFEST", undef) if defined $url;
 delete $channels{$name};
 writeChannels;
@@ -84,8 +81,7 @@ sub removeChannel {
 }
-# Fetch Nix expressions and pull manifests from the subscribed
-# channels.
+# Fetch Nix expressions and binary cache URLs from the subscribed channels.
 sub update {
 my @channelNames = @_;
@@ -97,7 +93,6 @@ sub update {
 next if scalar @channelNames > 0 && ! grep { $_ eq $name } @{channelNames};
 my $url = $channels{$name};
-my $origUrl = "$url/MANIFEST";
 # We want to download the url to a file to see if it's a tarball while also checking if we
 # got redirected in the process, so that we can grab the various parts of a nix channel
@@ -132,22 +127,8 @@ sub update {
 if ($ret != 0) {
 # Check if the channel advertises a binary cache.
 my $binaryCacheURL = `$Nix::Config::curl --silent '$url'/binary-cache-url`;
-my $getManifest = ($Nix::Config::config{"force-manifest"} // "false") eq "true";
-if ($? == 0 && $binaryCacheURL ne "") {
-$extraAttrs .= "binaryCacheURL = \"$binaryCacheURL\"; ";
-deleteOldManifests($origUrl, undef);
-} else {
-$getManifest = 1;
-}
-if ($getManifest) {
-# No binary cache, so pull the channel manifest.
-mkdir $manifestDir, 0755 unless -e $manifestDir;
-die "$0: you do not have write permission to $manifestDir!\n" unless -W $manifestDir;
-$ENV{'NIX_ORIG_URL'} = $origUrl;
-system("$Nix::Config::binDir/nix-pull", "--skip-wrong-store", "$url/MANIFEST") == 0
-or die "cannot pull manifest from $url\n";
-}
+$extraAttrs .= "binaryCacheURL = \"$binaryCacheURL\"; "
+if $? == 0 && $binaryCacheURL ne "";
 # Download the channel tarball.
 my $fullURL = "$url/nixexprs.tar.xz";


@@ -1,51 +0,0 @@
#! @perl@ -w @perlFlags@
use strict;
use Nix::Manifest;
use Nix::GeneratePatches;
use Nix::Utils;
if (scalar @ARGV != 5) {
print STDERR <<EOF;
Usage: nix-generate-patches NAR-DIR PATCH-DIR PATCH-URI OLD-MANIFEST NEW-MANIFEST
This command generates binary patches between NAR files listed in
OLD-MANIFEST and NEW-MANIFEST. The patches are written to the
directory PATCH-DIR, and the prefix PATCH-URI is used to generate URIs
for the patches. The patches are added to NEW-MANIFEST. All NARs are
required to exist in NAR-DIR. Patches are generated between
succeeding versions of packages with the same name.
EOF
exit 1;
}
my $narPath = $ARGV[0];
my $patchesPath = $ARGV[1];
my $patchesURL = $ARGV[2];
my $srcManifest = $ARGV[3];
my $dstManifest = $ARGV[4];
my (%srcNarFiles, %srcLocalPaths, %srcPatches);
readManifest $srcManifest, \%srcNarFiles, \%srcPatches;
my (%dstNarFiles, %dstLocalPaths, %dstPatches);
readManifest $dstManifest, \%dstNarFiles, \%dstPatches;
my $tmpDir = mkTempDir("nix-generate-patches");
generatePatches \%srcNarFiles, \%dstNarFiles, \%srcPatches, \%dstPatches,
$narPath, $patchesPath, $patchesURL, $tmpDir;
propagatePatches \%srcPatches, \%dstNarFiles, \%dstPatches;
# Optionally add all new patches to the manifest in $NIX_ALL_PATCHES.
my $allPatchesFile = $ENV{"NIX_ALL_PATCHES"};
if (defined $allPatchesFile) {
my (%dummy, %allPatches);
readManifest("$patchesPath/all-patches", \%dummy, \%allPatches)
if -f $allPatchesFile;
copyPatches \%dstPatches, \%allPatches;
writeManifest($allPatchesFile, {}, \%allPatches, 0);
}
writeManifest $dstManifest, \%dstNarFiles, \%dstPatches;


@@ -89,7 +89,7 @@ my $pathRE = "(?: \/ [\/A-Za-z0-9\+\-\.\_\?\=]* )";
 # store path. We'll let nix-env do that.
 $contents =~
-/ ^ \s* (\S+) \s+ ($Nix::Utils::urlRE) \s+ ($nameRE) \s+ ($systemRE) \s+ ($pathRE) \s+ ($pathRE) ( \s+ ($Nix::Utils::urlRE) )? /x
+/ ^ \s* (\S+) \s+ (\S+) \s+ ($nameRE) \s+ ($systemRE) \s+ ($pathRE) \s+ ($pathRE) ( \s+ ($Nix::Utils::urlRE) )? /x
 or barf "invalid package contents";
 my $version = $1;
 my $manifestURL = $2;
@@ -111,26 +111,10 @@ if ($interactive) {
 }
-if (defined $binaryCacheURL) {
+die "$0: package does not supply a binary cache\n" unless defined $binaryCacheURL;
 push @extraNixEnvArgs, "--option", "extra-binary-caches", $binaryCacheURL;
-} else {
-# Store the manifest in the temporary directory so that we don't
-# pollute /nix/var/nix/manifests. This also requires that we
-# don't use the Nix daemon (because otherwise
-# download-using-manifests won't see our NIX_MANIFESTS_DIRS
-# environment variable).
-$ENV{NIX_MANIFESTS_DIR} = $tmpDir;
-$ENV{NIX_REMOTE} = "";
-print "\nPulling manifests...\n";
-system("$Nix::Config::binDir/nix-pull", $manifestURL) == 0
-or barf "nix-pull failed: $?";
-}
 print "\nInstalling package...\n";
 system("$Nix::Config::binDir/nix-env", $op, $outPath, "--force-name", $drvName, @extraNixEnvArgs) == 0


@@ -1,102 +0,0 @@
#! @perl@ -w @perlFlags@
use utf8;
use strict;
use Nix::Config;
use Nix::Manifest;
binmode STDERR, ":encoding(utf8)";
my $manifestDir = $Nix::Config::manifestDir;
# Prevent access problems in shared-stored installations.
umask 0022;
# Create the manifests directory if it doesn't exist.
if (! -e $manifestDir) {
mkdir $manifestDir, 0755 or die "cannot create directory $manifestDir";
}
# Make sure that the manifests directory is scanned for GC roots.
my $gcRootsDir = "$Nix::Config::stateDir/gcroots";
my $manifestDirLink = "$gcRootsDir/manifests";
if (! -l $manifestDirLink) {
symlink($manifestDir, $manifestDirLink) or die "cannot create symlink $manifestDirLink";
}
# Process the URLs specified on the command line.
sub downloadFile {
my $url = shift;
$ENV{"PRINT_PATH"} = 1;
$ENV{"QUIET"} = 1;
my ($dummy, $path) = `$Nix::Config::binDir/nix-prefetch-url '$url'`;
die "cannot fetch $url" if $? != 0;
die "nix-prefetch-url did not return a path" unless defined $path;
chomp $path;
return $path;
}
sub processURL {
my $url = shift;
$url =~ s/\/$//;
my $manifest;
my $origUrl = $ENV{'NIX_ORIG_URL'} || $url;
# First see if a bzipped manifest is available.
if (system("$Nix::Config::curl --fail --silent --location --head '$url'.bz2 > /dev/null") == 0) {
print "fetching list of Nix archives at $url.bz2...\n";
$manifest = downloadFile "$url.bz2";
}
# Otherwise, just get the uncompressed manifest.
else {
print "fetching list of Nix archives at $url...\n";
$manifest = downloadFile $url;
}
my $baseName = "unnamed";
if ($url =~ /\/([^\/]+)\/[^\/]+$/) { # get the forelast component
$baseName = $1;
}
my $hash = `$Nix::Config::binDir/nix-hash --flat '$manifest'`
or die "cannot hash $manifest";
chomp $hash;
my $urlFile = "$manifestDir/$baseName-$hash.url";
open URL, ">$urlFile" or die "cannot create $urlFile";
print URL $origUrl;
close URL;
my $finalPath = "$manifestDir/$baseName-$hash.nixmanifest";
unlink $finalPath if -e $finalPath;
symlink("$manifest", "$finalPath")
or die "cannot link $finalPath to $manifest";
deleteOldManifests($origUrl, $urlFile);
}
while (@ARGV) {
my $url = shift @ARGV;
if ($url eq "--help") {
exec "man nix-pull" or die;
} elsif ($url eq "--skip-wrong-store") {
# No-op, no longer supported.
} else {
processURL $url;
}
}
# Update the cache.
updateManifestDB();


@@ -1,63 +0,0 @@
.\"-
.\" Copyright 2003-2005 Colin Percival
.\" All rights reserved
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted providing that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
.\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
.\" WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
.\" DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
.\" STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
.\" IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
.\" POSSIBILITY OF SUCH DAMAGE.
.\"
.\" $FreeBSD: src/usr.bin/bsdiff/bsdiff/bsdiff.1,v 1.1 2005/08/06 01:59:05 cperciva Exp $
.\"
.Dd May 18, 2003
.Dt BSDIFF 1
.Os FreeBSD
.Sh NAME
.Nm bsdiff
.Nd generate a patch between two binary files
.Sh SYNOPSIS
.Nm
.Ao Ar oldfile Ac Ao Ar newfile Ac Ao Ar patchfile Ac
.Sh DESCRIPTION
.Nm
compares
.Ao Ar oldfile Ac
to
.Ao Ar newfile Ac
and writes to
.Ao Ar patchfile Ac
a binary patch suitable for use by bspatch(1).
When
.Ao Ar oldfile Ac
and
.Ao Ar newfile Ac
are two versions of an executable program, the
patches produced are on average a factor of five smaller
than those produced by any other binary patch tool known
to the author.
.Pp
.Nm
uses memory equal to 17 times the size of
.Ao Ar oldfile Ac ,
and requires
an absolute minimum working set size of 8 times the size of oldfile.
.Sh SEE ALSO
.Xr bspatch 1
.Sh AUTHORS
.An Colin Percival Aq cperciva@freebsd.org
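Taken together with bspatch(1) below, the removed tools were used as a pair;
an illustrative round trip (file names are made up):

  $ bsdiff old.nar new.nar delta.bsdiff
  $ bspatch old.nar rebuilt.nar delta.bsdiff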


@@ -1,405 +0,0 @@
/*-
* Copyright 2003-2005 Colin Percival
* All rights reserved
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted providing that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#if 0
__FBSDID("$FreeBSD: src/usr.bin/bsdiff/bsdiff/bsdiff.c,v 1.1 2005/08/06 01:59:05 cperciva Exp $");
#endif
#include <sys/types.h>
#include <bzlib.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#define MIN(x,y) (((x)<(y)) ? (x) : (y))
static void split(off_t *I,off_t *V,off_t start,off_t len,off_t h)
{
off_t i,j,k,x,tmp,jj,kk;
if(len<16) {
for(k=start;k<start+len;k+=j) {
j=1;x=V[I[k]+h];
for(i=1;k+i<start+len;i++) {
if(V[I[k+i]+h]<x) {
x=V[I[k+i]+h];
j=0;
};
if(V[I[k+i]+h]==x) {
tmp=I[k+j];I[k+j]=I[k+i];I[k+i]=tmp;
j++;
};
};
for(i=0;i<j;i++) V[I[k+i]]=k+j-1;
if(j==1) I[k]=-1;
};
return;
};
x=V[I[start+len/2]+h];
jj=0;kk=0;
for(i=start;i<start+len;i++) {
if(V[I[i]+h]<x) jj++;
if(V[I[i]+h]==x) kk++;
};
jj+=start;kk+=jj;
i=start;j=0;k=0;
while(i<jj) {
if(V[I[i]+h]<x) {
i++;
} else if(V[I[i]+h]==x) {
tmp=I[i];I[i]=I[jj+j];I[jj+j]=tmp;
j++;
} else {
tmp=I[i];I[i]=I[kk+k];I[kk+k]=tmp;
k++;
};
};
while(jj+j<kk) {
if(V[I[jj+j]+h]==x) {
j++;
} else {
tmp=I[jj+j];I[jj+j]=I[kk+k];I[kk+k]=tmp;
k++;
};
};
if(jj>start) split(I,V,start,jj-start,h);
for(i=0;i<kk-jj;i++) V[I[jj+i]]=kk-1;
if(jj==kk-1) I[jj]=-1;
if(start+len>kk) split(I,V,kk,start+len-kk,h);
}
static void qsufsort(off_t *I,off_t *V,u_char *old,off_t oldsize)
{
off_t buckets[256];
off_t i,h,len;
for(i=0;i<256;i++) buckets[i]=0;
for(i=0;i<oldsize;i++) buckets[old[i]]++;
for(i=1;i<256;i++) buckets[i]+=buckets[i-1];
for(i=255;i>0;i--) buckets[i]=buckets[i-1];
buckets[0]=0;
for(i=0;i<oldsize;i++) I[++buckets[old[i]]]=i;
I[0]=oldsize;
for(i=0;i<oldsize;i++) V[i]=buckets[old[i]];
V[oldsize]=0;
for(i=1;i<256;i++) if(buckets[i]==buckets[i-1]+1) I[buckets[i]]=-1;
I[0]=-1;
for(h=1;I[0]!=-(oldsize+1);h+=h) {
len=0;
for(i=0;i<oldsize+1;) {
if(I[i]<0) {
len-=I[i];
i-=I[i];
} else {
if(len) I[i-len]=-len;
len=V[I[i]]+1-i;
split(I,V,i,len,h);
i+=len;
len=0;
};
};
if(len) I[i-len]=-len;
};
for(i=0;i<oldsize+1;i++) I[V[i]]=i;
}
static off_t matchlen(u_char *old,off_t oldsize,u_char *new,off_t newsize)
{
off_t i;
for(i=0;(i<oldsize)&&(i<newsize);i++)
if(old[i]!=new[i]) break;
return i;
}
static off_t search(off_t *I,u_char *old,off_t oldsize,
u_char *new,off_t newsize,off_t st,off_t en,off_t *pos)
{
off_t x,y;
if(en-st<2) {
x=matchlen(old+I[st],oldsize-I[st],new,newsize);
y=matchlen(old+I[en],oldsize-I[en],new,newsize);
if(x>y) {
*pos=I[st];
return x;
} else {
*pos=I[en];
return y;
}
};
x=st+(en-st)/2;
if(memcmp(old+I[x],new,MIN(oldsize-I[x],newsize))<0) {
return search(I,old,oldsize,new,newsize,x,en,pos);
} else {
return search(I,old,oldsize,new,newsize,st,x,pos);
};
}
static void offtout(off_t x,u_char *buf)
{
off_t y;
if(x<0) y=-x; else y=x;
buf[0]=y%256;y-=buf[0];
y=y/256;buf[1]=y%256;y-=buf[1];
y=y/256;buf[2]=y%256;y-=buf[2];
y=y/256;buf[3]=y%256;y-=buf[3];
y=y/256;buf[4]=y%256;y-=buf[4];
y=y/256;buf[5]=y%256;y-=buf[5];
y=y/256;buf[6]=y%256;y-=buf[6];
y=y/256;buf[7]=y%256;
if(x<0) buf[7]|=0x80;
}
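/* Worked example (editor's note): the eight bytes hold the magnitude of x in
   little-endian order with the sign in the top bit of buf[7], so
   offtout(1, buf) produces 01 00 00 00 00 00 00 00 and offtout(-1, buf)
   produces 01 00 00 00 00 00 00 80 (hex); offtin() in bspatch.c reverses this. */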
int main(int argc,char *argv[])
{
int fd;
u_char *old,*new;
off_t oldsize,newsize;
off_t *I,*V;
off_t scan,pos,len;
off_t lastscan,lastpos,lastoffset;
off_t oldscore,scsc;
off_t s,Sf,lenf,Sb,lenb;
off_t overlap,Ss,lens;
off_t i;
off_t dblen,eblen;
u_char *db,*eb;
u_char buf[8];
u_char header[32];
FILE * pf;
BZFILE * pfbz2;
int bz2err;
if(argc!=4) errx(1,"usage: %s oldfile newfile patchfile\n",argv[0]);
/* Allocate oldsize+1 bytes instead of oldsize bytes to ensure
that we never try to malloc(0) and get a NULL pointer */
if(((fd=open(argv[1],O_RDONLY,0))<0) ||
((oldsize=lseek(fd,0,SEEK_END))==-1) ||
((old=malloc(oldsize+1))==NULL) ||
(lseek(fd,0,SEEK_SET)!=0) ||
(read(fd,old,oldsize)!=oldsize) ||
(close(fd)==-1)) err(1,"%s",argv[1]);
if(((I=malloc((oldsize+1)*sizeof(off_t)))==NULL) ||
((V=malloc((oldsize+1)*sizeof(off_t)))==NULL)) err(1,NULL);
qsufsort(I,V,old,oldsize);
free(V);
/* Allocate newsize+1 bytes instead of newsize bytes to ensure
that we never try to malloc(0) and get a NULL pointer */
if(((fd=open(argv[2],O_RDONLY,0))<0) ||
((newsize=lseek(fd,0,SEEK_END))==-1) ||
((new=malloc(newsize+1))==NULL) ||
(lseek(fd,0,SEEK_SET)!=0) ||
(read(fd,new,newsize)!=newsize) ||
(close(fd)==-1)) err(1,"%s",argv[2]);
if(((db=malloc(newsize+1))==NULL) ||
((eb=malloc(newsize+1))==NULL)) err(1,NULL);
dblen=0;
eblen=0;
/* Create the patch file */
if ((pf = fopen(argv[3], "w")) == NULL)
err(1, "%s", argv[3]);
/* Header is
0 8 "BSDIFF40"
8 8 length of bzip2ed ctrl block
16 8 length of bzip2ed diff block
24 8 length of new file */
/* File is
0 32 Header
32 ?? Bzip2ed ctrl block
?? ?? Bzip2ed diff block
?? ?? Bzip2ed extra block */
memcpy(header,"BSDIFF40",8);
offtout(0, header + 8);
offtout(0, header + 16);
offtout(newsize, header + 24);
if (fwrite(header, 32, 1, pf) != 1)
err(1, "fwrite(%s)", argv[3]);
/* Compute the differences, writing ctrl as we go */
if ((pfbz2 = BZ2_bzWriteOpen(&bz2err, pf, 9, 0, 0)) == NULL)
errx(1, "BZ2_bzWriteOpen, bz2err = %d", bz2err);
scan=0;len=0;
lastscan=0;lastpos=0;lastoffset=0;
while(scan<newsize) {
oldscore=0;
for(scsc=scan+=len;scan<newsize;scan++) {
len=search(I,old,oldsize,new+scan,newsize-scan,
0,oldsize,&pos);
if (len > 64 * 1024) break;
for(;scsc<scan+len;scsc++)
if((scsc+lastoffset<oldsize) &&
(old[scsc+lastoffset] == new[scsc]))
oldscore++;
if(((len==oldscore) && (len!=0)) ||
(len>oldscore+8)) break;
if((scan+lastoffset<oldsize) &&
(old[scan+lastoffset] == new[scan]))
oldscore--;
};
if((len!=oldscore) || (scan==newsize)) {
s=0;Sf=0;lenf=0;
for(i=0;(lastscan+i<scan)&&(lastpos+i<oldsize);) {
if(old[lastpos+i]==new[lastscan+i]) s++;
i++;
if(s*2-i>Sf*2-lenf) { Sf=s; lenf=i; };
};
lenb=0;
if(scan<newsize) {
s=0;Sb=0;
for(i=1;(scan>=lastscan+i)&&(pos>=i);i++) {
if(old[pos-i]==new[scan-i]) s++;
if(s*2-i>Sb*2-lenb) { Sb=s; lenb=i; };
};
};
if(lastscan+lenf>scan-lenb) {
overlap=(lastscan+lenf)-(scan-lenb);
s=0;Ss=0;lens=0;
for(i=0;i<overlap;i++) {
if(new[lastscan+lenf-overlap+i]==
old[lastpos+lenf-overlap+i]) s++;
if(new[scan-lenb+i]==
old[pos-lenb+i]) s--;
if(s>Ss) { Ss=s; lens=i+1; };
};
lenf+=lens-overlap;
lenb-=lens;
};
for(i=0;i<lenf;i++)
db[dblen+i]=new[lastscan+i]-old[lastpos+i];
for(i=0;i<(scan-lenb)-(lastscan+lenf);i++)
eb[eblen+i]=new[lastscan+lenf+i];
dblen+=lenf;
eblen+=(scan-lenb)-(lastscan+lenf);
offtout(lenf,buf);
BZ2_bzWrite(&bz2err, pfbz2, buf, 8);
if (bz2err != BZ_OK)
errx(1, "BZ2_bzWrite, bz2err = %d", bz2err);
offtout((scan-lenb)-(lastscan+lenf),buf);
BZ2_bzWrite(&bz2err, pfbz2, buf, 8);
if (bz2err != BZ_OK)
errx(1, "BZ2_bzWrite, bz2err = %d", bz2err);
offtout((pos-lenb)-(lastpos+lenf),buf);
BZ2_bzWrite(&bz2err, pfbz2, buf, 8);
if (bz2err != BZ_OK)
errx(1, "BZ2_bzWrite, bz2err = %d", bz2err);
lastscan=scan-lenb;
lastpos=pos-lenb;
lastoffset=pos-scan;
};
};
BZ2_bzWriteClose(&bz2err, pfbz2, 0, NULL, NULL);
if (bz2err != BZ_OK)
errx(1, "BZ2_bzWriteClose, bz2err = %d", bz2err);
/* Compute size of compressed ctrl data */
if ((len = ftello(pf)) == -1)
err(1, "ftello");
offtout(len-32, header + 8);
/* Write compressed diff data */
if ((pfbz2 = BZ2_bzWriteOpen(&bz2err, pf, 9, 0, 0)) == NULL)
errx(1, "BZ2_bzWriteOpen, bz2err = %d", bz2err);
BZ2_bzWrite(&bz2err, pfbz2, db, dblen);
if (bz2err != BZ_OK)
errx(1, "BZ2_bzWrite, bz2err = %d", bz2err);
BZ2_bzWriteClose(&bz2err, pfbz2, 0, NULL, NULL);
if (bz2err != BZ_OK)
errx(1, "BZ2_bzWriteClose, bz2err = %d", bz2err);
/* Compute size of compressed diff data */
if ((newsize = ftello(pf)) == -1)
err(1, "ftello");
offtout(newsize - len, header + 16);
/* Write compressed extra data */
if ((pfbz2 = BZ2_bzWriteOpen(&bz2err, pf, 9, 0, 0)) == NULL)
errx(1, "BZ2_bzWriteOpen, bz2err = %d", bz2err);
BZ2_bzWrite(&bz2err, pfbz2, eb, eblen);
if (bz2err != BZ_OK)
errx(1, "BZ2_bzWrite, bz2err = %d", bz2err);
BZ2_bzWriteClose(&bz2err, pfbz2, 0, NULL, NULL);
if (bz2err != BZ_OK)
errx(1, "BZ2_bzWriteClose, bz2err = %d", bz2err);
/* Seek to the beginning, write the header, and close the file */
if (fseeko(pf, 0, SEEK_SET))
err(1, "fseeko");
if (fwrite(header, 32, 1, pf) != 1)
err(1, "fwrite(%s)", argv[3]);
if (fclose(pf))
err(1, "fclose");
/* Free the memory we used */
free(db);
free(eb);
free(I);
free(old);
free(new);
return 0;
}


@@ -1,59 +0,0 @@
.\"-
.\" Copyright 2003-2005 Colin Percival
.\" All rights reserved
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted providing that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
.\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
.\" WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
.\" DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
.\" STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
.\" IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
.\" POSSIBILITY OF SUCH DAMAGE.
.\"
.\" $FreeBSD: src/usr.bin/bsdiff/bspatch/bspatch.1,v 1.1 2005/08/06 01:59:06 cperciva Exp $
.\"
.Dd May 18, 2003
.Dt BSPATCH 1
.Os FreeBSD
.Sh NAME
.Nm bspatch
.Nd apply a patch built with bsdiff(1)
.Sh SYNOPSIS
.Nm
.Ao Ar oldfile Ac Ao Ar newfile Ac Ao Ar patchfile Ac
.Sh DESCRIPTION
.Nm
generates
.Ao Ar newfile Ac
from
.Ao Ar oldfile Ac
and
.Ao Ar patchfile Ac
where
.Ao Ar patchfile Ac
is a binary patch built by bsdiff(1).
.Pp
.Nm
uses memory equal to the size of
.Ao Ar oldfile Ac
plus the size of
.Ao Ar newfile Ac ,
but can tolerate a very small working set without a dramatic loss
of performance.
.Sh SEE ALSO
.Xr bsdiff 1
.Sh AUTHORS
.An Colin Percival Aq cperciva@freebsd.org

View file

@ -1,224 +0,0 @@
/*-
* Copyright 2003-2005 Colin Percival
* All rights reserved
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted providing that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#if 0
__FBSDID("$FreeBSD: src/usr.bin/bsdiff/bspatch/bspatch.c,v 1.1 2005/08/06 01:59:06 cperciva Exp $");
#endif
#include <bzlib.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <err.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/types.h>
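/* offtin() below decodes the 8-byte length fields used by the patch format:
   bytes 0-6 plus the low 7 bits of byte 7 hold the magnitude (little-endian,
   byte 0 least significant), and the high bit of byte 7 is the sign.
   For example, {5,0,0,0,0,0,0,0} decodes to 5 and {5,0,0,0,0,0,0,0x80} to -5. */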
static off_t offtin(u_char *buf)
{
off_t y;
y=buf[7]&0x7F;
y=y*256;y+=buf[6];
y=y*256;y+=buf[5];
y=y*256;y+=buf[4];
y=y*256;y+=buf[3];
y=y*256;y+=buf[2];
y=y*256;y+=buf[1];
y=y*256;y+=buf[0];
if(buf[7]&0x80) y=-y;
return y;
}
void writeFull(const char * name, int fd,
const unsigned char * buf, size_t count)
{
while (count) {
ssize_t res = write(fd, (char *) buf, count);
if (res == -1) {
if (errno == EINTR) continue;
err(1,"writing to %s",name);
}
count -= res;
buf += res;
}
}
int main(int argc,char * argv[])
{
FILE * f, * cpf, * dpf, * epf;
BZFILE * cpfbz2, * dpfbz2, * epfbz2;
int cbz2err, dbz2err, ebz2err;
int fd;
ssize_t oldsize,newsize;
ssize_t bzctrllen,bzdatalen;
u_char header[32],buf[8];
u_char *old, *new;
off_t oldpos,newpos;
off_t ctrl[3];
off_t lenread;
off_t i;
if(argc!=4) errx(1,"usage: %s oldfile newfile patchfile\n",argv[0]);
/* Open patch file */
if ((f = fopen(argv[3], "r")) == NULL)
err(1, "fopen(%s)", argv[3]);
/*
File format:
0 8 "BSDIFF40"
8 8 X
16 8 Y
24 8 sizeof(newfile)
32 X bzip2(control block)
32+X Y bzip2(diff block)
32+X+Y ??? bzip2(extra block)
with control block a set of triples (x,y,z) meaning "add x bytes
from oldfile to x bytes from the diff block; copy y bytes from the
extra block; seek forwards in oldfile by z bytes".
*/
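/* Worked example (triple values invented for illustration): the control
   triple (4,2,3) means: read the next 4 bytes of the diff block and add them
   byte-wise to old[oldpos..oldpos+3] to produce 4 bytes of newfile; then copy
   2 bytes verbatim from the extra block; then seek 3 bytes forwards in
   oldfile before processing the next triple. */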
/* Read header */
if (fread(header, 1, 32, f) < 32) {
if (feof(f))
errx(1, "Corrupt patch\n");
err(1, "fread(%s)", argv[3]);
}
/* Check for appropriate magic */
if (memcmp(header, "BSDIFF40", 8) != 0)
errx(1, "Corrupt patch\n");
/* Read lengths from header */
bzctrllen=offtin(header+8);
bzdatalen=offtin(header+16);
newsize=offtin(header+24);
if((bzctrllen<0) || (bzdatalen<0) || (newsize<0))
errx(1,"Corrupt patch\n");
/* Close patch file and re-open it via libbzip2 at the right places */
if (fclose(f))
err(1, "fclose(%s)", argv[3]);
if ((cpf = fopen(argv[3], "r")) == NULL)
err(1, "fopen(%s)", argv[3]);
if (fseeko(cpf, 32, SEEK_SET))
err(1, "fseeko(%s, %lld)", argv[3],
(long long)32);
if ((cpfbz2 = BZ2_bzReadOpen(&cbz2err, cpf, 0, 0, NULL, 0)) == NULL)
errx(1, "BZ2_bzReadOpen, bz2err = %d", cbz2err);
if ((dpf = fopen(argv[3], "r")) == NULL)
err(1, "fopen(%s)", argv[3]);
if (fseeko(dpf, 32 + bzctrllen, SEEK_SET))
err(1, "fseeko(%s, %lld)", argv[3],
(long long)(32 + bzctrllen));
if ((dpfbz2 = BZ2_bzReadOpen(&dbz2err, dpf, 0, 0, NULL, 0)) == NULL)
errx(1, "BZ2_bzReadOpen, bz2err = %d", dbz2err);
if ((epf = fopen(argv[3], "r")) == NULL)
err(1, "fopen(%s)", argv[3]);
if (fseeko(epf, 32 + bzctrllen + bzdatalen, SEEK_SET))
err(1, "fseeko(%s, %lld)", argv[3],
(long long)(32 + bzctrllen + bzdatalen));
if ((epfbz2 = BZ2_bzReadOpen(&ebz2err, epf, 0, 0, NULL, 0)) == NULL)
errx(1, "BZ2_bzReadOpen, bz2err = %d", ebz2err);
if(((fd=open(argv[1],O_RDONLY,0))<0) ||
((oldsize=lseek(fd,0,SEEK_END))==-1) ||
((old=malloc(oldsize+1))==NULL) ||
(lseek(fd,0,SEEK_SET)!=0) ||
(read(fd,old,oldsize)!=oldsize) ||
(close(fd)==-1)) err(1,"%s",argv[1]);
if((new=malloc(newsize+1))==NULL) err(1,NULL);
oldpos=0;newpos=0;
while(newpos<newsize) {
/* Read control data */
for(i=0;i<=2;i++) {
lenread = BZ2_bzRead(&cbz2err, cpfbz2, buf, 8);
if ((lenread < 8) || ((cbz2err != BZ_OK) &&
(cbz2err != BZ_STREAM_END)))
errx(1, "Corrupt patch\n");
ctrl[i]=offtin(buf);
};
/* Sanity-check */
if(newpos+ctrl[0]>newsize)
errx(1,"Corrupt patch\n");
/* Read diff string */
lenread = BZ2_bzRead(&dbz2err, dpfbz2, new + newpos, ctrl[0]);
if ((lenread < ctrl[0]) ||
((dbz2err != BZ_OK) && (dbz2err != BZ_STREAM_END)))
errx(1, "Corrupt patch\n");
/* Add old data to diff string */
for(i=0;i<ctrl[0];i++)
if((oldpos+i>=0) && (oldpos+i<oldsize))
new[newpos+i]+=old[oldpos+i];
/* Adjust pointers */
newpos+=ctrl[0];
oldpos+=ctrl[0];
/* Sanity-check */
if(newpos+ctrl[1]>newsize)
errx(1,"Corrupt patch\n");
/* Read extra string */
lenread = BZ2_bzRead(&ebz2err, epfbz2, new + newpos, ctrl[1]);
if ((lenread < ctrl[1]) ||
((ebz2err != BZ_OK) && (ebz2err != BZ_STREAM_END)))
errx(1, "Corrupt patch\n");
/* Adjust pointers */
newpos+=ctrl[1];
oldpos+=ctrl[2];
};
/* Clean up the bzip2 reads */
BZ2_bzReadClose(&cbz2err, cpfbz2);
BZ2_bzReadClose(&dbz2err, dpfbz2);
BZ2_bzReadClose(&ebz2err, epfbz2);
if (fclose(cpf) || fclose(dpf) || fclose(epf))
err(1, "fclose(%s)", argv[3]);
/* Write the new file */
if((fd=open(argv[2],O_CREAT|O_TRUNC|O_WRONLY,0666))<0)
err(1,"%s",argv[2]);
writeFull(argv[2], fd, new, newsize);
if(close(fd)==-1)
err(1,"%s",argv[2]);
free(new);
free(old);
return 0;
}
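The 32-byte header layout documented in the comment above can be exercised on
its own. The following standalone sketch is not part of the removed sources;
it builds a synthetic header (field values invented for illustration) and
decodes the three length fields with the same sign-magnitude, little-endian
scheme that offtin() implements:

#include <stdio.h>
#include <string.h>

/* Decode an 8-byte field: bytes 0-6 plus the low 7 bits of byte 7 hold the
   magnitude (little-endian); the high bit of byte 7 is the sign. */
static long long decode64(const unsigned char *buf)
{
    long long y = buf[7] & 0x7F;
    for (int i = 6; i >= 0; i--)
        y = y * 256 + buf[i];
    return (buf[7] & 0x80) ? -y : y;
}

int main(void)
{
    unsigned char header[32] = { 0 };
    memcpy(header, "BSDIFF40", 8);
    header[8]  = 100;  /* X: length of the compressed control block */
    header[16] = 200;  /* Y: length of the compressed diff block    */
    header[24] = 50;   /* sizeof(newfile)                           */

    printf("ctrl=%lld diff=%lld newsize=%lld\n",
           decode64(header + 8), decode64(header + 16), decode64(header + 24));
    /* Prints: ctrl=100 diff=200 newsize=50 */
    return 0;
}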

View file

@ -1,12 +0,0 @@
/* Simulate BSD's <err.h> functionality. */
#ifndef COMPAT_ERR_H_INCLUDED
#define COMPAT_ERR_H_INCLUDED 1
#include <stdio.h>
#include <stdlib.h>
#define err(rc,...) do { fprintf(stderr,__VA_ARGS__); exit(rc); } while(0)
#define errx(rc,...) do { fprintf(stderr,__VA_ARGS__); exit(rc); } while(0)
#endif

View file

@ -1,11 +0,0 @@
programs += bsdiff bspatch
bsdiff_DIR := $(d)
bsdiff_SOURCES := $(d)/bsdiff.c
bsdiff_LDFLAGS = -lbz2 $(bsddiff_compat_include)
bsdiff_INSTALL_DIR = $(libexecdir)/nix
bspatch_DIR := $(d)
bspatch_SOURCES := $(d)/bspatch.c
bspatch_LDFLAGS = -lbz2 $(bsddiff_compat_include)
bspatch_INSTALL_DIR = $(libexecdir)/nix

View file

@ -194,7 +194,6 @@ void Settings::update()
if (getEnv("NIX_OTHER_STORES") != "")
substituters.push_back(nixLibexecDir + "/nix/substituters/copy-from-other-stores.pl");
#endif
substituters.push_back(nixLibexecDir + "/nix/substituters/download-using-manifests.pl");
substituters.push_back(nixLibexecDir + "/nix/substituters/download-from-binary-cache.pl");
if (useSshSubstituter && !sshSubstituterHosts.empty())
substituters.push_back(nixLibexecDir + "/nix/substituters/download-via-ssh");

View file

@ -1,7 +1,6 @@
source common.sh
clearStore
clearManifests
clearCache
# Create the binary cache.

View file

@ -1,18 +0,0 @@
{ version }:
with import ./config.nix;
mkDerivation {
name = "foo-${toString version}";
builder = builtins.toFile "builder.sh"
''
mkdir $out
(for ((n = 1; n < 100000; n++)); do echo $n; done) > $out/foo
${if version != 1 then ''
(for ((n = 100000; n < 110000; n++)); do echo $n; done) >> $out/foo
'' else ""}
${if version == 3 then ''
echo foobar >> $out/foo
'' else ""}
'';
}

View file

@ -1,61 +0,0 @@
source common.sh
clearManifests
mkdir -p $TEST_ROOT/cache2 $TEST_ROOT/patches
RESULT=$TEST_ROOT/result
# Build version 1 and 2 of the "foo" package.
nix-push --dest $TEST_ROOT/cache2 --manifest --bzip2 \
$(nix-build -o $RESULT binary-patching.nix --arg version 1)
mv $TEST_ROOT/cache2/MANIFEST $TEST_ROOT/manifest1
out2=$(nix-build -o $RESULT binary-patching.nix --arg version 2)
nix-push --dest $TEST_ROOT/cache2 --manifest --bzip2 $out2
mv $TEST_ROOT/cache2/MANIFEST $TEST_ROOT/manifest2
out3=$(nix-build -o $RESULT binary-patching.nix --arg version 3)
nix-push --dest $TEST_ROOT/cache2 --manifest --bzip2 $out3
mv $TEST_ROOT/cache2/MANIFEST $TEST_ROOT/manifest3
rm $RESULT
# Generate binary patches.
nix-generate-patches $TEST_ROOT/cache2 $TEST_ROOT/patches \
file://$TEST_ROOT/patches $TEST_ROOT/manifest1 $TEST_ROOT/manifest2
nix-generate-patches $TEST_ROOT/cache2 $TEST_ROOT/patches \
file://$TEST_ROOT/patches $TEST_ROOT/manifest2 $TEST_ROOT/manifest3
grep -q "patch {" $TEST_ROOT/manifest3
# Get rid of versions 2 and 3.
nix-store --delete $out2 $out3
# Pull the manifest containing the patches.
clearManifests
nix-pull file://$TEST_ROOT/manifest3
# Make sure that the download size prediction uses the patches rather
# than the full download.
nix-build -o $RESULT binary-patching.nix --arg version 3 --dry-run 2>&1 | grep -q "0.01 MiB"
# Now rebuild it. This should use the two patches generated above.
rm -f $TEST_ROOT/var/log/nix/downloads
nix-build -o $RESULT binary-patching.nix --arg version 3
rm $RESULT
[ "$(grep ' patch ' $TEST_ROOT/var/log/nix/downloads | wc -l)" -eq 2 ]
# Add a patch from version 1 directly to version 3.
nix-generate-patches $TEST_ROOT/cache2 $TEST_ROOT/patches \
file://$TEST_ROOT/patches $TEST_ROOT/manifest1 $TEST_ROOT/manifest3
# Rebuild version 3. This should use the direct patch rather than the
# sequence of two patches.
nix-store --delete $out2 $out3
clearManifests
rm $TEST_ROOT/var/log/nix/downloads
nix-pull file://$TEST_ROOT/manifest3
nix-build -o $RESULT binary-patching.nix --arg version 3
[ "$(grep ' patch ' $TEST_ROOT/var/log/nix/downloads | wc -l)" -eq 1 ]

View file

@ -54,10 +54,6 @@ clearStore() {
rm -f "$NIX_STATE_DIR"/gcroots/ref rm -f "$NIX_STATE_DIR"/gcroots/ref
} }
clearManifests() {
rm -f $NIX_STATE_DIR/manifests/*
}
clearCache() { clearCache() {
rm -rf "$cacheDir" rm -rf "$cacheDir"
} }

View file

@ -1,15 +1,14 @@
source common.sh
# Note: this test expects to be run *after* nix-push.sh.
drvPath=$(nix-instantiate ./dependencies.nix)
outPath=$(nix-store -q $drvPath)
outPath=$(nix-store -r $drvPath)
nix-push --dest $cacheDir $outPath
clearStore
clearProfiles
cat > $TEST_ROOT/foo.nixpkg <<EOF
NIXPKG1 file://$TEST_ROOT/cache/MANIFEST simple $system $drvPath $outPath
NIXPKG1 - simple $system $drvPath $outPath file://$cacheDir
EOF
nix-install-package --non-interactive -p $profiles/test $TEST_ROOT/foo.nixpkg

View file

@ -4,11 +4,11 @@ check:
nix_tests = \
init.sh hash.sh lang.sh add.sh simple.sh dependencies.sh \
build-hook.sh substitutes.sh substitutes2.sh \
fallback.sh nix-push.sh gc.sh gc-concurrent.sh nix-pull.sh \
fallback.sh nix-push.sh gc.sh gc-concurrent.sh \
referrers.sh user-envs.sh logging.sh nix-build.sh misc.sh fixed.sh \
gc-runtime.sh install-package.sh check-refs.sh filter-source.sh \
remote-store.sh export.sh export-graph.sh \
binary-patching.sh timeout.sh secure-drv-outputs.sh nix-channel.sh \
timeout.sh secure-drv-outputs.sh nix-channel.sh \
multiple-outputs.sh import-derivation.sh fetchurl.sh optimise-store.sh \
binary-cache.sh nix-profile.sh repair.sh dump-db.sh case-hack.sh \
check-reqs.sh pass-as-file.sh tarball.sh

View file

@ -1,7 +1,6 @@
source common.sh
clearProfiles
clearManifests
rm -f $TEST_ROOT/.nix-channels
@ -45,7 +44,6 @@ nix-env -i dependencies
clearProfiles
clearManifests
rm -f $TEST_ROOT/.nix-channels
# Test updating from a tarball

View file

@ -1,33 +0,0 @@
source common.sh
pullCache () {
echo "pulling cache..."
nix-pull file://$TEST_ROOT/cache/MANIFEST
}
clearStore
clearManifests
pullCache
drvPath=$(nix-instantiate dependencies.nix)
outPath=$(nix-store -q $drvPath)
echo "building $outPath using substitutes..."
nix-store -r $outPath
cat $outPath/input-2/bar
clearStore
clearManifests
pullCache
echo "building $drvPath using substitutes..."
nix-store -r $drvPath
cat $outPath/input-2/bar
# Check that the derivers are set properly.
test $(nix-store -q --deriver "$outPath") = "$drvPath"
nix-store -q --deriver $(readLink $outPath/input-2) | grep -q -- "-input-2.drv"
clearManifests

View file

@ -1,7 +1,6 @@
source common.sh
clearStore
clearManifests
startDaemon

View file

@ -5,7 +5,6 @@
source common.sh
clearStore
clearManifests
startDaemon