2007-03-30 16:24:35 +03:00
|
|
|
#include "config.h"
|
2006-11-30 19:43:04 +02:00
|
|
|
#include "local-store.hh"
|
2006-09-05 00:06:23 +03:00
|
|
|
#include "globals.hh"
|
|
|
|
#include "archive.hh"
|
|
|
|
#include "pathlocks.hh"
|
|
|
|
#include "aterm.hh"
|
|
|
|
#include "derivations-ast.hh"
|
2007-02-21 17:45:32 +02:00
|
|
|
#include "worker-protocol.hh"
|
2006-09-05 00:06:23 +03:00
|
|
|
|
2003-06-23 16:27:59 +03:00
|
|
|
#include <iostream>
|
2003-12-22 18:40:46 +02:00
|
|
|
#include <algorithm>
|
2003-06-23 16:27:59 +03:00
|
|
|
|
2005-01-19 18:39:47 +02:00
|
|
|
#include <sys/types.h>
|
|
|
|
#include <sys/stat.h>
|
2003-10-15 15:42:39 +03:00
|
|
|
#include <unistd.h>
|
2005-01-19 18:39:47 +02:00
|
|
|
#include <utime.h>
|
2008-06-09 16:52:45 +03:00
|
|
|
#include <fcntl.h>
|
2008-07-18 18:34:46 +03:00
|
|
|
#include <errno.h>
|
2003-06-23 16:27:59 +03:00
|
|
|
|
2006-11-30 20:35:36 +02:00
|
|
|
|
2006-09-05 00:06:23 +03:00
|
|
|
namespace nix {
|
2003-06-23 16:27:59 +03:00
|
|
|
|
2006-09-05 00:06:23 +03:00
|
|
|
|
2006-03-11 00:27:26 +02:00
|
|
|
void checkStoreNotSymlink()
|
|
|
|
{
|
|
|
|
if (getEnv("NIX_IGNORE_SYMLINK_STORE") == "1") return;
|
|
|
|
Path path = nixStore;
|
|
|
|
struct stat st;
|
|
|
|
while (path != "/") {
|
|
|
|
if (lstat(path.c_str(), &st))
|
|
|
|
throw SysError(format("getting status of `%1%'") % path);
|
|
|
|
if (S_ISLNK(st.st_mode))
|
|
|
|
throw Error(format(
|
|
|
|
"the path `%1%' is a symlink; "
|
|
|
|
"this is not allowed for the Nix store and its parent directories")
|
|
|
|
% path);
|
|
|
|
path = dirOf(path);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-06-09 16:52:45 +03:00
|
|
|
/* Open the local Nix store: take the global store lock, create the
   database directory layout, and check/initialise the schema.  In
   read-only mode (requested, or forced when the lock file is not
   writable) all setup is skipped. */
LocalStore::LocalStore()
{
    substitutablePathsLoaded = false;

    schemaPath = nixDBPath + "/schema";

    if (readOnlyMode) return;

    createDirs(nixStore);

    checkStoreNotSymlink();

    /* Acquire the big store lock.  If we lack permission to open the
       lock file, fall back to read-only mode rather than failing. */
    try {
        Path globalLockPath = nixDBPath + "/big-lock";
        globalLock = openLockFile(globalLockPath.c_str(), true);
    } catch (SysError & e) {
        if (e.errNo != EACCES) throw;
        readOnlyMode = true;
        return;
    }

    /* Non-blocking attempt first so we can print a message before
       blocking on the lock. */
    if (!lockFile(globalLock, ltRead, false)) {
        printMsg(lvlError, "waiting for the big Nix store lock...");
        lockFile(globalLock, ltRead, true);
    }

    /* Ensure the per-path metadata directories exist. */
    createDirs(nixDBPath + "/info");
    createDirs(nixDBPath + "/referrer");
    createDirs(nixDBPath + "/failed");

    /* Schema handling: 0 means a brand-new store; newer than we
       support is fatal; older triggers an upgrade. */
    int curSchema = getSchema();
    if (curSchema > nixSchemaVersion)
        throw Error(format("current Nix store schema is version %1%, but I only support %2%")
            % curSchema % nixSchemaVersion);
    if (curSchema == 0) { /* new store */
        curSchema = nixSchemaVersion;
        writeFile(schemaPath, (format("%1%") % nixSchemaVersion).str());
    }
    if (curSchema == 1) throw Error("your Nix store is no longer supported");
    if (curSchema < nixSchemaVersion) upgradeStore12();
}
|
|
|
|
|
|
|
|
|
2006-11-30 19:43:04 +02:00
|
|
|
/* Close the store: flush pending referrer-file updates and shut down
   any running substituter child processes.  All errors are swallowed
   since destructors must not throw. */
LocalStore::~LocalStore()
{
    try {
        flushDelayedUpdates();

        /* Closing both pipe ends signals EOF to each substituter;
           then reap the child. */
        foreach (RunningSubstituters::iterator, i, runningSubstituters) {
            i->second.to.close();
            i->second.from.close();
            i->second.pid.wait(true);
        }

    } catch (...) {
        ignoreException();
    }
}
|
|
|
|
|
|
|
|
|
2008-06-09 16:52:45 +03:00
|
|
|
int LocalStore::getSchema()
|
2003-10-15 15:42:39 +03:00
|
|
|
{
|
2008-06-09 16:52:45 +03:00
|
|
|
int curSchema = 0;
|
|
|
|
if (pathExists(schemaPath)) {
|
|
|
|
string s = readFile(schemaPath);
|
|
|
|
if (!string2Int(s, curSchema))
|
|
|
|
throw Error(format("`%1%' is corrupt") % schemaPath);
|
|
|
|
}
|
|
|
|
return curSchema;
|
2003-10-15 15:42:39 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-06-09 16:52:45 +03:00
|
|
|
/* Canonicalise the metadata of `path' (and, if `recurse' is set,
   everything below it): take ownership, normalise permissions to
   read-only (preserving the execute bit), and zero the modification
   time, so store contents are reproducible regardless of builder. */
void canonicalisePathMetaData(const Path & path, bool recurse)
{
    checkInterrupt();

    struct stat st;
    if (lstat(path.c_str(), &st))
        throw SysError(format("getting attributes of path `%1%'") % path);

    /* Change ownership to the current uid.  If it's a symlink, use
       lchown if available, otherwise don't bother.  Wrong ownership
       of a symlink doesn't matter, since the owning user can't change
       the symlink and can't delete it because the directory is not
       writable.  The only exception is top-level paths in the Nix
       store (since that directory is group-writable for the Nix build
       users group); we check for this case below. */
    if (st.st_uid != geteuid()) {
#if HAVE_LCHOWN
        if (lchown(path.c_str(), geteuid(), (gid_t) -1) == -1)
#else
        if (!S_ISLNK(st.st_mode) &&
            chown(path.c_str(), geteuid(), (gid_t) -1) == -1)
#endif
            throw SysError(format("changing owner of `%1%' to %2%")
                % path % geteuid());
    }

    /* Permissions and timestamps cannot be set on symlinks. */
    if (!S_ISLNK(st.st_mode)) {

        /* Mask out all type related bits. */
        mode_t mode = st.st_mode & ~S_IFMT;

        /* Already canonical modes are 0444 and 0555; otherwise force
           read-only, keeping execute for all if owner-execute was set. */
        if (mode != 0444 && mode != 0555) {
            mode = (st.st_mode & S_IFMT)
                | 0444
                | (st.st_mode & S_IXUSR ? 0111 : 0);
            if (chmod(path.c_str(), mode) == -1)
                throw SysError(format("changing mode of `%1%' to %2$o") % path % mode);
        }

        /* Reset mtime to the epoch for reproducibility (skip if it
           is already 0). */
        if (st.st_mtime != 0) {
            struct utimbuf utimbuf;
            utimbuf.actime = st.st_atime;
            utimbuf.modtime = 0;
            if (utime(path.c_str(), &utimbuf) == -1)
                throw SysError(format("changing modification time of `%1%'") % path);
        }

    }

    /* Recurse into directory contents. */
    if (recurse && S_ISDIR(st.st_mode)) {
        Strings names = readDirectory(path);
        for (Strings::iterator i = names.begin(); i != names.end(); ++i)
            canonicalisePathMetaData(path + "/" + *i, true);
    }
}
|
|
|
|
|
|
|
|
|
|
|
|
void canonicalisePathMetaData(const Path & path)
|
|
|
|
{
|
2008-06-09 16:52:45 +03:00
|
|
|
canonicalisePathMetaData(path, true);
|
2006-12-09 22:02:27 +02:00
|
|
|
|
|
|
|
/* On platforms that don't have lchown(), the top-level path can't
|
|
|
|
be a symlink, since we can't change its ownership. */
|
|
|
|
struct stat st;
|
|
|
|
if (lstat(path.c_str(), &st))
|
|
|
|
throw SysError(format("getting attributes of path `%1%'") % path);
|
|
|
|
|
|
|
|
if (st.st_uid != geteuid()) {
|
|
|
|
assert(S_ISLNK(st.st_mode));
|
|
|
|
throw Error(format("wrong ownership of top-level store path `%1%'") % path);
|
2005-01-19 18:39:47 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-06-09 16:52:45 +03:00
|
|
|
/* Location of the metadata ("info") file for a store path. */
static Path infoFileFor(const Path & path)
{
    return (format("%1%/info/%2%") % nixDBPath % baseNameOf(path)).str();
}
|
|
|
|
|
|
|
|
|
2008-06-09 16:52:45 +03:00
|
|
|
/* Location of the referrers file for a store path. */
static Path referrersFileFor(const Path & path)
{
    return (format("%1%/referrer/%2%") % nixDBPath % baseNameOf(path)).str();
}
|
|
|
|
|
|
|
|
|
2009-03-25 23:05:42 +02:00
|
|
|
/* Location of the build-failure marker file for a store path. */
static Path failedFileFor(const Path & path)
{
    return (format("%1%/failed/%2%") % nixDBPath % baseNameOf(path)).str();
}
|
|
|
|
|
|
|
|
|
2008-06-09 16:52:45 +03:00
|
|
|
/* Name of a temporary file, in the same directory as `path', used to
   replace `path' atomically via rename(2).  The pid makes the name
   unique per process. */
static Path tmpFileForAtomicUpdate(const Path & path)
{
    Path dir = dirOf(path);
    string base = baseNameOf(path);
    return (format("%1%/.%2%.%3%") % dir % getpid() % base).str();
}
|
|
|
|
|
|
|
|
|
2008-06-09 16:52:45 +03:00
|
|
|
/* Append `to' to the referrers file of `from'.  If `lock' is set, a
   lock (marked for deletion) is taken on the referrers file first;
   callers that already hold the necessary lock pass false. */
static void appendReferrer(const Path & from, const Path & to, bool lock)
{
    Path referrersFile = referrersFileFor(from);

    PathLocks referrersLock;
    if (lock) {
        referrersLock.lockPaths(singleton<PathSet, Path>(referrersFile));
        referrersLock.setDeletion(true);
    }

    AutoCloseFD fd = open(referrersFile.c_str(), O_WRONLY | O_APPEND | O_CREAT, 0666);
    if (fd == -1) throw SysError(format("opening file `%1%'") % referrersFile);

    /* The separator space is *prefixed* so that an interrupted
       partial write cannot merge with the next entry. */
    string s = " " + to;
    writeFull(fd, (const unsigned char *) s.c_str(), s.size());
}
|
|
|
|
|
|
|
|
|
2008-06-09 16:52:45 +03:00
|
|
|
/* Atomically rewrite the referrers file of `path'.  If `purge' is
   true, the current referrers are re-read via queryReferrers() —
   which filters out invalid paths — and merged into `referrers';
   the file is then rewritten to contain exactly that set. */
void LocalStore::rewriteReferrers(const Path & path, bool purge, PathSet referrers)
{
    Path referrersFile = referrersFileFor(path);

    /* Exclude concurrent appenders while we rewrite. */
    PathLocks referrersLock(singleton<PathSet, Path>(referrersFile));
    referrersLock.setDeletion(true);

    if (purge)
        /* queryReferrers() purges invalid paths, so that's all we
           need. */
        queryReferrers(path, referrers);

    Path tmpFile = tmpFileForAtomicUpdate(referrersFile);

    AutoCloseFD fd = open(tmpFile.c_str(), O_WRONLY | O_TRUNC | O_CREAT, 0666);
    if (fd == -1) throw SysError(format("opening file `%1%'") % referrersFile);

    /* Same " "-prefixed entry format as appendReferrer(). */
    string s;
    foreach (PathSet::const_iterator, i, referrers) {
        s += " "; s += *i;
    }

    writeFull(fd, (const unsigned char *) s.c_str(), s.size());

    fd.close(); /* for Windows; can't rename open file */

    /* rename(2) makes the replacement atomic w.r.t. readers. */
    if (rename(tmpFile.c_str(), referrersFile.c_str()) == -1)
        throw SysError(format("cannot rename `%1%' to `%2%'") % tmpFile % referrersFile);
}
|
|
|
|
|
|
|
|
|
2008-06-09 16:52:45 +03:00
|
|
|
void LocalStore::flushDelayedUpdates()
|
2005-01-27 18:18:39 +02:00
|
|
|
{
|
2008-06-09 16:52:45 +03:00
|
|
|
foreach (PathSet::iterator, i, delayedUpdates) {
|
|
|
|
rewriteReferrers(*i, true, PathSet());
|
|
|
|
}
|
|
|
|
delayedUpdates.clear();
|
2005-01-27 18:18:39 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-06-09 16:52:45 +03:00
|
|
|
void LocalStore::registerValidPath(const Path & path,
|
|
|
|
const Hash & hash, const PathSet & references,
|
|
|
|
const Path & deriver)
|
|
|
|
{
|
|
|
|
ValidPathInfo info;
|
|
|
|
info.path = path;
|
|
|
|
info.hash = hash;
|
|
|
|
info.references = references;
|
|
|
|
info.deriver = deriver;
|
|
|
|
registerValidPath(info);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Register `info.path' as valid by writing its info file.  Each
   referenced path must itself be valid (unless `ignoreValidity'),
   and its referrers file is updated to point back at `info.path'. */
void LocalStore::registerValidPath(const ValidPathInfo & info, bool ignoreValidity)
{
    Path infoFile = infoFileFor(info.path);

    ValidPathInfo oldInfo;
    if (pathExists(infoFile)) oldInfo = queryPathInfo(info.path);

    /* Note that it's possible for infoFile to already exist. */

    /* Acquire a lock on each referrer file.  This prevents those
       paths from being invalidated.  (It would be a violation of the
       store invariants if we registered info.path as valid while some
       of its references are invalid.)  NB: there can be no deadlock
       here since we're acquiring the locks in sorted order. */
    PathSet lockNames;
    foreach (PathSet::const_iterator, i, info.references)
        if (*i != info.path) lockNames.insert(referrersFileFor(*i));
    PathLocks referrerLocks(lockNames);
    referrerLocks.setDeletion(true);

    /* Build the space-separated references list while validating and
       updating referrer files in one pass. */
    string refs;
    foreach (PathSet::const_iterator, i, info.references) {
        if (!refs.empty()) refs += " ";
        refs += *i;

        if (!ignoreValidity && *i != info.path && !isValidPath(*i))
            throw Error(format("cannot register `%1%' as valid, because its reference `%2%' isn't valid")
                % info.path % *i);

        /* Update the referrer mapping for *i.  This must be done
           before the info file is written to maintain the invariant
           that if `path' is a valid path, then all its references
           have referrer mappings back to `path'.  A " " is prefixed
           to separate it from the previous entry.  It's not suffixed
           to deal with interrupted partial writes to this file. */
        if (oldInfo.references.find(*i) == oldInfo.references.end())
            appendReferrer(*i, info.path, false);
    }

    /* Only SHA-256 content hashes are stored. */
    assert(info.hash.type == htSHA256);

    /* Preserve the original registration time on re-registration. */
    string s = (format(
        "Hash: sha256:%1%\n"
        "References: %2%\n"
        "Deriver: %3%\n"
        "Registered-At: %4%\n")
        % printHash(info.hash) % refs % info.deriver %
        (oldInfo.registrationTime ? oldInfo.registrationTime : time(0))).str();

    /* Atomically rewrite the info file. */
    Path tmpFile = tmpFileForAtomicUpdate(infoFile);
    writeFile(tmpFile, s);
    if (rename(tmpFile.c_str(), infoFile.c_str()) == -1)
        throw SysError(format("cannot rename `%1%' to `%2%'") % tmpFile % infoFile);

    pathInfoCache[info.path] = info;
}
|
|
|
|
|
|
|
|
|
2009-03-25 23:05:42 +02:00
|
|
|
void LocalStore::registerFailedPath(const Path & path)
|
|
|
|
{
|
|
|
|
/* Write an empty file in the .../failed directory to denote the
|
|
|
|
failure of the builder for `path'. */
|
|
|
|
writeFile(failedFileFor(path), "");
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
bool LocalStore::hasPathFailed(const Path & path)
|
|
|
|
{
|
|
|
|
return pathExists(failedFileFor(path));
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-06-09 16:52:45 +03:00
|
|
|
/* Parse a "type:hash" field from a path info file.  `path' is used
   only in error messages. */
Hash parseHashField(const Path & path, const string & s)
{
    string::size_type sep = s.find(':');
    if (sep == string::npos)
        throw Error(format("corrupt hash `%1%' in valid-path entry for `%2%'")
            % s % path);
    string typeName(s, 0, sep);
    string hashPart(s, sep + 1);
    HashType ht = parseHashType(typeName);
    if (ht == htUnknown)
        throw Error(format("unknown hash type `%1%' in valid-path entry for `%2%'")
            % typeName % path);
    return parseHash(ht, hashPart);
}
|
|
|
|
|
|
|
|
|
2008-12-16 15:28:18 +02:00
|
|
|
/* Return the metadata of valid path `path', from the in-memory cache
   when possible, otherwise by parsing its info file.  If
   `ignoreErrors' is set, a corrupt Hash field is reported but does
   not abort the lookup. */
ValidPathInfo LocalStore::queryPathInfo(const Path & path, bool ignoreErrors)
{
    ValidPathInfo res;
    res.path = path;

    assertStorePath(path);

    std::map<Path, ValidPathInfo>::iterator lookup = pathInfoCache.find(path);
    if (lookup != pathInfoCache.end()) return lookup->second;

    /* Read the info file. */
    Path infoFile = infoFileFor(path);
    if (!pathExists(infoFile))
        throw Error(format("path `%1%' is not valid") % path);
    string info = readFile(infoFile);

    /* Parse it.  Each line has the form "Name: value". */
    Strings lines = tokenizeString(info, "\n");

    for (Strings::iterator i = lines.begin(); i != lines.end(); ++i) {
        string::size_type p = i->find(':');
        if (p == string::npos) continue; /* bad line */
        string name(*i, 0, p);
        /* p + 2 skips the ": " separator. */
        string value(*i, p + 2);
        if (name == "References") {
            Strings refs = tokenizeString(value, " ");
            res.references = PathSet(refs.begin(), refs.end());
        } else if (name == "Deriver") {
            res.deriver = value;
        } else if (name == "Hash") {
            try {
                res.hash = parseHashField(path, value);
            } catch (Error & e) {
                if (!ignoreErrors) throw;
                printMsg(lvlError, format("cannot parse hash field in `%1%': %2%") % infoFile % e.msg());
            }
        } else if (name == "Registered-At") {
            int n = 0;
            string2Int(value, n);
            res.registrationTime = n;
        }
    }

    /* Cache and return the parsed record. */
    return pathInfoCache[path] = res;
}
|
|
|
|
|
|
|
|
|
2008-06-09 16:52:45 +03:00
|
|
|
bool LocalStore::isValidPath(const Path & path)
|
2005-01-19 18:59:56 +02:00
|
|
|
{
|
2008-11-19 18:27:07 +02:00
|
|
|
/* Files in the info directory starting with a `.' are temporary
|
|
|
|
files. */
|
|
|
|
if (baseNameOf(path).at(0) == '.') return false;
|
2008-06-09 16:52:45 +03:00
|
|
|
return pathExists(infoFileFor(path));
|
2005-01-19 18:59:56 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-06-09 16:52:45 +03:00
|
|
|
/* Enumerate all valid store paths by listing the info directory,
   skipping temporary (dot-prefixed) files. */
PathSet LocalStore::queryValidPaths()
{
    Strings entries = readDirectory(nixDBPath + "/info");
    PathSet result;
    foreach (Strings::iterator, e, entries)
        if (e->at(0) != '.') result.insert(nixStore + "/" + *e);
    return result;
}
|
|
|
|
|
|
|
|
|
2008-06-09 16:52:45 +03:00
|
|
|
void LocalStore::queryReferences(const Path & path,
|
|
|
|
PathSet & references)
|
2005-02-07 15:40:40 +02:00
|
|
|
{
|
2008-06-09 16:52:45 +03:00
|
|
|
ValidPathInfo info = queryPathInfo(path);
|
|
|
|
references.insert(info.references.begin(), info.references.end());
|
2005-02-07 15:40:40 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-06-09 16:52:45 +03:00
|
|
|
/* Collect the valid referrers of `path' into `referrers'.  Returns
   false if the referrers file also contained stale/invalid entries
   (which are silently skipped). */
bool LocalStore::queryReferrersInternal(const Path & path, PathSet & referrers)
{
    bool allValid = true;

    if (!isValidPath(path))
        throw Error(format("path `%1%' is not valid") % path);

    /* No locking is necessary here: updates are only done by
       appending or by atomically replacing the file.  When appending,
       there is a possibility that we see a partial entry, but it will
       just be filtered out below (the partially written path will not
       be valid, so it will be ignored). */

    Path referrersFile = referrersFileFor(path);
    if (!pathExists(referrersFile)) return true;

    AutoCloseFD fd = open(referrersFile.c_str(), O_RDONLY);
    if (fd == -1) throw SysError(format("opening file `%1%'") % referrersFile);

    /* Entries are space-separated (see appendReferrer()). */
    Paths refs = tokenizeString(readFile(fd), " ");

    for (Paths::iterator i = refs.begin(); i != refs.end(); ++i)
        /* Referrers can be invalid (see registerValidPath() for the
           invariant), so we only return one if it is valid. */
        if (isStorePath(*i) && isValidPath(*i)) referrers.insert(*i); else allValid = false;

    return allValid;
}
|
|
|
|
|
|
|
|
|
|
|
|
void LocalStore::queryReferrers(const Path & path, PathSet & referrers)
|
|
|
|
{
|
|
|
|
queryReferrersInternal(path, referrers);
|
2005-02-07 15:40:40 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-06-12 19:53:44 +03:00
|
|
|
/* The deriver (derivation path) recorded for `path', possibly "". */
Path LocalStore::queryDeriver(const Path & path)
{
    ValidPathInfo info = queryPathInfo(path);
    return info.deriver;
}
|
|
|
|
|
|
|
|
|
2008-08-02 15:54:35 +03:00
|
|
|
/* Start the external substituter program `substituter' in --query
   mode (if not already running), connected to us via a pipe in each
   direction. */
void LocalStore::startSubstituter(const Path & substituter, RunningSubstituter & run)
{
    /* Already running? */
    if (run.pid != -1) return;

    debug(format("starting substituter program `%1%'") % substituter);

    Pipe toPipe, fromPipe;

    toPipe.create();
    fromPipe.create();

    run.pid = fork();

    switch (run.pid) {

    case -1:
        throw SysError("unable to fork");

    case 0: /* child */
        try {
            /* Close the parent's ends and wire ours onto the child's
               stdin/stdout before exec'ing. */
            fromPipe.readSide.close();
            toPipe.writeSide.close();
            if (dup2(toPipe.readSide, STDIN_FILENO) == -1)
                throw SysError("dupping stdin");
            if (dup2(fromPipe.writeSide, STDOUT_FILENO) == -1)
                throw SysError("dupping stdout");
            closeMostFDs(set<int>());
            execl(substituter.c_str(), substituter.c_str(), "--query", NULL);
            /* Only reached if execl() failed. */
            throw SysError(format("executing `%1%'") % substituter);
        } catch (std::exception & e) {
            std::cerr << "error: " << e.what() << std::endl;
        }
        /* Exit without running destructors/atexit handlers. */
        quickExit(1);
    }

    /* Parent. */

    run.to = toPipe.writeSide.borrow();
    run.from = fromPipe.readSide.borrow();
}
|
|
|
|
|
|
|
|
|
2009-03-28 21:41:53 +02:00
|
|
|
template<class T> T getIntLine(int fd)
|
2008-08-05 13:57:53 +03:00
|
|
|
{
|
2009-03-28 21:41:53 +02:00
|
|
|
string s = readLine(fd);
|
2008-08-05 13:57:53 +03:00
|
|
|
T res;
|
2009-03-28 21:41:53 +02:00
|
|
|
if (!string2Int(s, res)) throw Error("integer expected from stream");
|
2008-08-05 13:57:53 +03:00
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-08-12 03:29:28 +03:00
|
|
|
bool LocalStore::hasSubstitutes(const Path & path)
|
2004-06-20 22:17:54 +03:00
|
|
|
{
|
2008-08-02 15:54:35 +03:00
|
|
|
foreach (Paths::iterator, i, substituters) {
|
|
|
|
RunningSubstituter & run(runningSubstituters[*i]);
|
|
|
|
startSubstituter(*i, run);
|
2009-03-28 21:41:53 +02:00
|
|
|
writeLine(run.to, "have\n" + path);
|
|
|
|
if (getIntLine<int>(run.from)) return true;
|
2008-08-02 15:54:35 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-08-04 16:15:35 +03:00
|
|
|
/* Ask substituter `substituter' for info about `path'.  Wire
   protocol: we send "info\n<path>"; the reply is an availability
   flag, the deriver, the number of references followed by that many
   reference lines, and the download size.  Returns false if the
   substituter doesn't know the path. */
bool LocalStore::querySubstitutablePathInfo(const Path & substituter,
    const Path & path, SubstitutablePathInfo & info)
{
    RunningSubstituter & run(runningSubstituters[substituter]);
    startSubstituter(substituter, run);

    writeLine(run.to, "info\n" + path);

    if (!getIntLine<int>(run.from)) return false;

    info.deriver = readLine(run.from);
    if (info.deriver != "") assertStorePath(info.deriver);
    int nrRefs = getIntLine<int>(run.from);
    while (nrRefs--) {
        Path p = readLine(run.from);
        assertStorePath(p);
        info.references.insert(p);
    }
    info.downloadSize = getIntLine<long long>(run.from);

    return true;
}
|
|
|
|
|
|
|
|
|
|
|
|
bool LocalStore::querySubstitutablePathInfo(const Path & path,
|
|
|
|
SubstitutablePathInfo & info)
|
|
|
|
{
|
|
|
|
foreach (Paths::iterator, i, substituters)
|
|
|
|
if (querySubstitutablePathInfo(*i, path, info)) return true;
|
2008-08-02 15:54:35 +03:00
|
|
|
return false;
|
2004-12-20 15:43:32 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-06-09 16:52:45 +03:00
|
|
|
/* The recorded content hash of valid path `path'. */
Hash LocalStore::queryPathHash(const Path & path)
{
    ValidPathInfo info = queryPathInfo(path);
    return info.hash;
}
|
|
|
|
|
|
|
|
|
2008-06-09 16:52:45 +03:00
|
|
|
static void dfsVisit(std::map<Path, ValidPathInfo> & infos,
|
|
|
|
const Path & path, PathSet & visited, Paths & sorted)
|
2005-03-02 17:57:06 +02:00
|
|
|
{
|
2008-06-09 16:52:45 +03:00
|
|
|
if (visited.find(path) != visited.end()) return;
|
|
|
|
visited.insert(path);
|
2005-03-02 17:57:06 +02:00
|
|
|
|
2008-06-09 16:52:45 +03:00
|
|
|
ValidPathInfo & info(infos[path]);
|
|
|
|
|
|
|
|
for (PathSet::iterator i = info.references.begin();
|
|
|
|
i != info.references.end(); ++i)
|
|
|
|
if (infos.find(*i) != infos.end())
|
|
|
|
dfsVisit(infos, *i, visited, sorted);
|
2005-03-02 17:57:06 +02:00
|
|
|
|
2008-06-09 16:52:45 +03:00
|
|
|
sorted.push_back(path);
|
2005-03-23 15:07:28 +02:00
|
|
|
}
|
2005-01-19 18:39:47 +02:00
|
|
|
|
|
|
|
|
2008-06-09 16:52:45 +03:00
|
|
|
void LocalStore::registerValidPaths(const ValidPathInfos & infos)
|
2005-03-23 15:07:28 +02:00
|
|
|
{
|
2008-06-09 16:52:45 +03:00
|
|
|
std::map<Path, ValidPathInfo> infosMap;
|
|
|
|
|
|
|
|
/* Sort the paths topologically under the references relation, so
|
|
|
|
that if path A is referenced by B, then A is registered before
|
|
|
|
B. */
|
|
|
|
for (ValidPathInfos::const_iterator i = infos.begin(); i != infos.end(); ++i)
|
|
|
|
infosMap[i->path] = *i;
|
2005-03-23 15:07:28 +02:00
|
|
|
|
2008-06-09 16:52:45 +03:00
|
|
|
PathSet visited;
|
|
|
|
Paths sorted;
|
|
|
|
for (ValidPathInfos::const_iterator i = infos.begin(); i != infos.end(); ++i)
|
|
|
|
dfsVisit(infosMap, i->path, visited, sorted);
|
2005-02-07 15:40:40 +02:00
|
|
|
|
2008-06-09 16:52:45 +03:00
|
|
|
for (Paths::iterator i = sorted.begin(); i != sorted.end(); ++i)
|
|
|
|
registerValidPath(infosMap[*i]);
|
2003-10-08 18:06:59 +03:00
|
|
|
}
|
2003-07-07 12:25:26 +03:00
|
|
|
|
2003-07-31 19:05:35 +03:00
|
|
|
|
2005-01-31 16:00:43 +02:00
|
|
|
/* Invalidate a path.  The caller is responsible for checking that
   there are no referrers. */
void LocalStore::invalidatePath(const Path & path)
{
    debug(format("invalidating path `%1%'") % path);

    ValidPathInfo info;

    if (pathExists(infoFileFor(path))) {
        info = queryPathInfo(path);

        /* Remove the info file. */
        Path p = infoFileFor(path);
        if (unlink(p.c_str()) == -1)
            throw SysError(format("unlinking `%1%'") % p);
    }

    /* Remove the referrers file for `path'. */
    Path p = referrersFileFor(path);
    if (pathExists(p) && unlink(p.c_str()) == -1)
        throw SysError(format("unlinking `%1%'") % p);

    /* Clear `path' from the info cache. */
    pathInfoCache.erase(path);
    delayedUpdates.erase(path);

    /* Cause the referrer files for each path referenced by this one
       to be updated.  This has to happen after removing the info file
       to preserve the invariant (see registerValidPath()).

       The referrer files are updated lazily in flushDelayedUpdates()
       to prevent quadratic performance in the garbage collector
       (i.e., when N referrers to some path X are deleted, we have to
       rewrite the referrers file for X N times, causing O(N^2) I/O).

       What happens if we die before the referrer file can be updated?
       That's not a problem, because stale (invalid) entries in the
       referrer file are ignored by queryReferrers().  Thus a referrer
       file is allowed to have stale entries; removing them is just an
       optimisation.  verifyStore() gets rid of them eventually.
    */
    foreach (PathSet::iterator, i, info.references)
        if (*i != path) delayedUpdates.insert(*i);
}
|
|
|
|
|
|
|
|
|
2008-12-03 20:05:14 +02:00
|
|
|
/* Add the serialisation `dump' (a NAR dump if `recursive', otherwise
   flat file contents) to the store, under the fixed-output path
   derived from `name', `hashAlgo' and the hash of `dump'. */
Path LocalStore::addToStoreFromDump(const string & dump, const string & name,
    bool recursive, HashType hashAlgo)
{
    Hash h = hashString(hashAlgo, dump);

    Path dstPath = makeFixedOutputPath(recursive, hashAlgo, h, name);

    /* Protect the destination from the garbage collector while we
       work on it. */
    addTempRoot(dstPath);

    if (!isValidPath(dstPath)) {

        /* The first check above is an optimisation to prevent
           unnecessary lock acquisition. */

        PathLocks outputLock(singleton<PathSet, Path>(dstPath));

        /* Re-check under the lock: another process may have created
           the path while we were waiting. */
        if (!isValidPath(dstPath)) {

            /* Remove any leftover (e.g. from a crashed build). */
            if (pathExists(dstPath)) deletePathWrapped(dstPath);

            if (recursive) {
                StringSource source(dump);
                restorePath(dstPath, source);
            } else
                writeStringToFile(dstPath, dump);

            canonicalisePathMetaData(dstPath);

            /* Register the SHA-256 hash of the NAR serialisation of
               the path in the database.  We may just have computed it
               above (if called with recursive == true and hashAlgo ==
               sha256); otherwise, compute it here. */
            registerValidPath(dstPath,
                (recursive && hashAlgo == htSHA256) ? h :
                (recursive ? hashString(htSHA256, dump) : hashPath(htSHA256, dstPath)),
                PathSet(), "");
        }

        outputLock.setDeletion(true);
    }

    return dstPath;
}
|
|
|
|
|
|
|
|
|
2008-12-03 20:05:14 +02:00
|
|
|
/* Copy `_srcPath' into the store.  The contents are first serialised
   entirely into memory (a NAR dump for recursive mode, the raw file
   contents otherwise), so this does not scale to very large paths. */
Path LocalStore::addToStore(const Path & _srcPath,
    bool recursive, HashType hashAlgo, PathFilter & filter)
{
    Path srcPath(absPath(_srcPath));
    debug(format("adding `%1%' to the store") % srcPath);

    StringSink sink;
    if (!recursive)
        sink.s = readFile(srcPath);
    else
        dumpPath(srcPath, sink, filter);

    return addToStoreFromDump(sink.s, baseNameOf(srcPath), recursive, hashAlgo);
}
|
|
|
|
|
|
|
|
|
2008-12-03 17:06:30 +02:00
|
|
|
/* Add the text `s' to the store as a regular file named `name', with
   the given references registered.  The destination is a text
   fixed-output path computed from the contents. */
Path LocalStore::addTextToStore(const string & name, const string & s,
    const PathSet & references)
{
    Path dstPath = computeStorePathForText(name, s, references);

    /* Protect the destination from the garbage collector while we
       work on it. */
    addTempRoot(dstPath);

    if (!isValidPath(dstPath)) {

        PathLocks outputLock(singleton<PathSet, Path>(dstPath));

        /* Re-check under the lock: another process may have created
           the path while we were waiting. */
        if (!isValidPath(dstPath)) {

            /* Remove any leftover (e.g. from a crashed build). */
            if (pathExists(dstPath)) deletePathWrapped(dstPath);

            writeStringToFile(dstPath, s);

            canonicalisePathMetaData(dstPath);

            registerValidPath(dstPath,
                hashPath(htSHA256, dstPath), references, "");
        }

        outputLock.setDeletion(true);
    }

    return dstPath;
}
|
|
|
|
|
|
|
|
|
2007-02-21 16:31:42 +02:00
|
|
|
struct HashAndWriteSink : Sink
|
|
|
|
{
|
|
|
|
Sink & writeSink;
|
|
|
|
HashSink hashSink;
|
|
|
|
bool hashing;
|
|
|
|
HashAndWriteSink(Sink & writeSink) : writeSink(writeSink), hashSink(htSHA256)
|
|
|
|
{
|
|
|
|
hashing = true;
|
|
|
|
}
|
|
|
|
virtual void operator ()
|
|
|
|
(const unsigned char * data, unsigned int len)
|
|
|
|
{
|
|
|
|
writeSink(data, len);
|
|
|
|
if (hashing) hashSink(data, len);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
#define EXPORT_MAGIC 0x4558494e
|
|
|
|
|
|
|
|
|
2007-02-21 19:51:10 +02:00
|
|
|
static void checkSecrecy(const Path & path)
|
|
|
|
{
|
|
|
|
struct stat st;
|
|
|
|
if (stat(path.c_str(), &st))
|
|
|
|
throw SysError(format("getting status of `%1%'") % path);
|
|
|
|
if ((st.st_mode & (S_IRWXG | S_IRWXO)) != 0)
|
|
|
|
throw Error(format("file `%1%' should be secret (inaccessible to everybody else)!") % path);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-02-21 01:17:20 +02:00
|
|
|
/* Serialise the store path `path' to `sink' in the export format:
   NAR dump, EXPORT_MAGIC, the store path itself, its references and
   its deriver, followed by either a `1' and an RSA signature over
   the SHA-256 hash of everything written so far (if `sign'), or a
   `0'. */
void LocalStore::exportPath(const Path & path, bool sign,
    Sink & sink)
{
    assertStorePath(path);

    /* Make sure the path can't be deleted from under us while we're
       exporting it. */
    addTempRoot(path);
    if (!isValidPath(path))
        throw Error(format("path `%1%' is not valid") % path);

    /* Everything written below goes through this tee so that the
       hash covers the complete archive up to the signature. */
    HashAndWriteSink hashAndWriteSink(sink);
    
    dumpPath(path, hashAndWriteSink);

    writeInt(EXPORT_MAGIC, hashAndWriteSink);

    writeString(path, hashAndWriteSink);
    
    PathSet references;
    queryReferences(path, references);
    writeStringSet(references, hashAndWriteSink);

    Path deriver = queryDeriver(path);
    writeString(deriver, hashAndWriteSink);

    if (sign) {
        /* Finalise the hash and stop hashing: the signature itself
           must not be part of the hashed data. */
        Hash hash = hashAndWriteSink.hashSink.finish();
        hashAndWriteSink.hashing = false;

        writeInt(1, hashAndWriteSink);
        
        /* Write the hash to a temporary file so it can be fed to
           openssl. */
        Path tmpDir = createTempDir();
        AutoDelete delTmp(tmpDir);
        Path hashFile = tmpDir + "/hash";
        writeStringToFile(hashFile, printHash(hash));

        /* The signing key must not be readable by anyone else. */
        Path secretKey = nixConfDir + "/signing-key.sec";
        checkSecrecy(secretKey);

        /* Sign the hash with `openssl rsautl'. */
        Strings args;
        args.push_back("rsautl");
        args.push_back("-sign");
        args.push_back("-inkey");
        args.push_back(secretKey);
        args.push_back("-in");
        args.push_back(hashFile);
        string signature = runProgram(OPENSSL_PATH, true, args);

        writeString(signature, hashAndWriteSink);
        
    } else
        writeInt(0, hashAndWriteSink);
}
|
|
|
|
|
|
|
|
|
2007-02-21 17:45:32 +02:00
|
|
|
struct HashAndReadSource : Source
|
|
|
|
{
|
|
|
|
Source & readSource;
|
|
|
|
HashSink hashSink;
|
|
|
|
bool hashing;
|
|
|
|
HashAndReadSource(Source & readSource) : readSource(readSource), hashSink(htSHA256)
|
|
|
|
{
|
|
|
|
hashing = true;
|
|
|
|
}
|
|
|
|
virtual void operator ()
|
|
|
|
(unsigned char * data, unsigned int len)
|
|
|
|
{
|
|
|
|
readSource(data, len);
|
|
|
|
if (hashing) hashSink(data, len);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
/* Read an archive produced by exportPath() from `source', verify its
   signature if `requireSignature' is set, and register its contents
   as a valid store path.  Returns the imported store path. */
Path LocalStore::importPath(bool requireSignature, Source & source)
{
    HashAndReadSource hashAndReadSource(source);
    
    /* We don't yet know what store path this archive contains (the
       store path follows the archive data proper), and besides, we
       don't know yet whether the signature is valid. */
    Path tmpDir = createTempDir(nixStore);
    AutoDelete delTmp(tmpDir); /* !!! could be GC'ed! */
    Path unpacked = tmpDir + "/unpacked";

    restorePath(unpacked, hashAndReadSource);

    /* The magic number follows the NAR data; reject anything that
       isn't in export format. */
    unsigned int magic = readInt(hashAndReadSource);
    if (magic != EXPORT_MAGIC)
        throw Error("Nix archive cannot be imported; wrong format");

    Path dstPath = readStorePath(hashAndReadSource);

    PathSet references = readStorePaths(hashAndReadSource);

    /* The deriver may be empty; if not, it must at least look like a
       store path. */
    Path deriver = readString(hashAndReadSource);
    if (deriver != "") assertStorePath(deriver);

    /* Finalise the hash before reading the signature: the signature
       covers only the bytes up to this point (mirroring
       exportPath()). */
    Hash hash = hashAndReadSource.hashSink.finish();
    hashAndReadSource.hashing = false;

    bool haveSignature = readInt(hashAndReadSource) == 1;

    if (requireSignature && !haveSignature)
        throw Error("imported archive lacks a signature");
    
    if (haveSignature) {
        string signature = readString(hashAndReadSource);

        if (requireSignature) {
            /* Write the signature to a file so it can be checked with
               `openssl rsautl -verify' against the public key. */
            Path sigFile = tmpDir + "/sig";
            writeStringToFile(sigFile, signature);

            Strings args;
            args.push_back("rsautl");
            args.push_back("-verify");
            args.push_back("-inkey");
            args.push_back(nixConfDir + "/signing-key.pub");
            args.push_back("-pubin");
            args.push_back("-in");
            args.push_back(sigFile);
            string hash2 = runProgram(OPENSSL_PATH, true, args);

            /* Note: runProgram() throws an exception if the signature
               is invalid. */

            /* The recovered signed hash must match the hash of what we
               actually read. */
            if (printHash(hash) != hash2)
                throw Error(
                    "signed hash doesn't match actual contents of imported "
                    "archive; archive could be corrupt, or someone is trying "
                    "to import a Trojan horse");
        }
    }

    /* Do the actual import. */

    /* !!! way too much code duplication with addTextToStore() etc. */
    addTempRoot(dstPath);

    if (!isValidPath(dstPath)) {

        PathLocks outputLock;

        /* Lock the output path.  But don't lock if we're being called
           from a build hook (whose parent process already acquired a
           lock on this path). */
        Strings locksHeld = tokenizeString(getEnv("NIX_HELD_LOCKS"));
        if (find(locksHeld.begin(), locksHeld.end(), dstPath) == locksHeld.end())
            outputLock.lockPaths(singleton<PathSet, Path>(dstPath));

        /* Re-check under the lock: somebody else may have imported it
           in the meantime. */
        if (!isValidPath(dstPath)) {

            /* Remove any debris from a previous failed attempt. */
            if (pathExists(dstPath)) deletePathWrapped(dstPath);

            /* Move the unpacked tree into place. */
            if (rename(unpacked.c_str(), dstPath.c_str()) == -1)
                throw SysError(format("cannot move `%1%' to `%2%'")
                    % unpacked % dstPath);

            canonicalisePathMetaData(dstPath);

            /* !!! if we were clever, we could prevent the hashPath()
               here. */
            /* Drop the deriver if it isn't itself valid. */
            if (deriver != "" && !isValidPath(deriver)) deriver = "";
            registerValidPath(dstPath,
                hashPath(htSHA256, dstPath), references, deriver);
        }
        
        outputLock.setDeletion(true);
    }
    
    return dstPath;
}
|
|
|
|
|
|
|
|
|
2008-06-18 12:34:17 +03:00
|
|
|
void LocalStore::deleteFromStore(const Path & path, unsigned long long & bytesFreed,
|
|
|
|
unsigned long long & blocksFreed)
|
2003-06-23 17:40:49 +03:00
|
|
|
{
|
2005-12-15 23:11:39 +02:00
|
|
|
bytesFreed = 0;
|
2003-10-08 18:06:59 +03:00
|
|
|
|
2004-02-14 23:44:18 +02:00
|
|
|
assertStorePath(path);
|
2003-07-08 12:54:47 +03:00
|
|
|
|
2008-06-09 16:52:45 +03:00
|
|
|
if (isValidPath(path)) {
|
|
|
|
/* Acquire a lock on the referrers file to prevent new
|
|
|
|
referrers to this path from appearing while we're deleting
|
|
|
|
it. */
|
|
|
|
PathLocks referrersLock(singleton<PathSet, Path>(referrersFileFor(path)));
|
|
|
|
referrersLock.setDeletion(true);
|
|
|
|
PathSet referrers; queryReferrers(path, referrers);
|
|
|
|
referrers.erase(path); /* ignore self-references */
|
|
|
|
if (!referrers.empty())
|
|
|
|
throw PathInUse(format("cannot delete path `%1%' because it is in use by `%2%'")
|
|
|
|
% path % showPaths(referrers));
|
|
|
|
invalidatePath(path);
|
2005-01-31 16:00:43 +02:00
|
|
|
}
|
2003-07-08 12:54:47 +03:00
|
|
|
|
2008-06-18 12:34:17 +03:00
|
|
|
deletePathWrapped(path, bytesFreed, blocksFreed);
|
2003-06-23 17:40:49 +03:00
|
|
|
}
|
2003-07-17 15:27:55 +03:00
|
|
|
|
|
|
|
|
2008-06-09 16:52:45 +03:00
|
|
|
/* Consistency check of the local store: verify that every registered
   valid path exists and lies in the store, repair the meta-data
   (missing/stale referrer mappings, bogus derivers, missing hashes)
   where possible, and optionally (`checkContents') re-hash every
   path to detect on-disk modification. */
void LocalStore::verifyStore(bool checkContents)
{
    /* Check whether all valid paths actually exist. */
    printMsg(lvlInfo, "checking path existence");

    /* `validPaths' accumulates the subset of registered paths that
       survive the existence check. */
    PathSet validPaths2 = queryValidPaths(), validPaths;

    for (PathSet::iterator i = validPaths2.begin(); i != validPaths2.end(); ++i) {
        checkInterrupt();
        if (!isStorePath(*i)) {
            /* Registered but not inside the store prefix: drop it. */
            printMsg(lvlError, format("path `%1%' is not in the Nix store") % *i);
            invalidatePath(*i);
        } else if (!pathExists(*i)) {
            /* Registered but gone from disk: drop it. */
            printMsg(lvlError, format("path `%1%' disappeared") % *i);
            invalidatePath(*i);
        } else
            validPaths.insert(*i);
    }


    /* Check the store path meta-information. */
    printMsg(lvlInfo, "checking path meta-information");

    /* Cache of referrer sets to avoid re-querying the same path. */
    std::map<Path, PathSet> referrersCache;
    
    for (PathSet::iterator i = validPaths.begin(); i != validPaths.end(); ++i) {
        /* `update' is set when the meta-data needs to be written
           back. */
        bool update = false;
        ValidPathInfo info = queryPathInfo(*i, true);

        /* Check the references: each reference should be valid, and
           it should have a matching referrer. */
        for (PathSet::iterator j = info.references.begin();
             j != info.references.end(); ++j)
        {
            if (referrersCache.find(*j) == referrersCache.end())
                queryReferrers(*j, referrersCache[*j]);
            if (referrersCache[*j].find(*i) == referrersCache[*j].end()) {
                /* Repair: record the missing back-pointer. */
                printMsg(lvlError, format("adding missing referrer mapping from `%1%' to `%2%'")
                    % *j % *i);
                appendReferrer(*j, *i, true);
            }
            if (validPaths.find(*j) == validPaths.end()) {
                printMsg(lvlError, format("incomplete closure: `%1%' needs missing `%2%'")
                    % *i % *j);
                /* nothing we can do about it... */
            }
        }

        /* Check the deriver.  (Note that the deriver doesn't have to
           be a valid path.) */
        if (!info.deriver.empty() && !isStorePath(info.deriver)) {
            info.deriver = "";
            update = true;
        }

        /* Check the content hash (optionally - slow). */
        if (info.hash.hashSize == 0) {
            /* No hash recorded at all: recompute it. */
            printMsg(lvlError, format("re-hashing `%1%'") % *i);
            info.hash = hashPath(htSHA256, *i);
            update = true;
        } else if (checkContents) {
            debug(format("checking contents of `%1%'") % *i);
            Hash current = hashPath(info.hash.type, *i);
            if (current != info.hash) {
                /* Report only; the stored hash is left as the
                   expected value. */
                printMsg(lvlError, format("path `%1%' was modified! "
                        "expected hash `%2%', got `%3%'")
                    % *i % printHash(info.hash) % printHash(current));
            }
        }

        if (update) registerValidPath(info);
    }

    referrersCache.clear();
    

    /* Check the referrers. */
    printMsg(lvlInfo, "checking referrers");

    /* Cache of reference sets, mirroring `referrersCache' above. */
    std::map<Path, PathSet> referencesCache;

    /* Walk every referrers file in the database directory. */
    Strings entries = readDirectory(nixDBPath + "/referrer");
    for (Strings::iterator i = entries.begin(); i != entries.end(); ++i) {
        Path from = nixStore + "/" + *i;

        if (validPaths.find(from) == validPaths.end()) {
            /* !!! This removes lock files as well.  Need to check
               whether that's okay. */
            printMsg(lvlError, format("removing referrers file for invalid `%1%'") % from);
            Path p = referrersFileFor(from);
            if (unlink(p.c_str()) == -1)
                throw SysError(format("unlinking `%1%'") % p);
            continue;
        }

        PathSet referrers;
        bool allValid = queryReferrersInternal(from, referrers);
        bool update = false;

        if (!allValid) {
            printMsg(lvlError, format("removing some stale referrers for `%1%'") % from);
            update = true;
        }

        /* Each referrer should have a matching reference. */
        PathSet referrersNew;
        for (PathSet::iterator j = referrers.begin(); j != referrers.end(); ++j) {
            if (referencesCache.find(*j) == referencesCache.end())
                queryReferences(*j, referencesCache[*j]);
            if (referencesCache[*j].find(from) == referencesCache[*j].end()) {
                /* Referrer without a matching reference: drop it. */
                printMsg(lvlError, format("removing unexpected referrer mapping from `%1%' to `%2%'")
                    % from % *j);
                update = true;
            } else referrersNew.insert(*j);
        }

        if (update) rewriteReferrers(from, false, referrersNew);
    }
}
|
|
|
|
|
|
|
|
|
2006-09-05 00:06:23 +03:00
|
|
|
}
|