Implement S3BinaryCacheStore::queryAllValidPaths()
This allows commands like "nix verify --all" or "nix path-info --all" to work on S3 caches. Unfortunately, this requires some ugly hackery: when querying the contents of the bucket, we don't want to have to read every .narinfo file. But the S3 bucket keys only include the hash part of each store path, not the name part. So as a special exception queryAllValidPaths() can now return store paths *without* the name part, and queryPathInfo() accepts such store paths (returning a ValidPathInfo object containing the full name).
parent d155d80155
commit 7d14f5c331

9 changed files with 113 additions and 54 deletions

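The essence of this change is that both the in-memory path-info cache and the on-disk NAR-info cache are now keyed by the hash part of a store path rather than by the full path, so a lookup still succeeds when the caller only knows /nix/store/&lt;hash&gt;. The following is a minimal standalone C++ sketch of that idea, not code from this commit; the store directory, hash and name values below are invented for illustration.

    #include <cassert>
    #include <iostream>
    #include <map>
    #include <string>

    static const std::string storeDir = "/nix/store"; // assumed store directory
    static const size_t hashLen = 32;                 // length of the hash part

    // "/nix/store/<hash>" or "/nix/store/<hash>-<name>"  ->  "<hash>"
    std::string hashPartOf(const std::string & path)
    {
        assert(path.size() >= storeDir.size() + 1 + hashLen);
        return std::string(path, storeDir.size() + 1, hashLen);
    }

    int main()
    {
        // Map from hash part to name part, analogous to the new NARs table.
        std::map<std::string, std::string> names;
        std::string hash = "0123456789abcdefghijklmnopqrstuv"; // made-up hash
        names[hash] = "hello-2.10";                            // made-up name

        // A caller that only has the hash-only path (e.g. from a bucket
        // listing) can still recover the full store path.
        std::string query = storeDir + "/" + hash;
        auto it = names.find(hashPartOf(query));
        if (it != names.end())
            std::cout << storeDir + "/" + it->first
                + (it->second.empty() ? "" : "-" + it->second) << "\n";
    }
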
@@ -94,13 +94,15 @@ void BinaryCacheStore::addToCache(const ValidPathInfo & info,

     upsertFile(narInfoFile, narInfo->to_string());

+    auto hashPart = storePathToHash(narInfo->path);
+
     {
         auto state_(state.lock());
-        state_->pathInfoCache.upsert(narInfo->path, std::shared_ptr<NarInfo>(narInfo));
+        state_->pathInfoCache.upsert(hashPart, std::shared_ptr<NarInfo>(narInfo));
     }

     if (diskCache)
-        diskCache->upsertNarInfo(getUri(), std::shared_ptr<NarInfo>(narInfo));
+        diskCache->upsertNarInfo(getUri(), hashPart, std::shared_ptr<NarInfo>(narInfo));

     stats.narInfoWrite++;
 }

@@ -197,8 +199,6 @@ std::shared_ptr<ValidPathInfo> BinaryCacheStore::queryPathInfoUncached(const Pat
     if (!data) return 0;

     auto narInfo = make_ref<NarInfo>(*data, narInfoFile);
-    if (narInfo->path != storePath)
-        throw Error(format("NAR info file for store path ‘%1%’ does not match ‘%2%’") % narInfo->path % storePath);

     stats.narInfoRead++;

@@ -579,7 +579,7 @@ uint64_t LocalStore::addValidPath(State & state,

     {
         auto state_(Store::state.lock());
-        state_->pathInfoCache.upsert(info.path, std::make_shared<ValidPathInfo>(info));
+        state_->pathInfoCache.upsert(storePathToHash(info.path), std::make_shared<ValidPathInfo>(info));
     }

     return id;

@@ -1067,7 +1067,7 @@ void LocalStore::invalidatePath(State & state, const Path & path)

     {
         auto state_(Store::state.lock());
-        state_->pathInfoCache.erase(path);
+        state_->pathInfoCache.erase(storePathToHash(path));
     }
 }

@@ -20,7 +20,8 @@ create table if not exists BinaryCaches (

 create table if not exists NARs (
     cache integer not null,
-    storePath text not null,
+    hashPart text not null,
+    namePart text not null,
     url text,
     compression text,
     fileHash text,

@@ -31,7 +32,7 @@ create table if not exists NARs (
     deriver text,
     sigs text,
     timestamp integer not null,
-    primary key (cache, storePath),
+    primary key (cache, hashPart),
     foreign key (cache) references BinaryCaches(id) on delete cascade
 );

@@ -66,7 +67,7 @@ public:
     {
         auto state(_state.lock());

-        Path dbPath = getCacheDir() + "/nix/binary-cache-v3.sqlite";
+        Path dbPath = getCacheDir() + "/nix/binary-cache-v4.sqlite";
         createDirs(dirOf(dbPath));

         if (sqlite3_open_v2(dbPath.c_str(), &state->db.db,

@@ -92,11 +93,11 @@ public:
            "select id, storeDir, wantMassQuery, priority from BinaryCaches where url = ?");

        state->insertNAR.create(state->db,
-           "insert or replace into NARs(cache, storePath, url, compression, fileHash, fileSize, narHash, "
-           "narSize, refs, deriver, sigs, timestamp) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)");
+           "insert or replace into NARs(cache, hashPart, namePart, url, compression, fileHash, fileSize, narHash, "
+           "narSize, refs, deriver, sigs, timestamp) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)");

        state->queryNAR.create(state->db,
-           "select * from NARs where cache = ? and storePath = ?");
+           "select * from NARs where cache = ? and hashPart = ?");

        state->insertNARExistence.create(state->db,
            "insert or replace into NARExistence(cache, storePath, exist, timestamp) values (?, ?, ?, ?)");

@@ -141,13 +142,13 @@ public:
    }

    std::pair<Outcome, std::shared_ptr<NarInfo>> lookupNarInfo(
-       const std::string & uri, const Path & storePath) override
+       const std::string & uri, const std::string & hashPart) override
    {
        auto state(_state.lock());

        auto queryNAR(state->queryNAR.use()
            (uriToInt(*state, uri))
-           (baseNameOf(storePath)));
+           (hashPart));

        if (!queryNAR.next())
            // FIXME: check NARExistence

@@ -157,26 +158,29 @@ public:

        // FIXME: implement TTL.

-       narInfo->path = storePath;
-       narInfo->url = queryNAR.getStr(2);
-       narInfo->compression = queryNAR.getStr(3);
-       if (!queryNAR.isNull(4))
-           narInfo->fileHash = parseHash(queryNAR.getStr(4));
-       narInfo->fileSize = queryNAR.getInt(5);
-       narInfo->narHash = parseHash(queryNAR.getStr(6));
-       narInfo->narSize = queryNAR.getInt(7);
-       for (auto & r : tokenizeString<Strings>(queryNAR.getStr(8), " "))
+       auto namePart = queryNAR.getStr(2);
+       narInfo->path = settings.nixStore + "/" +
+           hashPart + (namePart.empty() ? "" : "-" + namePart);
+       narInfo->url = queryNAR.getStr(3);
+       narInfo->compression = queryNAR.getStr(4);
+       if (!queryNAR.isNull(5))
+           narInfo->fileHash = parseHash(queryNAR.getStr(5));
+       narInfo->fileSize = queryNAR.getInt(6);
+       narInfo->narHash = parseHash(queryNAR.getStr(7));
+       narInfo->narSize = queryNAR.getInt(8);
+       for (auto & r : tokenizeString<Strings>(queryNAR.getStr(9), " "))
            narInfo->references.insert(settings.nixStore + "/" + r);
-       if (!queryNAR.isNull(9))
-           narInfo->deriver = settings.nixStore + "/" + queryNAR.getStr(9);
-       for (auto & sig : tokenizeString<Strings>(queryNAR.getStr(10), " "))
+       if (!queryNAR.isNull(10))
+           narInfo->deriver = settings.nixStore + "/" + queryNAR.getStr(10);
+       for (auto & sig : tokenizeString<Strings>(queryNAR.getStr(11), " "))
            narInfo->sigs.insert(sig);

        return {oValid, narInfo};
    }

    void upsertNarInfo(
-       const std::string & uri, std::shared_ptr<ValidPathInfo> info) override
+       const std::string & uri, const std::string & hashPart,
+       std::shared_ptr<ValidPathInfo> info) override
    {
        auto state(_state.lock());

@@ -184,9 +188,12 @@ public:

        auto narInfo = std::dynamic_pointer_cast<NarInfo>(info);

+       assert(hashPart == storePathToHash(info->path));
+
        state->insertNAR.use()
            (uriToInt(*state, uri))
-           (baseNameOf(info->path))
+           (hashPart)
+           (storePathToName(info->path))
            (narInfo ? narInfo->url : "", narInfo != 0)
            (narInfo ? narInfo->compression : "", narInfo != 0)
            (narInfo && narInfo->fileHash ? narInfo->fileHash.to_string() : "", narInfo && narInfo->fileHash)

@@ -15,10 +15,11 @@ public:
     virtual bool cacheExists(const std::string & uri) = 0;

     virtual std::pair<Outcome, std::shared_ptr<NarInfo>> lookupNarInfo(
-        const std::string & uri, const Path & storePath) = 0;
+        const std::string & uri, const std::string & hashPart) = 0;

     virtual void upsertNarInfo(
-        const std::string & uri, std::shared_ptr<ValidPathInfo> narInfo) = 0;
+        const std::string & uri, const std::string & hashPart,
+        std::shared_ptr<ValidPathInfo> info) = 0;
 };

 /* Return a singleton cache object that can be used concurrently by

@@ -10,6 +10,7 @@
 #include <aws/s3/model/GetObjectRequest.h>
 #include <aws/s3/model/HeadObjectRequest.h>
 #include <aws/s3/model/PutObjectRequest.h>
+#include <aws/s3/model/ListObjectsRequest.h>

 namespace nix {

@@ -164,7 +165,7 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore

     std::shared_ptr<std::string> getFile(const std::string & path)
     {
-        printMsg(lvlDebug, format("fetching ‘s3://%1%/%2%’...") % bucketName % path);
+        debug(format("fetching ‘s3://%1%/%2%’...") % bucketName % path);

         auto request =
             Aws::S3::Model::GetObjectRequest()

@@ -204,6 +205,38 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
         }
     }

+    PathSet queryAllValidPaths() override
+    {
+        PathSet paths;
+        std::string marker;
+
+        do {
+            debug(format("listing bucket ‘s3://%s’ from key ‘%s’...") % bucketName % marker);
+
+            auto res = checkAws(format("AWS error listing bucket ‘%s’") % bucketName,
+                client->ListObjects(
+                    Aws::S3::Model::ListObjectsRequest()
+                    .WithBucket(bucketName)
+                    .WithDelimiter("/")
+                    .WithMarker(marker)));
+
+            auto & contents = res.GetContents();
+
+            debug(format("got %d keys, next marker ‘%s’")
+                % contents.size() % res.GetNextMarker());
+
+            for (auto object : contents) {
+                auto & key = object.GetKey();
+                if (!hasSuffix(key, ".narinfo")) continue;
+                paths.insert(settings.nixStore + "/" + std::string(key, 0, key.size() - 8));
+            }
+
+            marker = res.GetNextMarker();
+        } while (!marker.empty());
+
+        return paths;
+    }
+
 };

 static RegisterStoreImplementation regStore([](const std::string & uri) -> std::shared_ptr<Store> {

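To make the key handling in queryAllValidPaths() above concrete: a bucket key of the form &lt;hash&gt;.narinfo becomes a hash-only store path by stripping the 8-character ".narinfo" suffix, which is exactly what std::string(key, 0, key.size() - 8) does. A tiny standalone illustration with a made-up hash:

    #include <iostream>
    #include <string>

    int main()
    {
        std::string key = "0123456789abcdefghijklmnopqrstuv.narinfo"; // made-up key
        // ".narinfo" is 8 characters long, hence key.size() - 8 below.
        std::string path = "/nix/store/" + std::string(key, 0, key.size() - 8);
        std::cout << path << "\n"; // /nix/store/0123456789abcdefghijklmnopqrstuv
    }
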
@@ -63,13 +63,16 @@ Path followLinksToStorePath(const Path & path)

 string storePathToName(const Path & path)
 {
     assertStorePath(path);
-    return string(path, settings.nixStore.size() + storePathHashLen + 2);
+    auto l = settings.nixStore.size() + 1 + storePathHashLen;
+    assert(path.size() >= l);
+    return path.size() == l ? "" : string(path, l + 1);
 }


 string storePathToHash(const Path & path)
 {
     assertStorePath(path);
+    assert(path.size() >= settings.nixStore.size() + 1 + storePathHashLen);
     return string(path, settings.nixStore.size() + 1, storePathHashLen);
 }

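For a concrete feel for the two helpers revised above, here is a hedged sketch: simplified, self-contained copies for illustration only (the real functions also validate the path via assertStorePath(), and the store directory and hash below are made up). The point is that storePathToName() now returns an empty string for a hash-only path, whereas the old one-liner assumed a "-&lt;name&gt;" suffix was always present.

    #include <cassert>
    #include <iostream>
    #include <string>

    static const std::string nixStore = "/nix/store"; // assumed store directory
    static const size_t storePathHashLen = 32;        // length of the hash part

    // Simplified copy of the revised storePathToHash(), for illustration.
    std::string storePathToHash(const std::string & path)
    {
        assert(path.size() >= nixStore.size() + 1 + storePathHashLen);
        return std::string(path, nixStore.size() + 1, storePathHashLen);
    }

    // Simplified copy of the revised storePathToName(), for illustration.
    std::string storePathToName(const std::string & path)
    {
        auto l = nixStore.size() + 1 + storePathHashLen;
        assert(path.size() >= l);
        return path.size() == l ? "" : std::string(path, l + 1);
    }

    int main()
    {
        std::string full = nixStore + "/0123456789abcdefghijklmnopqrstuv-hello-2.10";
        std::string bare = nixStore + "/0123456789abcdefghijklmnopqrstuv";

        std::cout << storePathToHash(full) << "\n"; // 0123456789abcdefghijklmnopqrstuv
        std::cout << storePathToName(full) << "\n"; // hello-2.10
        std::cout << storePathToName(bare) << "\n"; // "" (name part omitted)
    }
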
@@ -234,9 +237,11 @@ std::string Store::getUri()

 bool Store::isValidPath(const Path & storePath)
 {
+    auto hashPart = storePathToHash(storePath);
+
     {
         auto state_(state.lock());
-        auto res = state_->pathInfoCache.get(storePath);
+        auto res = state_->pathInfoCache.get(hashPart);
         if (res) {
             stats.narInfoReadAverted++;
             return *res != 0;

@@ -244,10 +249,11 @@ bool Store::isValidPath(const Path & storePath)
     }

     if (diskCache) {
-        auto res = diskCache->lookupNarInfo(getUri(), storePath);
+        auto res = diskCache->lookupNarInfo(getUri(), hashPart);
         if (res.first != NarInfoDiskCache::oUnknown) {
             stats.narInfoReadAverted++;
             auto state_(state.lock());
-            state_->pathInfoCache.upsert(storePath,
+            state_->pathInfoCache.upsert(hashPart,
                 res.first == NarInfoDiskCache::oInvalid ? 0 : res.second);
             return res.first == NarInfoDiskCache::oValid;
         }

@@ -261,9 +267,11 @@ bool Store::isValidPath(const Path & storePath)

 ref<const ValidPathInfo> Store::queryPathInfo(const Path & storePath)
 {
+    auto hashPart = storePathToHash(storePath);
+
     {
         auto state_(state.lock());
-        auto res = state_->pathInfoCache.get(storePath);
+        auto res = state_->pathInfoCache.get(hashPart);
         if (res) {
             stats.narInfoReadAverted++;
             if (!*res)

@@ -273,12 +281,14 @@ ref<const ValidPathInfo> Store::queryPathInfo(const Path & storePath)
     }

     if (diskCache) {
-        auto res = diskCache->lookupNarInfo(getUri(), storePath);
+        auto res = diskCache->lookupNarInfo(getUri(), hashPart);
         if (res.first != NarInfoDiskCache::oUnknown) {
             stats.narInfoReadAverted++;
             auto state_(state.lock());
-            state_->pathInfoCache.upsert(storePath,
+            state_->pathInfoCache.upsert(hashPart,
                 res.first == NarInfoDiskCache::oInvalid ? 0 : res.second);
-            if (res.first == NarInfoDiskCache::oInvalid)
+            if (res.first == NarInfoDiskCache::oInvalid ||
+                (res.second->path != storePath && storePathToName(storePath) != ""))
                 throw InvalidPath(format("path ‘%s’ is not valid") % storePath);
             return ref<ValidPathInfo>(res.second);
         }

@@ -287,14 +297,16 @@ ref<const ValidPathInfo> Store::queryPathInfo(const Path & storePath)

     auto info = queryPathInfoUncached(storePath);

     if (diskCache && info)
-        diskCache->upsertNarInfo(getUri(), info);
+        diskCache->upsertNarInfo(getUri(), hashPart, info);

     {
         auto state_(state.lock());
-        state_->pathInfoCache.upsert(storePath, info);
+        state_->pathInfoCache.upsert(hashPart, info);
     }

-    if (!info) {
+    if (!info
+        || (info->path != storePath && storePathToName(storePath) != ""))
+    {
         stats.narInfoMissing++;
         throw InvalidPath(format("path ‘%s’ is not valid") % storePath);
     }

@@ -181,7 +181,7 @@ protected:

     struct State
     {
-        LRUCache<Path, std::shared_ptr<ValidPathInfo>> pathInfoCache{64 * 1024};
+        LRUCache<std::string, std::shared_ptr<ValidPathInfo>> pathInfoCache{64 * 1024};
     };

     Sync<State> state;

@@ -206,10 +206,15 @@ public:
     /* Query which of the given paths is valid. */
     virtual PathSet queryValidPaths(const PathSet & paths) = 0;

-    /* Query the set of all valid paths. */
+    /* Query the set of all valid paths. Note that for some store
+       backends, the name part of store paths may be omitted
+       (i.e. you'll get /nix/store/<hash> rather than
+       /nix/store/<hash>-<name>). Use queryPathInfo() to obtain the
+       full store path. */
     virtual PathSet queryAllValidPaths() = 0;

-    /* Query information about a valid path. */
+    /* Query information about a valid path. It is permitted to omit
+       the name part of the store path. */
     ref<const ValidPathInfo> queryPathInfo(const Path & path);

 protected:

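The new doc comments above spell out the contract this commit relies on: queryAllValidPaths() may hand back hash-only paths, and queryPathInfo() fills in the name. A sketch of how a caller might use that contract follows; it assumes the Store API shown above and omits store construction and settings setup, which may differ.

    #include "store-api.hh"
    #include <iostream>

    // Lists every path in a (possibly S3-backed) binary cache together
    // with its NAR size, resolving hash-only paths to full names.
    void listAllPaths(nix::ref<nix::Store> store)
    {
        for (auto & path : store->queryAllValidPaths()) {
            // 'path' may be just /nix/store/<hash> on an S3 cache.
            auto info = store->queryPathInfo(path);
            // info->path is always the full /nix/store/<hash>-<name>.
            std::cout << info->path << "\t" << info->narSize << "\n";
        }
    }
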
@@ -4,6 +4,7 @@
 #include "store-api.hh"

 #include <iomanip>
+#include <algorithm>

 using namespace nix;

@@ -48,14 +49,14 @@ struct CmdPathInfo : StorePathsCommand
         for (auto & storePath : storePaths)
             pathLen = std::max(pathLen, storePath.size());

-        for (auto & storePath : storePaths) {
-            if (!store->isValidPath(storePath))
-                throw Error(format("path ‘%s’ is not valid") % storePath);
+        for (auto storePath : storePaths) {
+            auto info = store->queryPathInfo(storePath);
+            storePath = info->path; // FIXME: screws up padding

-            std::cout << storePath << std::string(pathLen - storePath.size(), ' ');
+            std::cout << storePath << std::string(std::max(0, (int) pathLen - (int) storePath.size()), ' ');

             if (showSize) {
-                std::cout << '\t' << std::setw(11) << store->queryPathInfo(storePath)->narSize;
+                std::cout << '\t' << std::setw(11) << info->narSize;
             }

             if (showClosureSize) {

@@ -98,7 +98,7 @@ struct CmdVerify : StorePathsCommand
                 if (!noContents) {

                     HashSink sink(info->narHash.type);
-                    store->narFromPath(storePath, sink);
+                    store->narFromPath(info->path, sink);

                     auto hash = sink.finish();

@@ -106,7 +106,7 @@ struct CmdVerify : StorePathsCommand
                         corrupted = 1;
                         printMsg(lvlError,
                             format("path ‘%s’ was modified! expected hash ‘%s’, got ‘%s’")
-                            % storePath % printHash(info->narHash) % printHash(hash.first));
+                            % info->path % printHash(info->narHash) % printHash(hash.first));
                     }

                 }

@@ -138,7 +138,7 @@ struct CmdVerify : StorePathsCommand
                     for (auto & store2 : substituters) {
                         if (validSigs >= actualSigsNeeded) break;
                         try {
-                            doSigs(store2->queryPathInfo(storePath)->sigs);
+                            doSigs(store2->queryPathInfo(info->path)->sigs);
                         } catch (InvalidPath &) {
                         } catch (Error & e) {
                             printMsg(lvlError, format(ANSI_RED "error:" ANSI_NORMAL " %s") % e.what());

@@ -151,7 +151,7 @@ struct CmdVerify : StorePathsCommand

                 if (!good) {
                     untrusted++;
-                    printMsg(lvlError, format("path ‘%s’ is untrusted") % storePath);
+                    printMsg(lvlError, format("path ‘%s’ is untrusted") % info->path);
                 }

             }