Mirror of https://github.com/privatevoid-net/nix-super.git (synced 2024-11-30 17:46:15 +02:00)
Merge remote-tracking branch 'origin/master' into flakes
commit 4205234f26
8 changed files with 145 additions and 162 deletions
src/libstore/binary-cache-store.cc

@@ -10,8 +10,6 @@
 #include "nar-info-disk-cache.hh"
 #include "nar-accessor.hh"
 #include "json.hh"
-#include "retry.hh"
-#include "download.hh"
 
 #include <chrono>
 
@@ -81,15 +79,13 @@ void BinaryCacheStore::getFile(const std::string & path, Sink & sink)
 
 std::shared_ptr<std::string> BinaryCacheStore::getFile(const std::string & path)
 {
-    return retry<std::shared_ptr<std::string>>(downloadSettings.tries, [&]() -> std::shared_ptr<std::string> {
-        StringSink sink;
-        try {
-            getFile(path, sink);
-        } catch (NoSuchBinaryCacheFile &) {
-            return nullptr;
-        }
-        return sink.s;
-    });
+    StringSink sink;
+    try {
+        getFile(path, sink);
+    } catch (NoSuchBinaryCacheFile &) {
+        return nullptr;
+    }
+    return sink.s;
 }
 
 Path BinaryCacheStore::narInfoFileFor(const Path & storePath)

src/libstore/download.cc

@@ -8,7 +8,6 @@
 #include "compression.hh"
 #include "pathlocks.hh"
 #include "finally.hh"
-#include "retry.hh"
 
 #ifdef ENABLE_S3
 #include <aws/core/client/ClientConfiguration.h>
@@ -20,9 +19,11 @@
 #include <curl/curl.h>
 
 #include <algorithm>
+#include <cmath>
 #include <cstring>
 #include <iostream>
 #include <queue>
+#include <random>
 #include <thread>
 
 using namespace std::string_literals;
@@ -45,6 +46,9 @@ struct CurlDownloader : public Downloader
 {
     CURLM * curlm = 0;
 
+    std::random_device rd;
+    std::mt19937 mt19937;
+
     struct DownloadItem : public std::enable_shared_from_this<DownloadItem>
     {
         CurlDownloader & downloader;
@@ -57,10 +61,20 @@ struct CurlDownloader : public Downloader
         bool active = false; // whether the handle has been added to the multi object
         std::string status;
 
+        unsigned int attempt = 0;
+
+        /* Don't start this download until the specified time point
+           has been reached. */
+        std::chrono::steady_clock::time_point embargo;
+
         struct curl_slist * requestHeaders = 0;
 
         std::string encoding;
 
+        bool acceptRanges = false;
+
+        curl_off_t writtenToSink = 0;
+
         DownloadItem(CurlDownloader & downloader,
             const DownloadRequest & request,
             Callback<DownloadResult> callback)
@@ -71,9 +85,10 @@ struct CurlDownloader : public Downloader
                 {request.uri}, request.parentAct)
             , callback(callback)
             , finalSink([this](const unsigned char * data, size_t len) {
-                if (this->request.dataCallback)
+                if (this->request.dataCallback) {
+                    writtenToSink += len;
                     this->request.dataCallback((char *) data, len);
-                else
+                } else
                     this->result.data->append((char *) data, len);
             })
         {
@@ -151,6 +166,7 @@ struct CurlDownloader : public Downloader
                 status = ss.size() >= 2 ? ss[1] : "";
                 result.data = std::make_shared<std::string>();
                 result.bodySize = 0;
+                acceptRanges = false;
                 encoding = "";
             } else {
                 auto i = line.find(':');
@@ -168,7 +184,9 @@ struct CurlDownloader : public Downloader
                            return 0;
                        }
                    } else if (name == "content-encoding")
-                        encoding = trim(string(line, i + 1));;
+                        encoding = trim(string(line, i + 1));
+                    else if (name == "accept-ranges" && toLower(trim(std::string(line, i + 1))) == "bytes")
+                        acceptRanges = true;
                }
            }
            return realSize;
@@ -286,6 +304,9 @@ struct CurlDownloader : public Downloader
             curl_easy_setopt(req, CURLOPT_NETRC_FILE, settings.netrcFile.get().c_str());
             curl_easy_setopt(req, CURLOPT_NETRC, CURL_NETRC_OPTIONAL);
 
+            if (writtenToSink)
+                curl_easy_setopt(req, CURLOPT_RESUME_FROM_LARGE, writtenToSink);
+
             result.data = std::make_shared<std::string>();
             result.bodySize = 0;
         }
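
Note (not part of the patch): the hunk above lets a retried transfer pick up where it left off, by telling libcurl to request only the bytes that have not already been written to the sink. A minimal standalone sketch of the same libcurl mechanism; the URL and output file name are made up, and error handling is kept to the bare minimum:

    // Sketch: resume an HTTP download from the size of a partially written file.
    // Only useful when the server honours ranged requests (Accept-Ranges: bytes),
    // which is what the acceptRanges check in the patch guards against.
    #include <curl/curl.h>
    #include <cstdio>

    int main()
    {
        curl_global_init(CURL_GLOBAL_ALL);
        CURL * req = curl_easy_init();
        if (!req) return 1;

        std::FILE * out = std::fopen("partial.nar.xz", "ab"); // append to what we already have
        if (!out) return 1;
        std::fseek(out, 0, SEEK_END);
        curl_off_t alreadyHave = std::ftell(out);

        curl_easy_setopt(req, CURLOPT_URL, "https://cache.example.org/nar/abc.nar.xz");
        curl_easy_setopt(req, CURLOPT_WRITEDATA, out);                 // default callback fwrite()s into 'out'
        curl_easy_setopt(req, CURLOPT_RESUME_FROM_LARGE, alreadyHave); // ask for bytes [alreadyHave, end)

        CURLcode code = curl_easy_perform(req);

        std::fclose(out);
        curl_easy_cleanup(req);
        curl_global_cleanup();
        return code == CURLE_OK ? 0 : 1;
    }
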
@@ -320,7 +341,7 @@ struct CurlDownloader : public Downloader
                 failEx(writeException);
 
             else if (code == CURLE_OK &&
-                (httpStatus == 200 || httpStatus == 201 || httpStatus == 204 || httpStatus == 304 || httpStatus == 226 /* FTP */ || httpStatus == 0 /* other protocol */))
+                (httpStatus == 200 || httpStatus == 201 || httpStatus == 204 || httpStatus == 206 || httpStatus == 304 || httpStatus == 226 /* FTP */ || httpStatus == 0 /* other protocol */))
             {
                 result.cached = httpStatus == 304;
                 done = true;
@@ -377,7 +398,9 @@ struct CurlDownloader : public Downloader
                    }
                }
 
-                fail(
+                attempt++;
+
+                auto exc =
                     code == CURLE_ABORTED_BY_CALLBACK && _isInterrupted
                     ? DownloadError(Interrupted, fmt("%s of '%s' was interrupted", request.verb(), request.uri))
                     : httpStatus != 0
@@ -388,15 +411,41 @@ struct CurlDownloader : public Downloader
                        )
                    : DownloadError(err,
                        fmt("unable to %s '%s': %s (%d)",
-                            request.verb(), request.uri, curl_easy_strerror(code), code)));
+                            request.verb(), request.uri, curl_easy_strerror(code), code));
+
+                /* If this is a transient error, then maybe retry the
+                   download after a while. If we're writing to a
+                   sink, we can only retry if the server supports
+                   ranged requests. */
+                if (err == Transient
+                    && attempt < request.tries
+                    && (!this->request.dataCallback
+                        || writtenToSink == 0
+                        || (acceptRanges && encoding.empty())))
+                {
+                    int ms = request.baseRetryTimeMs * std::pow(2.0f, attempt - 1 + std::uniform_real_distribution<>(0.0, 0.5)(downloader.mt19937));
+                    if (writtenToSink)
+                        warn("%s; retrying from offset %d in %d ms", exc.what(), writtenToSink, ms);
+                    else
+                        warn("%s; retrying in %d ms", exc.what(), ms);
+                    embargo = std::chrono::steady_clock::now() + std::chrono::milliseconds(ms);
+                    downloader.enqueueItem(shared_from_this());
+                }
+                else
+                    fail(exc);
            }
        }
    };
 
    struct State
    {
+        struct EmbargoComparator {
+            bool operator() (const std::shared_ptr<DownloadItem> & i1, const std::shared_ptr<DownloadItem> & i2) {
+                return i1->embargo > i2->embargo;
+            }
+        };
        bool quit = false;
-        std::vector<std::shared_ptr<DownloadItem>> incoming;
+        std::priority_queue<std::shared_ptr<DownloadItem>, std::vector<std::shared_ptr<DownloadItem>>, EmbargoComparator> incoming;
    };
 
    Sync<State> state_;
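
Note (not part of the patch): the delay computed in the retry branch above is plain exponential backoff with a little random jitter, baseRetryTimeMs * 2^(attempt - 1 + r) with r drawn uniformly from [0, 0.5). A small self-contained sketch of that formula; the function and variable names are illustrative, and the 250 ms base matches the 250.0 constant in the retrySleepTime() helper that this commit deletes:

    // Sketch: exponential backoff with jitter, as used for download retries.
    #include <cmath>
    #include <cstdio>
    #include <random>

    int retryDelayMs(unsigned int attempt, int baseMs, std::mt19937 & rng)
    {
        // attempt 1 waits ~baseMs, attempt 2 ~2*baseMs, attempt 3 ~4*baseMs, ...
        // The random term in [0, 0.5) spreads clients out so they don't all
        // hammer a recovering server at the same instant.
        std::uniform_real_distribution<> jitter(0.0, 0.5);
        return baseMs * std::pow(2.0f, attempt - 1 + jitter(rng));
    }

    int main()
    {
        std::random_device rd;
        std::mt19937 rng(rd());
        for (unsigned int attempt = 1; attempt <= 5; ++attempt)
            std::printf("attempt %u -> wait %d ms\n", attempt, retryDelayMs(attempt, 250, rng));
    }
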
@@ -409,6 +458,7 @@ struct CurlDownloader : public Downloader
    std::thread workerThread;
 
    CurlDownloader()
+        : mt19937(rd())
    {
        static std::once_flag globalInit;
        std::call_once(globalInit, curl_global_init, CURL_GLOBAL_ALL);
@@ -502,7 +552,9 @@ struct CurlDownloader : public Downloader
 
            nextWakeup = std::chrono::steady_clock::time_point();
 
-            /* Add new curl requests from the incoming requests queue. */
+            /* Add new curl requests from the incoming requests queue,
+               except for requests that are embargoed (waiting for a
+               retry timeout to expire). */
            if (extraFDs[0].revents & CURL_WAIT_POLLIN) {
                char buf[1024];
                auto res = read(extraFDs[0].fd, buf, sizeof(buf));
@@ -511,9 +563,22 @@ struct CurlDownloader : public Downloader
            }
 
            std::vector<std::shared_ptr<DownloadItem>> incoming;
+            auto now = std::chrono::steady_clock::now();
+
            {
                auto state(state_.lock());
-                std::swap(state->incoming, incoming);
+                while (!state->incoming.empty()) {
+                    auto item = state->incoming.top();
+                    if (item->embargo <= now) {
+                        incoming.push_back(item);
+                        state->incoming.pop();
+                    } else {
+                        if (nextWakeup == std::chrono::steady_clock::time_point()
+                            || item->embargo < nextWakeup)
+                            nextWakeup = item->embargo;
+                        break;
+                    }
+                }
                quit = state->quit;
            }
 
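
Note (not part of the patch): the loop above is the other half of the embargo mechanism. Incoming items sit in a priority queue ordered by the time they are allowed to start; the worker pops only those whose embargo has passed and uses the earliest remaining embargo as its next wakeup time. A stripped-down sketch of that pattern with made-up Item/drainReady names:

    // Sketch: a queue of work items that may not start before their 'embargo' time.
    #include <chrono>
    #include <cstdio>
    #include <memory>
    #include <queue>
    #include <vector>

    using Clock = std::chrono::steady_clock;

    struct Item
    {
        Clock::time_point embargo; // earliest allowed start time
    };

    struct EmbargoComparator
    {
        bool operator()(const std::shared_ptr<Item> & a, const std::shared_ptr<Item> & b)
        {
            // std::priority_queue is a max-heap, so invert the comparison to put
            // the item with the earliest embargo at the top.
            return a->embargo > b->embargo;
        }
    };

    using Queue = std::priority_queue<std::shared_ptr<Item>,
        std::vector<std::shared_ptr<Item>>, EmbargoComparator>;

    // Pop every item whose embargo has expired into 'ready'; return the wakeup
    // time of the first item that still has to wait (default time_point if none).
    Clock::time_point drainReady(Queue & queue, std::vector<std::shared_ptr<Item>> & ready)
    {
        auto now = Clock::now();
        while (!queue.empty()) {
            auto item = queue.top();
            if (item->embargo > now) return item->embargo;
            ready.push_back(item);
            queue.pop();
        }
        return Clock::time_point();
    }

    int main()
    {
        Queue queue;
        queue.push(std::make_shared<Item>(Item{Clock::now()}));                                  // ready now
        queue.push(std::make_shared<Item>(Item{Clock::now() + std::chrono::milliseconds(500)})); // embargoed

        std::vector<std::shared_ptr<Item>> ready;
        auto wakeup = drainReady(queue, ready);
        std::printf("%zu item(s) ready, %zu still embargoed\n", ready.size(), queue.size());
        (void) wakeup;
    }
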
@@ -535,12 +600,12 @@ struct CurlDownloader : public Downloader
            workerThreadMain();
        } catch (nix::Interrupted & e) {
        } catch (std::exception & e) {
-            printError(format("unexpected error in download thread: %s") % e.what());
+            printError("unexpected error in download thread: %s", e.what());
        }
 
        {
            auto state(state_.lock());
-            state->incoming.clear();
+            while (!state->incoming.empty()) state->incoming.pop();
            state->quit = true;
        }
    }
@@ -556,7 +621,7 @@ struct CurlDownloader : public Downloader
            auto state(state_.lock());
            if (state->quit)
                throw nix::Error("cannot enqueue download request because the download thread is shutting down");
-            state->incoming.push_back(item);
+            state->incoming.push(item);
        }
        writeFull(wakeupPipe.writeSide.get(), " ");
    }
@@ -639,9 +704,7 @@ std::future<DownloadResult> Downloader::enqueueDownload(const DownloadRequest &
 
 DownloadResult Downloader::download(const DownloadRequest & request)
 {
-    return retry<DownloadResult>(request.tries, [&]() {
-        return enqueueDownload(request).get();
-    });
+    return enqueueDownload(request).get();
 }
 
 void Downloader::download(DownloadRequest && request, Sink & sink)
@@ -828,7 +891,7 @@ CachedDownloadResult Downloader::downloadCached(
                writeFile(dataFile, url + "\n" + res.etag + "\n" + std::to_string(time(0)) + "\n");
            } catch (DownloadError & e) {
                if (storePath.empty()) throw;
-                warn("%s; using cached result", e.msg());
+                warn("warning: %s; using cached result", e.msg());
                result.etag = expectedETag;
            }
        }
@@ -892,4 +955,5 @@ bool isUri(const string & s)
     return scheme == "http" || scheme == "https" || scheme == "file" || scheme == "channel" || scheme == "git" || scheme == "s3" || scheme == "ssh";
 }
 
+
 }

src/libstore/download.hh

@@ -99,13 +99,11 @@ struct Downloader
 
     std::future<DownloadResult> enqueueDownload(const DownloadRequest & request);
 
-    /* Synchronously download a file. The request will be retried in
-       case of transient failures. */
+    /* Synchronously download a file. */
     DownloadResult download(const DownloadRequest & request);
 
     /* Download a file, writing its data to a sink. The sink will be
-       invoked on the thread of the caller. The request will not be
-       retried in case of transient failures. */
+       invoked on the thread of the caller. */
     void download(DownloadRequest && request, Sink & sink);
 
     /* Check if the specified file is already in ~/.cache/nix/tarballs
@@ -131,11 +129,6 @@ public:
     DownloadError(Downloader::Error error, const FormatOrString & fs)
         : Error(fs), error(error)
     { }
-
-    bool isTransient() override
-    {
-        return error == Downloader::Error::Transient;
-    }
 };
 
 bool isUri(const string & s);
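
Note (not part of the patch): for callers of this header, the practical upshot is that retry policy now lives in the request itself; the number of attempts and the base delay are fields on DownloadRequest, and Downloader::download() no longer needs an external retry wrapper. A rough usage sketch against this interface, under the assumption that DownloadRequest is constructible from a URI string as elsewhere in this codebase; the URL is made up and the defaults otherwise come from downloadSettings:

    // Sketch: synchronous download with per-request retry settings.
    #include "download.hh"

    #include <iostream>

    void fetchExample()
    {
        using namespace nix;

        DownloadRequest request("https://cache.example.org/nix-cache-info");
        request.tries = 5;             // give up after five transient failures
        request.baseRetryTimeMs = 250; // first retry after ~250 ms, roughly doubling each time

        DownloadResult result = getDownloader()->download(request);

        if (result.cached)
            std::cout << "server returned 304; using cached data\n";
        else if (result.data)
            std::cout << "fetched " << result.data->size() << " bytes\n";
    }
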

src/libstore/http-binary-cache-store.cc

@@ -2,7 +2,6 @@
 #include "download.hh"
 #include "globals.hh"
 #include "nar-info-disk-cache.hh"
-#include "retry.hh"
 
 namespace nix {
 
@@ -136,46 +135,21 @@ protected:
    {
        checkEnabled();
 
-        struct State
-        {
-            DownloadRequest request;
-            std::function<void()> tryDownload;
-            unsigned int attempt = 0;
-            State(DownloadRequest && request) : request(request) {}
-        };
-
-        auto state = std::make_shared<State>(makeRequest(path));
-
-        state->tryDownload = [callback, state, this]() {
-            getDownloader()->enqueueDownload(state->request,
-                {[callback, state, this](std::future<DownloadResult> result) {
-                    try {
-                        callback(result.get().data);
-                    } catch (DownloadError & e) {
-                        if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
-                            return callback(std::shared_ptr<std::string>());
-                        ++state->attempt;
-                        if (state->attempt < state->request.tries && e.isTransient()) {
-                            auto ms = retrySleepTime(state->attempt);
-                            warn("%s; retrying in %d ms", e.what(), ms);
-                            /* We can't sleep here because that would
-                               block the download thread. So use a
-                               separate thread for sleeping. */
-                            std::thread([state, ms]() {
-                                std::this_thread::sleep_for(std::chrono::milliseconds(ms));
-                                state->tryDownload();
-                            }).detach();
-                        } else {
-                            maybeDisable();
-                            callback.rethrow();
-                        }
-                    } catch (...) {
-                        callback.rethrow();
-                    }
-                }});
-        };
-
-        state->tryDownload();
+        auto request(makeRequest(path));
+
+        getDownloader()->enqueueDownload(request,
+            {[callback, this](std::future<DownloadResult> result) {
+                try {
+                    callback(result.get().data);
+                } catch (DownloadError & e) {
+                    if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
+                        return callback(std::shared_ptr<std::string>());
+                    maybeDisable();
+                    callback.rethrow();
+                } catch (...) {
+                    callback.rethrow();
+                }
+            }});
    }
 
 };

src/libstore/store-api.cc

@@ -6,11 +6,10 @@
 #include "thread-pool.hh"
 #include "json.hh"
 #include "derivations.hh"
-#include "retry.hh"
-#include "download.hh"
 
 #include <future>
 
+
 namespace nix {
 
 
@@ -580,57 +579,54 @@ void Store::buildPaths(const PathSet & paths, BuildMode buildMode)
 void copyStorePath(ref<Store> srcStore, ref<Store> dstStore,
     const Path & storePath, RepairFlag repair, CheckSigsFlag checkSigs)
 {
-    retry<void>(downloadSettings.tries, [&]() {
-
-        auto srcUri = srcStore->getUri();
-        auto dstUri = dstStore->getUri();
-
-        Activity act(*logger, lvlInfo, actCopyPath,
-            srcUri == "local" || srcUri == "daemon"
-              ? fmt("copying path '%s' to '%s'", storePath, dstUri)
-              : dstUri == "local" || dstUri == "daemon"
-                ? fmt("copying path '%s' from '%s'", storePath, srcUri)
-                : fmt("copying path '%s' from '%s' to '%s'", storePath, srcUri, dstUri),
-            {storePath, srcUri, dstUri});
-        PushActivity pact(act.id);
-
-        auto info = srcStore->queryPathInfo(storePath);
-
-        uint64_t total = 0;
-
-        if (!info->narHash) {
-            StringSink sink;
-            srcStore->narFromPath({storePath}, sink);
-            auto info2 = make_ref<ValidPathInfo>(*info);
-            info2->narHash = hashString(htSHA256, *sink.s);
-            if (!info->narSize) info2->narSize = sink.s->size();
-            if (info->ultimate) info2->ultimate = false;
-            info = info2;
-
-            StringSource source(*sink.s);
-            dstStore->addToStore(*info, source, repair, checkSigs);
-            return;
-        }
-
-        if (info->ultimate) {
-            auto info2 = make_ref<ValidPathInfo>(*info);
-            info2->ultimate = false;
-            info = info2;
-        }
-
-        auto source = sinkToSource([&](Sink & sink) {
-            LambdaSink wrapperSink([&](const unsigned char * data, size_t len) {
-                sink(data, len);
-                total += len;
-                act.progress(total, info->narSize);
-            });
-            srcStore->narFromPath({storePath}, wrapperSink);
-        }, [&]() {
-            throw EndOfFile("NAR for '%s' fetched from '%s' is incomplete", storePath, srcStore->getUri());
-        });
-
-        dstStore->addToStore(*info, *source, repair, checkSigs);
-    });
+    auto srcUri = srcStore->getUri();
+    auto dstUri = dstStore->getUri();
+
+    Activity act(*logger, lvlInfo, actCopyPath,
+        srcUri == "local" || srcUri == "daemon"
+          ? fmt("copying path '%s' to '%s'", storePath, dstUri)
+          : dstUri == "local" || dstUri == "daemon"
+            ? fmt("copying path '%s' from '%s'", storePath, srcUri)
+            : fmt("copying path '%s' from '%s' to '%s'", storePath, srcUri, dstUri),
+        {storePath, srcUri, dstUri});
+    PushActivity pact(act.id);
+
+    auto info = srcStore->queryPathInfo(storePath);
+
+    uint64_t total = 0;
+
+    if (!info->narHash) {
+        StringSink sink;
+        srcStore->narFromPath({storePath}, sink);
+        auto info2 = make_ref<ValidPathInfo>(*info);
+        info2->narHash = hashString(htSHA256, *sink.s);
+        if (!info->narSize) info2->narSize = sink.s->size();
+        if (info->ultimate) info2->ultimate = false;
+        info = info2;
+
+        StringSource source(*sink.s);
+        dstStore->addToStore(*info, source, repair, checkSigs);
+        return;
+    }
+
+    if (info->ultimate) {
+        auto info2 = make_ref<ValidPathInfo>(*info);
+        info2->ultimate = false;
+        info = info2;
+    }
+
+    auto source = sinkToSource([&](Sink & sink) {
+        LambdaSink wrapperSink([&](const unsigned char * data, size_t len) {
+            sink(data, len);
+            total += len;
+            act.progress(total, info->narSize);
+        });
+        srcStore->narFromPath({storePath}, wrapperSink);
+    }, [&]() {
+        throw EndOfFile("NAR for '%s' fetched from '%s' is incomplete", storePath, srcStore->getUri());
+    });
+
+    dstStore->addToStore(*info, *source, repair, checkSigs);
 }
 
 

src/libstore/retry.hh (deleted)

@@ -1,38 +0,0 @@
-#pragma once
-
-#include "logging.hh"
-
-#include <functional>
-#include <cmath>
-#include <random>
-#include <thread>
-
-namespace nix {
-
-inline unsigned int retrySleepTime(unsigned int attempt)
-{
-    std::random_device rd;
-    std::mt19937 mt19937;
-    return 250.0 * std::pow(2.0f,
-        attempt - 1 + std::uniform_real_distribution<>(0.0, 0.5)(mt19937));
-}
-
-template<typename C>
-C retry(unsigned int attempts, std::function<C()> && f)
-{
-    unsigned int attempt = 0;
-    while (true) {
-        try {
-            return f();
-        } catch (BaseError & e) {
-            ++attempt;
-            if (attempt >= attempts || !e.isTransient())
-                throw;
-            auto ms = retrySleepTime(attempt);
-            warn("%s; retrying in %d ms", e.what(), ms);
-            std::this_thread::sleep_for(std::chrono::milliseconds(ms));
-        }
-    }
-}
-
-}

src/libutil/types.hh

@@ -109,8 +109,6 @@ public:
     const string & msg() const { return err; }
     const string & prefix() const { return prefix_; }
     BaseError & addPrefix(const FormatOrString & fs);
-
-    virtual bool isTransient() { return false; }
 };
 
 #define MakeError(newClass, superClass) \

src/nix/copy.cc

@@ -36,7 +36,7 @@ struct CmdCopy : StorePathsCommand
            .set(&checkSigs, NoCheckSigs);
 
        mkFlag()
-            .longName("substitute")
+            .longName("substitute-on-destination")
            .shortName('s')
            .description("whether to try substitutes on the destination store (only supported by SSH)")
            .set(&substitute, Substitute);