Mirror of https://github.com/privatevoid-net/nix-super.git (synced 2024-11-22 14:06:16 +02:00)
s3 binary cache: support specifying an endpoint
This works for uploading, but not for downloading.
Parent: 3193f5ff3e
Commit: 49a53c1d3f
5 changed files with 33 additions and 8 deletions
@@ -118,12 +118,29 @@ fetch prebuilt binaries from <uri>cache.nixos.org</uri>.</para>
     </para>
   </listitem>
 </varlistentry>
 
+<varlistentry><term><literal>endpoint</literal></term>
+  <listitem>
+    <para>
+      The URL to your S3-compatible service, for when not using
+      Amazon S3. Do not specify this value if you're using Amazon
+      S3.
+    </para>
+    <note><para>This endpoint must support HTTPS and will use
+    path-based addressing instead of virtual host based
+    addressing.</para></note>
+  </listitem>
+</varlistentry>
+
 </variablelist>
 
 <example><title>Uploading with non-default credential profile for Amazon S3</title>
   <para><command>nix copy --to ssh://machine nixpkgs.hello s3://example-bucket?profile=cache-upload</command></para>
 </example>
 
+<example><title>Uploading to an S3-Compatible Binary Cache</title>
+  <para><command>nix copy --to ssh://machine nixpkgs.hello s3://example-bucket?profile=cache-upload&endpoint=minio.example.com</command></para>
+</example>
+
 <para>The user writing to the bucket will need to perform the
 following actions against the bucket:</para>
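A rough illustration of the endpoint note in the hunk above, using its hypothetical names: with path-based addressing, the cache behind s3://example-bucket?endpoint=minio.example.com is reached at URLs of the form https://minio.example.com/example-bucket/<key>, whereas plain Amazon S3 uses virtual-host addressing of the form https://example-bucket.s3.amazonaws.com/<key>.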
@@ -598,7 +598,7 @@ struct CurlDownloader : public Downloader
     // FIXME: do this on a worker thread
     try {
 #ifdef ENABLE_S3
-        S3Helper s3Helper("", Aws::Region::US_EAST_1); // FIXME: make configurable
+        S3Helper s3Helper("", Aws::Region::US_EAST_1, ""); // FIXME: make configurable
         auto slash = request.uri.find('/', 5);
         if (slash == std::string::npos)
             throw nix::Error("bad S3 URI '%s'", request.uri);
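The downloader path above (used when fetching from s3:// URLs) still constructs its S3Helper with a hard-coded empty endpoint, as the FIXME notes, which is why the commit description says the new setting works for uploading but not for downloading.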
@@ -84,8 +84,8 @@ static void initAWS()
     });
 }
 
-S3Helper::S3Helper(const std::string & profile, const std::string & region)
-    : config(makeConfig(region))
+S3Helper::S3Helper(const std::string & profile, const std::string & region, const std::string & endpoint)
+    : config(makeConfig(region, endpoint))
     , client(make_ref<Aws::S3::S3Client>(
         profile == ""
             ? std::dynamic_pointer_cast<Aws::Auth::AWSCredentialsProvider>(
@@ -99,7 +99,7 @@ S3Helper::S3Helper(const std::string & profile, const std::string & region)
 #else
             Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never,
 #endif
-            false))
+            endpoint.empty()))
 {
 }
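In the AWS C++ SDK, this last S3Client constructor argument is useVirtualAddressing, so passing endpoint.empty() keeps the default virtual-host addressing for plain Amazon S3 and switches to path-style addressing whenever a custom endpoint is set, matching the note added to the manual above.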
@@ -116,11 +116,14 @@ class RetryStrategy : public Aws::Client::DefaultRetryStrategy
     }
 };
 
-ref<Aws::Client::ClientConfiguration> S3Helper::makeConfig(const string & region)
+ref<Aws::Client::ClientConfiguration> S3Helper::makeConfig(const string & region, const string & endpoint)
 {
     initAWS();
     auto res = make_ref<Aws::Client::ClientConfiguration>();
     res->region = region;
+    if (!endpoint.empty()) {
+        res->endpointOverride = endpoint;
+    }
     res->requestTimeoutMs = 600 * 1000;
     res->retryStrategy = std::make_shared<RetryStrategy>();
     res->caFile = settings.caFile;
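For context, a minimal standalone sketch (not part of the patch) of how an endpoint override reaches the SDK client: the helper name makeCustomEndpointClient and the example host are hypothetical, while endpointOverride, the signing policy, and the useVirtualAddressing flag mirror what the patch wires up. It assumes Aws::InitAPI has already been called, as Nix's initAWS() does.

    #include <aws/core/Aws.h>
    #include <aws/core/auth/AWSAuthSigner.h>
    #include <aws/core/client/ClientConfiguration.h>
    #include <aws/s3/S3Client.h>

    // Build an S3 client against a custom, S3-compatible endpoint.
    // Assumes Aws::InitAPI() has already been called.
    Aws::S3::S3Client makeCustomEndpointClient(const Aws::String & endpoint)
    {
        Aws::Client::ClientConfiguration config;
        config.region = Aws::Region::US_EAST_1;
        if (!endpoint.empty())
            config.endpointOverride = endpoint; // e.g. "minio.example.com" (hypothetical)
        // The third constructor argument is useVirtualAddressing; endpoint.empty()
        // is false for a custom endpoint, so the bucket name goes into the request
        // path rather than the host name (path-style addressing).
        return Aws::S3::S3Client(config,
            Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never,
            /* useVirtualAddressing = */ endpoint.empty());
    }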
@@ -170,6 +173,7 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
 {
     const Setting<std::string> profile{this, "", "profile", "The name of the AWS configuration profile to use."};
     const Setting<std::string> region{this, Aws::Region::US_EAST_1, "region", {"aws-region"}};
+    const Setting<std::string> endpoint{this, "", "endpoint", "An optional override of the endpoint to use when talking to S3."};
     const Setting<std::string> narinfoCompression{this, "", "narinfo-compression", "compression method for .narinfo files"};
     const Setting<std::string> lsCompression{this, "", "ls-compression", "compression method for .ls files"};
     const Setting<std::string> logCompression{this, "", "log-compression", "compression method for log/* files"};
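These Setting declarations are what the query parameters of an s3:// store URI bind to, so a URI such as s3://example-bucket?endpoint=minio.example.com (the hypothetical names from the manual example above) is what populates the new endpoint setting.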
@@ -186,7 +190,7 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
         const Params & params, const std::string & bucketName)
         : S3BinaryCacheStore(params)
         , bucketName(bucketName)
-        , s3Helper(profile, region)
+        , s3Helper(profile, region, endpoint)
     {
         diskCache = getNarInfoDiskCache();
     }
@@ -14,9 +14,9 @@ struct S3Helper
     ref<Aws::Client::ClientConfiguration> config;
     ref<Aws::S3::S3Client> client;
 
-    S3Helper(const std::string & profile, const std::string & region);
+    S3Helper(const std::string & profile, const std::string & region, const std::string & endpoint);
 
-    ref<Aws::Client::ClientConfiguration> makeConfig(const std::string & region);
+    ref<Aws::Client::ClientConfiguration> makeConfig(const std::string & region, const std::string & endpoint);
 
     struct DownloadResult
     {
@@ -72,6 +72,10 @@ struct CmdCopy : StorePathsCommand
                 "To populate the current folder build output to a S3 binary cache:",
                 "nix copy --to s3://my-bucket?region=eu-west-1"
             },
+            Example{
+                "To populate the current folder build output to an S3-compatible binary cache:",
+                "nix copy --to s3://my-bucket?region=eu-west-1&endpoint=example.com"
+            },
 #endif
         };
     }
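When either of these examples is run from an interactive shell, the store URI has to be quoted, since ? and & are shell metacharacters, e.g. nix copy --to 's3://my-bucket?region=eu-west-1&endpoint=example.com'; the same applies to the endpoint example added to the manual above.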