Compare commits: master...pr-flake-u (1 commit)

Commit: 46164098c2

35 changed files with 675 additions and 293 deletions
@@ -3,14 +3,12 @@
 {
   hostLinks = lib.pipe config.services [
     (lib.filterAttrs (_: svc: svc.meshLinks != {}))
-    (lib.mapAttrsToList (svcName: svc:
-      lib.mapAttrsToList (groupName: links:
-        lib.genAttrs svc.nodes.${groupName} (hostName: lib.mapAttrs (_: cfg: { ... }: {
-          imports = [ cfg.link ];
-          ipv4 = config.vars.mesh.${hostName}.meshIp;
-        }) links)
-      ) svc.meshLinks
-    ))
+    (lib.mapAttrsToList (svcName: svc: lib.mapAttrsToList (name: cfg: lib.genAttrs svc.nodes.${name} (hostName: {
+      ${cfg.name} = { ... }: {
+        imports = [ cfg.link ];
+        ipv4 = config.vars.mesh.${hostName}.meshIp;
+      };
+    })) svc.meshLinks))
     (map lib.mkMerge)
     lib.mkMerge
   ];
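Aside: the hunk above leans on a handful of nixpkgs lib combinators. A minimal, self-contained sketch of how they compose (the service data below is invented purely for illustration; only nixpkgs lib itself is assumed):

let
  lib = (import <nixpkgs> { }).lib;
  # Invented stand-in for config.services
  services = {
    attic = { nodes.server = [ "node1" "node2" ]; meshLinks.server = { }; };
    plain = { meshLinks = { }; };
  };
in lib.pipe services [
  # Drop services that declare no mesh links.
  (lib.filterAttrs (_: svc: svc.meshLinks != { }))
  # For each remaining service, build one attrset per node.
  (lib.mapAttrsToList (_: svc:
    lib.genAttrs svc.nodes.server (hostName: { inherit hostName; })))
  # => [ { node1 = { hostName = "node1"; }; node2 = { hostName = "node2"; }; } ]
]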
@@ -38,8 +38,12 @@ in
       };
       meshLinks = mkOption {
         description = "Create host links on the mesh network.";
-        type = types.attrsOf (types.attrsOf (types.submodule {
+        type = types.attrsOf (types.submodule ({ name, ... }: {
           options = {
+            name = mkOption {
+              type = types.str;
+              default = "${serviceName}-${name}";
+            };
             link = mkOption {
               type = types.deferredModule;
               default = {};
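The remaining hunks in this comparison move call sites between the two schemas shown here. As a quick reference, the two shapes look roughly like this (copied from the attic and forge hunks below; indentation assumed):

# One side: group -> link name -> link options
meshLinks.server.attic.link.protocol = "http";

# Other side: group -> options, with the link name as an explicit option
meshLinks.server = {
  name = "attic";
  link.protocol = "http";
};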
@@ -74,7 +74,7 @@ in
       serviceConfig = {
         Restart = "on-failure";
         RestartMaxDelaySec = 30;
-        RestartSteps = 5;
+        RestartStesp = 5;
         RestartMode = "direct";
       };
     };
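For readers unfamiliar with these keys: NixOS forwards serviceConfig attributes verbatim into the generated [Service] section, so a misspelled directive is typically not caught at eval time and only surfaces as an "unknown key" warning from systemd at runtime. A sketch of the equivalent standalone module (illustrative, not from the repo):

{
  systemd.services.example.serviceConfig = {
    Restart = "on-failure";
    RestartMaxDelaySec = 30;  # upper bound for the restart backoff
    RestartSteps = 5;         # number of backoff steps (recent systemd releases)
    RestartMode = "direct";
  };
}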
@@ -16,7 +16,10 @@
         ./nar-serve.nix
       ];
     };
-    meshLinks.server.attic.link.protocol = "http";
+    meshLinks.server = {
+      name = "attic";
+      link.protocol = "http";
+    };
     secrets = let
       inherit (config.services.attic) nodes;
     in {
@@ -9,13 +9,17 @@ let
 in

 {
+  imports = [
+    depot.inputs.attic.nixosModules.atticd
+  ];
+
   services.locksmith.waitForSecrets.atticd = [ "garage-attic" ];

   services.atticd = {
     enable = true;
     package = depot.inputs.attic.packages.attic-server;

-    environmentFile = secrets.serverToken.path;
+    credentialsFile = secrets.serverToken.path;
     mode = if isMonolith then "monolithic" else "api-server";

     settings = {
@@ -65,7 +69,6 @@ in
     serviceConfig = {
       DynamicUser = lib.mkForce false;
       RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" "AF_NETLINK" ];
-      SystemCallFilter = lib.mkAfter [ "@resources" ];
     };
     environment = {
       AWS_SHARED_CREDENTIALS_FILE = "/run/locksmith/garage-attic";
@@ -77,7 +80,6 @@ in
       mode = if isMonolith then "manual" else "direct";
       definition = {
         name = "atticd";
-        id = "atticd-${config.services.atticd.mode}";
         address = link.ipv4;
         inherit (link) port;
         checks = [
@@ -4,7 +4,10 @@
   services.forge = {
     nodes.server = [ "VEGAS" ];
     nixos.server = ./server.nix;
-    meshLinks.server.forge.link.protocol = "http";
+    meshLinks.server = {
+      name = "forge";
+      link.protocol = "http";
+    };
     secrets = with config.services.forge.nodes; {
       oidcSecret = {
         nodes = server;
@@ -33,9 +33,6 @@ in
       ldapbindaddress = "${ldapLink.ipv4}:${ldapLink.portStr}";
       origin = frontendLink.url;
       inherit domain;
-      online_backup = {
-        versions = 7;
-      };
     };
   };

@@ -29,7 +29,10 @@
       io-tweaks = [ "VEGAS" ];
       remote-api = [ "VEGAS" ];
     };
-    meshLinks.gateway.ipfsGateway.link.protocol = "http";
+    meshLinks.gateway = {
+      name = "ipfsGateway";
+      link.protocol = "http";
+    };
     nixos = {
       node = [
         ./node.nix
@@ -9,7 +9,7 @@ in
   environment = {
     OTEL_TRACES_EXPORTER = "otlp";
     OTEL_EXPORTER_OTLP_PROTOCOL = "grpc";
-    OTEL_EXPORTER_OTLP_ENDPOINT = "${cluster.config.ways.ingest-traces-otlp.url}:443";
+    OTEL_EXPORTER_OTLP_ENDPOINT = cluster.config.links.tempo-otlp-grpc.url;
    OTEL_TRACES_SAMPLER = "parentbased_traceidratio";
    OTEL_TRACES_SAMPLER_ARG = "0.50";
  };
@@ -26,7 +26,7 @@ in {
     name = "logging";
     positions.filename = "\${STATE_DIRECTORY:/tmp}/logging-positions.yaml";
     clients = singleton {
-      url = "${cluster.config.ways.ingest-logs.url}/loki/api/v1/push";
+      url = "${cluster.config.ways.monitoring-logs.url}/loki/api/v1/push";
     };
     scrape_configs = singleton {
       job_name = "journal";
@@ -18,6 +18,26 @@ in
       protocol = "http";
       ipv4 = meshIpFor "server";
     };
+    tempo = {
+      protocol = "http";
+      ipv4 = meshIpFor "server";
+    };
+    tempo-grpc = {
+      protocol = "http";
+      ipv4 = "127.0.0.1";
+    };
+    tempo-otlp-http = {
+      protocol = "http";
+      ipv4 = meshIpFor "server";
+    };
+    tempo-otlp-grpc = {
+      protocol = "http";
+      ipv4 = meshIpFor "server";
+    };
+    tempo-zipkin-http = {
+      protocol = "http";
+      ipv4 = meshIpFor "server";
+    };
   };
   hostLinks = lib.genAttrs config.services.monitoring.nodes.grafana (name: {
     grafana = {
@@ -31,7 +51,6 @@ in
     blackbox = [ "checkmate" "grail" "prophet" ];
     grafana = [ "VEGAS" "prophet" ];
     logging = [ "VEGAS" "grail" ];
-    tracing = [ "VEGAS" "grail" ];
     server = [ "VEGAS" ];
   };
   nixos = {
@@ -42,19 +61,14 @@ in
         ./provisioning/dashboards.nix
       ];
       logging = ./logging.nix;
-      tracing = ./tracing.nix;
       server = [
         ./server.nix
+        ./tracing.nix
       ];
     };
-    meshLinks = {
-      logging.loki.link.protocol = "http";
-      tracing = {
-        tempo.link.protocol = "http";
-        tempo-otlp-http.link.protocol = "http";
-        tempo-otlp-grpc.link.protocol = "grpc";
-        tempo-zipkin-http.link.protocol = "http";
-      };
+    meshLinks.logging = {
+      name = "loki";
+      link.protocol = "http";
     };
   };

@ -68,51 +82,29 @@ in
|
||||||
nodes = config.services.monitoring.nodes.logging;
|
nodes = config.services.monitoring.nodes.logging;
|
||||||
format = "envFile";
|
format = "envFile";
|
||||||
};
|
};
|
||||||
tempo-ingest.locksmith = {
|
tempo = { };
|
||||||
nodes = config.services.monitoring.nodes.tracing;
|
|
||||||
format = "envFile";
|
|
||||||
};
|
|
||||||
tempo-query.locksmith = {
|
|
||||||
nodes = config.services.monitoring.nodes.tracing;
|
|
||||||
format = "envFile";
|
|
||||||
};
|
|
||||||
};
|
};
|
||||||
buckets = {
|
buckets = {
|
||||||
loki-chunks.allow = {
|
loki-chunks.allow = {
|
||||||
loki-ingest = [ "read" "write" ];
|
loki-ingest = [ "read" "write" ];
|
||||||
loki-query = [ "read" ];
|
loki-query = [ "read" ];
|
||||||
};
|
};
|
||||||
tempo-chunks.allow = {
|
tempo-chunks.allow.tempo = [ "read" "write" ];
|
||||||
tempo-ingest = [ "read" "write" ];
|
|
||||||
tempo-query = [ "read" ];
|
|
||||||
};
|
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
ways = let
|
ways = config.lib.forService "monitoring" {
|
||||||
query = consulService: {
|
monitoring = {
|
||||||
inherit consulService;
|
consulService = "grafana";
|
||||||
internal = true;
|
extras.locations."/".proxyWebsockets = true;
|
||||||
extras.extraConfig = ''
|
|
||||||
proxy_read_timeout 3600s;
|
|
||||||
'';
|
|
||||||
};
|
};
|
||||||
ingest = consulService: {
|
monitoring-logs = {
|
||||||
inherit consulService;
|
|
||||||
internal = true;
|
internal = true;
|
||||||
|
consulService = "loki";
|
||||||
extras.extraConfig = ''
|
extras.extraConfig = ''
|
||||||
client_max_body_size 4G;
|
client_max_body_size 4G;
|
||||||
proxy_read_timeout 3600s;
|
proxy_read_timeout 3600s;
|
||||||
'';
|
'';
|
||||||
};
|
};
|
||||||
in config.lib.forService "monitoring" {
|
|
||||||
monitoring = {
|
|
||||||
consulService = "grafana";
|
|
||||||
extras.locations."/".proxyWebsockets = true;
|
|
||||||
};
|
|
||||||
monitoring-logs = query "loki";
|
|
||||||
monitoring-traces = query "tempo";
|
|
||||||
ingest-logs = ingest "loki";
|
|
||||||
ingest-traces-otlp = ingest "tempo-ingest-otlp-grpc" // { grpc = true; };
|
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
|
@ -73,16 +73,6 @@ in
|
||||||
inherit (cluster.config.ways.monitoring-logs) url;
|
inherit (cluster.config.ways.monitoring-logs) url;
|
||||||
type = "loki";
|
type = "loki";
|
||||||
}
|
}
|
||||||
{
|
|
||||||
name = "Tempo";
|
|
||||||
uid = "P214B5B846CF3925F";
|
|
||||||
inherit (cluster.config.ways.monitoring-traces) url;
|
|
||||||
type = "tempo";
|
|
||||||
jsonData = {
|
|
||||||
serviceMap.datasourceUid = "PBFA97CFB590B2093";
|
|
||||||
nodeGraph.enabled = true;
|
|
||||||
};
|
|
||||||
}
|
|
||||||
];
|
];
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
cluster/services/monitoring/secrets/tempo-secrets.age (new file, 16 lines)
@@ -0,0 +1,16 @@
+age-encryption.org/v1
+-> ssh-ed25519 NO562A KhCGp7PAMGrEdzRxBrsW4tRk30JwpI+4lPzrRCUhSw4
+8s7WqA5c3zS1euN5R+jfFNBdvr8OQW8P4NFeqtNsIKo
+-> ssh-ed25519 5/zT0w 79hJQ2H76EZTW7YcQFCtKaS5Kbssx4Z8dPFjIVzRgFk
+A1fDJbUnyIRy+kWa3PhJNj/SdRPlcEy6FYsAfnuZ2AQ
+-> ssh-ed25519 d3WGuA aylkdL1KliM1NfrYDGlG8X6YjXvVUCU4sV90I+a840U
+6sXdqIPjtoNSylZRh1DCghHOwDo+fC7WB4QWQoWmG48
+-> //gd+2-grease baUWA$3 z-qs3W O/2.1W
+Sfq3+rkMJhpUTTmcos5TaaUtX2Ip9pciHAZLiWPix+C9N7ccac/1W5RNedMJCLsq
+MQ+xKzexf8+hgNVhKOksvbKBBROXqk1bUOKk8w3OgFPmmByzmCBUwkdkeu5DFTYR
+rg
+--- kUl1uIPRkM5y7C68kdN22pMKXP7gazyha4PE+ap0Jqw
(remaining lines are the binary age payload and are not reproducible as text)
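For context on how this encrypted blob is consumed, a later hunk in this comparison declares it via age.secrets and feeds the decrypted path to the tempo unit; roughly (sketch assembled from lines that appear further down in the diff):

{
  # Register the encrypted file so it gets decrypted at activation time.
  age.secrets.tempoSecrets.file = ./secrets/tempo-secrets.age;

  # Point the tempo service at the decrypted environment file.
  systemd.services.tempo.serviceConfig.EnvironmentFile =
    config.age.secrets.tempoSecrets.path;
}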
@ -1,16 +1,14 @@
|
||||||
{ cluster, config, pkgs, ... }:
|
{ cluster, config, pkgs, ... }:
|
||||||
|
|
||||||
let
|
let
|
||||||
inherit (cluster.config.links) prometheus-ingest;
|
inherit (cluster.config) links;
|
||||||
inherit (config.links) tempo-grpc;
|
|
||||||
links = cluster.config.hostLinks.${config.networking.hostName};
|
|
||||||
dataDir = "/srv/storage/private/tempo";
|
dataDir = "/srv/storage/private/tempo";
|
||||||
tempoConfig = {
|
tempoConfig = {
|
||||||
server = {
|
server = {
|
||||||
http_listen_address = links.tempo.ipv4;
|
http_listen_address = links.tempo.ipv4;
|
||||||
http_listen_port = links.tempo.port;
|
http_listen_port = links.tempo.port;
|
||||||
grpc_listen_address = tempo-grpc.ipv4;
|
grpc_listen_address = links.tempo-grpc.ipv4;
|
||||||
grpc_listen_port = tempo-grpc.port;
|
grpc_listen_port = links.tempo-grpc.port;
|
||||||
};
|
};
|
||||||
distributor.receivers = {
|
distributor.receivers = {
|
||||||
otlp = {
|
otlp = {
|
||||||
|
@ -21,7 +19,7 @@ let
|
||||||
};
|
};
|
||||||
zipkin.endpoint = links.tempo-zipkin-http.tuple;
|
zipkin.endpoint = links.tempo-zipkin-http.tuple;
|
||||||
};
|
};
|
||||||
querier.frontend_worker.frontend_address = tempo-grpc.tuple;
|
querier.frontend_worker.frontend_address = links.tempo-grpc.tuple;
|
||||||
ingester = {
|
ingester = {
|
||||||
trace_idle_period = "30s";
|
trace_idle_period = "30s";
|
||||||
max_block_bytes = 1000000;
|
max_block_bytes = 1000000;
|
||||||
|
@ -58,7 +56,7 @@ let
|
||||||
path = "${dataDir}/generator/wal";
|
path = "${dataDir}/generator/wal";
|
||||||
remote_write = [
|
remote_write = [
|
||||||
{
|
{
|
||||||
url = "${prometheus-ingest.url}/api/v1/write";
|
url = "${links.prometheus-ingest.url}/api/v1/write";
|
||||||
send_exemplars = true;
|
send_exemplars = true;
|
||||||
}
|
}
|
||||||
];
|
];
|
||||||
|
@ -70,11 +68,7 @@ let
|
||||||
];
|
];
|
||||||
};
|
};
|
||||||
in {
|
in {
|
||||||
links.tempo-grpc.protocol = "http";
|
age.secrets.tempoSecrets.file = ./secrets/tempo-secrets.age;
|
||||||
|
|
||||||
services.locksmith.waitForSecrets.tempo = [
|
|
||||||
"garage-tempo-ingest"
|
|
||||||
];
|
|
||||||
|
|
||||||
users.users.tempo = {
|
users.users.tempo = {
|
||||||
isSystemUser = true;
|
isSystemUser = true;
|
||||||
|
@ -87,53 +81,24 @@ in {
|
||||||
|
|
||||||
systemd.services.tempo = {
|
systemd.services.tempo = {
|
||||||
wantedBy = [ "multi-user.target" ];
|
wantedBy = [ "multi-user.target" ];
|
||||||
distributed = {
|
|
||||||
enable = true;
|
|
||||||
registerServices = [
|
|
||||||
"tempo"
|
|
||||||
"tempo-ingest-otlp-grpc"
|
|
||||||
];
|
|
||||||
};
|
|
||||||
serviceConfig = {
|
serviceConfig = {
|
||||||
User = "tempo";
|
User = "tempo";
|
||||||
Group = "tempo";
|
Group = "tempo";
|
||||||
ExecStart = "${pkgs.tempo}/bin/tempo -config.file=${pkgs.writeText "tempo.yaml" (builtins.toJSON tempoConfig)}";
|
ExecStart = "${pkgs.tempo}/bin/tempo -config.file=${pkgs.writeText "tempo.yaml" (builtins.toJSON tempoConfig)}";
|
||||||
PrivateTmp = true;
|
PrivateTmp = true;
|
||||||
EnvironmentFile = "/run/locksmith/garage-tempo-ingest";
|
EnvironmentFile = config.age.secrets.tempoSecrets.path;
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
services.grafana.provision.datasources.settings.datasources = [
|
||||||
consul.services = {
|
{
|
||||||
tempo = {
|
name = "Tempo";
|
||||||
mode = "manual";
|
uid = "P214B5B846CF3925F";
|
||||||
definition = {
|
inherit (links.tempo) url;
|
||||||
name = "tempo";
|
type = "tempo";
|
||||||
address = links.tempo.ipv4;
|
jsonData = {
|
||||||
inherit (links.tempo) port;
|
serviceMap.datasourceUid = "PBFA97CFB590B2093"; # prometheus
|
||||||
checks = [
|
nodeGraph.enabled = true;
|
||||||
{
|
|
||||||
name = "Tempo";
|
|
||||||
id = "service:tempo:backend";
|
|
||||||
interval = "5s";
|
|
||||||
http = "${links.tempo.url}/ready";
|
|
||||||
}
|
|
||||||
];
|
|
||||||
};
|
};
|
||||||
};
|
}
|
||||||
tempo-ingest-otlp-grpc = {
|
];
|
||||||
mode = "manual";
|
|
||||||
definition = {
|
|
||||||
name = "tempo-ingest-otlp-grpc";
|
|
||||||
address = links.tempo-otlp-grpc.ipv4;
|
|
||||||
inherit (links.tempo-otlp-grpc) port;
|
|
||||||
checks = [
|
|
||||||
{
|
|
||||||
name = "Tempo Service Status";
|
|
||||||
id = "service:tempo-ingest-otlp-grpc:tempo";
|
|
||||||
alias_service = "tempo";
|
|
||||||
}
|
|
||||||
];
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
{ config, depot, ... }:
|
{ depot, ... }:
|
||||||
|
|
||||||
{
|
{
|
||||||
services.sso = {
|
services.sso = {
|
||||||
|
@ -18,12 +18,4 @@
|
||||||
login.target = ssoAddr;
|
login.target = ssoAddr;
|
||||||
account.target = ssoAddr;
|
account.target = ssoAddr;
|
||||||
};
|
};
|
||||||
|
|
||||||
patroni = config.lib.forService "sso" {
|
|
||||||
databases.keycloak = {};
|
|
||||||
users.keycloak.locksmith = {
|
|
||||||
nodes = config.services.sso.nodes.host;
|
|
||||||
format = "raw";
|
|
||||||
};
|
|
||||||
};
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -8,10 +8,12 @@ in
|
||||||
{
|
{
|
||||||
links.keycloak.protocol = "http";
|
links.keycloak.protocol = "http";
|
||||||
|
|
||||||
services.locksmith.waitForSecrets.keycloak = [
|
age.secrets.keycloak-dbpass = {
|
||||||
"patroni-keycloak"
|
file = ../../../secrets/keycloak-dbpass.age;
|
||||||
];
|
owner = "root";
|
||||||
|
group = "root";
|
||||||
|
mode = "0400";
|
||||||
|
};
|
||||||
services.nginx.virtualHosts = {
|
services.nginx.virtualHosts = {
|
||||||
"${login}" = lib.recursiveUpdate (vhosts.proxy kc.url) {
|
"${login}" = lib.recursiveUpdate (vhosts.proxy kc.url) {
|
||||||
locations = {
|
locations = {
|
||||||
|
@ -34,14 +36,13 @@ in
|
||||||
host = patroni.ipv4;
|
host = patroni.ipv4;
|
||||||
inherit (patroni) port;
|
inherit (patroni) port;
|
||||||
useSSL = false;
|
useSSL = false;
|
||||||
passwordFile = "/run/locksmith/patroni-keycloak";
|
passwordFile = config.age.secrets.keycloak-dbpass.path;
|
||||||
};
|
};
|
||||||
settings = {
|
settings = {
|
||||||
http-enabled = true;
|
|
||||||
http-host = kc.ipv4;
|
http-host = kc.ipv4;
|
||||||
http-port = kc.port;
|
http-port = kc.port;
|
||||||
hostname = login;
|
hostname = login;
|
||||||
proxy-headers = "xforwarded";
|
proxy = "edge";
|
||||||
# for backcompat, TODO: remove
|
# for backcompat, TODO: remove
|
||||||
http-relative-path = "/auth";
|
http-relative-path = "/auth";
|
||||||
};
|
};
|
||||||
|
@ -53,7 +54,7 @@ in
|
||||||
"-Dotel.traces.exporter=otlp"
|
"-Dotel.traces.exporter=otlp"
|
||||||
];
|
];
|
||||||
OTEL_EXPORTER_OTLP_PROTOCOL = "grpc";
|
OTEL_EXPORTER_OTLP_PROTOCOL = "grpc";
|
||||||
OTEL_EXPORTER_OTLP_ENDPOINT = cluster.config.ways.ingest-traces-otlp.url;
|
OTEL_EXPORTER_OTLP_ENDPOINT = cluster.config.links.tempo-otlp-grpc.url;
|
||||||
OTEL_TRACES_SAMPLER = "parentbased_traceidratio";
|
OTEL_TRACES_SAMPLER = "parentbased_traceidratio";
|
||||||
OTEL_TRACES_SAMPLER_ARG = "0.50";
|
OTEL_TRACES_SAMPLER_ARG = "0.50";
|
||||||
};
|
};
|
||||||
|
|
|
@ -56,7 +56,7 @@ in
|
||||||
};
|
};
|
||||||
simulacrum = {
|
simulacrum = {
|
||||||
enable = true;
|
enable = true;
|
||||||
deps = [ "wireguard" "consul" "locksmith" "dns" "incandescence" "ways" ];
|
deps = [ "wireguard" "consul" "locksmith" "dns" "incandescence" ];
|
||||||
settings = ./simulacrum/test.nix;
|
settings = ./simulacrum/test.nix;
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
|
@ -36,9 +36,10 @@ in
|
||||||
inherit (linkWeb) port;
|
inherit (linkWeb) port;
|
||||||
checks = [
|
checks = [
|
||||||
{
|
{
|
||||||
name = "Garage Service Status";
|
name = "Garage Node";
|
||||||
id = "service:garage-web:garage";
|
id = "service:garage-web:node";
|
||||||
alias_service = "garage";
|
interval = "5s";
|
||||||
|
http = "${config.links.garageMetrics.url}/health";
|
||||||
}
|
}
|
||||||
];
|
];
|
||||||
};
|
};
|
||||||
|
|
|
@ -25,14 +25,7 @@ in
|
||||||
];
|
];
|
||||||
locations = lib.mkMerge [
|
locations = lib.mkMerge [
|
||||||
{
|
{
|
||||||
"/" = if cfg.grpc then {
|
"/".proxyPass = cfg.target;
|
||||||
extraConfig = ''
|
|
||||||
set $nix_proxy_grpc_target ${cfg.target};
|
|
||||||
grpc_pass $nix_proxy_grpc_target;
|
|
||||||
'';
|
|
||||||
} else {
|
|
||||||
proxyPass = cfg.target;
|
|
||||||
};
|
|
||||||
"${cfg.healthCheckPath}".extraConfig = "access_log off;";
|
"${cfg.healthCheckPath}".extraConfig = "access_log off;";
|
||||||
}
|
}
|
||||||
{
|
{
|
||||||
|
@ -89,11 +82,11 @@ in
|
||||||
'') consulServiceWays;
|
'') consulServiceWays;
|
||||||
in pkgs.writeText "ways-upstreams.ctmpl" (lib.concatStringsSep "\n" (lib.unique upstreams));
|
in pkgs.writeText "ways-upstreams.ctmpl" (lib.concatStringsSep "\n" (lib.unique upstreams));
|
||||||
destination = "/run/consul-template/nginx-ways-upstreams.conf";
|
destination = "/run/consul-template/nginx-ways-upstreams.conf";
|
||||||
exec.command = lib.singleton (pkgs.writeShellScript "ways-reload" ''
|
exec.command = [
|
||||||
if ${config.systemd.package}/bin/systemctl is-active nginx.service; then
|
"${config.services.nginx.package}/bin/nginx"
|
||||||
exec ${config.services.nginx.package}/bin/nginx -s reload -g 'pid /run/nginx/nginx.pid;'
|
"-s" "reload"
|
||||||
fi
|
"-g" "pid /run/nginx/nginx.pid;"
|
||||||
'');
|
];
|
||||||
}
|
}
|
||||||
];
|
];
|
||||||
};
|
};
|
||||||
|
|
|
@ -35,12 +35,6 @@ with lib;
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
grpc = mkOption {
|
|
||||||
description = "Whether this endpoint is a gRPC service.";
|
|
||||||
type = types.bool;
|
|
||||||
default = false;
|
|
||||||
};
|
|
||||||
|
|
||||||
target = mkOption {
|
target = mkOption {
|
||||||
type = types.str;
|
type = types.str;
|
||||||
};
|
};
|
||||||
|
@ -107,7 +101,7 @@ with lib;
|
||||||
(lib.mkIf options.consulService.isDefined {
|
(lib.mkIf options.consulService.isDefined {
|
||||||
useConsul = true;
|
useConsul = true;
|
||||||
nginxUpstreamName = "ways_upstream_${builtins.hashString "md5" options.consulService.value}";
|
nginxUpstreamName = "ways_upstream_${builtins.hashString "md5" options.consulService.value}";
|
||||||
target = "${if config.grpc then "grpc" else "http"}://${options.nginxUpstreamName.value}";
|
target = "http://${options.nginxUpstreamName.value}";
|
||||||
})
|
})
|
||||||
(lib.mkIf options.bucket.isDefined {
|
(lib.mkIf options.bucket.isDefined {
|
||||||
consulService = "garage-web";
|
consulService = "garage-web";
|
||||||
|
|
flake.lock (93 lines changed)
@ -29,8 +29,9 @@
|
||||||
"flake-compat": [
|
"flake-compat": [
|
||||||
"blank"
|
"blank"
|
||||||
],
|
],
|
||||||
"flake-parts": "flake-parts",
|
"flake-utils": [
|
||||||
"nix-github-actions": "nix-github-actions",
|
"repin-flake-utils"
|
||||||
|
],
|
||||||
"nixpkgs": [
|
"nixpkgs": [
|
||||||
"nixpkgs"
|
"nixpkgs"
|
||||||
],
|
],
|
||||||
|
@ -39,11 +40,11 @@
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1730906442,
|
"lastModified": 1724226964,
|
||||||
"narHash": "sha256-tBuyb8jWBSHHgcIrOfiyQJZGY1IviMzH2V74t7gWfgI=",
|
"narHash": "sha256-cltFh4su2vcFidxKp7LuEgX3ZGLfPy0DCdrQZ/QTe68=",
|
||||||
"owner": "zhaofengli",
|
"owner": "zhaofengli",
|
||||||
"repo": "attic",
|
"repo": "attic",
|
||||||
"rev": "d0b66cf897e4d55f03d341562c9821dc4e566e54",
|
"rev": "6d9aeaef0a067d664cb11bb7704f7ec373d47fb2",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -117,11 +118,11 @@
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1728330715,
|
"lastModified": 1722113426,
|
||||||
"narHash": "sha256-xRJ2nPOXb//u1jaBnDP56M7v5ldavjbtR6lfGqSvcKg=",
|
"narHash": "sha256-Yo/3loq572A8Su6aY5GP56knpuKYRvM2a1meP9oJZCw=",
|
||||||
"owner": "numtide",
|
"owner": "numtide",
|
||||||
"repo": "devshell",
|
"repo": "devshell",
|
||||||
"rev": "dd6b80932022cea34a019e2bb32f6fa9e494dfef",
|
"rev": "67cce7359e4cd3c45296fb4aaf6a19e2a9c757ae",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -189,7 +190,6 @@
|
||||||
"flake-parts": {
|
"flake-parts": {
|
||||||
"inputs": {
|
"inputs": {
|
||||||
"nixpkgs-lib": [
|
"nixpkgs-lib": [
|
||||||
"attic",
|
|
||||||
"nixpkgs"
|
"nixpkgs"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
@ -208,26 +208,6 @@
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"flake-parts_2": {
|
"flake-parts_2": {
|
||||||
"inputs": {
|
|
||||||
"nixpkgs-lib": [
|
|
||||||
"nixpkgs"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1730504689,
|
|
||||||
"narHash": "sha256-hgmguH29K2fvs9szpq2r3pz2/8cJd2LPS+b4tfNFCwE=",
|
|
||||||
"owner": "hercules-ci",
|
|
||||||
"repo": "flake-parts",
|
|
||||||
"rev": "506278e768c2a08bec68eb62932193e341f55c90",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "hercules-ci",
|
|
||||||
"repo": "flake-parts",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"flake-parts_3": {
|
|
||||||
"inputs": {
|
"inputs": {
|
||||||
"nixpkgs-lib": [
|
"nixpkgs-lib": [
|
||||||
"nix-super",
|
"nix-super",
|
||||||
|
@ -311,11 +291,11 @@
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1730903510,
|
"lastModified": 1719226092,
|
||||||
"narHash": "sha256-mnynlrPeiW0nUQ8KGZHb3WyxAxA3Ye/BH8gMjdoKP6E=",
|
"narHash": "sha256-YNkUMcCUCpnULp40g+svYsaH1RbSEj6s4WdZY/SHe38=",
|
||||||
"owner": "hercules-ci",
|
"owner": "hercules-ci",
|
||||||
"repo": "hercules-ci-effects",
|
"repo": "hercules-ci-effects",
|
||||||
"rev": "b89ac4d66d618b915b1f0a408e2775fe3821d141",
|
"rev": "11e4b8dc112e2f485d7c97e1cee77f9958f498f5",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -419,11 +399,11 @@
|
||||||
"systems": "systems_2"
|
"systems": "systems_2"
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1725623828,
|
"lastModified": 1723717449,
|
||||||
"narHash": "sha256-5Zrn72PO9yBaNO4Gd5uOsEmRpYH5rVAFKOQ5h2PxyhU=",
|
"narHash": "sha256-i+9i1D5zEz2c3o5RuH+X/jDRmA12vgU8UxxE0/TPvtE=",
|
||||||
"owner": "numtide",
|
"owner": "numtide",
|
||||||
"repo": "nar-serve",
|
"repo": "nar-serve",
|
||||||
"rev": "e5c749a444f2d14f381c75ef3a8feaa82c333b92",
|
"rev": "846cb9d55aee9af7c7a1eaf7016f2c5b9e01cc6e",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -434,11 +414,11 @@
|
||||||
},
|
},
|
||||||
"nix-filter": {
|
"nix-filter": {
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1730207686,
|
"lastModified": 1710156097,
|
||||||
"narHash": "sha256-SCHiL+1f7q9TAnxpasriP6fMarWE5H43t25F5/9e28I=",
|
"narHash": "sha256-1Wvk8UP7PXdf8bCCaEoMnOT1qe5/Duqgj+rL8sRQsSM=",
|
||||||
"owner": "numtide",
|
"owner": "numtide",
|
||||||
"repo": "nix-filter",
|
"repo": "nix-filter",
|
||||||
"rev": "776e68c1d014c3adde193a18db9d738458cd2ba4",
|
"rev": "3342559a24e85fc164b295c3444e8a139924675b",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -447,31 +427,10 @@
|
||||||
"type": "github"
|
"type": "github"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"nix-github-actions": {
|
|
||||||
"inputs": {
|
|
||||||
"nixpkgs": [
|
|
||||||
"attic",
|
|
||||||
"nixpkgs"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"locked": {
|
|
||||||
"lastModified": 1729742964,
|
|
||||||
"narHash": "sha256-B4mzTcQ0FZHdpeWcpDYPERtyjJd/NIuaQ9+BV1h+MpA=",
|
|
||||||
"owner": "nix-community",
|
|
||||||
"repo": "nix-github-actions",
|
|
||||||
"rev": "e04df33f62cdcf93d73e9a04142464753a16db67",
|
|
||||||
"type": "github"
|
|
||||||
},
|
|
||||||
"original": {
|
|
||||||
"owner": "nix-community",
|
|
||||||
"repo": "nix-github-actions",
|
|
||||||
"type": "github"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"nix-super": {
|
"nix-super": {
|
||||||
"inputs": {
|
"inputs": {
|
||||||
"flake-compat": "flake-compat_2",
|
"flake-compat": "flake-compat_2",
|
||||||
"flake-parts": "flake-parts_3",
|
"flake-parts": "flake-parts_2",
|
||||||
"libgit2": "libgit2",
|
"libgit2": "libgit2",
|
||||||
"nixpkgs": "nixpkgs_3",
|
"nixpkgs": "nixpkgs_3",
|
||||||
"nixpkgs-regression": [
|
"nixpkgs-regression": [
|
||||||
|
@ -545,11 +504,11 @@
|
||||||
},
|
},
|
||||||
"nixpkgs_4": {
|
"nixpkgs_4": {
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1730785428,
|
"lastModified": 1724221631,
|
||||||
"narHash": "sha256-Zwl8YgTVJTEum+L+0zVAWvXAGbWAuXHax3KzuejaDyo=",
|
"narHash": "sha256-zf3gNf0nX2yOb++h4jW9l4iG8R/LfazWZD+KhmW6fcc=",
|
||||||
"owner": "NixOS",
|
"owner": "NixOS",
|
||||||
"repo": "nixpkgs",
|
"repo": "nixpkgs",
|
||||||
"rev": "4aa36568d413aca0ea84a1684d2d46f55dbabad7",
|
"rev": "ac2df85f4d5c580786c7b4db031c199554152681",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -598,11 +557,11 @@
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1726560853,
|
"lastModified": 1710146030,
|
||||||
"narHash": "sha256-X6rJYSESBVr3hBoH0WbKE5KvhPU5bloyZ2L4K60/fPQ=",
|
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
|
||||||
"owner": "numtide",
|
"owner": "numtide",
|
||||||
"repo": "flake-utils",
|
"repo": "flake-utils",
|
||||||
"rev": "c1dfcf08411b08f6b8615f7d8971a2bfa81d5e8a",
|
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
|
@ -618,7 +577,7 @@
|
||||||
"blank": "blank",
|
"blank": "blank",
|
||||||
"devshell": "devshell",
|
"devshell": "devshell",
|
||||||
"drv-parts": "drv-parts",
|
"drv-parts": "drv-parts",
|
||||||
"flake-parts": "flake-parts_2",
|
"flake-parts": "flake-parts",
|
||||||
"hercules-ci-agent": "hercules-ci-agent",
|
"hercules-ci-agent": "hercules-ci-agent",
|
||||||
"hercules-ci-effects": "hercules-ci-effects",
|
"hercules-ci-effects": "hercules-ci-effects",
|
||||||
"hyprspace": "hyprspace",
|
"hyprspace": "hyprspace",
|
||||||
|
|
|
@ -54,6 +54,7 @@
|
||||||
nixpkgs.follows = "nixpkgs";
|
nixpkgs.follows = "nixpkgs";
|
||||||
nixpkgs-stable.follows = "nixpkgs";
|
nixpkgs-stable.follows = "nixpkgs";
|
||||||
flake-compat.follows = "blank";
|
flake-compat.follows = "blank";
|
||||||
|
flake-utils.follows = "repin-flake-utils";
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -26,13 +26,11 @@ in
|
||||||
|
|
||||||
cfg = v.distributed;
|
cfg = v.distributed;
|
||||||
|
|
||||||
svcs = map (x: config.consul.services.${x}) cfg.registerServices;
|
svc = config.consul.services.${cfg.registerService};
|
||||||
|
|
||||||
runWithRegistration = pkgs.writeShellScript "run-with-registration" ''
|
runWithRegistration = pkgs.writeShellScript "run-with-registration" ''
|
||||||
trap '${lib.concatStringsSep ";" (map (svc: svc.commands.deregister) svcs)}' EXIT
|
trap '${svc.commands.deregister}' EXIT
|
||||||
${lib.concatStringsSep "\n" (
|
${svc.commands.register}
|
||||||
map (svc: svc.commands.register) svcs
|
|
||||||
)}
|
|
||||||
''${@}
|
''${@}
|
||||||
'';
|
'';
|
||||||
|
|
||||||
|
@ -51,10 +49,10 @@ in
|
||||||
[Service]
|
[Service]
|
||||||
ExecStartPre=${waitForConsul} 'services/${n}%i'
|
ExecStartPre=${waitForConsul} 'services/${n}%i'
|
||||||
ExecStart=
|
ExecStart=
|
||||||
ExecStart=${consul}/bin/consul lock --name=${n} --n=${toString cfg.replicas} --shell=false --child-exit-code 'services/${n}%i' ${optionalString (cfg.registerServices != []) runWithRegistration} ${ExecStart}
|
ExecStart=${consul}/bin/consul lock --name=${n} --n=${toString cfg.replicas} --shell=false --child-exit-code 'services/${n}%i' ${optionalString (cfg.registerService != null) runWithRegistration} ${ExecStart}
|
||||||
Environment="CONSUL_HTTP_ADDR=${consulHttpAddr}"
|
Environment="CONSUL_HTTP_ADDR=${consulHttpAddr}"
|
||||||
${optionalString (v.serviceConfig ? RestrictAddressFamilies) "RestrictAddressFamilies=AF_NETLINK"}
|
${optionalString (v.serviceConfig ? RestrictAddressFamilies) "RestrictAddressFamilies=AF_NETLINK"}
|
||||||
${optionalString (cfg.registerServices != []) (lib.concatStringsSep "\n" (map (svc: "ExecStopPost=${svc.commands.deregister}") svcs))}
|
${optionalString (cfg.registerService != null) "ExecStopPost=${svc.commands.deregister}"}
|
||||||
''))
|
''))
|
||||||
];
|
];
|
||||||
}
|
}
|
||||||
|
|
|
@ -17,11 +17,6 @@ with lib;
|
||||||
type = with types; nullOr str;
|
type = with types; nullOr str;
|
||||||
default = null;
|
default = null;
|
||||||
};
|
};
|
||||||
registerServices = mkOption {
|
|
||||||
description = "Consul services to register when this service gets started.";
|
|
||||||
type = with types; listOf str;
|
|
||||||
default = if config.distributed.registerService == null then [ ] else [ config.distributed.registerService ];
|
|
||||||
};
|
|
||||||
};
|
};
|
||||||
}));
|
}));
|
||||||
};
|
};
|
||||||
|
|
|
@ -7,8 +7,7 @@ nixosTest {
|
||||||
package = keycloak;
|
package = keycloak;
|
||||||
database.passwordFile = builtins.toFile "keycloak-test-password" "kcnixostest1234";
|
database.passwordFile = builtins.toFile "keycloak-test-password" "kcnixostest1234";
|
||||||
settings = {
|
settings = {
|
||||||
http-enabled = true;
|
proxy = "edge";
|
||||||
proxy-headers = "xforwarded";
|
|
||||||
hostname = "keycloak.local";
|
hostname = "keycloak.local";
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
|
@ -18,10 +18,6 @@ super: rec {
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
jitsi-meet-insecure = let
|
|
||||||
olm-insecure = assert builtins.length super.olm.meta.knownVulnerabilities > 0; super.olm.overrideAttrs (o: { meta = o.meta // { knownVulnerabilities = []; }; });
|
|
||||||
in super.jitsi-meet.override { olm = olm-insecure; };
|
|
||||||
|
|
||||||
jre17_standard = let
|
jre17_standard = let
|
||||||
jre = super.jre_minimal.override {
|
jre = super.jre_minimal.override {
|
||||||
jdk = super.jdk17_headless;
|
jdk = super.jdk17_headless;
|
||||||
|
@ -50,7 +46,7 @@ super: rec {
|
||||||
|
|
||||||
prometheus-jitsi-exporter = patch super.prometheus-jitsi-exporter "patches/base/prometheus-jitsi-exporter";
|
prometheus-jitsi-exporter = patch super.prometheus-jitsi-exporter "patches/base/prometheus-jitsi-exporter";
|
||||||
|
|
||||||
s3ql = super.s3ql.overrideAttrs (old: {
|
s3ql = (patch super.s3ql "patches/base/s3ql").overrideAttrs (old: {
|
||||||
propagatedBuildInputs = old.propagatedBuildInputs ++ [
|
propagatedBuildInputs = old.propagatedBuildInputs ++ [
|
||||||
super.python3Packages.packaging
|
super.python3Packages.packaging
|
||||||
super.python3Packages.systemd
|
super.python3Packages.systemd
|
||||||
|
|
|
@ -9,9 +9,7 @@
|
||||||
|
|
||||||
options.shadows = lib.mkOption {
|
options.shadows = lib.mkOption {
|
||||||
type = with lib.types; lazyAttrsOf package;
|
type = with lib.types; lazyAttrsOf package;
|
||||||
default = {
|
default = { };
|
||||||
jitsi-meet = self'.packages.jitsi-meet-insecure;
|
|
||||||
};
|
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,28 +1,25 @@
|
||||||
diff --git a/unix_integration/resolver/src/idprovider/kanidm.rs b/unix_integration/resolver/src/idprovider/kanidm.rs
|
diff --git a/unix_integration/src/idprovider/kanidm.rs b/unix_integration/src/idprovider/kanidm.rs
|
||||||
index 63cedb4d5..35c45fb0e 100644
|
index 6fc015756..31593f03e 100644
|
||||||
--- a/unix_integration/resolver/src/idprovider/kanidm.rs
|
--- a/unix_integration/src/idprovider/kanidm.rs
|
||||||
+++ b/unix_integration/resolver/src/idprovider/kanidm.rs
|
+++ b/unix_integration/src/idprovider/kanidm.rs
|
||||||
@@ -7,6 +7,7 @@ use kanidm_proto::internal::OperationError;
|
@@ -4,6 +4,7 @@ use kanidm_client::{ClientError, KanidmClient, StatusCode};
|
||||||
|
use kanidm_proto::internal::OperationError;
|
||||||
use kanidm_proto::v1::{UnixGroupToken, UnixUserToken};
|
use kanidm_proto::v1::{UnixGroupToken, UnixUserToken};
|
||||||
use std::collections::BTreeSet;
|
use tokio::sync::{broadcast, RwLock};
|
||||||
use std::time::{Duration, SystemTime};
|
|
||||||
+use std::env;
|
+use std::env;
|
||||||
use tokio::sync::{broadcast, Mutex};
|
|
||||||
|
|
||||||
use kanidm_lib_crypto::CryptoPolicy;
|
use super::interface::{
|
||||||
@@ -38,6 +39,8 @@ struct KanidmProviderInternal {
|
// KeyStore,
|
||||||
hmac_key: HmacKey,
|
@@ -25,12 +26,28 @@ const TAG_IDKEY: &str = "idkey";
|
||||||
crypto_policy: CryptoPolicy,
|
|
||||||
pam_allow_groups: BTreeSet<String>,
|
pub struct KanidmProvider {
|
||||||
|
client: RwLock<KanidmClient>,
|
||||||
+ auth_name: Option<String>,
|
+ auth_name: Option<String>,
|
||||||
+ auth_password: Option<String>,
|
+ auth_password: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct KanidmProvider {
|
impl KanidmProvider {
|
||||||
@@ -102,6 +105,19 @@ impl KanidmProvider {
|
pub fn new(client: KanidmClient) -> Self {
|
||||||
.map(|GroupMap { local, with }| (local, Id::Name(with)))
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
+ let env_username: Option<String>;
|
+ let env_username: Option<String>;
|
||||||
+ let env_password: Option<String>;
|
+ let env_password: Option<String>;
|
||||||
+ match (env::var_os("KANIDM_NAME"), env::var_os("KANIDM_PASSWORD")) {
|
+ match (env::var_os("KANIDM_NAME"), env::var_os("KANIDM_PASSWORD")) {
|
||||||
|
@ -35,29 +32,23 @@ index 63cedb4d5..35c45fb0e 100644
|
||||||
+ env_password = None;
|
+ env_password = None;
|
||||||
+ }
|
+ }
|
||||||
+ }
|
+ }
|
||||||
+
|
KanidmProvider {
|
||||||
Ok(KanidmProvider {
|
client: RwLock::new(client),
|
||||||
inner: Mutex::new(KanidmProviderInternal {
|
+ auth_name: env_username,
|
||||||
state: CacheState::OfflineNextCheck(now),
|
+ auth_password: env_password,
|
||||||
@@ -109,6 +125,8 @@ impl KanidmProvider {
|
}
|
||||||
hmac_key,
|
|
||||||
crypto_policy,
|
|
||||||
pam_allow_groups,
|
|
||||||
+ auth_name: env_username,
|
|
||||||
+ auth_password: env_password
|
|
||||||
}),
|
|
||||||
map_group,
|
|
||||||
})
|
|
||||||
@@ -256,7 +274,11 @@ impl KanidmProviderInternal {
|
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
@@ -118,7 +135,11 @@ impl IdProvider for KanidmProvider {
|
||||||
|
|
||||||
async fn attempt_online(&mut self, _tpm: &mut tpm::BoxedDynTpm, now: SystemTime) -> bool {
|
// Needs .read on all types except re-auth.
|
||||||
- match self.client.auth_anonymous().await {
|
async fn provider_authenticate(&self, _tpm: &mut tpm::BoxedDynTpm) -> Result<(), IdpError> {
|
||||||
|
- match self.client.write().await.auth_anonymous().await {
|
||||||
+ let auth_method = match (&self.auth_name, &self.auth_password) {
|
+ let auth_method = match (&self.auth_name, &self.auth_password) {
|
||||||
+ (Some(name), Some(password)) => self.client.auth_simple_password(name, password).await,
|
+ (Some(name), Some(password)) => self.client.write().await.auth_simple_password(name, password).await,
|
||||||
+ _ => self.client.auth_anonymous().await
|
+ _ => self.client.write().await.auth_anonymous().await
|
||||||
+ };
|
+ };
|
||||||
+ match auth_method {
|
+ match auth_method {
|
||||||
Ok(_uat) => {
|
Ok(_uat) => Ok(()),
|
||||||
self.state = CacheState::Online;
|
Err(err) => {
|
||||||
true
|
error!(?err, "Provider authentication failed");
|
||||||
|
|
patches/base/s3ql/0000-cache-entry-seek-whence.patch (new file, 18 lines)
|
@ -0,0 +1,18 @@
|
||||||
|
diff --git a/src/s3ql/block_cache.py b/src/s3ql/block_cache.py
|
||||||
|
index a4b55fd1..267b9a12 100644
|
||||||
|
--- a/src/s3ql/block_cache.py
|
||||||
|
+++ b/src/s3ql/block_cache.py
|
||||||
|
@@ -86,10 +86,10 @@ class CacheEntry:
|
||||||
|
def flush(self):
|
||||||
|
self.fh.flush()
|
||||||
|
|
||||||
|
- def seek(self, off):
|
||||||
|
+ def seek(self, off, whence=0):
|
||||||
|
if self.pos != off:
|
||||||
|
- self.fh.seek(off)
|
||||||
|
- self.pos = off
|
||||||
|
+ self.fh.seek(off, whence)
|
||||||
|
+ self.pos = self.fh.tell()
|
||||||
|
|
||||||
|
def tell(self):
|
||||||
|
return self.pos
|
patches/base/s3ql/0001-fix-plain-block-size.patch (new file, 26 lines)
|
@ -0,0 +1,26 @@
|
||||||
|
diff --git a/src/s3ql/backends/comprenc.py b/src/s3ql/backends/comprenc.py
|
||||||
|
index 6402fec1..9ed3627e 100644
|
||||||
|
--- a/src/s3ql/backends/comprenc.py
|
||||||
|
+++ b/src/s3ql/backends/comprenc.py
|
||||||
|
@@ -276,7 +276,7 @@ class ComprencBackend(AbstractBackend):
|
||||||
|
buf.seek(0)
|
||||||
|
fh = buf
|
||||||
|
|
||||||
|
- return self.backend.write_fh(key, fh, meta_raw)
|
||||||
|
+ return self.backend.write_fh(key, fh, meta_raw, len_=len_ if meta_raw['compression'] == 'None'and meta_raw['encryption'] == 'None' else None)
|
||||||
|
|
||||||
|
def contains(self, key):
|
||||||
|
return self.backend.contains(key)
|
||||||
|
diff --git a/src/s3ql/database.py b/src/s3ql/database.py
|
||||||
|
index bb4054e6..c2142bf6 100644
|
||||||
|
--- a/src/s3ql/database.py
|
||||||
|
+++ b/src/s3ql/database.py
|
||||||
|
@@ -659,7 +659,7 @@ def upload_metadata(
|
||||||
|
)
|
||||||
|
obj = METADATA_OBJ_NAME % (blockno, params.seq_no)
|
||||||
|
fh.seek(blockno * blocksize)
|
||||||
|
- backend.write_fh(obj, fh, len_=blocksize)
|
||||||
|
+ backend.write_fh(obj, fh, len_=min(blocksize, db_size - blockno * blocksize))
|
||||||
|
|
||||||
|
if not update_params:
|
||||||
|
return
|
patches/base/s3ql/0002-comprenc-always-copy.patch (new file, 17 lines)
|
@ -0,0 +1,17 @@
|
||||||
|
diff --git a/src/s3ql/backends/comprenc.py b/src/s3ql/backends/comprenc.py
|
||||||
|
index 9ed3627e..db419bb7 100644
|
||||||
|
--- a/src/s3ql/backends/comprenc.py
|
||||||
|
+++ b/src/s3ql/backends/comprenc.py
|
||||||
|
@@ -276,6 +276,12 @@ class ComprencBackend(AbstractBackend):
|
||||||
|
buf.seek(0)
|
||||||
|
fh = buf
|
||||||
|
|
||||||
|
+ if meta_raw['compression'] == 'None' and meta_raw['encryption'] == 'None':
|
||||||
|
+ buf = io.BytesIO()
|
||||||
|
+ copyfh(fh, buf, len_)
|
||||||
|
+ buf.seek(0)
|
||||||
|
+ fh = buf
|
||||||
|
+
|
||||||
|
return self.backend.write_fh(key, fh, meta_raw, len_=len_ if meta_raw['compression'] == 'None'and meta_raw['encryption'] == 'None' else None)
|
||||||
|
|
||||||
|
def contains(self, key):
|
patches/base/s3ql/remove-ssl-monkeypatch.patch (new file, 12 lines)
|
@ -0,0 +1,12 @@
|
||||||
|
diff --git a/tests/t0_http.py b/tests/t0_http.py
|
||||||
|
index 66ed564f..36bebab1 100755
|
||||||
|
--- a/tests/t0_http.py
|
||||||
|
+++ b/tests/t0_http.py
|
||||||
|
@@ -289,7 +289,6 @@ def do_GET(self):
|
||||||
|
|
||||||
|
# We don't *actually* want to establish SSL, that'd be
|
||||||
|
# to complex for our mock server
|
||||||
|
- monkeypatch.setattr('ssl.match_hostname', lambda x, y: True)
|
||||||
|
conn = HTTPConnection(
|
||||||
|
test_host,
|
||||||
|
test_port,
|
patches/base/s3ql/s3c-accurate-length.patch (new file, 26 lines)
|
@ -0,0 +1,26 @@
|
||||||
|
commit 1edbbcf08d5701ea38f13fca7491418318aebca9
|
||||||
|
Author: Max <max@privatevoid.net>
|
||||||
|
Date: Fri Jun 7 23:31:08 2024 +0200
|
||||||
|
|
||||||
|
accurate length
|
||||||
|
|
||||||
|
diff --git a/src/s3ql/backends/s3c.py b/src/s3ql/backends/s3c.py
|
||||||
|
index 2995ca4f..3c3c79ab 100644
|
||||||
|
--- a/src/s3ql/backends/s3c.py
|
||||||
|
+++ b/src/s3ql/backends/s3c.py
|
||||||
|
@@ -387,9 +387,13 @@ class Backend(AbstractBackend):
|
||||||
|
'''
|
||||||
|
|
||||||
|
off = fh.tell()
|
||||||
|
+ fh.seek(0, os.SEEK_END)
|
||||||
|
+ actual_len = fh.tell() - off
|
||||||
|
+ fh.seek(off, os.SEEK_SET)
|
||||||
|
if len_ is None:
|
||||||
|
- fh.seek(0, os.SEEK_END)
|
||||||
|
- len_ = fh.tell()
|
||||||
|
+ len_ = actual_len
|
||||||
|
+ else:
|
||||||
|
+ len_ = min(len_, actual_len)
|
||||||
|
return self._write_fh(key, fh, off, len_, metadata or {})
|
||||||
|
|
||||||
|
@retry
|
patches/base/s3ql/s3v4.patch (new file, 392 lines)
|
@ -0,0 +1,392 @@
|
||||||
|
From 11e3a9cea77cd8498d874f7fd69a938af4da68cd Mon Sep 17 00:00:00 2001
|
||||||
|
From: xeji <36407913+xeji@users.noreply.github.com>
|
||||||
|
Date: Thu, 28 Mar 2024 22:19:11 +0100
|
||||||
|
Subject: [PATCH] new backend s3c4: s3c with V4 request signatures (#349)
|
||||||
|
|
||||||
|
---
|
||||||
|
rst/backends.rst | 15 ++++
|
||||||
|
src/s3ql/backends/__init__.py | 3 +-
|
||||||
|
src/s3ql/backends/s3.py | 100 ++----------------------
|
||||||
|
src/s3ql/backends/s3c4.py | 140 ++++++++++++++++++++++++++++++++++
|
||||||
|
src/s3ql/parse_args.py | 2 +-
|
||||||
|
tests/mock_server.py | 11 +++
|
||||||
|
6 files changed, 174 insertions(+), 97 deletions(-)
|
||||||
|
create mode 100644 src/s3ql/backends/s3c4.py
|
||||||
|
|
||||||
|
diff --git a/rst/backends.rst b/rst/backends.rst
|
||||||
|
index 7220ee96..4bc68387 100644
|
||||||
|
--- a/rst/backends.rst
|
||||||
|
+++ b/rst/backends.rst
|
||||||
|
@@ -341,6 +341,14 @@ can be an arbitrary prefix that will be prepended to all object names
|
||||||
|
used by S3QL. This allows you to store several S3QL file systems in
|
||||||
|
the same bucket.
|
||||||
|
|
||||||
|
+`s3c://` authenticates API requests using AWS V2 signatures, which are
|
||||||
|
+deprecated by AWS but still accepted by many S3 compatible services.
|
||||||
|
+
|
||||||
|
+`s3c4://` denotes a variant of this backend that works the same
|
||||||
|
+but uses AWS V4 signatures for request authentication instead: ::
|
||||||
|
+
|
||||||
|
+ s3c4://<hostname>:<port>/<bucketname>/<prefix>
|
||||||
|
+
|
||||||
|
The S3 compatible backend accepts the following backend options:
|
||||||
|
|
||||||
|
.. option:: no-ssl
|
||||||
|
@@ -385,6 +393,13 @@ The S3 compatible backend accepts the following backend options:
|
||||||
|
necessary if your storage server does not return a valid response
|
||||||
|
body for a successful copy operation.
|
||||||
|
|
||||||
|
+.. option:: sig-region=<region>
|
||||||
|
+
|
||||||
|
+ For `s3c4://` variant only: Region to use for calculating V4
|
||||||
|
+ request signatures. Contrary to S3, the region is not a defined
|
||||||
|
+ part of the storage URL and must be specified separately.
|
||||||
|
+ Defaults to `us-east-1`.
|
||||||
|
+
|
||||||
|
.. _`S3 COPY API`: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html
|
||||||
|
.. __: https://doc.s3.amazonaws.com/proposals/copy.html
|
||||||
|
|
||||||
|
diff --git a/src/s3ql/backends/__init__.py b/src/s3ql/backends/__init__.py
|
||||||
|
index a1335762..442828cd 100644
|
||||||
|
--- a/src/s3ql/backends/__init__.py
|
||||||
|
+++ b/src/s3ql/backends/__init__.py
|
||||||
|
@@ -6,7 +6,7 @@
|
||||||
|
This work can be distributed under the terms of the GNU GPLv3.
|
||||||
|
'''
|
||||||
|
|
||||||
|
-from . import gs, local, rackspace, s3, s3c, swift, swiftks
|
||||||
|
+from . import gs, local, rackspace, s3, s3c, s3c4, swift, swiftks
|
||||||
|
from .b2.b2_backend import B2Backend
|
||||||
|
|
||||||
|
#: Mapping from storage URL prefixes to backend classes
|
||||||
|
@@ -15,6 +15,7 @@
|
||||||
|
'local': local.Backend,
|
||||||
|
'gs': gs.Backend,
|
||||||
|
's3c': s3c.Backend,
|
||||||
|
+ 's3c4': s3c4.Backend,
|
||||||
|
'swift': swift.Backend,
|
||||||
|
'swiftks': swiftks.Backend,
|
||||||
|
'rackspace': rackspace.Backend,
|
||||||
|
diff --git a/src/s3ql/backends/s3.py b/src/s3ql/backends/s3.py
|
||||||
|
index e05a49ba..5548a855 100644
|
||||||
|
--- a/src/s3ql/backends/s3.py
|
||||||
|
+++ b/src/s3ql/backends/s3.py
|
||||||
|
@@ -15,7 +15,7 @@
|
||||||
|
from xml.sax.saxutils import escape as xml_escape
|
||||||
|
|
||||||
|
from ..logging import QuietError
|
||||||
|
-from . import s3c
|
||||||
|
+from . import s3c4
|
||||||
|
from .common import retry
|
||||||
|
from .s3c import get_S3Error
|
||||||
|
|
||||||
|
@@ -28,22 +28,23 @@
|
||||||
|
# pylint: disable=E1002,E1101
|
||||||
|
|
||||||
|
|
||||||
|
-class Backend(s3c.Backend):
|
||||||
|
+class Backend(s3c4.Backend):
|
||||||
|
"""A backend to store data in Amazon S3
|
||||||
|
|
||||||
|
This class uses standard HTTP connections to connect to S3.
|
||||||
|
"""
|
||||||
|
|
||||||
|
- known_options = (s3c.Backend.known_options | {'sse', 'rrs', 'ia', 'oia', 'it'}) - {
|
||||||
|
+ known_options = (s3c4.Backend.known_options | {'sse', 'rrs', 'ia', 'oia', 'it'}) - {
|
||||||
|
'dumb-copy',
|
||||||
|
'disable-expect100',
|
||||||
|
+ 'sig-region',
|
||||||
|
}
|
||||||
|
|
||||||
|
def __init__(self, options):
|
||||||
|
self.region = None
|
||||||
|
- self.signing_key = None
|
||||||
|
super().__init__(options)
|
||||||
|
self._set_storage_options(self._extra_put_headers)
|
||||||
|
+ self.sig_region = self.region
|
||||||
|
|
||||||
|
def _parse_storage_url(self, storage_url, ssl_context):
|
||||||
|
hit = re.match(r'^s3s?://([^/]+)/([^/]+)(?:/(.*))?$', storage_url)
|
||||||
|
@@ -147,94 +148,3 @@ def _delete_multi(self, keys):
|
||||||
|
|
||||||
|
except:
|
||||||
|
self.conn.discard()
|
||||||
|
-
|
||||||
|
- def _authorize_request(self, method, path, headers, subres, query_string):
|
||||||
|
- '''Add authorization information to *headers*'''
|
||||||
|
-
|
||||||
|
- # See http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html
|
||||||
|
-
|
||||||
|
- now = time.gmtime()
|
||||||
|
- # now = time.strptime('Fri, 24 May 2013 00:00:00 GMT',
|
||||||
|
- # '%a, %d %b %Y %H:%M:%S GMT')
|
||||||
|
-
|
||||||
|
- ymd = time.strftime('%Y%m%d', now)
|
||||||
|
- ymdhms = time.strftime('%Y%m%dT%H%M%SZ', now)
|
||||||
|
-
|
||||||
|
- headers['x-amz-date'] = ymdhms
|
||||||
|
- headers['x-amz-content-sha256'] = 'UNSIGNED-PAYLOAD'
|
||||||
|
- # headers['x-amz-content-sha256'] = hashlib.sha256(body).hexdigest()
|
||||||
|
- headers.pop('Authorization', None)
|
||||||
|
-
|
||||||
|
- auth_strs = [method]
|
||||||
|
- auth_strs.append(urllib.parse.quote(path))
|
||||||
|
-
|
||||||
|
- if query_string:
|
||||||
|
- s = urllib.parse.urlencode(
|
||||||
|
- query_string, doseq=True, quote_via=urllib.parse.quote
|
||||||
|
- ).split('&')
|
||||||
|
- else:
|
||||||
|
- s = []
|
||||||
|
- if subres:
|
||||||
|
- s.append(urllib.parse.quote(subres) + '=')
|
||||||
|
- if s:
|
||||||
|
- s = '&'.join(sorted(s))
|
||||||
|
- else:
|
||||||
|
- s = ''
|
||||||
|
- auth_strs.append(s)
|
||||||
|
-
|
||||||
|
- # Headers
|
||||||
|
- sig_hdrs = sorted(x.lower() for x in headers.keys())
|
||||||
|
- for hdr in sig_hdrs:
|
||||||
|
- auth_strs.append('%s:%s' % (hdr, headers[hdr].strip()))
|
||||||
|
- auth_strs.append('')
|
||||||
|
- auth_strs.append(';'.join(sig_hdrs))
|
||||||
|
- auth_strs.append(headers['x-amz-content-sha256'])
|
||||||
|
- can_req = '\n'.join(auth_strs)
|
||||||
|
- # log.debug('canonical request: %s', can_req)
|
||||||
|
-
|
||||||
|
- can_req_hash = hashlib.sha256(can_req.encode()).hexdigest()
|
||||||
|
- str_to_sign = (
|
||||||
|
- "AWS4-HMAC-SHA256\n"
|
||||||
|
- + ymdhms
|
||||||
|
- + '\n'
|
||||||
|
- + '%s/%s/s3/aws4_request\n' % (ymd, self.region)
|
||||||
|
- + can_req_hash
|
||||||
|
- )
|
||||||
|
- # log.debug('string to sign: %s', str_to_sign)
|
||||||
|
-
|
||||||
|
- if self.signing_key is None or self.signing_key[1] != ymd:
|
||||||
|
- self.update_signing_key(ymd)
|
||||||
|
- signing_key = self.signing_key[0]
|
||||||
|
-
|
||||||
|
- sig = hmac_sha256(signing_key, str_to_sign.encode(), hex=True)
|
||||||
|
-
|
||||||
|
- cred = '%s/%04d%02d%02d/%s/s3/aws4_request' % (
|
||||||
|
- self.login,
|
||||||
|
- now.tm_year,
|
||||||
|
- now.tm_mon,
|
||||||
|
- now.tm_mday,
|
||||||
|
- self.region,
|
||||||
|
- )
|
||||||
|
-
|
||||||
|
- headers['Authorization'] = (
|
||||||
|
- 'AWS4-HMAC-SHA256 '
|
||||||
|
- 'Credential=%s,'
|
||||||
|
- 'SignedHeaders=%s,'
|
||||||
|
- 'Signature=%s' % (cred, ';'.join(sig_hdrs), sig)
|
||||||
|
- )
|
||||||
|
-
|
||||||
|
- def update_signing_key(self, ymd):
|
||||||
|
- date_key = hmac_sha256(("AWS4" + self.password).encode(), ymd.encode())
|
||||||
|
- region_key = hmac_sha256(date_key, self.region.encode())
|
||||||
|
- service_key = hmac_sha256(region_key, b's3')
|
||||||
|
- signing_key = hmac_sha256(service_key, b'aws4_request')
|
||||||
|
-
|
||||||
|
- self.signing_key = (signing_key, ymd)
|
||||||
|
-
|
||||||
|
-
|
||||||
|
-def hmac_sha256(key, msg, hex=False):
|
||||||
|
- d = hmac.new(key, msg, hashlib.sha256)
|
||||||
|
- if hex:
|
||||||
|
- return d.hexdigest()
|
||||||
|
- else:
|
||||||
|
- return d.digest()
|
||||||
|
diff --git a/src/s3ql/backends/s3c4.py b/src/s3ql/backends/s3c4.py
|
||||||
|
new file mode 100644
|
||||||
|
index 00000000..37ff0b7a
|
||||||
|
--- /dev/null
|
||||||
|
+++ b/src/s3ql/backends/s3c4.py
|
||||||
|
@@ -0,0 +1,140 @@
|
||||||
|
+'''
|
||||||
|
+s3c4.py - this file is part of S3QL.
|
||||||
|
+
|
||||||
|
+Copyright © 2008 Nikolaus Rath <Nikolaus@rath.org>
|
||||||
|
+
|
||||||
|
+This work can be distributed under the terms of the GNU GPLv3.
|
||||||
|
+'''
|
||||||
|
+
|
||||||
|
+import hashlib
|
||||||
|
+import hmac
|
||||||
|
+import logging
|
||||||
|
+import re
|
||||||
|
+import time
|
||||||
|
+import urllib.parse
|
||||||
|
+from xml.sax.saxutils import escape as xml_escape
|
||||||
|
+
|
||||||
|
+from ..logging import QuietError
|
||||||
|
+from . import s3c
|
||||||
|
+from .common import retry
|
||||||
|
+from .s3c import get_S3Error
|
||||||
|
+
|
||||||
|
+log = logging.getLogger(__name__)
|
||||||
|
+
|
||||||
|
+# Maximum number of keys that can be deleted at once
|
||||||
|
+MAX_KEYS = 1000
|
||||||
|
+
|
||||||
|
+# Pylint goes berserk with false positives
|
||||||
|
+# pylint: disable=E1002,E1101
|
||||||
|
+
|
||||||
|
+
|
||||||
|
+class Backend(s3c.Backend):
|
||||||
|
+ """A backend to stored data in some S3 compatible storage service.
|
||||||
|
+
|
||||||
|
+ This classes uses AWS Signature V4 for authorization.
|
||||||
|
+ """
|
||||||
|
+
|
||||||
|
+ known_options = s3c.Backend.known_options | {'sig-region'}
|
||||||
|
+
|
||||||
|
+ def __init__(self, options):
|
||||||
|
+ self.sig_region = options.backend_options.get('sig-region', 'us-east-1')
|
||||||
|
+ self.signing_key = None
|
||||||
|
+ super().__init__(options)
|
||||||
|
+
|
||||||
|
+ def __str__(self):
|
||||||
|
+ return 's3c4://%s/%s/%s' % (self.hostname, self.bucket_name, self.prefix)
|
||||||
|
+
|
||||||
|
+ def _authorize_request(self, method, path, headers, subres, query_string):
|
||||||
|
+ '''Add authorization information to *headers*'''
|
||||||
|
+
|
||||||
|
+ # See http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html
|
||||||
|
+
|
||||||
|
+ now = time.gmtime()
|
||||||
|
+ # now = time.strptime('Fri, 24 May 2013 00:00:00 GMT',
|
||||||
|
+ # '%a, %d %b %Y %H:%M:%S GMT')
|
||||||
|
+
|
||||||
|
+ ymd = time.strftime('%Y%m%d', now)
|
||||||
|
+ ymdhms = time.strftime('%Y%m%dT%H%M%SZ', now)
|
||||||
|
+
|
||||||
|
+ # add non-standard port to host header, needed for correct signature
|
||||||
|
+ if self.port != 443:
|
||||||
|
+ headers['host'] = '%s:%s' % (self.hostname, self.port)
|
||||||
|
+
|
||||||
|
+ headers['x-amz-date'] = ymdhms
|
||||||
|
+ headers['x-amz-content-sha256'] = 'UNSIGNED-PAYLOAD'
|
||||||
|
+
|
||||||
|
+ headers.pop('Authorization', None)
|
||||||
|
+
|
||||||
|
+ auth_strs = [method]
|
||||||
|
+ auth_strs.append(urllib.parse.quote(path))
|
||||||
|
+
|
||||||
|
+ if query_string:
|
||||||
|
+ s = urllib.parse.urlencode(
|
||||||
|
+ query_string, doseq=True, quote_via=urllib.parse.quote
|
||||||
|
+ ).split('&')
|
||||||
|
+ else:
|
||||||
|
+ s = []
|
||||||
|
+ if subres:
|
||||||
|
+ s.append(urllib.parse.quote(subres) + '=')
|
||||||
|
+ if s:
|
||||||
|
+ s = '&'.join(sorted(s))
|
||||||
|
+ else:
|
||||||
|
+ s = ''
|
||||||
|
+ auth_strs.append(s)
|
||||||
|
+
|
||||||
|
+ # Headers
|
||||||
|
+ sig_hdrs = sorted(x.lower() for x in headers.keys())
|
||||||
|
+ for hdr in sig_hdrs:
|
||||||
|
+ auth_strs.append('%s:%s' % (hdr, headers[hdr].strip()))
|
||||||
|
+ auth_strs.append('')
|
||||||
|
+ auth_strs.append(';'.join(sig_hdrs))
|
||||||
|
+ auth_strs.append(headers['x-amz-content-sha256'])
|
||||||
|
+ can_req = '\n'.join(auth_strs)
|
||||||
|
+ # log.debug('canonical request: %s', can_req)
|
||||||
|
+
|
||||||
|
+ can_req_hash = hashlib.sha256(can_req.encode()).hexdigest()
|
||||||
|
+ str_to_sign = (
|
||||||
|
+ "AWS4-HMAC-SHA256\n"
|
||||||
|
+ + ymdhms
|
||||||
|
+ + '\n'
|
||||||
|
+ + '%s/%s/s3/aws4_request\n' % (ymd, self.sig_region)
|
||||||
|
+ + can_req_hash
|
||||||
|
+ )
|
||||||
|
+ # log.debug('string to sign: %s', str_to_sign)
|
||||||
|
+
|
||||||
|
+ if self.signing_key is None or self.signing_key[1] != ymd:
|
||||||
|
+ self.update_signing_key(ymd)
|
||||||
|
+ signing_key = self.signing_key[0]
|
||||||
|
+
|
||||||
|
+ sig = hmac_sha256(signing_key, str_to_sign.encode(), hex=True)
|
||||||
|
+
|
||||||
|
+ cred = '%s/%04d%02d%02d/%s/s3/aws4_request' % (
|
||||||
|
+ self.login,
|
||||||
|
+ now.tm_year,
|
||||||
|
+ now.tm_mon,
|
||||||
|
+ now.tm_mday,
|
||||||
|
+ self.sig_region,
|
||||||
|
+ )
|
||||||
|
+
|
||||||
|
+ headers['Authorization'] = (
|
||||||
|
+ 'AWS4-HMAC-SHA256 '
|
||||||
|
+ 'Credential=%s,'
|
||||||
|
+ 'SignedHeaders=%s,'
|
||||||
|
+ 'Signature=%s' % (cred, ';'.join(sig_hdrs), sig)
|
||||||
|
+ )
|
||||||
|
+
|
||||||
|
+ def update_signing_key(self, ymd):
|
||||||
|
+ date_key = hmac_sha256(("AWS4" + self.password).encode(), ymd.encode())
|
||||||
|
+ region_key = hmac_sha256(date_key, self.sig_region.encode())
|
||||||
|
+ service_key = hmac_sha256(region_key, b's3')
|
||||||
|
+ signing_key = hmac_sha256(service_key, b'aws4_request')
|
||||||
|
+
|
||||||
|
+ self.signing_key = (signing_key, ymd)
|
||||||
|
+
|
||||||
|
+
|
||||||
|
+def hmac_sha256(key, msg, hex=False):
|
||||||
|
+ d = hmac.new(key, msg, hashlib.sha256)
|
||||||
|
+ if hex:
|
||||||
|
+ return d.hexdigest()
|
||||||
|
+ else:
|
||||||
|
+ return d.digest()
|
||||||
|
diff --git a/src/s3ql/parse_args.py b/src/s3ql/parse_args.py
|
||||||
|
index 272e10c7..24ad50f4 100644
|
||||||
|
--- a/src/s3ql/parse_args.py
|
||||||
|
+++ b/src/s3ql/parse_args.py
|
||||||
|
@@ -374,7 +374,7 @@ def storage_url_type(s):
|
||||||
|
# slash (even when using a prefix), but we can't do that now because it
|
||||||
|
# would make file systems created without trailing slash inaccessible.
|
||||||
|
if re.match(r'^(s3|gs)://[^/]+$', s) or re.match(
|
||||||
|
- r'^(s3c|swift(ks)?|rackspace)://[^/]+/[^/]+$', s
|
||||||
|
+ r'^(s3c|s3c4|swift(ks)?|rackspace)://[^/]+/[^/]+$', s
|
||||||
|
):
|
||||||
|
s += '/'
|
||||||
|
|
||||||
|
diff --git a/tests/mock_server.py b/tests/mock_server.py
|
||||||
|
index b453e705..e3084065 100644
|
||||||
|
--- a/tests/mock_server.py
|
||||||
|
+++ b/tests/mock_server.py
|
||||||
|
@@ -292,6 +292,16 @@ def send_error(self, status, message=None, code='', resource='', extra_headers=N
|
||||||
|
self.wfile.write(content)
|
||||||
|
|
||||||
|
|
||||||
|
+class S3C4RequestHandler(S3CRequestHandler):
|
||||||
|
+ '''Request Handler for s3c4 backend
|
||||||
|
+
|
||||||
|
+ Currently identical to S3CRequestHandler since mock request handlers
|
||||||
|
+ do not check request signatures.
|
||||||
|
+ '''
|
||||||
|
+
|
||||||
|
+ pass
|
||||||
|
+
|
||||||
|
+
|
||||||
|
class BasicSwiftRequestHandler(S3CRequestHandler):
|
||||||
|
'''A request handler implementing a subset of the OpenStack Swift Interface
|
||||||
|
|
||||||
|
@@ -569,6 +579,7 @@ def inline_error(http_status, body):
|
||||||
|
#: corresponding storage urls
|
||||||
|
handler_list = [
|
||||||
|
(S3CRequestHandler, 's3c://%(host)s:%(port)d/s3ql_test'),
|
||||||
|
+ (S3C4RequestHandler, 's3c4://%(host)s:%(port)d/s3ql_test'),
|
||||||
|
# Special syntax only for testing against mock server
|
||||||
|
(BasicSwiftRequestHandler, 'swift://%(host)s:%(port)d/s3ql_test'),
|
||||||
|
(CopySwiftRequestHandler, 'swift://%(host)s:%(port)d/s3ql_test'),
|
secrets/keycloak-dbpass.age (new binary file; content not shown)