diff --git a/cluster/services/attic/server.nix b/cluster/services/attic/server.nix
index 6aaa2d1..39be7ce 100644
--- a/cluster/services/attic/server.nix
+++ b/cluster/services/attic/server.nix
@@ -9,17 +9,13 @@ let
 in
 {
-  imports = [
-    depot.inputs.attic.nixosModules.atticd
-  ];
-
   services.locksmith.waitForSecrets.atticd = [
     "garage-attic"
   ];
   services.atticd = {
     enable = true;
     package = depot.inputs.attic.packages.attic-server;
-    credentialsFile = secrets.serverToken.path;
+    environmentFile = secrets.serverToken.path;
 
     mode = if isMonolith then "monolithic" else "api-server";
 
     settings = {
diff --git a/cluster/services/sso/host.nix b/cluster/services/sso/host.nix
index 31628b8..dd3f9c2 100644
--- a/cluster/services/sso/host.nix
+++ b/cluster/services/sso/host.nix
@@ -37,10 +37,11 @@ in
       passwordFile = "/run/locksmith/patroni-keycloak";
     };
     settings = {
+      http-enabled = true;
       http-host = kc.ipv4;
       http-port = kc.port;
       hostname = login;
-      proxy = "edge";
+      proxy-headers = "xforwarded";
       # for backcompat, TODO: remove
       http-relative-path = "/auth";
     };
diff --git a/cluster/services/storage/default.nix b/cluster/services/storage/default.nix
index bc5de89..f960359 100644
--- a/cluster/services/storage/default.nix
+++ b/cluster/services/storage/default.nix
@@ -56,7 +56,7 @@ in
     };
     simulacrum = {
       enable = true;
-      deps = [ "wireguard" "consul" "locksmith" "dns" "incandescence" ];
+      deps = [ "wireguard" "consul" "locksmith" "dns" "incandescence" "ways" ];
       settings = ./simulacrum/test.nix;
     };
   };
diff --git a/cluster/services/ways/host.nix b/cluster/services/ways/host.nix
index 90455c3..984d55c 100644
--- a/cluster/services/ways/host.nix
+++ b/cluster/services/ways/host.nix
@@ -89,11 +89,11 @@ in
       '') consulServiceWays;
     in pkgs.writeText "ways-upstreams.ctmpl" (lib.concatStringsSep "\n" (lib.unique upstreams));
     destination = "/run/consul-template/nginx-ways-upstreams.conf";
-    exec.command = [
-      "${config.services.nginx.package}/bin/nginx"
-      "-s" "reload"
-      "-g" "pid /run/nginx/nginx.pid;"
-    ];
+    exec.command = lib.singleton (pkgs.writeShellScript "ways-reload" ''
+      if ${config.systemd.package}/bin/systemctl is-active nginx.service; then
+        exec ${config.services.nginx.package}/bin/nginx -s reload -g 'pid /run/nginx/nginx.pid;'
+      fi
+    '');
   }
 ];
 };
diff --git a/flake.lock b/flake.lock
index 9eae471..3959b01 100644
--- a/flake.lock
+++ b/flake.lock
@@ -10,11 +10,11 @@
         "systems": "systems"
       },
       "locked": {
-        "lastModified": 1722339003,
-        "narHash": "sha256-ZeS51uJI30ehNkcZ4uKqT4ZDARPyqrHADSKAwv5vVCU=",
+        "lastModified": 1723293904,
+        "narHash": "sha256-b+uqzj+Wa6xgMS9aNbX4I+sXeb5biPDi39VgvSFqFvU=",
         "owner": "ryantm",
         "repo": "agenix",
-        "rev": "3f1dae074a12feb7327b4bf43cbac0d124488bb7",
+        "rev": "f6291c5935fdc4e0bef208cfc0dcab7e3f7a1c41",
         "type": "github"
       },
       "original": {
@@ -29,9 +29,8 @@
         "flake-compat": [
           "blank"
         ],
-        "flake-utils": [
-          "repin-flake-utils"
-        ],
+        "flake-parts": "flake-parts",
+        "nix-github-actions": "nix-github-actions",
         "nixpkgs": [
           "nixpkgs"
         ],
@@ -40,11 +39,11 @@
         ]
       },
       "locked": {
-        "lastModified": 1722472866,
-        "narHash": "sha256-GJIz4M5HDB948Ex/8cPvbkrNzl/eKUE7/c21JBu4lb8=",
+        "lastModified": 1730906442,
+        "narHash": "sha256-tBuyb8jWBSHHgcIrOfiyQJZGY1IviMzH2V74t7gWfgI=",
         "owner": "zhaofengli",
         "repo": "attic",
-        "rev": "e127acbf9a71ebc0c26bc8e28346822e0a6e16ba",
+        "rev": "d0b66cf897e4d55f03d341562c9821dc4e566e54",
         "type": "github"
       },
       "original": {
@@ -76,11 +75,11 @@
         ]
       },
       "locked": {
-        "lastModified": 1717025063,
-        "narHash": "sha256-dIubLa56W9sNNz0e8jGxrX3CAkPXsq7snuFA/Ie6dn8=",
"lastModified": 1722960479, + "narHash": "sha256-NhCkJJQhD5GUib8zN9JrmYGMwt4lCRp6ZVNzIiYCl0Y=", "owner": "ipetkov", "repo": "crane", - "rev": "480dff0be03dac0e51a8dfc26e882b0d123a450e", + "rev": "4c6c77920b8d44cd6660c1621dea6b3fc4b4c4f4", "type": "github" }, "original": { @@ -118,11 +117,11 @@ ] }, "locked": { - "lastModified": 1722113426, - "narHash": "sha256-Yo/3loq572A8Su6aY5GP56knpuKYRvM2a1meP9oJZCw=", + "lastModified": 1728330715, + "narHash": "sha256-xRJ2nPOXb//u1jaBnDP56M7v5ldavjbtR6lfGqSvcKg=", "owner": "numtide", "repo": "devshell", - "rev": "67cce7359e4cd3c45296fb4aaf6a19e2a9c757ae", + "rev": "dd6b80932022cea34a019e2bb32f6fa9e494dfef", "type": "github" }, "original": { @@ -190,6 +189,7 @@ "flake-parts": { "inputs": { "nixpkgs-lib": [ + "attic", "nixpkgs" ] }, @@ -208,6 +208,26 @@ } }, "flake-parts_2": { + "inputs": { + "nixpkgs-lib": [ + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1730504689, + "narHash": "sha256-hgmguH29K2fvs9szpq2r3pz2/8cJd2LPS+b4tfNFCwE=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "506278e768c2a08bec68eb62932193e341f55c90", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "flake-parts", + "type": "github" + } + }, + "flake-parts_3": { "inputs": { "nixpkgs-lib": [ "nix-super", @@ -268,11 +288,11 @@ "nixpkgs": "nixpkgs" }, "locked": { - "lastModified": 1720223941, - "narHash": "sha256-QDbU8LZzcUSqBp1CBqDj/f5Wd/sdgQ8pZwRWueoMUL4=", + "lastModified": 1723736589, + "narHash": "sha256-/Vdg5ZKtP71ZEKVV6JXlrOEu0CM2Flcs+nwDmWRzgjQ=", "owner": "hercules-ci", "repo": "hercules-ci-agent", - "rev": "2e10fb21fc2e07edf40763b73443e5934bd40947", + "rev": "c303cc8e437c0fd26b9452472e7df5aa374e9177", "type": "github" }, "original": { @@ -291,11 +311,11 @@ ] }, "locked": { - "lastModified": 1719226092, - "narHash": "sha256-YNkUMcCUCpnULp40g+svYsaH1RbSEj6s4WdZY/SHe38=", + "lastModified": 1730903510, + "narHash": "sha256-mnynlrPeiW0nUQ8KGZHb3WyxAxA3Ye/BH8gMjdoKP6E=", "owner": "hercules-ci", "repo": "hercules-ci-effects", - "rev": "11e4b8dc112e2f485d7c97e1cee77f9958f498f5", + "rev": "b89ac4d66d618b915b1f0a408e2775fe3821d141", "type": "github" }, "original": { @@ -399,11 +419,11 @@ "systems": "systems_2" }, "locked": { - "lastModified": 1722409392, - "narHash": "sha256-8QuMS00EutmqzAIPxyJEPxM8EHiWlSKs6E2Htoh3Kes=", + "lastModified": 1725623828, + "narHash": "sha256-5Zrn72PO9yBaNO4Gd5uOsEmRpYH5rVAFKOQ5h2PxyhU=", "owner": "numtide", "repo": "nar-serve", - "rev": "9d0eff868d328fe67c60c26c8ba50e0b9d8de867", + "rev": "e5c749a444f2d14f381c75ef3a8feaa82c333b92", "type": "github" }, "original": { @@ -414,11 +434,11 @@ }, "nix-filter": { "locked": { - "lastModified": 1710156097, - "narHash": "sha256-1Wvk8UP7PXdf8bCCaEoMnOT1qe5/Duqgj+rL8sRQsSM=", + "lastModified": 1730207686, + "narHash": "sha256-SCHiL+1f7q9TAnxpasriP6fMarWE5H43t25F5/9e28I=", "owner": "numtide", "repo": "nix-filter", - "rev": "3342559a24e85fc164b295c3444e8a139924675b", + "rev": "776e68c1d014c3adde193a18db9d738458cd2ba4", "type": "github" }, "original": { @@ -427,10 +447,31 @@ "type": "github" } }, + "nix-github-actions": { + "inputs": { + "nixpkgs": [ + "attic", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1729742964, + "narHash": "sha256-B4mzTcQ0FZHdpeWcpDYPERtyjJd/NIuaQ9+BV1h+MpA=", + "owner": "nix-community", + "repo": "nix-github-actions", + "rev": "e04df33f62cdcf93d73e9a04142464753a16db67", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "nix-github-actions", + "type": "github" + } + }, "nix-super": { "inputs": { "flake-compat": 
"flake-compat_2", - "flake-parts": "flake-parts_2", + "flake-parts": "flake-parts_3", "libgit2": "libgit2", "nixpkgs": "nixpkgs_3", "nixpkgs-regression": [ @@ -504,11 +545,11 @@ }, "nixpkgs_4": { "locked": { - "lastModified": 1722539632, - "narHash": "sha256-g4L+I8rDl7RQy5x8XcEMqNO49LFhrHTzVBqXtG2+FGo=", + "lastModified": 1730785428, + "narHash": "sha256-Zwl8YgTVJTEum+L+0zVAWvXAGbWAuXHax3KzuejaDyo=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "f2d6c7123138044e0c68902268bd8f37dd7e2fa7", + "rev": "4aa36568d413aca0ea84a1684d2d46f55dbabad7", "type": "github" }, "original": { @@ -557,11 +598,11 @@ ] }, "locked": { - "lastModified": 1710146030, - "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", + "lastModified": 1726560853, + "narHash": "sha256-X6rJYSESBVr3hBoH0WbKE5KvhPU5bloyZ2L4K60/fPQ=", "owner": "numtide", "repo": "flake-utils", - "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", + "rev": "c1dfcf08411b08f6b8615f7d8971a2bfa81d5e8a", "type": "github" }, "original": { @@ -577,7 +618,7 @@ "blank": "blank", "devshell": "devshell", "drv-parts": "drv-parts", - "flake-parts": "flake-parts", + "flake-parts": "flake-parts_2", "hercules-ci-agent": "hercules-ci-agent", "hercules-ci-effects": "hercules-ci-effects", "hyprspace": "hyprspace", diff --git a/flake.nix b/flake.nix index 51b9840..419a526 100644 --- a/flake.nix +++ b/flake.nix @@ -54,7 +54,6 @@ nixpkgs.follows = "nixpkgs"; nixpkgs-stable.follows = "nixpkgs"; flake-compat.follows = "blank"; - flake-utils.follows = "repin-flake-utils"; }; }; diff --git a/packages/checks/keycloak-custom-jre.nix b/packages/checks/keycloak-custom-jre.nix index b8ce717..33a6bdf 100644 --- a/packages/checks/keycloak-custom-jre.nix +++ b/packages/checks/keycloak-custom-jre.nix @@ -7,7 +7,8 @@ nixosTest { package = keycloak; database.passwordFile = builtins.toFile "keycloak-test-password" "kcnixostest1234"; settings = { - proxy = "edge"; + http-enabled = true; + proxy-headers = "xforwarded"; hostname = "keycloak.local"; }; }; diff --git a/packages/patched-derivations.nix b/packages/patched-derivations.nix index af28a0e..19357e2 100644 --- a/packages/patched-derivations.nix +++ b/packages/patched-derivations.nix @@ -18,6 +18,10 @@ super: rec { }; }; + jitsi-meet-insecure = let + olm-insecure = assert builtins.length super.olm.meta.knownVulnerabilities > 0; super.olm.overrideAttrs (o: { meta = o.meta // { knownVulnerabilities = []; }; }); + in super.jitsi-meet.override { olm = olm-insecure; }; + jre17_standard = let jre = super.jre_minimal.override { jdk = super.jdk17_headless; @@ -46,7 +50,7 @@ super: rec { prometheus-jitsi-exporter = patch super.prometheus-jitsi-exporter "patches/base/prometheus-jitsi-exporter"; - s3ql = (patch super.s3ql "patches/base/s3ql").overrideAttrs (old: { + s3ql = super.s3ql.overrideAttrs (old: { propagatedBuildInputs = old.propagatedBuildInputs ++ [ super.python3Packages.packaging super.python3Packages.systemd diff --git a/packages/shadows.nix b/packages/shadows.nix index c3a90af..b6e680f 100644 --- a/packages/shadows.nix +++ b/packages/shadows.nix @@ -9,7 +9,9 @@ options.shadows = lib.mkOption { type = with lib.types; lazyAttrsOf package; - default = { }; + default = { + jitsi-meet = self'.packages.jitsi-meet-insecure; + }; }; }; } diff --git a/patches/base/kanidm/unixd-authenticated.patch b/patches/base/kanidm/unixd-authenticated.patch index d02e122..36cb69f 100644 --- a/patches/base/kanidm/unixd-authenticated.patch +++ b/patches/base/kanidm/unixd-authenticated.patch @@ -1,25 +1,28 @@ -diff --git 
-diff --git a/unix_integration/src/idprovider/kanidm.rs b/unix_integration/src/idprovider/kanidm.rs
-index 6fc015756..31593f03e 100644
---- a/unix_integration/src/idprovider/kanidm.rs
-+++ b/unix_integration/src/idprovider/kanidm.rs
-@@ -4,6 +4,7 @@ use kanidm_client::{ClientError, KanidmClient, StatusCode};
- use kanidm_proto::internal::OperationError;
+diff --git a/unix_integration/resolver/src/idprovider/kanidm.rs b/unix_integration/resolver/src/idprovider/kanidm.rs
+index 63cedb4d5..35c45fb0e 100644
+--- a/unix_integration/resolver/src/idprovider/kanidm.rs
++++ b/unix_integration/resolver/src/idprovider/kanidm.rs
+@@ -7,6 +7,7 @@ use kanidm_proto::internal::OperationError;
  use kanidm_proto::v1::{UnixGroupToken, UnixUserToken};
- use tokio::sync::{broadcast, RwLock};
+ use std::collections::BTreeSet;
+ use std::time::{Duration, SystemTime};
 +use std::env;
+ use tokio::sync::{broadcast, Mutex};
- use super::interface::{
- // KeyStore,
-@@ -25,12 +26,28 @@ const TAG_IDKEY: &str = "idkey";
-
- pub struct KanidmProvider {
-     client: RwLock<KanidmClient>,
+ use kanidm_lib_crypto::CryptoPolicy;
+@@ -38,6 +39,8 @@ struct KanidmProviderInternal {
+     hmac_key: HmacKey,
+     crypto_policy: CryptoPolicy,
+     pam_allow_groups: BTreeSet<String>,
+     auth_name: Option<String>,
+     auth_password: Option<String>,
 }
- impl KanidmProvider {
-     pub fn new(client: KanidmClient) -> Self {
+ pub struct KanidmProvider {
+@@ -102,6 +105,19 @@ impl KanidmProvider {
+         .map(|GroupMap { local, with }| (local, Id::Name(with)))
+         .collect();
+
+     let env_username: Option<String>;
+     let env_password: Option<String>;
+     match (env::var_os("KANIDM_NAME"), env::var_os("KANIDM_PASSWORD")) {
@@ -32,23 +35,29 @@ index 6fc015756..31593f03e 100644
+             env_password = None;
+         }
+     }
-         KanidmProvider {
-             client: RwLock::new(client),
-+            auth_name: env_username,
-+            auth_password: env_password,
-         }
++
+     Ok(KanidmProvider {
+         inner: Mutex::new(KanidmProviderInternal {
+             state: CacheState::OfflineNextCheck(now),
+@@ -109,6 +125,8 @@ impl KanidmProvider {
+             hmac_key,
+             crypto_policy,
+             pam_allow_groups,
++            auth_name: env_username,
++            auth_password: env_password
+         }),
+         map_group,
+     })
+@@ -256,7 +274,11 @@ impl KanidmProviderInternal {
 }
- }
-@@ -118,7 +135,11 @@ impl IdProvider for KanidmProvider {
-     // Needs .read on all types except re-auth.
-    async fn provider_authenticate(&self, _tpm: &mut tpm::BoxedDynTpm) -> Result<(), IdpError> {
--        match self.client.write().await.auth_anonymous().await {
+    async fn attempt_online(&mut self, _tpm: &mut tpm::BoxedDynTpm, now: SystemTime) -> bool {
+-        match self.client.auth_anonymous().await {
 +        let auth_method = match (&self.auth_name, &self.auth_password) {
-+            (Some(name), Some(password)) => self.client.write().await.auth_simple_password(name, password).await,
-+            _ => self.client.write().await.auth_anonymous().await
++            (Some(name), Some(password)) => self.client.auth_simple_password(name, password).await,
++            _ => self.client.auth_anonymous().await
 +        };
 +        match auth_method {
-             Ok(_uat) => Ok(()),
-             Err(err) => {
-                 error!(?err, "Provider authentication failed");
+             Ok(_uat) => {
+                 self.state = CacheState::Online;
+                 true
diff --git a/patches/base/s3ql/0000-cache-entry-seek-whence.patch b/patches/base/s3ql/0000-cache-entry-seek-whence.patch
deleted file mode 100644
index e6660b1..0000000
--- a/patches/base/s3ql/0000-cache-entry-seek-whence.patch
+++ /dev/null
@@ -1,18 +0,0 @@
-diff --git a/src/s3ql/block_cache.py b/src/s3ql/block_cache.py
-index a4b55fd1..267b9a12 100644
---- a/src/s3ql/block_cache.py
-+++ b/src/s3ql/block_cache.py
-@@ -86,10 +86,10 @@ class CacheEntry:
-     def flush(self):
-         self.fh.flush()
-
--    def seek(self, off):
-+    def seek(self, off, whence=0):
-         if self.pos != off:
--            self.fh.seek(off)
--            self.pos = off
-+            self.fh.seek(off, whence)
-+            self.pos = self.fh.tell()
-
-     def tell(self):
-         return self.pos
diff --git a/patches/base/s3ql/0001-fix-plain-block-size.patch b/patches/base/s3ql/0001-fix-plain-block-size.patch
deleted file mode 100644
index 958e099..0000000
--- a/patches/base/s3ql/0001-fix-plain-block-size.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-diff --git a/src/s3ql/backends/comprenc.py b/src/s3ql/backends/comprenc.py
-index 6402fec1..9ed3627e 100644
---- a/src/s3ql/backends/comprenc.py
-+++ b/src/s3ql/backends/comprenc.py
-@@ -276,7 +276,7 @@ class ComprencBackend(AbstractBackend):
-             buf.seek(0)
-             fh = buf
-
--        return self.backend.write_fh(key, fh, meta_raw)
-+        return self.backend.write_fh(key, fh, meta_raw, len_=len_ if meta_raw['compression'] == 'None'and meta_raw['encryption'] == 'None' else None)
-
-     def contains(self, key):
-         return self.backend.contains(key)
-diff --git a/src/s3ql/database.py b/src/s3ql/database.py
-index bb4054e6..c2142bf6 100644
---- a/src/s3ql/database.py
-+++ b/src/s3ql/database.py
-@@ -659,7 +659,7 @@ def upload_metadata(
-     )
-     obj = METADATA_OBJ_NAME % (blockno, params.seq_no)
-     fh.seek(blockno * blocksize)
--    backend.write_fh(obj, fh, len_=blocksize)
-+    backend.write_fh(obj, fh, len_=min(blocksize, db_size - blockno * blocksize))
-
-     if not update_params:
-         return
diff --git a/patches/base/s3ql/0002-comprenc-always-copy.patch b/patches/base/s3ql/0002-comprenc-always-copy.patch
deleted file mode 100644
index b1e7912..0000000
--- a/patches/base/s3ql/0002-comprenc-always-copy.patch
+++ /dev/null
@@ -1,17 +0,0 @@
-diff --git a/src/s3ql/backends/comprenc.py b/src/s3ql/backends/comprenc.py
-index 9ed3627e..db419bb7 100644
---- a/src/s3ql/backends/comprenc.py
-+++ b/src/s3ql/backends/comprenc.py
-@@ -276,6 +276,12 @@ class ComprencBackend(AbstractBackend):
-             buf.seek(0)
-             fh = buf
-
-+        if meta_raw['compression'] == 'None' and meta_raw['encryption'] == 'None':
-+            buf = io.BytesIO()
-+            copyfh(fh, buf, len_)
-+            buf.seek(0)
-+            fh = buf
-+
-         return self.backend.write_fh(key, fh, meta_raw, len_=len_ if meta_raw['compression'] == 'None'and meta_raw['encryption'] == 'None' else None)
-
-     def contains(self, key):
diff --git a/patches/base/s3ql/remove-ssl-monkeypatch.patch b/patches/base/s3ql/remove-ssl-monkeypatch.patch
deleted file mode 100644
index 565cd44..0000000
--- a/patches/base/s3ql/remove-ssl-monkeypatch.patch
+++ /dev/null
@@ -1,12 +0,0 @@
-diff --git a/tests/t0_http.py b/tests/t0_http.py
-index 66ed564f..36bebab1 100755
---- a/tests/t0_http.py
-+++ b/tests/t0_http.py
-@@ -289,7 +289,6 @@ def do_GET(self):
-
-         # We don't *actually* want to establish SSL, that'd be
-         # to complex for our mock server
--        monkeypatch.setattr('ssl.match_hostname', lambda x, y: True)
-         conn = HTTPConnection(
-             test_host,
-             test_port,
diff --git a/patches/base/s3ql/s3c-accurate-length.patch b/patches/base/s3ql/s3c-accurate-length.patch
deleted file mode 100644
index bae1c01..0000000
--- a/patches/base/s3ql/s3c-accurate-length.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-commit 1edbbcf08d5701ea38f13fca7491418318aebca9
-Author: Max
-Date:   Fri Jun 7 23:31:08 2024 +0200
-
-    accurate length
-
-diff --git a/src/s3ql/backends/s3c.py b/src/s3ql/backends/s3c.py
-index 2995ca4f..3c3c79ab 100644
---- a/src/s3ql/backends/s3c.py
-+++ b/src/s3ql/backends/s3c.py
-@@ -387,9 +387,13 @@ class Backend(AbstractBackend):
-         '''
-
-         off = fh.tell()
-+        fh.seek(0, os.SEEK_END)
-+        actual_len = fh.tell() - off
-+        fh.seek(off, os.SEEK_SET)
-         if len_ is None:
--            fh.seek(0, os.SEEK_END)
--            len_ = fh.tell()
-+            len_ = actual_len
-+        else:
-+            len_ = min(len_, actual_len)
-         return self._write_fh(key, fh, off, len_, metadata or {})
-
-     @retry
diff --git a/patches/base/s3ql/s3v4.patch b/patches/base/s3ql/s3v4.patch
deleted file mode 100644
index baab9b3..0000000
--- a/patches/base/s3ql/s3v4.patch
+++ /dev/null
@@ -1,392 +0,0 @@
-From 11e3a9cea77cd8498d874f7fd69a938af4da68cd Mon Sep 17 00:00:00 2001
-From: xeji <36407913+xeji@users.noreply.github.com>
-Date: Thu, 28 Mar 2024 22:19:11 +0100
-Subject: [PATCH] new backend s3c4: s3c with V4 request signatures (#349)
-
----
- rst/backends.rst | 15 ++++
- src/s3ql/backends/__init__.py | 3 +-
- src/s3ql/backends/s3.py | 100 ++----------------------
- src/s3ql/backends/s3c4.py | 140 ++++++++++++++++++++++++++++++++++
- src/s3ql/parse_args.py | 2 +-
- tests/mock_server.py | 11 +++
- 6 files changed, 174 insertions(+), 97 deletions(-)
- create mode 100644 src/s3ql/backends/s3c4.py
-
-diff --git a/rst/backends.rst b/rst/backends.rst
-index 7220ee96..4bc68387 100644
---- a/rst/backends.rst
-+++ b/rst/backends.rst
-@@ -341,6 +341,14 @@ can be an arbitrary prefix that will be prepended to all object names
- used by S3QL. This allows you to store several S3QL file systems in
- the same bucket.
-
-+`s3c://` authenticates API requests using AWS V2 signatures, which are
-+deprecated by AWS but still accepted by many S3 compatible services.
-+
-+`s3c4://` denotes a variant of this backend that works the same
-+but uses AWS V4 signatures for request authentication instead: ::
-+
-+   s3c4://<hostname>:<port>/<bucketname>/<prefix>
-+
- The S3 compatible backend accepts the following backend options:
-
- .. option:: no-ssl
-
-@@ -385,6 +393,13 @@ The S3 compatible backend accepts the following backend options:
-   necessary if your storage server does not return a valid response
-   body for a successful copy operation.
-
-+.. option:: sig-region=
-+
-+  For `s3c4://` variant only: Region to use for calculating V4
-+  request signatures. Contrary to S3, the region is not a defined
-+  part of the storage URL and must be specified separately.
-+  Defaults to `us-east-1`.
-+
- .. _`S3 COPY API`: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html
- .. __: https://doc.s3.amazonaws.com/proposals/copy.html
-
-diff --git a/src/s3ql/backends/__init__.py b/src/s3ql/backends/__init__.py
-index a1335762..442828cd 100644
---- a/src/s3ql/backends/__init__.py
-+++ b/src/s3ql/backends/__init__.py
-@@ -6,7 +6,7 @@
- This work can be distributed under the terms of the GNU GPLv3.
- '''
-
--from . import gs, local, rackspace, s3, s3c, swift, swiftks
-+from . import gs, local, rackspace, s3, s3c, s3c4, swift, swiftks
- from .b2.b2_backend import B2Backend
-
- #: Mapping from storage URL prefixes to backend classes
-@@ -15,6 +15,7 @@
-     'local': local.Backend,
-     'gs': gs.Backend,
-     's3c': s3c.Backend,
-+    's3c4': s3c4.Backend,
-     'swift': swift.Backend,
-     'swiftks': swiftks.Backend,
-     'rackspace': rackspace.Backend,
-diff --git a/src/s3ql/backends/s3.py b/src/s3ql/backends/s3.py
-index e05a49ba..5548a855 100644
---- a/src/s3ql/backends/s3.py
-+++ b/src/s3ql/backends/s3.py
-@@ -15,7 +15,7 @@
- from xml.sax.saxutils import escape as xml_escape
-
- from ..logging import QuietError
--from . import s3c
-+from . import s3c4
- from .common import retry
- from .s3c import get_S3Error
-
-@@ -28,22 +28,23 @@
- # pylint: disable=E1002,E1101
-
-
--class Backend(s3c.Backend):
-+class Backend(s3c4.Backend):
-     """A backend to store data in Amazon S3
-
-     This class uses standard HTTP connections to connect to S3.
-     """
-
--    known_options = (s3c.Backend.known_options | {'sse', 'rrs', 'ia', 'oia', 'it'}) - {
-+    known_options = (s3c4.Backend.known_options | {'sse', 'rrs', 'ia', 'oia', 'it'}) - {
-         'dumb-copy',
-         'disable-expect100',
-+        'sig-region',
-     }
-
-     def __init__(self, options):
-         self.region = None
--        self.signing_key = None
-         super().__init__(options)
-         self._set_storage_options(self._extra_put_headers)
-+        self.sig_region = self.region
-
-     def _parse_storage_url(self, storage_url, ssl_context):
-         hit = re.match(r'^s3s?://([^/]+)/([^/]+)(?:/(.*))?$', storage_url)
-@@ -147,94 +148,3 @@ def _delete_multi(self, keys):
-
-         except:
-             self.conn.discard()
--
--    def _authorize_request(self, method, path, headers, subres, query_string):
--        '''Add authorization information to *headers*'''
--
--        # See http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html
--
--        now = time.gmtime()
--        # now = time.strptime('Fri, 24 May 2013 00:00:00 GMT',
--        #                     '%a, %d %b %Y %H:%M:%S GMT')
--
--        ymd = time.strftime('%Y%m%d', now)
--        ymdhms = time.strftime('%Y%m%dT%H%M%SZ', now)
--
--        headers['x-amz-date'] = ymdhms
--        headers['x-amz-content-sha256'] = 'UNSIGNED-PAYLOAD'
--        # headers['x-amz-content-sha256'] = hashlib.sha256(body).hexdigest()
--        headers.pop('Authorization', None)
--
--        auth_strs = [method]
--        auth_strs.append(urllib.parse.quote(path))
--
--        if query_string:
--            s = urllib.parse.urlencode(
--                query_string, doseq=True, quote_via=urllib.parse.quote
--            ).split('&')
--        else:
--            s = []
--        if subres:
--            s.append(urllib.parse.quote(subres) + '=')
--        if s:
--            s = '&'.join(sorted(s))
--        else:
--            s = ''
--        auth_strs.append(s)
--
--        # Headers
--        sig_hdrs = sorted(x.lower() for x in headers.keys())
--        for hdr in sig_hdrs:
--            auth_strs.append('%s:%s' % (hdr, headers[hdr].strip()))
--        auth_strs.append('')
--        auth_strs.append(';'.join(sig_hdrs))
--        auth_strs.append(headers['x-amz-content-sha256'])
--        can_req = '\n'.join(auth_strs)
--        # log.debug('canonical request: %s', can_req)
--
--        can_req_hash = hashlib.sha256(can_req.encode()).hexdigest()
--        str_to_sign = (
"AWS4-HMAC-SHA256\n" -- + ymdhms -- + '\n' -- + '%s/%s/s3/aws4_request\n' % (ymd, self.region) -- + can_req_hash -- ) -- # log.debug('string to sign: %s', str_to_sign) -- -- if self.signing_key is None or self.signing_key[1] != ymd: -- self.update_signing_key(ymd) -- signing_key = self.signing_key[0] -- -- sig = hmac_sha256(signing_key, str_to_sign.encode(), hex=True) -- -- cred = '%s/%04d%02d%02d/%s/s3/aws4_request' % ( -- self.login, -- now.tm_year, -- now.tm_mon, -- now.tm_mday, -- self.region, -- ) -- -- headers['Authorization'] = ( -- 'AWS4-HMAC-SHA256 ' -- 'Credential=%s,' -- 'SignedHeaders=%s,' -- 'Signature=%s' % (cred, ';'.join(sig_hdrs), sig) -- ) -- -- def update_signing_key(self, ymd): -- date_key = hmac_sha256(("AWS4" + self.password).encode(), ymd.encode()) -- region_key = hmac_sha256(date_key, self.region.encode()) -- service_key = hmac_sha256(region_key, b's3') -- signing_key = hmac_sha256(service_key, b'aws4_request') -- -- self.signing_key = (signing_key, ymd) -- -- --def hmac_sha256(key, msg, hex=False): -- d = hmac.new(key, msg, hashlib.sha256) -- if hex: -- return d.hexdigest() -- else: -- return d.digest() -diff --git a/src/s3ql/backends/s3c4.py b/src/s3ql/backends/s3c4.py -new file mode 100644 -index 00000000..37ff0b7a ---- /dev/null -+++ b/src/s3ql/backends/s3c4.py -@@ -0,0 +1,140 @@ -+''' -+s3c4.py - this file is part of S3QL. -+ -+Copyright © 2008 Nikolaus Rath -+ -+This work can be distributed under the terms of the GNU GPLv3. -+''' -+ -+import hashlib -+import hmac -+import logging -+import re -+import time -+import urllib.parse -+from xml.sax.saxutils import escape as xml_escape -+ -+from ..logging import QuietError -+from . import s3c -+from .common import retry -+from .s3c import get_S3Error -+ -+log = logging.getLogger(__name__) -+ -+# Maximum number of keys that can be deleted at once -+MAX_KEYS = 1000 -+ -+# Pylint goes berserk with false positives -+# pylint: disable=E1002,E1101 -+ -+ -+class Backend(s3c.Backend): -+ """A backend to stored data in some S3 compatible storage service. -+ -+ This classes uses AWS Signature V4 for authorization. 
-+ """ -+ -+ known_options = s3c.Backend.known_options | {'sig-region'} -+ -+ def __init__(self, options): -+ self.sig_region = options.backend_options.get('sig-region', 'us-east-1') -+ self.signing_key = None -+ super().__init__(options) -+ -+ def __str__(self): -+ return 's3c4://%s/%s/%s' % (self.hostname, self.bucket_name, self.prefix) -+ -+ def _authorize_request(self, method, path, headers, subres, query_string): -+ '''Add authorization information to *headers*''' -+ -+ # See http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html -+ -+ now = time.gmtime() -+ # now = time.strptime('Fri, 24 May 2013 00:00:00 GMT', -+ # '%a, %d %b %Y %H:%M:%S GMT') -+ -+ ymd = time.strftime('%Y%m%d', now) -+ ymdhms = time.strftime('%Y%m%dT%H%M%SZ', now) -+ -+ # add non-standard port to host header, needed for correct signature -+ if self.port != 443: -+ headers['host'] = '%s:%s' % (self.hostname, self.port) -+ -+ headers['x-amz-date'] = ymdhms -+ headers['x-amz-content-sha256'] = 'UNSIGNED-PAYLOAD' -+ -+ headers.pop('Authorization', None) -+ -+ auth_strs = [method] -+ auth_strs.append(urllib.parse.quote(path)) -+ -+ if query_string: -+ s = urllib.parse.urlencode( -+ query_string, doseq=True, quote_via=urllib.parse.quote -+ ).split('&') -+ else: -+ s = [] -+ if subres: -+ s.append(urllib.parse.quote(subres) + '=') -+ if s: -+ s = '&'.join(sorted(s)) -+ else: -+ s = '' -+ auth_strs.append(s) -+ -+ # Headers -+ sig_hdrs = sorted(x.lower() for x in headers.keys()) -+ for hdr in sig_hdrs: -+ auth_strs.append('%s:%s' % (hdr, headers[hdr].strip())) -+ auth_strs.append('') -+ auth_strs.append(';'.join(sig_hdrs)) -+ auth_strs.append(headers['x-amz-content-sha256']) -+ can_req = '\n'.join(auth_strs) -+ # log.debug('canonical request: %s', can_req) -+ -+ can_req_hash = hashlib.sha256(can_req.encode()).hexdigest() -+ str_to_sign = ( -+ "AWS4-HMAC-SHA256\n" -+ + ymdhms -+ + '\n' -+ + '%s/%s/s3/aws4_request\n' % (ymd, self.sig_region) -+ + can_req_hash -+ ) -+ # log.debug('string to sign: %s', str_to_sign) -+ -+ if self.signing_key is None or self.signing_key[1] != ymd: -+ self.update_signing_key(ymd) -+ signing_key = self.signing_key[0] -+ -+ sig = hmac_sha256(signing_key, str_to_sign.encode(), hex=True) -+ -+ cred = '%s/%04d%02d%02d/%s/s3/aws4_request' % ( -+ self.login, -+ now.tm_year, -+ now.tm_mon, -+ now.tm_mday, -+ self.sig_region, -+ ) -+ -+ headers['Authorization'] = ( -+ 'AWS4-HMAC-SHA256 ' -+ 'Credential=%s,' -+ 'SignedHeaders=%s,' -+ 'Signature=%s' % (cred, ';'.join(sig_hdrs), sig) -+ ) -+ -+ def update_signing_key(self, ymd): -+ date_key = hmac_sha256(("AWS4" + self.password).encode(), ymd.encode()) -+ region_key = hmac_sha256(date_key, self.sig_region.encode()) -+ service_key = hmac_sha256(region_key, b's3') -+ signing_key = hmac_sha256(service_key, b'aws4_request') -+ -+ self.signing_key = (signing_key, ymd) -+ -+ -+def hmac_sha256(key, msg, hex=False): -+ d = hmac.new(key, msg, hashlib.sha256) -+ if hex: -+ return d.hexdigest() -+ else: -+ return d.digest() -diff --git a/src/s3ql/parse_args.py b/src/s3ql/parse_args.py -index 272e10c7..24ad50f4 100644 ---- a/src/s3ql/parse_args.py -+++ b/src/s3ql/parse_args.py -@@ -374,7 +374,7 @@ def storage_url_type(s): - # slash (even when using a prefix), but we can't do that now because it - # would make file systems created without trailing slash inaccessible. 
-     if re.match(r'^(s3|gs)://[^/]+$', s) or re.match(
--        r'^(s3c|swift(ks)?|rackspace)://[^/]+/[^/]+$', s
-+        r'^(s3c|s3c4|swift(ks)?|rackspace)://[^/]+/[^/]+$', s
-     ):
-         s += '/'
-
-diff --git a/tests/mock_server.py b/tests/mock_server.py
-index b453e705..e3084065 100644
---- a/tests/mock_server.py
-+++ b/tests/mock_server.py
-@@ -292,6 +292,16 @@ def send_error(self, status, message=None, code='', resource='', extra_headers=N
-         self.wfile.write(content)
-
-
-+class S3C4RequestHandler(S3CRequestHandler):
-+    '''Request Handler for s3c4 backend
-+
-+    Currently identical to S3CRequestHandler since mock request handlers
-+    do not check request signatures.
-+    '''
-+
-+    pass
-+
-+
- class BasicSwiftRequestHandler(S3CRequestHandler):
-     '''A request handler implementing a subset of the OpenStack Swift Interface
-
-@@ -569,6 +579,7 @@ def inline_error(http_status, body):
- #: corresponding storage urls
- handler_list = [
-     (S3CRequestHandler, 's3c://%(host)s:%(port)d/s3ql_test'),
-+    (S3C4RequestHandler, 's3c4://%(host)s:%(port)d/s3ql_test'),
-     # Special syntax only for testing against mock server
-     (BasicSwiftRequestHandler, 'swift://%(host)s:%(port)d/s3ql_test'),
-     (CopySwiftRequestHandler, 'swift://%(host)s:%(port)d/s3ql_test'),