Compare commits
3 commits
| Author | SHA1 | Date |
| --- | --- | --- |
| | 727af63d6f | |
| | 0f3bd138e5 | |
| | a8a564a2bb | |
678 changed files with 67213 additions and 9697 deletions
.dvc/.gitignore (vendored, Normal file, 3 lines)
@@ -0,0 +1,3 @@
/config.local
/tmp
/cache
.dvc/config (Normal file, 5 lines)
@@ -0,0 +1,5 @@
[core]
    remote = cdn
['remote "cdn"']
    url = s3://content-delivery/assets
    endpointurl = https://object-storage.privatevoid.net
.dvcignore (Normal file, 3 lines)
@@ -0,0 +1,3 @@
# Add patterns of files dvc should ignore, which could improve
# the performance. Learn more at
# https://dvc.org/doc/user-guide/dvcignore
.gitignore (vendored, 4 lines)
@@ -2,6 +2,4 @@
result
result-*
**/.direnv/
.data/
.cache/
.nixos-test-history
.data/
@@ -1,10 +0,0 @@
{ lib, ... }:

{
  perSystem = {
    options.catalog = lib.mkOption {
      type = with lib.types; lazyAttrsOf (lazyAttrsOf (lazyAttrsOf (submodule ./target.nix)));
      default = {};
    };
  };
}
@@ -1,31 +0,0 @@
{ lib, name, ... }:

{
  options = {
    description = lib.mkOption {
      type = lib.types.str;
      default = name;
    };

    actions = lib.mkOption {
      type = with lib.types; lazyAttrsOf (submodule {
        options = {
          description = lib.mkOption {
            type = lib.types.str;
            default = name;
          };

          command = lib.mkOption {
            type = lib.types.str;
          };

          packages = lib.mkOption {
            type = with lib.types; listOf package;
            default = [];
          };
        };
      });
      default = {};
    };
  };
}
@@ -1,6 +0,0 @@
{
  imports = [
    ./services.nix
    ./secrets.nix
  ];
}
@@ -1,73 +0,0 @@
{ config, lib, withSystem, ... }:

let
  inherit (config) cluster hours;
in

{
  perSystem = { config, pkgs, system, ... }: {
    catalog.cluster = {
      secrets = lib.pipe cluster.config.services [
        (lib.mapAttrsToList (svcName: svcConfig: lib.mapAttrsToList (secretName: secretConfig: {
          name = "${svcName}/${secretName}";
          value = {
            description = "Cluster secret '${secretName}' of service '${svcName}'";
            actions = let
              agenixRules = builtins.toFile "agenix-rules-shim.nix" /*nix*/ ''
                builtins.fromJSON (builtins.readFile (builtins.getEnv "AGENIX_KEYS_JSON"))
              '';

              mkKeys = secretFile: nodes: builtins.toFile "agenix-keys.json" (builtins.toJSON {
                "${secretFile}".publicKeys = (map (hour: hours.${hour}.ssh.id.publicKey) nodes) ++ cluster.config.secrets.extraKeys;
              });

              setupCommands = secretFile: nodes: let
                agenixKeysJson = mkKeys secretFile nodes;
              in ''
                export RULES='${agenixRules}'
                export AGENIX_KEYS_JSON='${agenixKeysJson}'
                mkdir -p "$PRJ_ROOT/cluster/secrets"
                cd "$PRJ_ROOT/cluster/secrets"
              '';
            in (lib.optionalAttrs (secretConfig.generate != null) {
              generateSecret = {
                description = "Generate this secret";
                command = if secretConfig.shared then let
                  secretFile = "${svcName}-${secretName}.age";
                in ''
                  ${setupCommands secretFile secretConfig.nodes}
                  ${withSystem system secretConfig.generate} | agenix -e '${secretFile}'
                '' else lib.concatStringsSep "\n" (map (node: let
                  secretFile = "${svcName}-${secretName}-${node}.age";
                in ''
                  ${setupCommands secretFile [ node ]}
                  ${withSystem system secretConfig.generate} | agenix -e '${secretFile}'
                '') secretConfig.nodes);
              };
            }) // (if secretConfig.shared then let
              secretFile = "${svcName}-${secretName}.age";
            in {
              editSecret = {
                description = "Edit this secret";
                command = ''
                  ${setupCommands secretFile secretConfig.nodes}
                  agenix -e '${secretFile}'
                '';
              };
            } else lib.mapAttrs' (name: lib.nameValuePair "editSecretInstance-${name}") (lib.genAttrs secretConfig.nodes (node: let
              secretFile = "${svcName}-${secretName}-${node}.age";
            in {
              description = "Edit this secret for '${node}'";
              command = ''
                ${setupCommands secretFile [ node ]}
                agenix -e '${secretFile}'
              '';
            })));
          };
        }) svcConfig.secrets))
        lib.concatLists
        lib.listToAttrs
      ];
    };
  };
}
@@ -1,52 +0,0 @@
{ config, lib, ... }:

let
  inherit (config) cluster flake;
in

{
  perSystem = { config, pkgs, ... }: {
    catalog.cluster = {
      services = lib.mapAttrs (name: svc: {
        description = "Cluster service: ${name}";
        actions = let
          mkDeployAction = { description, agents }: {
            inherit description;
            packages = [
              config.packages.cachix
              pkgs.tmux
            ];
            command = let
              cachixDeployJson = pkgs.writeText "cachix-deploy.json" (builtins.toJSON {
                agents = lib.genAttrs agents (name: builtins.unsafeDiscardStringContext flake.nixosConfigurations.${name}.config.system.build.toplevel);
              });
            in ''
              set -e
              echo building ${toString (lib.length agents)} configurations in parallel
              tmux new-session ${lib.concatStringsSep " split-window " (
                map (host: let
                  drvPath = builtins.unsafeDiscardStringContext flake.nixosConfigurations.${host}.config.system.build.toplevel.drvPath;
                in '' 'echo building configuration for ${host}; nix build -L --no-link --store "ssh-ng://${host}" --eval-store auto "${drvPath}^*"'\; '') agents
              )} select-layout even-vertical

              source ~/.config/cachix/deploy
              cachix deploy activate ${cachixDeployJson}
              echo
            '';
          };
        in {
          deployAll = mkDeployAction {
            description = "Deploy ALL groups of this service.";
            agents = lib.unique (lib.concatLists (lib.attrValues svc.nodes));
          };
        } // lib.mapAttrs' (group: agents: {
          name = "deployGroup-${group}";
          value = mkDeployAction {
            description = "Deploy the '${group}' group of this service.";
            inherit agents;
          };
        }) svc.nodes;
      }) cluster.config.services;
    };
  };
}
@@ -1,4 +1,4 @@
{ lib, depot }:
{ lib, depot, hostName }:

lib.evalModules {
  specialArgs = {
@@ -7,18 +7,16 @@ lib.evalModules {
  modules = [
    # Arbitrary variables to reference across multiple services
    ./lib/vars
    { vars = { inherit hostName; }; }

    # Cluster-level port-magic
    ../modules/port-magic

    ../tools/inject.nix
    ./lib/services.nix
    ./lib/inject-nixos-config.nix
    ./lib/port-magic-multi.nix
    ./lib/mesh.nix
    ./lib/secrets.nix
    ./lib/testing.nix
    ./lib/lib.nix

    ./import-services.nix
  ];
}
}
cluster/inject.nix (Normal file, 15 lines)
@@ -0,0 +1,15 @@
hostName:
{ depot, lib, ... }:

let
  cluster = import ./. { inherit lib depot hostName; };
in

{
  _module.args.cluster = {
    inherit (cluster.config) vars;
    inherit (cluster.config.vars) hosts;
    inherit (cluster) config;
  };
  imports = cluster.config.out.injectedNixosConfig;
}
@@ -1,10 +1,10 @@
{ config, lib, ... }:
{ lib, ... }:
with lib;

{
  options.out = mkOption {
    description = "Output functions.";
    type = with types; lazyAttrsOf (functionTo raw);
    default = const [];
  options.out.injectedNixosConfig = mkOption {
    description = "NixOS configuration modules to inject into the host.";
    type = with types; listOf anything;
    default = {};
  };
}
@@ -1,12 +0,0 @@
{ config, lib, ... }:

{
  options.lib = {
    forService = lib.mkOption {
      description = "Enable these definitions for a particular service only.";
      type = lib.types.functionTo lib.types.raw;
      readOnly = true;
      default = service: lib.mkIf (!config.simulacrum || lib.any (s: s == service) config.testConfig.activeServices);
    };
  };
}
@@ -1,17 +0,0 @@
{ config, lib, ... }:

{
  hostLinks = lib.pipe config.services [
    (lib.filterAttrs (_: svc: svc.meshLinks != {}))
    (lib.mapAttrsToList (svcName: svc:
      lib.mapAttrsToList (groupName: links:
        lib.genAttrs svc.nodes.${groupName} (hostName: lib.mapAttrs (_: cfg: { ... }: {
          imports = [ cfg.link ];
          ipv4 = config.vars.mesh.${hostName}.meshIp;
        }) links)
      ) svc.meshLinks
    ))
    (map lib.mkMerge)
    lib.mkMerge
  ];
}
@@ -1,14 +0,0 @@
{ lib, ... }:

{
  options.secrets = {
    extraKeys = lib.mkOption {
      type = with lib.types; listOf str;
      description = "Additional keys with which to encrypt all secrets.";
      default = [
        "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIL5C7mC5S2gM0K6x0L/jNwAeQYbFSzs16Q73lONUlIkL max@TITAN"
        "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMmdWfmAs/0rno8zJlhBFMY2SumnHbTNdZUXJqxgd9ON max@jericho"
      ];
    };
  };
}
@@ -1,16 +1,14 @@
{ config, lib, name, ... }:
vars:
{ config, lib, ... }:
with lib;

let
  filterGroup = group: hostName: builtins.filter (x: x != hostName) group;
  serviceName = name;
  notSelf = x: x != vars.hostName;

  filterGroup = builtins.filter notSelf;
in

{
  imports = [
    ./services/secrets.nix
  ];

  options = {
    nodes = mkOption {
      description = ''
@@ -23,12 +21,12 @@ in
        * X evaluators, Y smallBuilders, Z bigBuilders
        etc.
      '';
      type = with types; lazyAttrsOf (oneOf [ str (listOf str) ]);
      type = with types; attrsOf (oneOf [ str (listOf str) ]);
      default = [];
    };
    otherNodes = mkOption {
      description = "Other nodes in the group.";
      type = with types; lazyAttrsOf (functionTo (listOf str));
      type = with types; attrsOf (listOf str);
      default = [];
    };
    nixos = mkOption {
@@ -36,36 +34,6 @@ in
      type = with types; attrs;
      default = {};
    };
    meshLinks = mkOption {
      description = "Create host links on the mesh network.";
      type = types.attrsOf (types.attrsOf (types.submodule {
        options = {
          link = mkOption {
            type = types.deferredModule;
            default = {};
          };
        };
      }));
      default = {};
    };
    simulacrum = {
      enable = mkEnableOption "testing this service in the Simulacrum";
      deps = mkOption {
        description = "Other services to include.";
        type = with types; listOf str;
        default = [];
      };
      settings = mkOption {
        description = "NixOS test configuration.";
        type = types.deferredModule;
        default = {};
      };
      augments = mkOption {
        description = "Cluster augments (will be propagated).";
        type = types.deferredModule;
        default = {};
      };
    };
  };
  config.otherNodes = builtins.mapAttrs (const filterGroup) config.nodes;
  config.otherNodes = builtins.mapAttrs (_: filterGroup) config.nodes;
}
@@ -2,48 +2,18 @@
with lib;

let
  getHostConfigurations = hostName: svcName: svcConfig: let
    serviceConfigs =
      lib.mapAttrsToList (groupName: _: svcConfig.nixos.${groupName})
        (lib.filterAttrs (_: lib.elem hostName) svcConfig.nodes);
  getHostConfigurations = svcConfig: hostName:
    lib.mapAttrsToList (groupName: _: svcConfig.nixos.${groupName})
      (lib.filterAttrs (_: lib.elem hostName) svcConfig.nodes);

    secretsConfig = let
      secrets = lib.filterAttrs (_: secret: lib.any (node: node == hostName) secret.nodes) svcConfig.secrets;
    in {
      age.secrets = lib.mapAttrs' (secretName: secretConfig: {
        name = "cluster-${svcName}-${secretName}";
        value = {
          inherit (secretConfig) path mode owner group;
          file = ../secrets/${svcName}-${secretName}${lib.optionalString (!secretConfig.shared) "-${hostName}"}.age;
        };
      }) secrets;

      systemd.services = lib.mkMerge (lib.mapAttrsToList (secretName: secretConfig: lib.genAttrs secretConfig.services (systemdServiceName: {
        restartTriggers = [ "${../secrets/${svcName}-${secretName}${lib.optionalString (!secretConfig.shared) "-${hostName}"}.age}" ];
      })) secrets);
    };
  in serviceConfigs ++ [
    secretsConfig
  ];

  introspectionModule._module.args.cluster = {
    inherit (config) vars;
    inherit config;
  };
  getServiceConfigurations = svcConfig: getHostConfigurations svcConfig config.vars.hostName;
in

{
  options.services = mkOption {
    description = "Cluster services.";
    type = with types; attrsOf (submodule ./service-module.nix);
    type = with types; attrsOf (submodule (import ./service-module.nix config.vars));
    default = {};
  };

  config.out = {
    injectNixosConfigForServices = services: hostName: (lib.flatten (lib.mapAttrsToList (getHostConfigurations hostName) (lib.getAttrs services config.services))) ++ [
      introspectionModule
    ];

    injectNixosConfig = config.out.injectNixosConfigForServices (lib.attrNames config.services);
  };
  config.out.injectedNixosConfig = lib.flatten (lib.mapAttrsToList (_: getServiceConfigurations) config.services);
}
@@ -1,57 +0,0 @@
{ lib, name, ... }:

let
  serviceName = name;
in

{
  options.secrets = lib.mkOption {
    type = lib.types.lazyAttrsOf (lib.types.submodule ({ config, name, ... }: {
      options = {
        shared = lib.mkOption {
          type = lib.types.bool;
          default = true;
          description = "Whether this secret should be the same on all nodes.";
        };

        nodes = lib.mkOption {
          type = with lib.types; listOf str;
          default = [ ];
        };

        generate = lib.mkOption {
          type = with lib.types; nullOr (functionTo str);
          description = "Command used to generate this secret.";
          default = null;
        };

        path = lib.mkOption {
          type = lib.types.path;
          default = "/run/agenix/cluster-${serviceName}-${name}";
        };

        mode = lib.mkOption {
          type = lib.types.str;
          default = "0400";
        };

        owner = lib.mkOption {
          type = lib.types.str;
          default = "root";
        };

        group = lib.mkOption {
          type = lib.types.str;
          default = "root";
        };

        services = lib.mkOption {
          type = with lib.types; listOf str;
          description = "Services to restart when this secret changes.";
          default = [];
        };
      };
    }));
    default = {};
  };
}
@@ -1,15 +0,0 @@
{ lib, ... }:

{
  options = {
    simulacrum = lib.mkOption {
      description = "Whether we are in the Simulacrum.";
      type = lib.types.bool;
      default = false;
    };
    testConfig = lib.mkOption {
      type = lib.types.attrs;
      readOnly = true;
    };
  };
}
@@ -1,16 +0,0 @@
{ depot, lib, ... }:

{
  imports = [
    ./catalog
    ./simulacrum/checks.nix
  ];

  options.cluster = lib.mkOption {
    type = lib.types.raw;
  };

  config.cluster = import ./. {
    inherit depot lib;
  };
}
Binary file not shown.
@@ -1,13 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 d3WGuA ZLjCSe5wrN6abvvRmQjE+VXtRr+avP/CLPD7djXNr0M
g8i9ambJGL2Q+ZLB6c6MxV9BryAgX4qZctJ9qByJ4n8
-> ssh-ed25519 P/nEqQ zSGcZuufOAnTkPr74ZjwyISdLlfxBxqgmyWivxq1/Uo
gArusBfIfsZ5/gwMYHLzDHTbgVGWDttbi0IAhvclRO4
-> ssh-ed25519 YIaSKQ J4Fy0VSjdMPRgzysQptIUKiRR0TAgu0q1BYhtIpGkWU
kKzmF3OUbGU40d33R15nMraUDZiFRoz9Z00XjjSk9Jw
-> ssh-ed25519 NO562A BNQV8JodzTiNs/V+rFQxcsrhKJ3nRIFtWk6VxHzCRio
ZyauAdOrPbADSDdBQoB+39MB2r7Ro4d0XwZIjf2z9Jo
-> ssh-ed25519 5/zT0w hdMuyOmNKTlMKPn4w9VQFVXZkJNm1XSPAZ/Zip5WW04
wcnur+BRQPqKzpV3vl7pn1VIGRK3GxQEUaQIefrZuI4
--- 5AdxXgFmDm2w012QjpJ3gqlbfvkPm8fkEJjm8kV18G0
[binary ciphertext not shown]
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,9 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A jNUNRaIQC1DUBiacnWc3xjMUAxnAgiyJhRA74cof3Ec
oZZq1AQ3F0nvrk7KpinLWgT3cIoCYZ5R1s0us69OI8E
-> ssh-ed25519 5/zT0w FmoxaTg75/xaDmSOHL5Xs6QOr5rLG/sr5TmPMfkOqxw
XXQrFxxt5GOzXgJoPY8U71NSYi/IWmL3QrenvOuQ43Q
-> ssh-ed25519 YIaSKQ ++dqG+dr8ie+4sPW7L+eVkXvOVvM+/oBR722S2sQsSg
879pmnhOtZ/MiMUwDlyujykQXNmCepI2FSU2QcvvkrA
--- QcvlVdv2fYMKmT/aCpTjdmGJ+9KnUvZCZNtl7WhgCbw
[binary ciphertext not shown]
Binary file not shown.
@@ -1,11 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A eB0Rkoz721eI1UlyAhHWIrBnTEFoh6z3UL24EljaNzA
dNsoal+y68XM4HXRyg1PUmrWilW1n3h78TmTcqHFEjc
-> ssh-ed25519 5/zT0w SF16JelBZe0vZtzNEHiEfprJOqzoyxhTH3ldQdbo5wE
95wJNWQEGqHj4Pknnk1RrgWPOqZOhlNsSvFTv8rfc08
-> ssh-ed25519 YIaSKQ 68vS4sQGTDEaTVVxfs/xeTv379MQ3JE7iyLb1PbUuis
1Bh53X0QFednXw74lQ+FbqNDkLBra9rx6nOybcD3FiQ
--- HIcPirpTTtlUUGEemDXND/nwiWs4BEhM4rYX18mx71E
[binary ciphertext not shown]
@@ -1,12 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A 2Su0u03W90TKuR0wg1/dcokIbTzO5eATBmkFPyqfJG0
IhBAWy5YYFFOqG9hc+AkVrKewTls84SFV9Kz/lOTV2U
-> ssh-ed25519 5/zT0w YsyFCW1FsiGwiYJNYCITlLWk6Y5dR3K5v+gJqlsWQTg
vtR1GCT2zrHNco/yPvMqQmlPyDja53lSRsO1DmnCSlo
-> ssh-ed25519 P/nEqQ c8l4fOuvZn9V8+6vpRpGNGldEi4iA+5qVg1B+jArU1w
zgS0urO8MZYo8cZq5Nz/R1x9cZ0vZgppJx6X5UecJ0s
-> ?^lS,zDo-grease ^ZMN! V*+oK^9 GyJ[
ZATLlHQ+kFjStI2ykQXq+KhvAR+XeW+POj6cJ59awzpMwq8JGbyaE1m5Cq8XA6u3
xFE6
--- 3JfCfv5CJYKGuvnbvoymhHhfkM99NkYMdGOL3Xicga8
[binary ciphertext not shown]
Binary file not shown.
@@ -1,15 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A o0R34LvRy19TseKFBi6iJVJSpuPWamIlL1UnX95+yVU
9yjfDbf7J9q/L2Z8OkFlOcniYNfO9YJBdtNkLyQAzF4
-> ssh-ed25519 5/zT0w AqcfbKIO1vE0TjkDvZOkCcMeRCz5ATfQZyoKecoDWQE
beYLRlS/ZzteQ1MNhyGuIenuEHSRqkzYJRasomThBLU
-> ssh-ed25519 FfIUuQ 9JeHQPQgOYSzA2cjR6jwisZYPRRYGQMSyOW49LVEo30
TAd1otmjEo1CvOVX3gZe2rk6Rk/IEjF2DllpQ9+o6ak
-> ssh-ed25519 d3WGuA 1RNgW2d+Nh66Li4hAsP7/3ATZsqHWL+9pwNqPej1ykw
tN6e8kBNz4tknvWBrVoQ6nABbeglac6AVUlz32ZFMzA
-> ssh-ed25519 P/nEqQ oHqCnNvCWIprBbVy0w2LdHBaJllbNwD+WbdAgIFrhww
6Dgnv/HyYaFzAiRUySqYVjBPScLMK8M9Roo8UCqMGEM
-> 4Vf|uj93-grease x5,kZ; -xgrjD8
6Gw1SIrH9N0GVksuPQm46tD/7Ckn6vkG5Z9CDhu4hj4YO1X8ug
--- eo6VHBS0rJXNXA4MFGBtVfJRQ7hNNJ7PMeMjvE1Hms8
[binary ciphertext not shown]
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,12 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A BNIU8M5X5C4LSiie6S4zVraFQAsyGKAv7BwLVIXHiFM
LLcXZ7tiTUnN+tJLwqqs1hLZ8usCDWqNVGr1lAn5OQs
-> ssh-ed25519 5/zT0w H/SGf0oYVg/JCd07bicWL1LWQwExr0gbi+gV1j7Fy2M
yHjguPtS8ItpY+pAR3lLVpXQxq7d3cuQYU5DHs2qjMc
-> ssh-ed25519 P/nEqQ z1us0mTbOuLrkI7n6doG+JVFAuqwZvC0dEfdGauM+Fg
P/tKnt5gZ66HAWR0/pqpmJMHp6hLbcjwE3BhO9NCkZY
-> ((I-grease
r66LwGiqumMp/NlcnLgOaxZ7cfQMBCr4Rq9aJdjUck69113hNf4orC/bGVCDhmdu
s1cSHPVw1hys
--- FxWSO98U5IDaGPs57hzO70gVN/ELN0/UxKKmIoxadks
[binary ciphertext not shown]
@@ -1,60 +0,0 @@
{ config, pkgs, ... }:

let
  lift = config;
in

{
  nowhere.names = {
    "acme-v02.api.letsencrypt.org" = "stepCa";
    "api.buypass.com" = "stepCa";
  };

  nodes.nowhere = { config, ... }: {
    links.stepCa.protocol = "https";

    environment.etc.step-ca-password.text = "";

    services = {
      step-ca = {
        enable = true;
        address = config.links.stepCa.ipv4;
        inherit (config.links.stepCa) port;
        intermediatePasswordFile = "/etc/step-ca-password";
        settings = {
          root = "${lift.nowhere.certs.ca}/ca.pem";
          crt = "${lift.nowhere.certs.intermediate}/cert.pem";
          key = "${lift.nowhere.certs.intermediate}/cert-key.pem";
          address = config.links.stepCa.tuple;
          db = {
            type = "badgerv2";
            dataSource = "/var/lib/step-ca/db";
          };
          authority.provisioners = [
            {
              type = "ACME";
              name = "snakeoil";
              challenges = [
                "dns-01"
                "http-01"
              ];
            }
          ];
        };
      };

      nginx.virtualHosts = {
        "acme-v02.api.letsencrypt.org".locations."/".extraConfig = ''
          rewrite /directory /acme/snakeoil/directory break;
        '';
        "api.buypass.com".locations."/".extraConfig = ''
          rewrite /acme/directory /acme/snakeoil/directory break;
        '';
      };
    };
  };

  defaults.environment.etc."dummy-secrets/acmeDnsApiKey".text = "ACME_DNS_DIRECT_STATIC_KEY=simulacrum";
  defaults.environment.etc."dummy-secrets/acmeDnsDirectKey".text = "ACME_DNS_DIRECT_STATIC_KEY=simulacrum";
  defaults.environment.etc."dummy-secrets/acmeDnsDbCredentials".text = "PGPASSWORD=simulacrum";
}
@@ -1,82 +1,10 @@
{ cluster, config, depot, lib, pkgs, ... }:

let
  authoritativeServers = map
    (node: cluster.config.hostLinks.${node}.dnsAuthoritative.tuple)
    cluster.config.services.dns.nodes.authoritative;

  execScript = pkgs.writeShellScript "acme-dns-exec" ''
    action="$1"
    subdomain="''${2%.${depot.lib.meta.domain}.}"
    key="$3"
    umask 77
    source "$EXEC_ENV_FILE"
    headersFile="$(mktemp)"
    echo "X-Direct-Key: $ACME_DNS_DIRECT_STATIC_KEY" > "$headersFile"
    case "$action" in
      present)
        for i in {1..5}; do
          ${pkgs.curl}/bin/curl -X POST -s -f -H "@$headersFile" \
            "${cluster.config.links.acmeDnsApi.url}/update" \
            --data '{"subdomain":"'"$subdomain"'","txt":"'"$key"'"}' && break
          sleep 5
        done
        ;;
    esac
  '';
in
{ cluster, config, pkgs, ... }:

{
  age.secrets.acmeDnsApiKey = {
    file = ../dns/acme-dns-direct-key.age;
    owner = "acme";
  };
  age.secrets.pdns-api-key-acme = cluster.config.vars.pdns-api-key-secret // { owner = "acme"; };

  security.acme.acceptTerms = true;
  security.acme.maxConcurrentRenewals = 0;
  security.acme.defaults = {
    email = depot.lib.meta.adminEmail;
    extraLegoFlags = lib.flatten [
      (map (x: [ "--dns.resolvers" x ]) authoritativeServers)
      "--dns-timeout" "30"
    ];
    credentialsFile = pkgs.writeText "acme-exec-config" ''
      EXEC_PATH=${execScript}
      EXEC_ENV_FILE=${config.age.secrets.acmeDnsApiKey.path}
    '';
  };

  systemd.services = lib.mapAttrs' (name: value: {
    name = "acme-${name}";
    value = {
      distributed.enable = value.dnsProvider != null;
      preStart = let
        serverList = lib.pipe authoritativeServers [
          (map (x: "@${x}"))
          (map (lib.replaceStrings [":53"] [""]))
          lib.escapeShellArgs
        ];
        domainList = lib.pipe ([ value.domain ] ++ value.extraDomainNames) [
          (map (x: "${x}."))
          (map (lib.replaceStrings ["*"] ["x"]))
          lib.unique
          lib.escapeShellArgs
        ];
      in ''
        echo Testing availability of authoritative DNS servers
        for i in {1..60}; do
          ${pkgs.dig}/bin/dig +short ${serverList} ${domainList} >/dev/null && break
          echo Retry [$i/60]
          sleep 10
        done
        echo Available
      '';
      serviceConfig = {
        Restart = "on-failure";
        RestartMaxDelaySec = 30;
        RestartSteps = 5;
        RestartMode = "direct";
      };
    };
  }) config.security.acme.certs;
  security.acme.defaults.credentialsFile = pkgs.writeText "acme-pdns-credentials" ''
    PDNS_API_URL=${cluster.config.links.powerdns-api.url}
    PDNS_API_KEY_FILE=${config.age.secrets.pdns-api-key-acme.path}
  '';
}
@@ -1,7 +1,6 @@
{
  services.acme-client = {
    nodes.client = [ "checkmate" "grail" "thunderskin" "VEGAS" "prophet" ];
    nodes.client = [ "checkmate" "thunderskin" "VEGAS" "prophet" ];
    nixos.client = ./client.nix;
    simulacrum.augments = ./augment.nix;
  };
}
cluster/services/attic/attic-server-token.age (Normal file, 11 lines)
@@ -0,0 +1,11 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A mLMev+YA6zSxAWIIlwseZk8Skl4hfNNsWQPmLV7DxTo
AEi55ZXzyYbZyludcP5Yywx7QDgFODh6z8+M2nxMAl4
-> ssh-ed25519 5/zT0w 91baPvXx4UdmyYCCIqc1M+Cb7pqdSx3/cgmfuexeUgY
kePQp8flAsXPMLxJiQPoJuHEPPI+FzaSF+VL9U9jhwI
-> ssh-ed25519 d3WGuA U8Q68hN+5fI4xto/lpCiVyts00ezCzftfLvFFew7aiY
B4wv05Y2gpl5qjV1Rbc6JSJk3coN6TFMB5FspwzLnlI
-> :0eX-grease
ghW6iCUZj0e04I8Ba3CHzg
--- aHnzzTi1WxtHXGcQO1hNgmy04wyyObmYBcSq5fmbnAg
[binary ciphertext not shown]
@@ -1,16 +1,13 @@
{ config, cluster, depot, lib, ... }:
with depot.lib.nginx;
{ config, tools, ... }:
with tools.nginx;
let
  addrSplit' = builtins.split ":" config.services.minio.listenAddress;
  addrSplit = builtins.filter builtins.isString addrSplit';
  host' = builtins.head addrSplit;
  host = if host' == "" then "127.0.0.1" else host';
  port = builtins.head (builtins.tail addrSplit);
in
{
  links = {
    atticNixStoreInternalRedirect.protocol = "http";
    garageNixStoreInternalRedirect.protocol = "http";
  };

  security.acme.certs."cache.${depot.lib.meta.domain}" = {
    dnsProvider = "exec";
    webroot = lib.mkForce null;
  };

  services.nginx.upstreams = {
    nar-serve.extraConfig = ''
      random;
@@ -18,73 +15,40 @@ with depot.lib.nginx;
      server ${config.links.nar-serve-nixos-org.tuple} fail_timeout=0;
    '';
    nix-store.servers = {
      "${config.links.garageNixStoreInternalRedirect.tuple}" = {
      "${config.links.atticServer.tuple}" = {
        fail_timeout = 0;
      };
      "${config.links.atticNixStoreInternalRedirect.tuple}" = {
      "${host}:${port}" = {
        fail_timeout = 0;
        backup = true;
      };
    };
  };
  services.nginx.appendHttpConfig = ''
    proxy_cache_path /var/cache/nginx/nixstore levels=1:2 keys_zone=nixstore:10m max_size=10g inactive=24h use_temp_path=off;
  '';
  services.nginx.virtualHosts = {
    "cache.${depot.lib.meta.domain}" = vhosts.basic // {
      locations = {
        "= /".return = "302 /404";
        "/" = {
          proxyPass = "http://nix-store";
          extraConfig = ''
            proxy_next_upstream error http_500 http_502 http_404;
          '';
        };
        "/nix/store" = {
          proxyPass = "http://nar-serve";
          extraConfig = ''
            proxy_next_upstream error http_500 http_404;
          '';
        };
      };
      extraConfig = ''
        proxy_cache nixstore;
        proxy_cache_use_stale error timeout http_500 http_502;
        proxy_cache_lock on;
        proxy_cache_key $request_uri;
        proxy_cache_valid 200 24h;
      '';
    };
    "garage-nix-store.internal.${depot.lib.meta.domain}" = {
      serverName = "127.0.0.1";
      listen = [
        {
          addr = "127.0.0.1";
          inherit (config.links.garageNixStoreInternalRedirect) port;
        }
      ];
      locations."/" = {
        proxyPass = with cluster.config.links.garageWeb; "${protocol}://nix-store.${hostname}";
        recommendedProxySettings = false;
  services.nginx.virtualHosts."cache.${tools.meta.domain}" = vhosts.basic // {
    locations = {
      "= /".return = "302 /404";
      "/" = {
        proxyPass = "http://nix-store/nix-store$request_uri";
        extraConfig = ''
          proxy_set_header Host "nix-store.${cluster.config.links.garageWeb.hostname}";
        '';
      };
    };
    "attic-nix-store.internal.${depot.lib.meta.domain}" = {
      serverName = "127.0.0.1";
      listen = [
        {
          addr = "127.0.0.1";
          inherit (config.links.atticNixStoreInternalRedirect) port;
        }
      ];
      locations."/" = {
        proxyPass = "https://cache-api.${depot.lib.meta.domain}/nix-store$request_uri";
        recommendedProxySettings = false;
        extraConfig = ''
          proxy_set_header Host "cache-api.${depot.lib.meta.domain}";
          proxy_next_upstream error http_500 http_404;
        '';
      };
      "/nix/store" = {
        proxyPass = "http://nar-serve";
        extraConfig = ''
          proxy_next_upstream error http_500 http_404;
        '';
      };
    };
    extraConfig = ''
      proxy_cache nixstore;
      proxy_cache_use_stale error timeout http_500 http_502;
      proxy_cache_lock on;
      proxy_cache_key $request_uri;
      proxy_cache_valid 200 24h;
    '';
  };
}
@@ -1,60 +1,14 @@
{ config, depot, ... }:

{
  services.attic = {
    nodes = {
      monolith = [ "VEGAS" "prophet" ];
      server = [ "VEGAS" "grail" "prophet" ];
      server = [ "VEGAS" ];
    };
    nixos = {
      monolith = [
        ./server.nix
      ];
      server = [
        ./server.nix
        ./binary-cache.nix
        ./nar-serve.nix
      ];
    };
    meshLinks.server.attic.link.protocol = "http";
    secrets = let
      inherit (config.services.attic) nodes;
    in {
      serverToken = {
        nodes = nodes.server;
      };
      dbCredentials = {
        nodes = nodes.server;
        owner = "atticd";
      };
    };
  };

  garage = config.lib.forService "attic" {
    keys.attic.locksmith = {
      nodes = config.services.attic.nodes.server;
      owner = "atticd";
      format = "aws";
    };
    buckets.attic = {
      allow.attic = [ "read" "write" ];
    };
  };

  dns.records = let
    serverAddrs = map
      (node: depot.hours.${node}.interfaces.primary.addrPublic)
      config.services.attic.nodes.server;
  in config.lib.forService "attic" {
    cache.target = serverAddrs;
  };

  ways = config.lib.forService "attic" {
    cache-api = {
      consulService = "atticd";
      extras.extraConfig = ''
        client_max_body_size 4G;
      '';
    };
  };
}
@@ -1,4 +1,4 @@
{ config, depot, ... }:
{ config, depot, tools, ... }:

let
  mkNarServe = NAR_CACHE_URL: PORT: {
@@ -17,6 +17,6 @@
    nar-serve-nixos-org.protocol = "http";
  };

  systemd.services.nar-serve-self = mkNarServe "https://cache.${depot.lib.meta.domain}" config.links.nar-serve-self.portStr;
  systemd.services.nar-serve-self = mkNarServe "https://cache.${tools.meta.domain}" config.links.nar-serve-self.portStr;
  systemd.services.nar-serve-nixos-org = mkNarServe "https://cache.nixos.org" config.links.nar-serve-nixos-org.portStr;
}
@@ -1,11 +1,7 @@
{ cluster, config, depot, lib, ... }:
{ config, depot, lib, tools, ... }:

let
  inherit (cluster.config.services.attic) secrets;

  link = cluster.config.hostLinks.${config.networking.hostName}.attic;

  isMonolith = lib.elem config.networking.hostName cluster.config.services.attic.nodes.monolith;
  dataDir = "/srv/storage/private/attic";
in

{
@@ -13,39 +9,36 @@ in
    depot.inputs.attic.nixosModules.atticd
  ];

  services.locksmith.waitForSecrets.atticd = [ "garage-attic" ];
  ascensions.attic-standalone = {
    requiredBy = [ "attic.service" ];
    before = [ "attic.service" ];
    incantations = i: [ ];
  };

  age.secrets.atticServerToken.file = ./attic-server-token.age;

  links.atticServer.protocol = "http";

  services.atticd = {
    enable = true;
    package = depot.inputs.attic.packages.attic-server;

    credentialsFile = secrets.serverToken.path;
    mode = if isMonolith then "monolithic" else "api-server";
    credentialsFile = config.age.secrets.atticServerToken.path;

    settings = {
      listen = link.tuple;
      listen = config.links.atticServer.tuple;

      chunking = {
        nar-size-threshold = 0;
        min-size = 0;
        avg-size = 0;
        max-size = 0;
        nar-size-threshold = 512 * 1024;
        min-size = 64 * 1024;
        avg-size = 512 * 1024;
        max-size = 1024 * 1024;
      };

      compression.type = "none";

      database.url = "postgresql://attic@${cluster.config.links.patroni-pg-access.tuple}/attic";
      database.url = "sqlite://${dataDir}/server.db?mode=rwc";

      storage = {
        type = "s3";
        region = "us-east-1";
        endpoint = cluster.config.links.garageS3.url;
        bucket = "attic";
      };

      garbage-collection = {
        interval = "2 weeks";
        default-retention-period = "3 months";
        type = "local";
        path = "${dataDir}/chunks";
      };
    };
  };
@@ -54,43 +47,20 @@ in
    users.atticd = {
      isSystemUser = true;
      group = "atticd";
      home = "/var/lib/atticd";
      home = dataDir;
      createHome = true;
    };
    groups.atticd = {};
  };

  systemd.services.atticd = {
    after = [ "postgresql.service" ];
    distributed = lib.mkIf isMonolith {
      enable = true;
      registerService = "atticd";
    };
    serviceConfig = {
      DynamicUser = lib.mkForce false;
      RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" "AF_NETLINK" ];
    };
    environment = {
      AWS_SHARED_CREDENTIALS_FILE = "/run/locksmith/garage-attic";
      PGPASSFILE = secrets.dbCredentials.path;
    };
  systemd.services.atticd.serviceConfig = {
    DynamicUser = lib.mkForce false;
    ReadWritePaths = [ dataDir ];
  };

  consul.services.atticd = {
    mode = if isMonolith then "manual" else "direct";
    definition = {
      name = "atticd";
      id = "atticd-${config.services.atticd.mode}";
      address = link.ipv4;
      inherit (link) port;
      checks = [
        {
          name = "Attic Server";
          id = "service:atticd:backend";
          interval = "5s";
          http = link.url;
        }
      ];
    };
  services.nginx.virtualHosts."cache-api.${tools.meta.domain}" = tools.nginx.vhosts.proxy config.links.atticServer.url // {
    extraConfig = ''
      client_max_body_size 4G;
    '';
  };
}
@@ -1,10 +0,0 @@
{ depot, ... }:

{
  services.bitwarden = {
    nodes.host = [ "VEGAS" ];
    nixos.host = ./host.nix;
  };

  dns.records.keychain.target = [ depot.hours.VEGAS.interfaces.primary.addrPublic ];
}
@@ -1,9 +1,10 @@
{ cluster, depot, ... }:
{ config, ... }:

{
  age.secrets.cachixDeployToken.file = ./credentials/${config.networking.hostName}.age;

  services.cachix-agent = {
    enable = true;
    credentialsFile = cluster.config.services.cachix-deploy-agent.secrets.token.path;
    package = depot.packages.cachix;
    credentialsFile = config.age.secrets.cachixDeployToken.path;
  };
}
@@ -1,10 +1,6 @@
{
  services.cachix-deploy-agent = { config, ... }: {
    nodes.agent = [ "checkmate" "grail" "prophet" "VEGAS" "thunderskin" ];
  services.cachix-deploy-agent = {
    nodes.agent = [ "checkmate" "prophet" "VEGAS" "thunderskin" ];
    nixos.agent = ./agent.nix;
    secrets.token = {
      nodes = config.nodes.agent;
      shared = false;
    };
  };
}
@@ -1,12 +0,0 @@
{ depot, ... }:

{
  dns.records = let
    cdnShieldAddr = [ depot.hours.VEGAS.interfaces.primary.addrPublic ];
  in {
    "fonts-googleapis-com.cdn-shield".target = cdnShieldAddr;
    "fonts-gstatic-com.cdn-shield".target = cdnShieldAddr;
    "cdnjs-cloudflare-com.cdn-shield".target = cdnShieldAddr;
    "wttr-in.cdn-shield".target = cdnShieldAddr;
  };
}
@@ -1,7 +1,7 @@
{
  services.certificates = {
    nodes = {
      internal-wildcard = [ "checkmate" "grail" "thunderskin" "VEGAS" "prophet" ];
      internal-wildcard = [ "checkmate" "thunderskin" "VEGAS" "prophet" ];
    };
    nixos = {
      internal-wildcard = [
@@ -1,7 +1,7 @@
{ config, lib, pkgs, depot, ... }:
{ config, lib, pkgs, tools, ... }:

let
  inherit (depot.lib.meta) domain;
  inherit (tools.meta) domain;

  extraGroups = [ "nginx" ]
    ++ lib.optional config.services.kanidm.enableServer "kanidm";
@@ -11,12 +11,12 @@ in
  security.acme.certs."internal.${domain}" = {
    domain = "*.internal.${domain}";
    extraDomainNames = [ "*.internal.${domain}" ];
    dnsProvider = "exec";
    dnsProvider = "pdns";
    group = "nginx";
    postRun = ''
      ${pkgs.acl}/bin/setfacl -Rb .
      ${pkgs.acl}/bin/setfacl -Rb out/
      ${lib.concatStringsSep "\n" (
        map (group: "${pkgs.acl}/bin/setfacl -Rm g:${group}:rX .") extraGroups
        map (group: "${pkgs.acl}/bin/setfacl -Rm g:${group}:rX out/") extraGroups
      )}
    '';
  };
@@ -1,11 +0,0 @@
{ config, ... }:

{
  services.chant = {
    nodes.listener = config.services.consul.nodes.agent;
    nixos.listener = [
      ./listener.nix
    ];
    simulacrum.deps = [ "consul" ];
  };
}
@@ -1,82 +0,0 @@
{ config, lib, pkgs, ... }:

let
  consul = config.links.consulAgent;

  validTargets = lib.pipe config.systemd.services [
    (lib.filterAttrs (name: value: value.chant.enable))
    lib.attrNames
  ];

  validTargetsJson = pkgs.writeText "chant-targets.json" (builtins.toJSON validTargets);

  eventHandler = pkgs.writers.writePython3 "chant-listener-event-handler" {
    flakeIgnore = [ "E501" ];
  } ''
    import json
    import sys
    import os
    import subprocess
    import base64

    validTargets = set()
    with open("${validTargetsJson}", "r") as f:
        validTargets = set(json.load(f))

    events = json.load(sys.stdin)

    cacheDir = os.getenv("CACHE_DIRECTORY", "/var/cache/chant")

    indexFile = f"{cacheDir}/index"

    oldIndex = "old-index"
    if os.path.isfile(indexFile):
        with open(indexFile, "r") as f:
            oldIndex = f.readline()

    newIndex = os.getenv("CONSUL_INDEX", "no-index")

    if oldIndex != newIndex:
        triggers = set()
        for event in events:
            if event["Name"].startswith("chant:"):
                target = event["Name"].removeprefix("chant:")
                if target not in validTargets:
                    print(f"Skipping invalid target: {target}")
                    continue
                with open(f"/run/chant/{target}", "wb") as f:
                    if event["Payload"] is not None:
                        f.write(base64.b64decode(event["Payload"]))
                triggers.add(target)

        for trigger in triggers:
            subprocess.run(["${config.systemd.package}/bin/systemctl", "start", f"{trigger}.service"])

        with open(indexFile, "w") as f:
            f.write(newIndex)
  '';
in
{
  systemd.services.chant-listener = {
    description = "Chant Listener";
    wantedBy = [ "multi-user.target" ];
    requires = [ "consul-ready.service" ];
    after = [ "consul-ready.service" ];
    serviceConfig = {
      ExecStart = "${config.services.consul.package}/bin/consul watch --type=event ${eventHandler}";

      RuntimeDirectory = "chant";
      RuntimeDirectoryMode = "0700";
      CacheDirectory = "chant";
      CacheDirectoryMode = "0700";

      RestartSec = 60;
      Restart = "always";
      IPAddressDeny = [ "any" ];
      IPAddressAllow = [ consul.ipv4 ];
    };
    environment = {
      CONSUL_HTTP_ADDR = consul.tuple;
    };
  };
}
@@ -1,7 +1,7 @@
{ config, cluster, depot, ... }:
{ config, cluster, lib, tools, ... }:

let
  inherit (depot.lib.meta) domain;
  inherit (tools.meta) domain;
  inherit (config.networking) hostName;
  inherit (cluster.config) hostLinks;
  cfg = cluster.config.services.consul;
@@ -10,12 +10,9 @@ let
in

{
  links.consulAgent.protocol = "http";

  services.consul = {
    enable = true;
    webUi = true;
    package = depot.packages.consul;
    extraConfig = {
      datacenter = "eu-central";
      domain = "sd-magic.${domain}.";
@@ -24,16 +21,12 @@ in
      node_name = config.networking.hostName;
      bind_addr = hl.ipv4;
      ports.serf_lan = hl.port;
      retry_join = map (hostName: hostLinks.${hostName}.consul.tuple) (cfg.otherNodes.agent hostName);
      bootstrap_expect = builtins.length cfg.nodes.agent;
      addresses.http = config.links.consulAgent.ipv4;
      ports.http = config.links.consulAgent.port;
      retry_join = map (hostName: hostLinks.${hostName}.consul.tuple) cfg.otherNodes.agent;
    };
  };

  services.grafana-agent.settings.integrations.consul_exporter = {
    enabled = true;
    instance = hostName;
    server = config.links.consulAgent.url;
  };
}
@@ -11,23 +11,10 @@
  };
  });
  services.consul = {
    nodes = {
      agent = [ "checkmate" "grail" "thunderskin" "VEGAS" "prophet" ];
      ready = config.services.consul.nodes.agent;
    };
    nixos = {
      agent = [
        ./agent.nix
        ./remote-api.nix
      ];
      ready = ./ready.nix;
    };
    simulacrum = {
      enable = true;
      deps = [ "wireguard" ];
      settings = ./test.nix;
    };
    nodes.agent = [ "checkmate" "thunderskin" "VEGAS" "prophet" ];
    nixos.agent = [
      ./agent.nix
      ./remote-api.nix
    ];
  };

  dns.records."consul-remote.internal".consulService = "consul-remote";
}
@@ -1,54 +0,0 @@
{ config, lib, pkgs, ... }:

let
  consulReady = pkgs.writers.writeHaskellBin "consul-ready" {
    libraries = with pkgs.haskellPackages; [ aeson http-conduit watchdog ];
  } ''
    {-# LANGUAGE OverloadedStrings #-}
    import Control.Watchdog
    import Control.Exception
    import System.IO
    import Network.HTTP.Simple
    import Data.Aeson

    flushLogger :: WatchdogLogger String
    flushLogger taskErr delay = do
      defaultLogger taskErr delay
      hFlush stdout

    data ConsulHealth = ConsulHealth {
      healthy :: Bool
    }

    instance FromJSON ConsulHealth where
      parseJSON (Object v) = ConsulHealth <$> (v .: "Healthy")

    handleException ex = case ex of
      (SomeException _) -> return $ Left "Consul is not active"

    main :: IO ()
    main = watchdog $ do
      setInitialDelay 300_000
      setMaximumDelay 30_000_000
      setLoggingAction flushLogger
      watch $ handle handleException $ do
        res <- httpJSON "${config.links.consulAgent.url}/v1/operator/autopilot/health"
        case getResponseBody res of
          ConsulHealth True -> return $ Right ()
          ConsulHealth False -> return $ Left "Consul is unhealthy"
  '';
in

{
  systemd.services.consul-ready = {
    description = "Wait for Consul";
    requires = lib.mkIf config.services.consul.enable [ "consul.service" ];
    after = lib.mkIf config.services.consul.enable [ "consul.service" ];
    serviceConfig = {
      ExecStart = lib.getExe consulReady;
      DynamicUser = true;
      TimeoutStartSec = "5m";
      Type = "oneshot";
    };
  };
}
@@ -1,15 +1,14 @@
{ config, depot, lib, ... }:
{ config, cluster, depot, lib, tools, ... }:

let
  inherit (depot.lib.meta) domain;
  inherit (tools.meta) domain;
  inherit (depot.reflection) hyprspace;
  frontendDomain = "consul-remote.internal.${domain}";

  inherit (config.reflection.interfaces.vstub) addr;
in

{
  services.nginx.virtualHosts.${frontendDomain} = depot.lib.nginx.vhosts.proxy config.links.consulAgent.url // {
    listenAddresses = lib.singleton addr;
  services.nginx.virtualHosts.${frontendDomain} = tools.nginx.vhosts.proxy "http://127.0.0.1:8500" // {
    listenAddresses = lib.singleton hyprspace.addr;
    enableACME = false;
    useACMEHost = "internal.${domain}";
  };
@@ -19,13 +18,13 @@ in
    mode = "external";
    definition = {
      name = "consul-remote";
      address = addr;
      address = hyprspace.addr;
      port = 443;
      checks = [
        {
          name = "Frontend";
          id = "service:consul-remote:frontend";
          http = "https://${addr}/v1/status/leader";
          http = "https://${hyprspace.addr}/v1/status/leader";
          tls_server_name = frontendDomain;
          header.Host = lib.singleton frontendDomain;
          interval = "60s";
@@ -33,7 +32,7 @@ in
        {
          name = "Backend";
          id = "service:consul-remote:backend";
          http = "${config.links.consulAgent.url}/v1/status/leader";
          http = "http://127.0.0.1:8500/v1/status/leader";
          interval = "30s";
        }
      ];
@@ -1,24 +0,0 @@
{ lib, ... }:

{
  defaults.options.services.locksmith = lib.mkSinkUndeclaredOptions { };

  testScript = ''
    import json

    start_all()

    with subtest("should form cluster"):
        nodes = [ n for n in machines if n != nowhere ]
        for machine in nodes:
            machine.succeed("systemctl start consul-ready.service")
        for machine in nodes:
            consulConfig = json.loads(machine.succeed("cat /etc/consul.json"))
            addr = consulConfig["addresses"]["http"]
            port = consulConfig["ports"]["http"]
            setEnv = f"CONSUL_HTTP_ADDR={addr}:{port}"
            memberList = machine.succeed(f"{setEnv} consul members --status=alive")
            for machine2 in nodes:
                assert machine2.name in memberList
  '';
}
@@ -1,7 +0,0 @@
{
  garage = {
    buckets.content-delivery.web.enable = true;
  };

  ways.cdn.bucket = "content-delivery";
}
@@ -1,21 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A 9n5IirzhNBIPRj9Gir+/yQhFH830sgfezsqY5Ulzz3o
VItDDdgfTFcvSq/QpIqTHnfr1VHqfI6nPz+WWKYQjHw
-> ssh-ed25519 5/zT0w MfBZrd8wJjoProwdPqsS9CZ9aYNTXgrYviFDwuchQVM
8WKPYO+i1ZSkPYDrHVJ5Pclj2hEzqwAtf31Agzei444
-> ssh-ed25519 TCgorQ 3QYtSx/2eiFp54W60F8FlERfHx+DUfnXXfugiXNPECg
pBx3If3qihD//Aq8hDWCt+U1tiWoCLUDcg/RyVCD0D0
-> ssh-ed25519 P/nEqQ NImm+vKuL50G2kdD2svmfkwsovmryCSyKyhnZ0duDDo
U0PTKHiCj4SxomnJdgubo+3sStSE+YwvCnrRl7aAS1Q
-> ssh-ed25519 FfIUuQ SRgJoBIoW71SiXuHqlnGqRG5AKUrnQy0ecwznGEGTHA
a0IS3hjMln1tWEjo30A6gYtaV7TJSY4SZDarhahMoLk
-> ssh-ed25519 d3WGuA 0qVNcrYe53Wo46zFJs6UZtX0dq7TUy72WGdGpLqB3yo
jTHE9PfhRw5lbBlfznS+ThkSsab3ioearf91xyPBfdQ
-> ssh-ed25519 YIaSKQ CCcBlAOms2aSkB6pws6tN+4Gf551idI9Zq0rokd0P1c
/3oFp6hf+jggurbcuu0cXdDL8lr6m/LTHEeNgiJt2gg
-> K&wn-grease ,Ewz Jc+dQQRp NU~.
FvDOuTGNaLuCfDelsrRbthjuJT9fBZAQ+kz+7Stoc2wciXV1YpCcOYDHSF38OwRF
X/pyjVudbJKS0Mphda6phw
--- 3JFwCzeJsIgRkTpmy9MAvQ64BCZoa98kNKOuT57WI6Y
[binary ciphertext not shown]
cluster/services/dns/admin.nix (Normal file, 109 lines)
@@ -0,0 +1,109 @@
{ cluster, config, lib, pkgs, tools, ... }:

let
  inherit (tools.meta) domain;
  inherit (config.links) pdnsAdmin;
  inherit (cluster.config) vars;

  pdns-api = cluster.config.links.powerdns-api;

  dataDirUI = "/srv/storage/private/powerdns-admin";

  translateConfig = withQuotes: cfg: let
    pythonValue = val: if lib.isString val then "'${val}'"
      else if lib.isAttrs val && val ? file then "[(f.read().strip('\\n'), f.close()) for f in [open('${val.file}')]][0][0]"
      else if lib.isAttrs val && val ? env then "__import__('os').getenv('${val.env}')"
      else if lib.isBool val then (if val then "True" else "False")
      else if lib.isInt val then toString val
      else throw "translateConfig: unsupported value type";

    quote = str: if withQuotes then pythonValue str else str;

    configList = lib.mapAttrsToList (n: v: "${n}=${quote v}") cfg;
  in lib.concatStringsSep "\n" configList;

in {
  age.secrets = {
    pdns-admin-oidc-secrets = {
      file = ./pdns-admin-oidc-secrets.age;
      mode = "0400";
    };
    pdns-admin-salt = {
      file = ./pdns-admin-salt.age;
      mode = "0400";
      owner = "powerdnsadmin";
      group = "powerdnsadmin";
    };
    pdns-admin-secret = {
      file = ./pdns-admin-secret.age;
      mode = "0400";
      owner = "powerdnsadmin";
      group = "powerdnsadmin";
    };
    pdns-api-key = vars.pdns-api-key-secret // { owner = "powerdnsadmin"; };
  };

  links.pdnsAdmin.protocol = "http";

  networking.firewall = {
    allowedTCPPorts = [ 53 ];
    allowedUDPPorts = [ 53 ];
  };

  systemd.tmpfiles.rules = [
    "d '${dataDirUI}' 0700 powerdnsadmin powerdnsadmin - -"
  ];

  services.powerdns = {
    enable = true;
    extraConfig = translateConfig false {
      api = "yes";
      webserver-allow-from = "127.0.0.1, ${vars.meshNet.cidr}";
      webserver-address = pdns-api.ipv4;
      webserver-port = pdns-api.portStr;
      api-key = "$scrypt$ln=14,p=1,r=8$ZRgztsniH1y+F7P/RkXq/w==$QTil5kbJPzygpeQRI2jgo5vK6fGol9YS/NVR95cmWRs=";
    };
  };

  services.powerdns-admin = {
    enable = true;
    secretKeyFile = config.age.secrets.pdns-admin-secret.path;
    saltFile = config.age.secrets.pdns-admin-salt.path;
    extraArgs = [ "-b" pdnsAdmin.tuple ];
    config = translateConfig true {
      SQLALCHEMY_DATABASE_URI = "sqlite:///${dataDirUI}/pda.db";
      PDNS_VERSION = pkgs.pdns.version;
      PDNS_API_URL = pdns-api.url;
      PDNS_API_KEY.file = config.age.secrets.pdns-api-key.path;

      SIGNUP_ENABLED = false;
      OIDC_OAUTH_ENABLED = true;
      OIDC_OAUTH_KEY = "net.privatevoid.dnsadmin1";
      OIDC_OAUTH_SECRET.env = "OIDC_OAUTH_SECRET";
      OIDC_OAUTH_SCOPE = "openid profile email roles";

      OIDC_OAUTH_METADATA_URL = "https://login.${domain}/auth/realms/master/.well-known/openid-configuration";
    };
  };

  systemd.services.powerdns-admin.serviceConfig = {
    BindPaths = [
      dataDirUI
      config.age.secrets.pdns-api-key.path
    ];
    TimeoutStartSec = "300s";
    EnvironmentFile = config.age.secrets.pdns-admin-oidc-secrets.path;
  };

  services.nginx.virtualHosts."dnsadmin.${domain}" = lib.recursiveUpdate
    (tools.nginx.vhosts.proxy pdnsAdmin.url)
    # backend sends really big headers for some reason
    # increase buffer size accordingly
    {
      locations."/".extraConfig = ''
        proxy_busy_buffers_size 512k;
        proxy_buffers 4 512k;
        proxy_buffer_size 256k;
      '';
    };
}
@ -1,50 +1,30 @@
|
|||
{ cluster, config, depot, lib, pkgs, ... }:
|
||||
{ cluster, config, depot, lib, tools, ... }:
|
||||
|
||||
let
|
||||
inherit (config.reflection) interfaces;
|
||||
inherit (depot.lib.meta) domain;
|
||||
inherit (depot.reflection) interfaces;
|
||||
inherit (tools.meta) domain;
|
||||
inherit (config.networking) hostName;
|
||||
|
||||
link = cluster.config.hostLinks.${hostName}.dnsAuthoritative;
|
||||
patroni = cluster.config.links.patroni-pg-access;
|
||||
inherit (cluster.config.hostLinks.${hostName}) acmeDnsApi;
|
||||
|
||||
otherDnsServers = lib.pipe (cluster.config.services.dns.otherNodes.authoritative hostName) [
|
||||
otherDnsServers = lib.pipe (with cluster.config.services.dns.otherNodes; master ++ slave) [
|
||||
(map (node: cluster.config.hostLinks.${node}.dnsAuthoritative.tuple))
|
||||
(lib.concatStringsSep " ")
|
||||
];
|
||||
|
||||
recordsList = lib.mapAttrsToList (lib.const lib.id) cluster.config.dns.records;
|
||||
recordsPartitioned = lib.partition (record: record.rewrite.target == null) recordsList;
|
||||
|
||||
staticRecords = let
|
||||
escape = type: {
|
||||
TXT = builtins.toJSON;
|
||||
}.${type} or lib.id;
|
||||
|
||||
recordName = record: {
|
||||
"@" = "${record.root}.";
|
||||
}.${record.name} or "${record.name}.${record.root}.";
|
||||
in lib.flatten (
|
||||
map (record: map (target: "${recordName record} ${record.type} ${escape record.type target}") record.target) recordsPartitioned.right
|
||||
);
|
||||
|
||||
rewrites = map (record: let
|
||||
maybeEscapeRegex = str: if record.rewrite.type == "regex" then "${lib.escapeRegex str}$" else str;
|
||||
in "rewrite stop name ${record.rewrite.type} ${record.name}${maybeEscapeRegex ".${record.root}."} ${record.rewrite.target}. answer auto") recordsPartitioned.wrong;
|
||||
|
||||
rewriteConf = pkgs.writeText "coredns-rewrites.conf" ''
|
||||
rewrite stop type DS DS
|
||||
rewrite stop type NS NS
|
||||
rewrite stop type SOA SOA
|
||||
${lib.concatStringsSep "\n" rewrites}
|
||||
'';
|
||||
translateConfig = cfg: let
|
||||
configList = lib.mapAttrsToList (n: v: "${n}=${v}") cfg;
|
||||
in lib.concatStringsSep "\n" configList;
|
||||
in {
|
||||
links.localAuthoritativeDNS = {};
|
||||
|
||||
age.secrets = {
|
||||
acmeDnsDirectKey = {
|
||||
file = ./acme-dns-direct-key.age;
|
||||
pdns-db-credentials = {
|
||||
file = ./pdns-db-credentials.age;
|
||||
mode = "0400";
|
||||
owner = "pdns";
|
||||
group = "pdns";
|
||||
};
|
||||
};
|
||||
|
||||
|
@@ -53,37 +33,23 @@ in {
    allowedUDPPorts = [ 53 ];
  };

  services.acme-dns = {
  services.powerdns = {
    enable = true;
    package = depot.packages.acme-dns;
    settings = {
      general = {
        listen = config.links.localAuthoritativeDNS.tuple;
        inherit domain;
        nsadmin = "hostmaster.${domain}";
        nsname = "eu1.ns.${domain}";
        records = staticRecords;
      };
      api = {
        ip = acmeDnsApi.ipv4;
        inherit (acmeDnsApi) port;
      };
      database = {
        engine = "postgres";
        connection = "postgres://acmedns@${patroni.tuple}/acmedns?sslmode=disable";
      };
    extraConfig = translateConfig {
      launch = "gpgsql";
      local-address = config.links.localAuthoritativeDNS.tuple;
      gpgsql-host = patroni.ipv4;
      gpgsql-port = patroni.portStr;
      gpgsql-dbname = "powerdns";
      gpgsql-user = "powerdns";
      gpgsql-extra-connection-parameters = "passfile=${config.age.secrets.pdns-db-credentials.path}";
      version-string = "Private Void DNS";
      enable-lua-records = "yes";
      expand-alias = "yes";
      resolver = "127.0.0.1:8600";
    };
  };

  services.locksmith.waitForSecrets.acme-dns = [
    "patroni-acmedns"
  ];

  systemd.services.acme-dns.serviceConfig.EnvironmentFile = with config.age.secrets; [
    "/run/locksmith/patroni-acmedns"
    acmeDnsDirectKey.path
  ];

  services.coredns = {
    enable = true;
    config = ''
@@ -94,14 +60,10 @@ in {
        success 4000 86400
        denial 0
        prefetch 3
        serve_stale 86400s verify
      }
      template ANY DS {
        rcode NXDOMAIN
        serve_stale 86400s
      }
      forward service.eu-central.sd-magic.${domain} 127.0.0.1:8600
      forward addr.eu-central.sd-magic.${domain} 127.0.0.1:8600
      import ${rewriteConf}
      forward . ${config.links.localAuthoritativeDNS.tuple} ${otherDnsServers} {
        policy sequential
      }
@@ -110,34 +72,18 @@ in {
  };

  systemd.services.coredns = {
    after = [ "acme-dns.service" ];
    serviceConfig = {
      MemoryMax = "200M";
      MemorySwapMax = "50M";
      CPUQuota = "25%";
    };
    after = [ "pdns.service" ];
  };

  consul.services = {
    authoritative-dns = {
      unit = "acme-dns";
      definition = {
        name = "authoritative-dns-backend";
        address = config.links.localAuthoritativeDNS.ipv4;
        port = config.links.localAuthoritativeDNS.port;
        checks = lib.singleton {
          interval = "60s";
          tcp = config.links.localAuthoritativeDNS.tuple;
        };
      };
    };
    acme-dns.definition = {
      name = "acme-dns";
      address = acmeDnsApi.ipv4;
      port = acmeDnsApi.port;
  consul.services.pdns = {
    mode = "external";
    definition = {
      name = "authoritative-dns-backend";
      address = config.links.localAuthoritativeDNS.ipv4;
      port = config.links.localAuthoritativeDNS.port;
      checks = lib.singleton {
        interval = "60s";
        http = "${acmeDnsApi.url}/health";
        tcp = config.links.localAuthoritativeDNS.tuple;
      };
    };
  };
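For reference, the translateConfig call in the hunks above renders the gpgsql attrset into a flat pdns.conf-style fragment roughly like the following. The address, port and passfile values are hypothetical placeholders; keys come out in sorted attribute-name order:

enable-lua-records=yes
expand-alias=yes
gpgsql-dbname=powerdns
gpgsql-extra-connection-parameters=passfile=/run/agenix/pdns-db-credentials
gpgsql-host=10.10.0.5
gpgsql-port=5432
gpgsql-user=powerdns
launch=gpgsql
local-address=10.10.0.2:53
resolver=127.0.0.1:8600
version-string=Private Void DNS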
@@ -1,21 +1,23 @@
{ cluster, config, depot, lib, ... }:
{ cluster, config, depot, lib, pkgs, tools, ... }:

let
  inherit (config.reflection) interfaces;
  inherit (depot.lib.meta) domain;
  inherit (depot.reflection) interfaces hyprspace;
  inherit (tools.meta) domain;
  inherit (config.links) localRecursor;
  inherit (config.networking) hostName;

  link = cluster.config.hostLinks.${hostName}.dnsResolver;
  backend = cluster.config.hostLinks.${hostName}.dnsResolverBackend;

  otherRecursors = lib.pipe (cluster.config.services.dns.otherNodes.coredns hostName) [
  otherRecursors = lib.pipe (cluster.config.services.dns.otherNodes.coredns) [
    (map (node: cluster.config.hostLinks.${node}.dnsResolverBackend.tuple))
    (lib.concatStringsSep " ")
  ];

  authoritativeServers = map
    (node: cluster.config.hostLinks.${node}.dnsAuthoritative.tuple)
    cluster.config.services.dns.nodes.authoritative;
  authoritativeServers = lib.pipe (with cluster.config.services.dns.nodes; master ++ slave) [
    (map (node: cluster.config.hostLinks.${node}.dnsAuthoritative.tuple))
    (lib.concatStringsSep ";")
  ];

  inherit (depot.packages) stevenblack-hosts;
  dot = config.security.acme.certs."securedns.${domain}";
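lib.pipe threads a value through a list of functions, so the new authoritativeServers collapses the master and slave node lists into one ;-separated string for the recursor's forward-zones syntax. A small illustration with hypothetical nodes and tuples:

let
  lib = (import <nixpkgs> {}).lib;
  tuples = { VEGAS = "203.0.113.1:53"; checkmate = "203.0.113.2:53"; };  # hypothetical
in lib.pipe [ "VEGAS" "checkmate" ] [
  (map (node: tuples.${node}))  # look up each node's host:port tuple
  (lib.concatStringsSep ";")    # forward-zones syntax wants ";" separators
]
# => "203.0.113.1:53;203.0.113.2:53"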
@@ -35,17 +37,14 @@ in
    ];
    before = [ "acme-securedns.${domain}.service" ];
    wants = [ "acme-finished-securedns.${domain}.target" ];
    serviceConfig = {
      LoadCredential = [
        "dot-cert.pem:${dot.directory}/fullchain.pem"
        "dot-key.pem:${dot.directory}/key.pem"
      ];
      ExecReload = lib.mkForce [];
    };
    serviceConfig.LoadCredential = [
      "dot-cert.pem:${dot.directory}/fullchain.pem"
      "dot-key.pem:${dot.directory}/key.pem"
    ];
  };

  security.acme.certs."securedns.${domain}" = {
    dnsProvider = "exec";
    dnsProvider = "pdns";
    # using a different ACME provider because Android Private DNS is fucky
    server = "https://api.buypass.com/acme/directory";
    reloadServices = [
@@ -56,29 +55,29 @@ in
  services.coredns = {
    enable = true;
    config = ''
      (localresolver) {
        hosts ${stevenblack-hosts} {
          fallthrough
        }
        chaos "Private Void DNS" info@privatevoid.net
        forward hyprspace. 127.43.104.80:11355
        forward ${domain}. ${lib.concatStringsSep " " authoritativeServers} {
          policy random
        }
        forward . ${backend.tuple} ${otherRecursors} {
          policy sequential
        }
      }
      .:${link.portStr} {
        ${lib.optionalString (interfaces ? vstub) "bind ${interfaces.vstub.addr}"}
        bind 127.0.0.1
        bind ${link.ipv4}
        import localresolver
        ${lib.optionalString hyprspace.enable "bind ${hyprspace.addr}"}
        hosts ${stevenblack-hosts} {
          fallthrough
        }
        chaos "Private Void DNS" info@privatevoid.net
        forward . ${backend.tuple} ${otherRecursors} {
          policy sequential
        }
      }
      tls://.:853 {
        bind ${interfaces.primary.addr}
        tls {$CREDENTIALS_DIRECTORY}/dot-cert.pem {$CREDENTIALS_DIRECTORY}/dot-key.pem
        import localresolver
        hosts ${stevenblack-hosts} {
          fallthrough
        }
        chaos "Private Void DNS" info@privatevoid.net
        forward . ${backend.tuple} ${otherRecursors} {
          policy sequential
        }
      }
    '';
  };
@@ -88,7 +87,7 @@ in
    dnssecValidation = "process";
    forwardZones = {
      # optimize queries against our own domain
      "${domain}" = lib.concatStringsSep ";" authoritativeServers;
      "${domain}" = authoritativeServers;
    };
    dns = {
      inherit (backend) port;
@@ -1,37 +1,30 @@
{ config, depot, lib, ... }:

let
  inherit (depot) hours;
  inherit (depot.config) hours;
  cfg = config.services.dns;
in
{
  imports = [
    ./options.nix
    ./nodes.nix
    ./ns-records.nix
  ];

  vars.pdns-api-key-secret = {
    file = ./pdns-api-key.age;
    mode = "0400";
  };
  links = {
    dnsResolver = {
      ipv4 = hours.VEGAS.interfaces.vstub.addr;
      port = 53;
    };
    acmeDnsApi = {
      hostname = "acme-dns-challenge.internal.${depot.lib.meta.domain}";
    powerdns-api = {
      ipv4 = config.vars.mesh.VEGAS.meshIp;
      protocol = "http";
    };
  };
  hostLinks = lib.mkMerge [
    (lib.genAttrs cfg.nodes.authoritative (node: {
    (lib.genAttrs (with cfg.nodes; master ++ slave) (node: {
      dnsAuthoritative = {
        ipv4 = hours.${node}.interfaces.primary.addrPublic;
        port = 53;
      };
      acmeDnsApi = {
        ipv4 = config.vars.mesh.${node}.meshIp;
        inherit (config.links.acmeDnsApi) port;
        protocol = "http";
      };
    }))
    (lib.genAttrs cfg.nodes.coredns (node: {
      dnsResolver = {
@@ -47,34 +40,19 @@ in
  ];
  services.dns = {
    nodes = {
      authoritative = [ "VEGAS" "checkmate" "prophet" ];
      master = [ "VEGAS" ];
      slave = [ "checkmate" "prophet" ];
      coredns = [ "checkmate" "VEGAS" ];
      client = [ "checkmate" "grail" "thunderskin" "VEGAS" "prophet" ];
      client = [ "checkmate" "thunderskin" "VEGAS" "prophet" ];
    };
    nixos = {
      authoritative = ./authoritative.nix;
      master = [
        ./authoritative.nix
        ./admin.nix
      ];
      slave = ./authoritative.nix;
      coredns = ./coredns.nix;
      client = ./client.nix;
    };
    simulacrum = {
      enable = true;
      deps = [ "consul" "acme-client" "patroni" ];
      settings = ./test.nix;
    };
  };

  patroni = {
    databases.acmedns = {};
    users.acmedns = {
      locksmith = {
        nodes = config.services.dns.nodes.authoritative;
        format = "envFile";
      };
    };
  };

  dns.records = {
    securedns.consulService = "securedns";
    "acme-dns-challenge.internal".consulService = "acme-dns";
  };
}
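The hostLinks value in this file merges one attrset per node class; lib.genAttrs builds an attrset from a list of names, and lib.mkMerge defers the merge to the module system. A reduced sketch with hypothetical node names:

let
  lib = (import <nixpkgs> {}).lib;
in lib.genAttrs [ "VEGAS" "checkmate" ] (node: {
  dnsAuthoritative.port = 53;  # one link definition per node
})
# => { VEGAS = { dnsAuthoritative = { port = 53; }; };
#      checkmate = { dnsAuthoritative = { port = 53; }; }; }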
@@ -1,11 +0,0 @@
{ depot, lib, ... }:

{
  dns.records = lib.mapAttrs' (name: hour: {
    name = lib.toLower "${name}.${hour.enterprise.subdomain}";
    value = {
      type = "A";
      target = [ hour.interfaces.primary.addrPublic ];
    };
  }) depot.gods.fromLight;
}
@@ -1,26 +0,0 @@
{ config, depot, lib, ... }:

let
  cfg = config.services.dns;

  nsNodes = lib.imap1 (idx: node: {
    name = "eu${toString idx}.ns";
    value = {
      type = "A";
      target = [ depot.hours.${node}.interfaces.primary.addrPublic ];
    };
  }) cfg.nodes.authoritative;
in

{
  dns.records = lib.mkMerge [
    (lib.listToAttrs nsNodes)
    {
      NS = {
        name = "@";
        type = "NS";
        target = map (ns: "${ns.name}.${depot.lib.meta.domain}.") nsNodes;
      };
    }
  ];
}
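The deleted ns-records module derived its euN.ns names positionally with lib.imap1, the 1-indexed variant of map. A sketch of the shape it produced, with hypothetical addresses standing in for the per-node lookups:

let
  lib = (import <nixpkgs> {}).lib;
  nsNodes = lib.imap1 (idx: addr: {
    name = "eu${toString idx}.ns";  # eu1.ns, eu2.ns, ...
    value = { type = "A"; target = [ addr ]; };
  }) [ "203.0.113.1" "203.0.113.2" ];
in lib.listToAttrs nsNodes
# => { "eu1.ns" = { type = "A"; target = [ "203.0.113.1" ]; };
#      "eu2.ns" = { type = "A"; target = [ "203.0.113.2" ]; }; }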
@@ -1,61 +0,0 @@
{ depot, lib, ... }:

with lib;

let
  recordType = types.submodule ({ config, name, ... }: {
    options = {
      root = mkOption {
        type = types.str;
        default = depot.lib.meta.domain;
      };
      consulServicesRoot = mkOption {
        type = types.str;
        default = "service.eu-central.sd-magic.${depot.lib.meta.domain}";
      };
      name = mkOption {
        type = types.str;
        default = name;
      };

      type = mkOption {
        type = types.enum [ "A" "CNAME" "AAAA" "NS" "MX" "SOA" "TXT" ];
        default = "A";
      };
      target = mkOption {
        type = with types; listOf str;
      };
      ttl = mkOption {
        type = types.ints.unsigned;
        default = 86400;
      };

      consulService = mkOption {
        type = with types; nullOr str;
        default = null;
      };
      rewrite = {
        target = mkOption {
          type = with types; nullOr str;
          default = null;
        };
        type = mkOption {
          type = types.enum [ "exact" "substring" "prefix" "suffix" "regex" ];
          default = "exact";
        };
      };
    };
    config = {
      rewrite.target = mkIf (config.consulService != null) "${config.consulService}.${config.consulServicesRoot}";
    };
  });
in

{
  options.dns = {
    records = mkOption {
      type = with types; attrsOf recordType;
      default = {};
    };
  };
}
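With the record type above, consumers declare entries under dns.records; setting consulService alone is enough to populate rewrite.target through the submodule's config block. A hypothetical usage sketch:

{
  dns.records = {
    # plain A record: rewrite.target stays null, so it renders as a static record
    mail = { type = "A"; target = [ "203.0.113.10" ]; };
    # consul-backed record: rewrite.target is derived as
    # "securedns.service.eu-central.sd-magic.<domain>"
    securedns.consulService = "securedns";
  };
}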
BIN cluster/services/dns/pdns-admin-oidc-secrets.age Normal file
Binary file not shown.
11 cluster/services/dns/pdns-admin-salt.age Normal file
@@ -0,0 +1,11 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A d/YNanH/cHoFLPp8WcCXHh/LQLRwaUa95JiRLbgb8RI
UPEHpnHHTU6dGKi2MbApEspcpt1lFtFZ4XJjShL7OoE
-> ssh-ed25519 5/zT0w Rv9ZS5P2Eca3npPLR7yym/XTRSDfVmgRwH1pAGR79T8
4A/KXc2wxxokfDAwWYf0ZTUEzQ8ldkC+zRNZY3KjBTs
-> ssh-ed25519 d3WGuA 2R0kaVjuhU3wT9pjj214zkEaHYNSlMxf9Z+MfBssHwY
EU5LWk6xfohWM/3sAqYtUvFmRgIPxOLXHnlqbsQ3+ok
-> -|(-grease W=cc~ O2q5
FZzh/ZwDS2EqvVZ9NErmUwCMN72op1Qy
--- Ducan3ugRJC3dmWLr7+FKok+WmInOgOzW0ccYeqAFAQ
(binary payload not shown)
12 cluster/services/dns/pdns-admin-secret.age Normal file
@@ -0,0 +1,12 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A hUR+UdHnpazhANM8DKToI5Th3lv1aAuxZ1IQKvCOv34
PvsiSym8YdleDULLnWuTs1x08KO3EmAg/AAjulgrgqE
-> ssh-ed25519 5/zT0w qMXS2xLOLv/+l6brG11i+3FwHdrhlmxZBNtBiU9hu2g
BlFYPvH4mFJRMHTlHwnBdJb6QcugylwZuT5bgSKcQa0
-> ssh-ed25519 d3WGuA k2fRQ3+HyZP+bb/gkVKQqUmbITJLPm9tGp67DbRfiCs
RX9CACfYpYKvSqyfXjvEokTGsp4+ECQBD8i1ehD5xRg
-> IB@F$9G-grease
cXRgUVdIPGEjft1CJA
--- si16Det/GwF7GLHLt0ha8v4rFFeJXyhEylIiqzZVAK8
(binary payload not shown)
BIN cluster/services/dns/pdns-api-key.age Normal file
Binary file not shown.
20 cluster/services/dns/pdns-db-credentials.age Normal file
@@ -0,0 +1,20 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A OQaDWMrfvfQoluWFIldZgZFEdqzFfXhPvO6BqOZofnU
qoUEZlKSTNJ53jgTK9eP2GDJogugtCfKqBaVH7mCqZY
-> ssh-ed25519 5/zT0w U5w9w/DE+zDgw4YI6DDVAMSaAAcR+3+BIioVXAGMfHg
9Ps2qB+P2DWDdYPRPuzmBECWzJ90LVq8B71LlrO0Gyk
-> ssh-ed25519 TCgorQ s91OjOZH6825aSBRfiSN+ODBOJvbjff6s2fzf/8o2Wk
zJI/5oKwagyOJUy1siwAcZ7wcsEMUyekYjP7TlsAjoY
-> ssh-ed25519 d3WGuA 1gPF8W/p+wVclVrMGbvnBAO9IvSX9G8qNEaKpHeX23w
L4N6MxD5SeEhqcjRx1e8M/rMtK2Qg+elYgKCHkHi71o
-> ssh-ed25519 YIaSKQ eOwUbPa6RceRM4zsB8lHSCYtSJoLX1Fqs8CdzM7qkCQ
8OPkkFP0B+uN0zBZAUmEgogp97YO+qlvsG6wnMwkzLw
-> L_-grease 51PFh7A
k9hZ2FbD3JDWGN8/WFjOCM0Ud/uvQhZZDceL/Esa8cfp
--- v5Noo1KII/WFJxNGjEO2hqdhgHdastilx/M1vFos5dE
(binary payload not shown)
@@ -1,35 +0,0 @@
{ cluster, ... }:

let
  inherit (cluster._module.specialArgs.depot.lib.meta) domain;
in
{
  nodes.nowhere = { pkgs, ... }: {
    passthru = cluster;
    environment.systemPackages = [
      pkgs.knot-dns
      pkgs.openssl
    ];
  };

  testScript = ''
    import json
    nodeNames = json.loads('${builtins.toJSON cluster.config.services.dns.nodes.authoritative}')
    dotNames = json.loads('${builtins.toJSON cluster.config.services.dns.nodes.coredns}')
    nodes = [ n for n in machines if n.name in nodeNames ]
    dotServers = [ n for n in machines if n.name in dotNames ]

    start_all()

    with subtest("should allow external name resolution for own domain"):
      for node in nodes:
        node.wait_for_unit("coredns.service")
      nowhere.wait_until_succeeds("[[ $(kdig +short securedns.${domain} | wc -l) -ne 0 ]]", timeout=60)
      nowhere.fail("[[ $(kdig +short example.com | wc -l) -ne 0 ]]")

    with subtest("should have valid certificate on DoT endpoint"):
      for node in dotServers:
        node.wait_for_unit("acme-finished-securedns.${domain}.target")
      nowhere.wait_until_succeeds("openssl </dev/null s_client -connect securedns.${domain}:853 -verify_return_error -strict -verify_hostname securedns.${domain}", timeout=60)
  '';
}
@@ -1,17 +0,0 @@
{ depot, ... }:

{
  services.fbi = {
    nodes.host = [ "VEGAS" ];
    nixos.host = ./host.nix;
  };

  dns.records = let
    fbiAddr = [ depot.hours.VEGAS.interfaces.primary.addrPublic ];
  in {
    fbi-index.target = fbiAddr;
    fbi-requests.target = fbiAddr;
    radarr.target = fbiAddr;
    sonarr.target = fbiAddr;
  };
}
@@ -0,0 +1,13 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A YQQrnpQI/qyEZugiRwsrPbW4oMYK/rlmRKAdD3JjYz4
JRGFqNc4BVflfR4WUuEOym39IhZlUI778NtOFtxE8eY
-> ssh-ed25519 5/zT0w utH25Xa9WQK9hXbKWsEWK5LJtCbhjpDX6JaomxnRaCI
2MfxxDjs0doUTVsGP9942rx1tyCYsDxhlDo1542BhKQ
-> ssh-ed25519 d3WGuA 6qD02cluQEBqEvupHf93Onlpv8QJJSl/bJm/XqyD+gQ
bLz/ULSaIW6HnPXDKD5dxCbQWv0VC2R+E5wlj7VxOc0
-> Ovax-grease ^1$]}H G4 FpDF XKHkj{
IVdVFYcVe9PoHCCqM3GG1pM6xgTZ5r8XWlkBjlQimgaDArotF4dPpsSTpyc
--- wdTYr6EpFPFsDJI0qQf74c6ce+v5ek6j+mgAx2CI9uI
(binary payload not shown)
@@ -1,45 +1,6 @@
{ config, depot, ... }:

{
  services.forge = {
    nodes.server = [ "VEGAS" ];
    nixos.server = ./server.nix;
    meshLinks.server.forge.link.protocol = "http";
    secrets = with config.services.forge.nodes; {
      oidcSecret = {
        nodes = server;
        owner = "forgejo";
      };
    };
  };

  ways = let
    host = builtins.head config.services.forge.nodes.server;
  in config.lib.forService "forge" {
    forge.target = config.hostLinks.${host}.forge.url;
  };

  patroni = config.lib.forService "forge" {
    databases.forge = {};
    users.forge.locksmith = {
      nodes = config.services.forge.nodes.server;
      format = "raw";
    };
  };

  garage = config.lib.forService "forge" {
    keys.forgejo.locksmith.nodes = config.services.forge.nodes.server;
    buckets.forgejo.allow.forgejo = [ "read" "write" ];
  };

  monitoring.blackbox.targets.forge = config.lib.forService "forge" {
    address = "https://forge.${depot.lib.meta.domain}/api/v1/version";
    module = "https2xx";
  };

  dns.records = config.lib.forService "forge" {
    "ssh.forge".target = map
      (node: depot.hours.${node}.interfaces.primary.addrPublic)
      config.services.forge.nodes.server;
  };
}
@@ -1,37 +1,37 @@
{ cluster, config, depot, lib, pkgs, ... }:
{ cluster, config, depot, lib, pkgs, tools, ... }:

let
  inherit (depot.lib.meta) domain;
  inherit (cluster.config.services.forge) secrets;
  inherit (tools.meta) domain;
  inherit (tools.nginx) vhosts;
  inherit (config.age) secrets;

  patroni = cluster.config.links.patroni-pg-access;

  host = "forge.${domain}";

  link = cluster.config.hostLinks.${config.networking.hostName}.forge;
  link = config.links.forge;

  exe = lib.getExe config.services.forgejo.package;
  exe = lib.getExe config.services.gitea.package;
in

{
  system.ascensions.forgejo = {
    requiredBy = [ "forgejo.service" ];
    before = [ "forgejo.service" ];
    incantations = i: [
      (i.execShell "chown -R forgejo:forgejo /srv/storage/private/forge")
      (i.execShell "rm -rf /srv/storage/private/forge/data/{attachments,lfs,avatars,repo-avatars,repo-archive,packages,actions_log,actions_artifacts}")
    ];
  age.secrets = {
    forgejoOidcSecret = {
      file = ./credentials/forgejo-oidc-secret.age;
      owner = "gitea";
    };
    forgejoDbCredentials = {
      file = ./credentials/forgejo-db-credentials.age;
      owner = "gitea";
    };
  };

  services.locksmith.waitForSecrets.forgejo = [
    "garage-forgejo-id"
    "garage-forgejo-secret"
    "patroni-forge"
  ];
  links.forge.protocol = "http";

  services.forgejo = {
  services.gitea = {
    enable = true;
    package = depot.packages.forgejo;
    appName = "The Forge";
    stateDir = "/srv/storage/private/forge";
    database = {
      createDatabase = false;
@@ -40,19 +40,15 @@ in
      inherit (patroni) port;
      name = "forge";
      user = "forge";
      passwordFile = "/run/locksmith/patroni-forge";
      passwordFile = secrets.forgejoDbCredentials.path;
    };
    settings = {
      DEFAULT = {
        APP_NAME = "The Forge";
      };
      server = {
        DOMAIN = host;
        ROOT_URL = "https://${host}/";
        PROTOCOL = link.protocol;
        HTTP_ADDR = link.ipv4;
        HTTP_PORT = link.port;
        SSH_DOMAIN = "ssh.${host}";
      };
      oauth2_client = {
        REGISTER_EMAIL_CONFIRM = false;
@@ -66,26 +62,15 @@ in
        ALLOW_ONLY_INTERNAL_REGISTRATION = false;
        ALLOW_ONLY_EXTERNAL_REGISTRATION = true;
      };
      storage = {
        STORAGE_TYPE = "minio";
        MINIO_ENDPOINT = cluster.config.links.garageS3.hostname;
        MINIO_BUCKET = "forgejo";
        MINIO_USE_SSL = true;
        MINIO_BUCKET_LOOKUP = "path";
      };
      log."logger.xorm.MODE" = "";
      log.ENABLE_XORM_LOG = false;
      # enabling this will leak secrets to the log
      database.LOG_SQL = false;
    };
    secrets = {
      storage = {
        MINIO_ACCESS_KEY_ID = "/run/locksmith/garage-forgejo-id";
        MINIO_SECRET_ACCESS_KEY = "/run/locksmith/garage-forgejo-secret";
      };
    };
  };

  systemd.services.forgejo.preStart = let
  services.nginx.virtualHosts."${host}" = vhosts.proxy link.url;

  systemd.services.gitea.preStart = let
    providerName = "PrivateVoidAccount";
    args = lib.escapeShellArgs [
      "--name" providerName
@@ -98,9 +83,9 @@ in
  in lib.mkAfter /*bash*/ ''
    providerId="$(${exe} admin auth list | ${pkgs.gnugrep}/bin/grep -w '${providerName}' | cut -f1)"
    if [[ -z "$providerId" ]]; then
      FORGEJO_ADMIN_OAUTH2_SECRET="$(< ${secrets.oidcSecret.path})" ${exe} admin auth add-oauth ${args}
      FORGEJO_ADMIN_OAUTH2_SECRET="$(< ${secrets.forgejoOidcSecret.path})" ${exe} admin auth add-oauth ${args}
    else
      FORGEJO_ADMIN_OAUTH2_SECRET="$(< ${secrets.oidcSecret.path})" ${exe} admin auth update-oauth --id "$providerId" ${args}
      FORGEJO_ADMIN_OAUTH2_SECRET="$(< ${secrets.forgejoOidcSecret.path})" ${exe} admin auth update-oauth --id "$providerId" ${args}
    fi
  '';
}
@@ -1,10 +0,0 @@
{ depot, ... }:

{
  services.gitlab = {
    nodes.host = [ "VEGAS" ];
    nixos.host = ./host.nix;
  };

  dns.records.git.target = [ depot.hours.VEGAS.interfaces.primary.addrPublic ];
}
@@ -1,9 +1,7 @@
{ cluster, config, depot, lib, ... }:
{ config, depot, lib, pkgs, ... }:

let
  inherit (cluster.config.services.hercules-ci-multi-agent) nodes secrets;

  mapAgents = lib.flip lib.mapAttrs nodes;
  mapAgents = lib.flip lib.mapAttrs config.services.hercules-ci-agents;

  mergeMap = f: let
    outputs = mapAgents f;
@@ -19,40 +17,33 @@ let
in
{
  imports = [
    ./modules/multi-agent-refactored
    depot.inputs.hercules-ci-agent.nixosModules.multi-agent-service
  ];

  systemd.services = mergeMap (_: _: {
  age.secrets = mergeMap (name: _: {
    hci-token = {
      file = ./secrets + "/hci-token-${name}-${config.networking.hostName}.age";
      owner = "hci-${name}";
      group = "hci-${name}";
    };
    hci-cache-credentials = {
      file = ./secrets + "/hci-cache-credentials-${config.networking.hostName}.age";
      owner = "hci-${name}";
      group = "hci-${name}";
    };
    hci-cache-config = {
      file = ./secrets/hci-cache-config.age;
      owner = "hci-${name}";
      group = "hci-${name}";
    };
  });
  systemd.services = mergeMap (name: _: {
    hercules-ci-agent = {
      # hercules-ci-agent-restarter should take care of this
      restartIfChanged = false;
      environment = {
        AWS_SHARED_CREDENTIALS_FILE = secrets.cacheCredentials.path;
        AWS_EC2_METADATA_DISABLED = "true";
        AWS_SHARED_CREDENTIALS_FILE = config.age.secrets."hci-cache-credentials-${name}".path;
      };
      serviceConfig.Slice = "builder.slice";
    };
  });

  services.hercules-ci-agents = lib.genAttrs (lib.attrNames nodes) (org: {
    enable = true;
    package = depot.inputs.hercules-ci-agent.packages.hercules-ci-agent;
    settings = {
      clusterJoinTokenPath = secrets."clusterJoinToken-${org}".path;
      binaryCachesPath = secrets.cacheConfig.path;
      concurrentTasks = lib.pipe config.reflection.hardware.cpu.cores [
        (lib.flip builtins.div 2)
        builtins.floor
        (lib.max 2)
      ];
    };
  });

  nix.settings.cores = lib.pipe config.reflection.hardware.cpu.cores [
    (builtins.mul 0.75)
    builtins.floor
    (lib.max 1)
  ];

  users.groups.hercules-ci-agent.members = map (org: "hci-${org}") (lib.attrNames nodes);
}
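Both derived values above come from the core count via lib.pipe: concurrentTasks takes half the cores with a floor of 2, and nix.settings.cores takes three quarters with a floor of 1. A worked example for a hypothetical 12-core machine:

let
  lib = (import <nixpkgs> {}).lib;
  cores = 12;  # hypothetical machine
in {
  concurrentTasks = lib.pipe cores [
    (lib.flip builtins.div 2)  # 12 / 2 = 6
    builtins.floor
    (lib.max 2)                # never below 2, needed for import-from-derivation
  ];
  nixCores = lib.pipe cores [
    (builtins.mul 0.75)        # 12 * 0.75 = 9.0
    builtins.floor             # 9
    (lib.max 1)
  ];
}
# => { concurrentTasks = 6; nixCores = 9; }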
@@ -1,4 +1,4 @@
{ config, lib, ... }:
{ config, lib, tools, ... }:

{
  services.hercules-ci-multi-agent = {
@@ -6,67 +6,20 @@
    private-void = [ "VEGAS" "prophet" ];
    nixpak = [ "VEGAS" "prophet" ];
    max = [ "VEGAS" "prophet" ];
    hyprspace = [ "VEGAS" "prophet" ];
  };
  nixos = {
    private-void = [
      ./common.nix
      {
        services.hercules-ci-agents.private-void.settings = {
          secretsJsonPath = config.services.hercules-ci-multi-agent.secrets.effectsSecrets.path;
        };
      }
      ./orgs/private-void.nix
    ];
    nixpak = [
      ./common.nix
      ./orgs/nixpak.nix
    ];
    max = [
      ./common.nix
      ./orgs/max.nix
    ];
    hyprspace = [
      ./common.nix
    ];
  };
  secrets = let
    inherit (config.services.hercules-ci-multi-agent) nodes;
    allNodes = lib.unique (lib.concatLists (lib.attrValues nodes));
  in {
    cacheConfig = {
      nodes = allNodes;
      mode = "0440";
      group = "hercules-ci-agent";
    };
    cacheCredentials = {
      nodes = allNodes;
      shared = false;
      mode = "0440";
      group = "hercules-ci-agent";
    };
    effectsSecrets = {
      nodes = nodes.private-void;
      owner = "hci-private-void";
    };
  } // lib.mapAttrs' (org: nodes: {
    name = "clusterJoinToken-${org}";
    value = {
      inherit nodes;
      shared = false;
      owner = "hci-${org}";
    };
  }) nodes;
  };

  garage = let
    hciAgentKeys = lib.pipe config.services.hercules-ci-multi-agent.nodes [
      (lib.collect lib.isList)
      lib.flatten
      lib.unique
      (map (x: "hci-agent-${x}"))
    ];
  in config.lib.forService "hercules-ci-multi-agent" {
    keys = lib.genAttrs hciAgentKeys (lib.const {});
    buckets.nix-store = {
      allow = lib.genAttrs hciAgentKeys (lib.const [ "read" "write" ]);
      web.enable = true;
    };
  };
}
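hciAgentKeys collects every node list under nodes, flattens and dedupes it, then prefixes each name to form Garage key names. A sketch with hypothetical orgs:

let
  lib = (import <nixpkgs> {}).lib;
  nodes = { org-a = [ "VEGAS" "prophet" ]; org-b = [ "VEGAS" ]; };  # hypothetical
in lib.pipe nodes [
  (lib.collect lib.isList)      # [ [ "VEGAS" "prophet" ] [ "VEGAS" ] ]
  lib.flatten                   # [ "VEGAS" "prophet" "VEGAS" ]
  lib.unique                    # [ "VEGAS" "prophet" ]
  (map (x: "hci-agent-${x}"))   # [ "hci-agent-VEGAS" "hci-agent-prophet" ]
]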
@@ -1,143 +0,0 @@
{ config, lib, pkgs, ... }:

{
  options = {
    services.hercules-ci-agents = lib.mkOption {
      default = { };
      type = lib.types.attrsOf (lib.types.submodule (import ./options.nix { inherit config lib pkgs; }));
      description = lib.mdDoc "Hercules CI Agent instances.";
      example = {
        agent1.enable = true;

        agent2 = {
          enable = true;
          settings.labels.myMetadata = "agent2";
        };
      };
    };
  };

  config =
    let
      forAllAgents = f: lib.mkMerge (lib.mapAttrsToList (name: agent: lib.mkIf agent.enable (f name agent)) config.services.hercules-ci-agents);
    in
    {
      users = forAllAgents (name: agent: {
        users.${agent.user} = {
          inherit (agent) group;
          description = "Hercules CI Agent system user for ${name}";
          isSystemUser = true;
          home = agent.settings.baseDirectory;
          createHome = true;
        };
        groups.${agent.group} = { };
      });

      systemd = forAllAgents (name: agent:
        let
          command = "${agent.package}/bin/hercules-ci-agent --config ${agent.tomlFile}";
          testCommand = "${command} --test-configuration";
        in
        {
          tmpfiles.rules = [ "d ${agent.settings.workDirectory} 0700 ${agent.user} ${agent.group} - -" ];

          services."hercules-ci-agent-${name}" = {
            wantedBy = [ "multi-user.target" ];
            after = [ "network-online.target" ];
            wants = [ "network-online.target" ];
            startLimitBurst = 30 * 1000000; # practically infinite
            serviceConfig = {
              User = agent.user;
              Group = agent.group;
              ExecStart = command;
              ExecStartPre = testCommand;
              Restart = "on-failure";
              RestartSec = 120;

              # If a worker goes OOM, don't kill the main process. It needs to
              # report the failure and it's unlikely to be part of the problem.
              OOMPolicy = "continue";

              # Work around excessive stack use by libstdc++ regex
              # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86164
              # A 256 MiB stack allows between 400 KiB and 1.5 MiB file to be matched by ".*".
              LimitSTACK = 256 * 1024 * 1024;

              # Hardening.
              DeviceAllow = "";
              LockPersonality = true;
              NoNewPrivileges = true;
              PrivateDevices = true;
              PrivateMounts = true;
              ProtectControlGroups = true;
              ProtectHome = true;
              ProtectSystem = "full";
              RemoveIPC = true;
              RestrictRealtime = true;
              RestrictSUIDSGID = true;
              RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
              SystemCallArchitectures = "native";
              UMask = "077";
              WorkingDirectory = agent.settings.workDirectory;
            };
          };

          # Changes in the secrets do not affect the unit in any way that would cause
          # a restart, which is currently necessary to reload the secrets.
          paths."hercules-ci-agent-${name}-restart-files" = {
            wantedBy = [ "hercules-ci-agent-${name}.service" ];
            pathConfig = {
              Unit = "hercules-ci-agent-${name}-restarter.service";
              PathChanged = [ agent.settings.clusterJoinTokenPath agent.settings.binaryCachesPath ];
            };
          };

          services."hercules-ci-agent-restarter-${name}" = {
            serviceConfig.Type = "oneshot";
            script = ''
              # Wait a bit, with the effect of bundling up file changes into a single
              # run of this script and hopefully a single restart.
              sleep 10
              if systemctl is-active --quiet 'hercules-ci-agent-${name}.service'; then
                if ${testCommand}; then
                  systemctl restart 'hercules-ci-agent-${name}.service'
                else
                  echo 1>&2 'WARNING: Not restarting hercules-ci-agent-${name} because config is not valid at this time.'
                fi
              else
                echo 1>&2 'Not restarting hercules-ci-agent-${name} despite config file update, because it is not already active.'
              fi
            '';
          };
        });

      nix.settings = forAllAgents (_: agent: {
        trusted-users = [ agent.user ];
        # A store path that was missing at first may well have finished building,
        # even shortly after the previous lookup. This *also* applies to the daemon.
        narinfo-cache-negative-ttl = 0;
      });

      # Trusted user allows simplified configuration and better performance
      # when operating in a cluster.
      assertions = forAllAgents (_: agent: [
        {
          assertion = (agent.settings.nixUserIsTrusted or false) -> builtins.match ".*(^|\n)[ \t]*trusted-users[ \t]*=.*" config.nix.extraOptions == null;
          message = ''
            hercules-ci-agent: Please do not set `trusted-users` in `nix.extraOptions`.

            The hercules-ci-agent module by default relies on `nix.settings.trusted-users`
            to be effectful, but a line like `trusted-users = ...` in `nix.extraOptions`
            will override the value set in `nix.settings.trusted-users`.

            Instead of setting `trusted-users` in the `nix.extraOptions` string, you should
            set an option with additive semantics, such as
            - the NixOS option `nix.settings.trusted-users`, or
            - the Nix option in the `extraOptions` string, `extra-trusted-users`
          '';
        }
      ]);
    };

  meta.maintainers = with lib.maintainers; [ roberth kranzes ];
}
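The forAllAgents combinator in the module above turns a per-agent function into one merged configuration fragment, gated on each agent's enable flag. A reduced sketch of the pattern (agent names hypothetical):

let
  lib = (import <nixpkgs> {}).lib;
  agents = { a = { enable = true; }; b = { enable = false; }; };
  forAllAgents = f: lib.mkMerge (lib.mapAttrsToList
    (name: agent: lib.mkIf agent.enable (f name agent)) agents);
in forAllAgents (name: _: { "unit-${name}" = { }; })
# lib.mkMerge / lib.mkIf produce markers that the NixOS module system resolves
# during option merging; inside a real module, only agent "a" contributes here,
# yielding { "unit-a" = { }; } after evaluation.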
@@ -1,62 +0,0 @@
{ config, lib, pkgs, ... }:

let
  systemConfig = config;
in
{ config, name, ... }:
let
  inherit (lib) types;
in
{
  options = {
    enable = lib.mkEnableOption (lib.mdDoc ''
      Hercules CI Agent as a system service.

      [Hercules CI](https://hercules-ci.com) is a
      continuous integration service that is centered around Nix.

      Support is available at [help@hercules-ci.com](mailto:help@hercules-ci.com).
    '');

    package = lib.mkPackageOption pkgs "hercules-ci-agent" { };

    user = lib.mkOption {
      type = types.str;
      default = "hci-${name}";
      description = lib.mdDoc "User account under which hercules-ci-agent runs.";
      internal = true;
    };

    group = lib.mkOption {
      type = types.str;
      default = "hci-${name}";
      description = lib.mdDoc "Group account under which hercules-ci-agent runs.";
      internal = true;
    };

    settings = lib.mkOption {
      type = types.submodule (import ./settings.nix { inherit systemConfig lib name pkgs; agent = config; });
      default = { };
      description = lib.mdDoc ''
        These settings are written to the `agent.toml` file.

        Not all settings are listed as options; unlisted settings can be set nonetheless.

        For the exhaustive list of settings, see <https://docs.hercules-ci.com/hercules-ci/reference/agent-config/>.
      '';
    };

    tomlFile = lib.mkOption {
      type = types.path;
      internal = true;
      defaultText = lib.literalMD "generated `hercules-ci-agent-${name}.toml`";
      description = lib.mdDoc ''
        The fully assembled config file.
      '';
    };
  };

  config = {
    tomlFile = (pkgs.formats.toml { }).generate "hercules-ci-agent-${name}.toml" config.settings;
  };
}
@@ -1,163 +0,0 @@
{ agent, systemConfig, lib, name, pkgs, ... }:

{ config, ... }:

let
  inherit (lib) types;
  format = pkgs.formats.toml { };
in
{
  freeformType = format.type;
  options = {
    apiBaseUrl = lib.mkOption {
      description = lib.mdDoc ''
        API base URL that the agent will connect to.

        When using Hercules CI Enterprise, set this to the URL where your
        Hercules CI server is reachable.
      '';
      type = types.str;
      default = "https://hercules-ci.com";
    };
    baseDirectory = lib.mkOption {
      type = types.path;
      default = "/var/lib/hercules-ci-agent-${name}";
      description = lib.mdDoc ''
        State directory (secrets, work directory, etc) for agent
      '';
    };
    concurrentTasks = lib.mkOption {
      description = lib.mdDoc ''
        Number of tasks to perform simultaneously.

        A task is a single derivation build, an evaluation or an effect run.
        At minimum, you need 2 concurrent tasks for `x86_64-linux`
        in your cluster, to allow for import from derivation.

        `concurrentTasks` can be around the CPU core count or lower if memory is
        the bottleneck.

        The optimal value depends on the resource consumption characteristics of your workload,
        including memory usage and in-task parallelism. This is typically determined empirically.

        When scaling, it is generally better to have a double-size machine than two machines,
        because each split of resources causes inefficiencies; particularly with regards
        to build latency because of extra downloads.
      '';
      type = types.either types.ints.positive (types.enum [ "auto" ]);
      default = "auto";
      defaultText = lib.literalMD ''
        `"auto"`, meaning equal to the number of CPU cores.
      '';
    };
    labels = lib.mkOption {
      description = lib.mdDoc ''
        A key-value map of user data.

        This data will be available to organization members in the dashboard and API.

        The values can be of any TOML type that corresponds to a JSON type, but arrays
        can not contain tables/objects due to limitations of the TOML library. Values
        involving arrays of non-primitive types may not be representable currently.
      '';
      type = format.type;
      defaultText = lib.literalExpression ''
        {
          agent.source = "..."; # One of "nixpkgs", "flake", "override"
          lib.version = "...";
          pkgs.version = "...";
        }
      '';
    };
    nixUserIsTrusted = lib.mkOption {
      internal = true;
      readOnly = true;
      description = lib.mdDoc ''
        Whether the agent's user should be considered trusted by Nix.
      '';
      type = types.bool;
      default = lib.elem agent.user systemConfig.nix.settings.trusted-users;
    };
    workDirectory = lib.mkOption {
      description = lib.mdDoc ''
        The directory in which temporary subdirectories are created for task state. This includes sources for Nix evaluation.
      '';
      type = types.path;
      default = config.baseDirectory + "/work";
      defaultText = lib.literalExpression ''baseDirectory + "/work"'';
    };
    staticSecretsDirectory = lib.mkOption {
      description = lib.mdDoc ''
        This is the default directory to look for statically configured secrets like `cluster-join-token.key`.

        See also `clusterJoinTokenPath` and `binaryCachesPath` for fine-grained configuration.
      '';
      type = types.path;
      default = config.baseDirectory + "/secrets";
      defaultText = lib.literalExpression ''baseDirectory + "/secrets"'';
    };
    clusterJoinTokenPath = lib.mkOption {
      description = lib.mdDoc ''
        Location of the cluster-join-token.key file.

        You can retrieve the contents of the file when creating a new agent via
        <https://hercules-ci.com/dashboard>.

        As this value is confidential, it should not be in the store, but
        installed using other means, such as agenix, NixOps
        `deployment.keys`, or manual installation.

        The contents of the file are used for authentication between the agent and the API.
      '';
      type = types.path;
      default = config.staticSecretsDirectory + "/cluster-join-token.key";
      defaultText = lib.literalExpression ''staticSecretsDirectory + "/cluster-join-token.key"'';
    };
    binaryCachesPath = lib.mkOption {
      description = lib.mdDoc ''
        Path to a JSON file containing binary cache secret keys.

        As these values are confidential, they should not be in the store, but
        copied over using other means, such as agenix, NixOps
        `deployment.keys`, or manual installation.

        The format is described on <https://docs.hercules-ci.com/hercules-ci-agent/binary-caches-json/>.
      '';
      type = types.path;
      default = config.staticSecretsDirectory + "/binary-caches.json";
      defaultText = lib.literalExpression ''staticSecretsDirectory + "/binary-caches.json"'';
    };
    secretsJsonPath = lib.mkOption {
      description = lib.mdDoc ''
        Path to a JSON file containing secrets for effects.

        As these values are confidential, they should not be in the store, but
        copied over using other means, such as agenix, NixOps
        `deployment.keys`, or manual installation.

        The format is described on <https://docs.hercules-ci.com/hercules-ci-agent/secrets-json/>.
      '';
      type = types.path;
      default = config.staticSecretsDirectory + "/secrets.json";
      defaultText = lib.literalExpression ''staticSecretsDirectory + "/secrets.json"'';
    };
  };

  config = {
    labels =
      let
        mkIfNotNull = x: lib.mkIf (x != null) x;
      in
      {
        nixos = {
          inherit (systemConfig.system.nixos)
            release
            codeName
            tags;
          configurationRevision = mkIfNotNull systemConfig.system.configurationRevision;
          label = mkIfNotNull systemConfig.system.nixos.label;
          systemName = mkIfNotNull systemConfig.system.name;
        };
      };
  };
}
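tomlFile is produced with pkgs.formats.toml, which turns the freeform settings attrset into a TOML file in the Nix store. A standalone sketch of that mechanism, with hypothetical settings values:

let
  pkgs = import <nixpkgs> {};
  format = pkgs.formats.toml { };
in format.generate "agent-example.toml" {
  apiBaseUrl = "https://hercules-ci.com";
  concurrentTasks = 4;
  labels.module = "example";  # freeform keys pass straight through
}
# builds a derivation whose output is the rendered TOML file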
10 cluster/services/hercules-ci-multi-agent/orgs/max.nix Normal file
@@ -0,0 +1,10 @@
{ config, lib, depot, pkgs, ... }:

{
  services.hercules-ci-agents.max = {
    settings = {
      clusterJoinTokenPath = config.age.secrets.hci-token-max.path;
      binaryCachesPath = config.age.secrets.hci-cache-config-max.path;
    };
  };
}
10 cluster/services/hercules-ci-multi-agent/orgs/nixpak.nix Normal file
@@ -0,0 +1,10 @@
{ config, lib, depot, pkgs, ... }:

{
  services.hercules-ci-agents.nixpak = {
    settings = {
      clusterJoinTokenPath = config.age.secrets.hci-token-nixpak.path;
      binaryCachesPath = config.age.secrets.hci-cache-config-nixpak.path;
    };
  };
}
@@ -0,0 +1,16 @@
{ config, lib, depot, pkgs, ... }:

{
  age.secrets.hci-effects-secrets-private-void = {
    file = ../secrets/hci-effects-secrets-private-void.age;
    owner = "hci-private-void";
    group = "hci-private-void";
  };
  services.hercules-ci-agents.private-void = {
    settings = {
      clusterJoinTokenPath = config.age.secrets.hci-token-private-void.path;
      binaryCachesPath = config.age.secrets.hci-cache-config-private-void.path;
      secretsJsonPath = config.age.secrets.hci-effects-secrets-private-void.path;
    };
  };
}
Binary file not shown.
@@ -0,0 +1,11 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A IGECbw4weSSXVbGlVh5FThXvmbSKspBUvrA0WlN9dU4
O8YzszymNmB7TPZJDZ1HP2qL6X02MlCgz2ZluHU11+k
-> ssh-ed25519 5/zT0w 7N+azK2aQ99WJy+VwAYP4gWYUOKaLZ+ojD35brLaXyc
Sf61lodtjhytPL90fWpkjxCf4WBQ/uLi0NEC/lijoCg
-> ssh-ed25519 d3WGuA 9Xwa9e1ICTKkkLALKTxDQXTuBnEiQyt/RC6ybhXvp2U
j3UWevFXjtH4FL+qm7rP/2XSbZ2TsE/NRL8nq+z14ds
-> xI-grease h
mG4MthDomNv5hS0OFa6pzl8esK7aMcFcUtU2PA
--- /T1g1Q/J26DxpbGzpXyORZvQK4uO37LLLPAfL1VlETQ
(binary payload not shown)
@@ -0,0 +1,12 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A XV9fjixDzjYkcTAh/uAWS+vbvqe19HhF1D3ak1g1jiE
t5PEwAn+I4bJN27fYEZVZQh/SVxQocBMxqxc1O5CCgE
-> ssh-ed25519 5/zT0w 0KuTIG51h+oX3QWZukAjoBVHXE6NxKBcSfDN9u/A2H8
SGm8Eh5L5ELB3gjmV5pfh3HqDnGrdif0I7mF7ulabW4
-> ssh-ed25519 YIaSKQ bjHZIN85glRN0hdH76iu7kg243enfH6VlX8Yr54FfzM
0RzxbV9ABYElM2DIfimkvzeVuhobpsiDTH39PgVDTvE
-> 8&39JRT-grease {)Hfc"$ |,#c1\: Vf>^[!hm ;2o>+a"M
y9JRDuvO1YC61IhxUofWLAYfOEldTR9/SwnGuo7lAbAp8smTrlWO2qVe3Ztp+gQU
NXZ9K3PaKKm1VWg
--- p75QmGUUBK5sNhkG6zDmEGa5injwKH119i6bHod55+Y
(binary payload not shown)
1461 cluster/services/idm/backports/pam.nix Normal file
File diff suppressed because it is too large.
Some files were not shown because too many files have changed in this diff.