Compare commits
No commits in common. "master" and "d2n-bug-symlink-bin" have entirely different histories.
573 changed files with 4853 additions and 19903 deletions

.github/dependabot.yml (vendored): 11 changes

@@ -1,11 +0,0 @@
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates

version: 2
updates:
  - package-ecosystem: "pip" # See documentation for possible values
    directory: "/packages/servers/reflex-cache" # Location of package manifests
    schedule:
      interval: "weekly"

.gitignore (vendored): 5 changes

@@ -1,7 +1,4 @@
/wip
/result
result
result-*
**/.direnv/
.data/
.cache/
.nixos-test-history

@@ -1,10 +0,0 @@
{ lib, ... }:

{
  perSystem = {
    options.catalog = lib.mkOption {
      type = with lib.types; lazyAttrsOf (lazyAttrsOf (lazyAttrsOf (submodule ./target.nix)));
      default = {};
    };
  };
}

@@ -1,31 +0,0 @@
{ lib, name, ... }:

{
  options = {
    description = lib.mkOption {
      type = lib.types.str;
      default = name;
    };

    actions = lib.mkOption {
      type = with lib.types; lazyAttrsOf (submodule {
        options = {
          description = lib.mkOption {
            type = lib.types.str;
            default = name;
          };

          command = lib.mkOption {
            type = lib.types.str;
          };

          packages = lib.mkOption {
            type = with lib.types; listOf package;
            default = [];
          };
        };
      });
      default = {};
    };
  };
}

@@ -1,6 +0,0 @@
{
  imports = [
    ./services.nix
    ./secrets.nix
  ];
}

@@ -1,73 +0,0 @@
{ config, lib, withSystem, ... }:

let
  inherit (config) cluster hours;
in

{
  perSystem = { config, pkgs, system, ... }: {
    catalog.cluster = {
      secrets = lib.pipe cluster.config.services [
        (lib.mapAttrsToList (svcName: svcConfig: lib.mapAttrsToList (secretName: secretConfig: {
          name = "${svcName}/${secretName}";
          value = {
            description = "Cluster secret '${secretName}' of service '${svcName}'";
            actions = let
              agenixRules = builtins.toFile "agenix-rules-shim.nix" /*nix*/ ''
                builtins.fromJSON (builtins.readFile (builtins.getEnv "AGENIX_KEYS_JSON"))
              '';

              mkKeys = secretFile: nodes: builtins.toFile "agenix-keys.json" (builtins.toJSON {
                "${secretFile}".publicKeys = (map (hour: hours.${hour}.ssh.id.publicKey) nodes) ++ cluster.config.secrets.extraKeys;
              });

              setupCommands = secretFile: nodes: let
                agenixKeysJson = mkKeys secretFile nodes;
              in ''
                export RULES='${agenixRules}'
                export AGENIX_KEYS_JSON='${agenixKeysJson}'
                mkdir -p "$PRJ_ROOT/cluster/secrets"
                cd "$PRJ_ROOT/cluster/secrets"
              '';
            in (lib.optionalAttrs (secretConfig.generate != null) {
              generateSecret = {
                description = "Generate this secret";
                command = if secretConfig.shared then let
                  secretFile = "${svcName}-${secretName}.age";
                in ''
                  ${setupCommands secretFile secretConfig.nodes}
                  ${withSystem system secretConfig.generate} | agenix -e '${secretFile}'
                '' else lib.concatStringsSep "\n" (map (node: let
                  secretFile = "${svcName}-${secretName}-${node}.age";
                in ''
                  ${setupCommands secretFile [ node ]}
                  ${withSystem system secretConfig.generate} | agenix -e '${secretFile}'
                '') secretConfig.nodes);
              };
            }) // (if secretConfig.shared then let
              secretFile = "${svcName}-${secretName}.age";
            in {
              editSecret = {
                description = "Edit this secret";
                command = ''
                  ${setupCommands secretFile secretConfig.nodes}
                  agenix -e '${secretFile}'
                '';
              };
            } else lib.mapAttrs' (name: lib.nameValuePair "editSecretInstance-${name}") (lib.genAttrs secretConfig.nodes (node: let
              secretFile = "${svcName}-${secretName}-${node}.age";
            in {
              description = "Edit this secret for '${node}'";
              command = ''
                ${setupCommands secretFile [ node ]}
                agenix -e '${secretFile}'
              '';
            })));
          };
        }) svcConfig.secrets))
        lib.concatLists
        lib.listToAttrs
      ];
    };
  };
}

@@ -1,52 +0,0 @@
{ config, lib, ... }:

let
  inherit (config) cluster flake;
in

{
  perSystem = { config, pkgs, ... }: {
    catalog.cluster = {
      services = lib.mapAttrs (name: svc: {
        description = "Cluster service: ${name}";
        actions = let
          mkDeployAction = { description, agents }: {
            inherit description;
            packages = [
              config.packages.cachix
              pkgs.tmux
            ];
            command = let
              cachixDeployJson = pkgs.writeText "cachix-deploy.json" (builtins.toJSON {
                agents = lib.genAttrs agents (name: builtins.unsafeDiscardStringContext flake.nixosConfigurations.${name}.config.system.build.toplevel);
              });
            in ''
              set -e
              echo building ${toString (lib.length agents)} configurations in parallel
              tmux new-session ${lib.concatStringsSep " split-window " (
                map (host: let
                  drvPath = builtins.unsafeDiscardStringContext flake.nixosConfigurations.${host}.config.system.build.toplevel.drvPath;
                in '' 'echo building configuration for ${host}; nix build -L --no-link --store "ssh-ng://${host}" --eval-store auto "${drvPath}^*"'\; '') agents
              )} select-layout even-vertical

              source ~/.config/cachix/deploy
              cachix deploy activate ${cachixDeployJson}
              echo
            '';
          };
        in {
          deployAll = mkDeployAction {
            description = "Deploy ALL groups of this service.";
            agents = lib.unique (lib.concatLists (lib.attrValues svc.nodes));
          };
        } // lib.mapAttrs' (group: agents: {
          name = "deployGroup-${group}";
          value = mkDeployAction {
            description = "Deploy the '${group}' group of this service.";
            inherit agents;
          };
        }) svc.nodes;
      }) cluster.config.services;
    };
  };
}

@@ -1,24 +0,0 @@
{ lib, depot }:

lib.evalModules {
  specialArgs = {
    inherit depot;
  };
  modules = [
    # Arbitrary variables to reference across multiple services
    ./lib/vars

    # Cluster-level port-magic
    ../modules/port-magic

    ./lib/services.nix
    ./lib/inject-nixos-config.nix
    ./lib/port-magic-multi.nix
    ./lib/mesh.nix
    ./lib/secrets.nix
    ./lib/testing.nix
    ./lib/lib.nix

    ./import-services.nix
  ];
}

@@ -1,9 +0,0 @@
{ lib, ... }:

let
  svcs' = builtins.readDir ./services;
  svcs = lib.filterAttrs (_: type: type == "directory") svcs';
  loadService = ent: import ./services/${ent};
in {
  imports = map loadService (builtins.attrNames svcs);
}

@@ -1,10 +0,0 @@
{ config, lib, ... }:
with lib;

{
  options.out = mkOption {
    description = "Output functions.";
    type = with types; lazyAttrsOf (functionTo raw);
    default = const [];
  };
}

@@ -1,12 +0,0 @@
{ config, lib, ... }:

{
  options.lib = {
    forService = lib.mkOption {
      description = "Enable these definitions for a particular service only.";
      type = lib.types.functionTo lib.types.raw;
      readOnly = true;
      default = service: lib.mkIf (!config.simulacrum || lib.any (s: s == service) config.testConfig.activeServices);
    };
  };
}

@@ -1,17 +0,0 @@
{ config, lib, ... }:

{
  hostLinks = lib.pipe config.services [
    (lib.filterAttrs (_: svc: svc.meshLinks != {}))
    (lib.mapAttrsToList (svcName: svc:
      lib.mapAttrsToList (groupName: links:
        lib.genAttrs svc.nodes.${groupName} (hostName: lib.mapAttrs (_: cfg: { ... }: {
          imports = [ cfg.link ];
          ipv4 = config.vars.mesh.${hostName}.meshIp;
        }) links)
      ) svc.meshLinks
    ))
    (map lib.mkMerge)
    lib.mkMerge
  ];
}

@@ -1,11 +0,0 @@
{ config, lib, ... }:

with lib;

{
  options.hostLinks = mkOption {
    type = types.attrsOf (types.attrsOf (types.submodule ../../modules/port-magic/link.nix));
    description = "Port Magic links, per host.";
    default = {};
  };
}

@@ -1,14 +0,0 @@
{ lib, ... }:

{
  options.secrets = {
    extraKeys = lib.mkOption {
      type = with lib.types; listOf str;
      description = "Additional keys with which to encrypt all secrets.";
      default = [
        "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIL5C7mC5S2gM0K6x0L/jNwAeQYbFSzs16Q73lONUlIkL max@TITAN"
        "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMmdWfmAs/0rno8zJlhBFMY2SumnHbTNdZUXJqxgd9ON max@jericho"
      ];
    };
  };
}

@@ -1,71 +0,0 @@
{ config, lib, name, ... }:
with lib;

let
  filterGroup = group: hostName: builtins.filter (x: x != hostName) group;
  serviceName = name;
in

{
  imports = [
    ./services/secrets.nix
  ];

  options = {
    nodes = mkOption {
      description = ''
        Groups of worker machines to run this service on.
        Allows for arbitrary multi-node constructs, such as:
        * 1 master, N workers
        * N masters, M workers
        * N nodes
        * 1 node
        * X evaluators, Y smallBuilders, Z bigBuilders
        etc.
      '';
      type = with types; lazyAttrsOf (oneOf [ str (listOf str) ]);
      default = [];
    };
    otherNodes = mkOption {
      description = "Other nodes in the group.";
      type = with types; lazyAttrsOf (functionTo (listOf str));
      default = [];
    };
    nixos = mkOption {
      description = "NixOS configurations per node group.";
      type = with types; attrs;
      default = {};
    };
    meshLinks = mkOption {
      description = "Create host links on the mesh network.";
      type = types.attrsOf (types.attrsOf (types.submodule {
        options = {
          link = mkOption {
            type = types.deferredModule;
            default = {};
          };
        };
      }));
      default = {};
    };
    simulacrum = {
      enable = mkEnableOption "testing this service in the Simulacrum";
      deps = mkOption {
        description = "Other services to include.";
        type = with types; listOf str;
        default = [];
      };
      settings = mkOption {
        description = "NixOS test configuration.";
        type = types.deferredModule;
        default = {};
      };
      augments = mkOption {
        description = "Cluster augments (will be propagated).";
        type = types.deferredModule;
        default = {};
      };
    };
  };
  config.otherNodes = builtins.mapAttrs (const filterGroup) config.nodes;
}

@@ -1,49 +0,0 @@
{ config, lib, ... }:
with lib;

let
  getHostConfigurations = hostName: svcName: svcConfig: let
    serviceConfigs =
      lib.mapAttrsToList (groupName: _: svcConfig.nixos.${groupName})
        (lib.filterAttrs (_: lib.elem hostName) svcConfig.nodes);

    secretsConfig = let
      secrets = lib.filterAttrs (_: secret: lib.any (node: node == hostName) secret.nodes) svcConfig.secrets;
    in {
      age.secrets = lib.mapAttrs' (secretName: secretConfig: {
        name = "cluster-${svcName}-${secretName}";
        value = {
          inherit (secretConfig) path mode owner group;
          file = ../secrets/${svcName}-${secretName}${lib.optionalString (!secretConfig.shared) "-${hostName}"}.age;
        };
      }) secrets;

      systemd.services = lib.mkMerge (lib.mapAttrsToList (secretName: secretConfig: lib.genAttrs secretConfig.services (systemdServiceName: {
        restartTriggers = [ "${../secrets/${svcName}-${secretName}${lib.optionalString (!secretConfig.shared) "-${hostName}"}.age}" ];
      })) secrets);
    };
  in serviceConfigs ++ [
    secretsConfig
  ];

  introspectionModule._module.args.cluster = {
    inherit (config) vars;
    inherit config;
  };
in

{
  options.services = mkOption {
    description = "Cluster services.";
    type = with types; attrsOf (submodule ./service-module.nix);
    default = {};
  };

  config.out = {
    injectNixosConfigForServices = services: hostName: (lib.flatten (lib.mapAttrsToList (getHostConfigurations hostName) (lib.getAttrs services config.services))) ++ [
      introspectionModule
    ];

    injectNixosConfig = config.out.injectNixosConfigForServices (lib.attrNames config.services);
  };
}

@@ -1,57 +0,0 @@
{ lib, name, ... }:

let
  serviceName = name;
in

{
  options.secrets = lib.mkOption {
    type = lib.types.lazyAttrsOf (lib.types.submodule ({ config, name, ... }: {
      options = {
        shared = lib.mkOption {
          type = lib.types.bool;
          default = true;
          description = "Whether this secret should be the same on all nodes.";
        };

        nodes = lib.mkOption {
          type = with lib.types; listOf str;
          default = [ ];
        };

        generate = lib.mkOption {
          type = with lib.types; nullOr (functionTo str);
          description = "Command used to generate this secret.";
          default = null;
        };

        path = lib.mkOption {
          type = lib.types.path;
          default = "/run/agenix/cluster-${serviceName}-${name}";
        };

        mode = lib.mkOption {
          type = lib.types.str;
          default = "0400";
        };

        owner = lib.mkOption {
          type = lib.types.str;
          default = "root";
        };

        group = lib.mkOption {
          type = lib.types.str;
          default = "root";
        };

        services = lib.mkOption {
          type = with lib.types; listOf str;
          description = "Services to restart when this secret changes.";
          default = [];
        };
      };
    }));
    default = {};
  };
}

@@ -1,15 +0,0 @@
{ lib, ... }:

{
  options = {
    simulacrum = lib.mkOption {
      description = "Whether we are in the Simulacrum.";
      type = lib.types.bool;
      default = false;
    };
    testConfig = lib.mkOption {
      type = lib.types.attrs;
      readOnly = true;
    };
  };
}

@@ -1,9 +0,0 @@
{ lib, ... }:
with lib;
{
  options.vars = mkOption {
    description = "Miscellaneous variables.";
    type = types.attrs;
    default = {};
  };
}

@@ -1,16 +0,0 @@
{ depot, lib, ... }:

{
  imports = [
    ./catalog
    ./simulacrum/checks.nix
  ];

  options.cluster = lib.mkOption {
    type = lib.types.raw;
  };

  config.cluster = import ./. {
    inherit depot lib;
  };
}
Binary file not shown.

@@ -1,13 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 d3WGuA ZLjCSe5wrN6abvvRmQjE+VXtRr+avP/CLPD7djXNr0M
g8i9ambJGL2Q+ZLB6c6MxV9BryAgX4qZctJ9qByJ4n8
-> ssh-ed25519 P/nEqQ zSGcZuufOAnTkPr74ZjwyISdLlfxBxqgmyWivxq1/Uo
gArusBfIfsZ5/gwMYHLzDHTbgVGWDttbi0IAhvclRO4
-> ssh-ed25519 YIaSKQ J4Fy0VSjdMPRgzysQptIUKiRR0TAgu0q1BYhtIpGkWU
kKzmF3OUbGU40d33R15nMraUDZiFRoz9Z00XjjSk9Jw
-> ssh-ed25519 NO562A BNQV8JodzTiNs/V+rFQxcsrhKJ3nRIFtWk6VxHzCRio
ZyauAdOrPbADSDdBQoB+39MB2r7Ro4d0XwZIjf2z9Jo
-> ssh-ed25519 5/zT0w hdMuyOmNKTlMKPn4w9VQFVXZkJNm1XSPAZ/Zip5WW04
wcnur+BRQPqKzpV3vl7pn1VIGRK3GxQEUaQIefrZuI4
--- 5AdxXgFmDm2w012QjpJ3gqlbfvkPm8fkEJjm8kV18G0
[binary ciphertext not shown]

@@ -1,11 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A GNlG6hVK8HfQKEWmMJwQ3qhmZOv9zSVWP6V7/5LVslc
8i6bXmEmU8T9lApB0avHZublAUZiT3wHxmM5CUYxMo4
-> ssh-ed25519 5/zT0w emP71+/eiA/GQ7EUekXlcXdQpL3yNVT3llw5hGNerXI
gQ9QYqo3/V7AnQjK1MYOclsVX0B2Yg8QLqs5tTaYBFY
-> ssh-ed25519 d3WGuA I2JHyhEO3xb9rniTY10FTujaWRDLAtChR7SQzbSw3nU
AsNx/YxGHOTuon/ZEyu+s9zJ+OmELXFwcnRyu/XLlp4
-> c[ehZ89-grease "^$r q6K1MR <4 '!b
L/iRQ+g
--- rH4ZWJU4EIRFC4ffXvBbnYS7Y/khTCu2Bu1SJHrOhcs
[binary ciphertext not shown]

@@ -1,11 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A ZP3yjzSbThB4TLzZTQ/numJtFThdJWSe/Q9BMgM1bVQ
6OuYa43hZFSWdBISWTFkWNQGhmBbCdJgu6anJA3Zqh0
-> ssh-ed25519 5/zT0w YmDcssGTmOawG7ajGWaprSO2wAiYJeTv4MPTmtRIvh0
CJungpLxidWgJTe3vyMpryBpnIGotKCuC1KUlQhhYRs
-> ssh-ed25519 TCgorQ sVuFjKWVxsFbmzn+jyiW8psOzTneUQGmCZbzJ7/XLRg
1vRKXRWxsQ3BceWYbqxerbFz4IO5U0sF93G6dLGjzgk
-> 1W#:-grease lN~;YPE^ YAa8 7s BEq(."'
HKosiz64wAOAc4QckSNsMC6i4Bq6uxTBuPttJoaUOrJ9sWAL4K8aY8s
--- ucYe/fF2tsm2+9HmTOnFLSt6VN3F+gNkXcxYDMWn1bY
[binary ciphertext not shown]
Binary file not shown.

@@ -1,12 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A ejtj6uy0U2VGcZ/TL5Izny/xZ2UyqajHJnPoelogenw
JRcAb/P/QxYbVFwmnlqqnEQHOkMTepjgFHEmr7CyCYk
-> ssh-ed25519 5/zT0w XyKUhRUuw3jVxEfImymDRv6Yds8IP885AGk1hRdZ4Rc
5IrW0varzs44P+25vQe1+88oVNyXTnmzpytO8f2hX4s
-> ssh-ed25519 YIaSKQ NKvJ4j+UQk6vdddf5YuGlIxlZPiUY1JdD611RQD2vlQ
kkGF2wR5hoOpWM34/48I9EAM0kMI+VZSfOcal5ikGKY
-> q9os-grease #6 _f|$T F4q*O ",M\
sQRm7N0k+xtMD0a/lg7bif11LYTmo72t/+a3OfIwsXKUInz7Mij21ZMhkBS9NEpg
ep7ywonuBh0Sb5ro2FNmcw9tm2p+qQ0/lLeDHCDBsi9rEcC0RD0uxHEJQbykxQ
--- W71uGICSIj89KLvZDEVB02LtjNOa6vM9sEfUAk2VltI
[binary ciphertext not shown]
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.

@@ -1,9 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A jNUNRaIQC1DUBiacnWc3xjMUAxnAgiyJhRA74cof3Ec
oZZq1AQ3F0nvrk7KpinLWgT3cIoCYZ5R1s0us69OI8E
-> ssh-ed25519 5/zT0w FmoxaTg75/xaDmSOHL5Xs6QOr5rLG/sr5TmPMfkOqxw
XXQrFxxt5GOzXgJoPY8U71NSYi/IWmL3QrenvOuQ43Q
-> ssh-ed25519 YIaSKQ ++dqG+dr8ie+4sPW7L+eVkXvOVvM+/oBR722S2sQsSg
879pmnhOtZ/MiMUwDlyujykQXNmCepI2FSU2QcvvkrA
--- QcvlVdv2fYMKmT/aCpTjdmGJ+9KnUvZCZNtl7WhgCbw
[binary ciphertext not shown]
Binary file not shown.

@@ -1,11 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A eB0Rkoz721eI1UlyAhHWIrBnTEFoh6z3UL24EljaNzA
dNsoal+y68XM4HXRyg1PUmrWilW1n3h78TmTcqHFEjc
-> ssh-ed25519 5/zT0w SF16JelBZe0vZtzNEHiEfprJOqzoyxhTH3ldQdbo5wE
95wJNWQEGqHj4Pknnk1RrgWPOqZOhlNsSvFTv8rfc08
-> ssh-ed25519 YIaSKQ 68vS4sQGTDEaTVVxfs/xeTv379MQ3JE7iyLb1PbUuis
1Bh53X0QFednXw74lQ+FbqNDkLBra9rx6nOybcD3FiQ
--- HIcPirpTTtlUUGEemDXND/nwiWs4BEhM4rYX18mx71E
[binary ciphertext not shown]
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.

@@ -1,12 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A 2Su0u03W90TKuR0wg1/dcokIbTzO5eATBmkFPyqfJG0
IhBAWy5YYFFOqG9hc+AkVrKewTls84SFV9Kz/lOTV2U
-> ssh-ed25519 5/zT0w YsyFCW1FsiGwiYJNYCITlLWk6Y5dR3K5v+gJqlsWQTg
vtR1GCT2zrHNco/yPvMqQmlPyDja53lSRsO1DmnCSlo
-> ssh-ed25519 P/nEqQ c8l4fOuvZn9V8+6vpRpGNGldEi4iA+5qVg1B+jArU1w
zgS0urO8MZYo8cZq5Nz/R1x9cZ0vZgppJx6X5UecJ0s
-> ?^lS,zDo-grease ^ZMN! V*+oK^9 GyJ[
ZATLlHQ+kFjStI2ykQXq+KhvAR+XeW+POj6cJ59awzpMwq8JGbyaE1m5Cq8XA6u3
xFE6
--- 3JfCfv5CJYKGuvnbvoymhHhfkM99NkYMdGOL3Xicga8
[binary ciphertext not shown]
Binary file not shown.

@@ -1,13 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A a2L3S1TjwcnIzL1HIrpwJSOE8Eg6hpgqWsFdXM0F7D8
+1ZQR5AVTdyc3G47yBc17qCHVfHAChc8xgo5/dPiCR0
-> ssh-ed25519 5/zT0w VGbJ45NOODZu+z0ZoKibiTcKmHn1kSl9eE2Hl+nC2jA
U8L3eqGtIqM+QLFUKuARoQ527BGCy3Unba4bAmspnRA
-> ssh-ed25519 ygudAg 3arSWho21Yv0hZlO1xAdtWlqY/ZS4mYzbRzYicd061A
gvMaXTeJ4zit0WxwMhX2nBCGQF4lJ7MxM9RBOEqSqA4
-> }Y-grease
MmJNND/ycD5UGgt80hcIhay+fUQKmI3thTj6u6rx3KrPMxsW6tDTd/sYJYmZBy4k
oOuaS+ZeG3pM4Qy9tAFkKh4q2gbwx1mNbysy3GeQIQ
--- 1k5+oNWHtRqQC5kr04aCxaKglweG5Nc9aKkXB1JnQfg
[binary ciphertext not shown]

@@ -1,12 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A 9Os91rQ4j/7/AyLMi2bngHI6aEln1Ij1rJh63xPjeQA
cpmJRRIL+j9wHYbNSLzbXmpnZAc40+Og1vcWGyJMUkM
-> ssh-ed25519 5/zT0w vajc7L8iJoodwX4oIgYyY/TAd0TWUNL2wl6wMyeNLi4
QMe/bKmjUypzQHDdxoTkA/HDZypF+hByf99bahE73EU
-> ssh-ed25519 FfIUuQ 7pwwH1jSFSNayCLUk8lir1UKOyunozrXHDA4vYqLQjo
LsMeAhUGlZCNipaECYWE2oHPku8otsAFHV9GWIrtOg0
-> s*r|b-grease Yu M>1\\ M!frVhk%
jub17NjQWtGOyIFnF5na4ize1ifOjv6Nv6aqAa+ZJQHREUjPr2D7Rd2Fi6oyIRFo
xWV0WDab7iWL
--- n432BjqdbuNkeP9eW0TDEUyho88/RRdZ9TUKcWlVsok
[binary ciphertext not shown]

@@ -1,15 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A nJxCLQ1WNjjDuk+x1SQK6QLxru9EHNnYC9BcTPbnejs
FWW5PAcc5kV5H6ImN2tEKZs6wtDVF3Gb8aQdRBU5XPA
-> ssh-ed25519 5/zT0w dBOD8ygBCyuH9+Cvg/GN+gCoQMP0StROLaWv7qPOeHc
/Omz/8G4H6/wdb8TZOuI7u8fkwvMmKMp0EAsHxayCs0
-> ssh-ed25519 d3WGuA t1kgNXHn1VYgbRC3oNDlKDlhlIbTsiS3mBWK9fjTO2g
ryak3Nz8t+tJhhx4N3VS9sDV+dpijG0fgiJQOMiTgQc
-> ssh-ed25519 YIaSKQ baRoE08c1ncfJOqvdo3KlJCyQcf2tlIl0gdCXqeHARg
0sW4yu00A2uLxc3QWNVmlzoxV+6YuouIjkniIVvf/KI
-> /Zshlxv%-grease +I-jlP0> *AP?2Ie l-,[cd}
8E4RtrINNlnRc3nma3XFC22jfL2L571O4YRgSFQ125jNj9K3RwI1CTag+4Vpkoyn
vx7pyk6csFlgpAfi6HjZQhy6SaLDLuivgg+5l7movEQQVFphKvZGXb1b3OXBPnw7

--- joBBM2uJbY81OgACM4fzeivBoYa+or+txZczR7FHQ28
[binary ciphertext not shown]
Binary file not shown.

@@ -1,15 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A QOWutzolRXJOngLYzsJTCdLb05XK5bxnIePA12iM/wM
Fu9Q2pedDHGx5iIaZX31GMZmLlZlzfub64clEFHeJjA
-> ssh-ed25519 5/zT0w WzQIh2tozhSF1p6QRVtMTz//wozc3SFRmmqv+GPPj0Y
Ddf1we/UO19IstJe8XL7IU42ZjYCHbxdpwUSsBuNajs
-> ssh-ed25519 d3WGuA 7qNzTg58PuEIKTXTCDuFwXUgyetdqReJq3MDOVynHRE
cEevu4v+p1e6UNNSn/H1t56ut0xfM5Yok79OnGNtdHU
-> ssh-ed25519 YIaSKQ YilDeBJ/ovJgeCbVANV5WIpYL3M17ktKH7Y/ziGcAAg
gKrDxm4uNeR7BCbdepxdFYEL61gd2v1HYfiQ1mT02vY
-> E~-grease 0;
eqHwlFqZ/7Acq6anIU6kFg+rNZnL7el2YIU2DrCugZhKGBbxzqQMjRsq0A0F1z19
C903hOEFwD7rGuo
--- LyTH8wHncE/blz5X1Pi4A9r8iTmc/FXqofQ5JMNZFlg
[binary ciphertext not shown]

@@ -1,11 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A rUwm3B9bXVr9yQEVb+0T8TESFX3TtQ/36jzACQ/wPjs
Z9uNi+t0/0uxQcRrmESjw5y442+YXYTineRJCyeP+Cc
-> ssh-ed25519 5/zT0w EqEi9yGubbrohSMbXho2g+Bfs1wlLQ5r3jNmeDHEhzQ
pUMrCW/pktQ2e2hrGlaMRMCCzLEQ0StArhZNjoqiJUs
-> ssh-ed25519 d3WGuA P5gHDU9MHDe88QmIEX1xLqw07QB0rMtHMThxqCd2IHw
TePeD3eny5ptgor08ORKVslB4LOX5ITz1ebssB1F2bw
-> X-grease d c4UL V1
y+2gDQ
--- oEZLM3hETNqGb7gl5COcl8NzEL4029rFRVZWtZ1IjWI
[binary ciphertext not shown]
Binary file not shown.

@@ -1,15 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A o0R34LvRy19TseKFBi6iJVJSpuPWamIlL1UnX95+yVU
9yjfDbf7J9q/L2Z8OkFlOcniYNfO9YJBdtNkLyQAzF4
-> ssh-ed25519 5/zT0w AqcfbKIO1vE0TjkDvZOkCcMeRCz5ATfQZyoKecoDWQE
beYLRlS/ZzteQ1MNhyGuIenuEHSRqkzYJRasomThBLU
-> ssh-ed25519 FfIUuQ 9JeHQPQgOYSzA2cjR6jwisZYPRRYGQMSyOW49LVEo30
TAd1otmjEo1CvOVX3gZe2rk6Rk/IEjF2DllpQ9+o6ak
-> ssh-ed25519 d3WGuA 1RNgW2d+Nh66Li4hAsP7/3ATZsqHWL+9pwNqPej1ykw
tN6e8kBNz4tknvWBrVoQ6nABbeglac6AVUlz32ZFMzA
-> ssh-ed25519 P/nEqQ oHqCnNvCWIprBbVy0w2LdHBaJllbNwD+WbdAgIFrhww
6Dgnv/HyYaFzAiRUySqYVjBPScLMK8M9Roo8UCqMGEM
-> 4Vf|uj93-grease x5,kZ; -xgrjD8
6Gw1SIrH9N0GVksuPQm46tD/7Ckn6vkG5Z9CDhu4hj4YO1X8ug
--- eo6VHBS0rJXNXA4MFGBtVfJRQ7hNNJ7PMeMjvE1Hms8
[binary ciphertext not shown]
Binary file not shown.
Binary file not shown.
Binary file not shown.

@@ -1,11 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A K4GQQWJwXbpc0RCIj7+l6YgmXFNOHRrtIrtuCwEd9FQ
9ZKAzhqdmjZ6u/nmDdD1lm7sn+C4orLDYh667twLFrA
-> ssh-ed25519 5/zT0w Sbt0FKgTtCbAXTPfJzuXV1Erm88W5s+lm1fzzWq/G0M
Dl8xl8DProREk/wcpabRaYwIcM2kQBrE3mM8MD453w8
-> ssh-ed25519 d3WGuA QLXbvtQSKYWpQsGISyr7XY6ZrabXN75jAHSorfg4HDg
3QZkuHKBEETwrcZVIzn8hOh9r1PCmRUQmMh9xfm+NrY
-> |(-grease Y}fl\6J<
+IF+TRTiuAuxUwWfA5qPumSSp4bnokwwNECqYVNDWVdiuw0/
--- stUqfmRdJG1YQAdEVaZJvM9IfnVShk/f5RQwdmUNkFI
[binary ciphertext not shown]

@@ -1,12 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A rE85lK37XeM803mXkugmTjfAp3LNqKy2yuGGbY4IOAM
nDielwqyuaW72OKiUBgFPWK45aZhh768+MskQ5+vhUs
-> ssh-ed25519 5/zT0w QxXHVLpk2qeXjO8c3a0cQ1oKk3fUn9+yIoHAK1hLYgQ
d4s/F2ck8Z4AsCQReghxj+M0JjBYKoMpfU+K21AzwFg
-> ssh-ed25519 TCgorQ lqg5aPJuj5NPEAgAaw52lwpQ++eWPxO4BITdpLKoZFg
KS0kRB2K/+/+U2xfr2VE09XdjVvIflTweU93Vy7Okr8
-> ?).-grease =%LA 5cVQvduw
gs9TPdbaRJVf50LDiUdlg7Vr4LUfg2Kj2bPAbN2f2z4LKDnSbWHkJ6B3EfOMDxTN
KmX8mGCi7QBGOfb1EY3h5cDgteBXiLN4aLh6kpCe0F3/DQ
--- vLjmBMfCrvOuF1ww5UcHQAmBUo0LgIuJKcNEDlOCZ3g
[binary ciphertext not shown]

@@ -1,12 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A BNIU8M5X5C4LSiie6S4zVraFQAsyGKAv7BwLVIXHiFM
LLcXZ7tiTUnN+tJLwqqs1hLZ8usCDWqNVGr1lAn5OQs
-> ssh-ed25519 5/zT0w H/SGf0oYVg/JCd07bicWL1LWQwExr0gbi+gV1j7Fy2M
yHjguPtS8ItpY+pAR3lLVpXQxq7d3cuQYU5DHs2qjMc
-> ssh-ed25519 P/nEqQ z1us0mTbOuLrkI7n6doG+JVFAuqwZvC0dEfdGauM+Fg
P/tKnt5gZ66HAWR0/pqpmJMHp6hLbcjwE3BhO9NCkZY
-> ((I-grease
r66LwGiqumMp/NlcnLgOaxZ7cfQMBCr4Rq9aJdjUck69113hNf4orC/bGVCDhmdu
s1cSHPVw1hys
--- FxWSO98U5IDaGPs57hzO70gVN/ELN0/UxKKmIoxadks
[binary ciphertext not shown]

@@ -1,12 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A pt5GwvS/t0gmDzyqNAgGa0bbg1fUUGCGtGOh8+kFYiM
6t53t5IArPIMI5bZFc83S1OzBUb5HJ2BnYBoIks6tdU
-> ssh-ed25519 5/zT0w iudKEK9eZVrY2cMJLExyL+hxX0X9ObK6Qru6hEkoCWY
6ro91JnfQta7GhmCmPGlKK7AH/cHTHJWpWjvzf7DqJc
-> ssh-ed25519 YIaSKQ XBhAf0RK8EFGSZU1alUn9ySRNa4fBN3rDWYKChUhRSc
j52b16yy/LJIsBmwYF1Xc0yl71kx3fPswUdM6UrRDWg
-> y)UX@-grease
Wxs/GGbg7NiGP3KBWqBpunBdaJATNV2kITZ/Qcwx43NIlN//tV/C4721brrspcQ2
UW14hhkN0mOn6xach2EnT71+vIPD3V/nc4bF
--- bi81AZsfd3VWHugOdOcCf4HwjRpy5GeA8eSNZG+xelY
[binary ciphertext not shown]

@@ -1,12 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A rdUnhWrA+Y4kORXQj3EXN2g0ocT+fCgWrWUzng4H2h0
GpDWguEAJbruVKLsyg1UrajayaBTRqV4keuXOlUN4Dw
-> ssh-ed25519 5/zT0w XhGp6wmN3PiZtYa7z5aTbTOesYsJ0ldJ9FAydDOBHjE
KJKLJs5W9eVgyIxWvd7PGTCKF6+GKhfjCf/sS9oghKY
-> ssh-ed25519 FfIUuQ H1Y6kiQrhvhXMFiZ5S1aKFV/squ7NaqduuaCk3T9dms
8zn45DGkEH+vtkCjsnOlxeiZ+cEW/71bCYyj449axW0
-> ZxL9h,-grease h7Z
1EWFRzEk6ikC2LnZLuB4Z3n69SHr/AoxBZHjsha2K1DYKJspfb+NCrNVkC1A9F0t
SllAo58gJsWkKdpyoKaE2nrV3SXoREMlcQvq/Z2X9WpR+A
--- IxR5wj2vdqY/Tcsur39cFEICvfxmo2OrcdQFR4LLJRw
[binary ciphertext not shown]

@@ -1,60 +0,0 @@
{ config, pkgs, ... }:

let
  lift = config;
in

{
  nowhere.names = {
    "acme-v02.api.letsencrypt.org" = "stepCa";
    "api.buypass.com" = "stepCa";
  };

  nodes.nowhere = { config, ... }: {
    links.stepCa.protocol = "https";

    environment.etc.step-ca-password.text = "";

    services = {
      step-ca = {
        enable = true;
        address = config.links.stepCa.ipv4;
        inherit (config.links.stepCa) port;
        intermediatePasswordFile = "/etc/step-ca-password";
        settings = {
          root = "${lift.nowhere.certs.ca}/ca.pem";
          crt = "${lift.nowhere.certs.intermediate}/cert.pem";
          key = "${lift.nowhere.certs.intermediate}/cert-key.pem";
          address = config.links.stepCa.tuple;
          db = {
            type = "badgerv2";
            dataSource = "/var/lib/step-ca/db";
          };
          authority.provisioners = [
            {
              type = "ACME";
              name = "snakeoil";
              challenges = [
                "dns-01"
                "http-01"
              ];
            }
          ];
        };
      };

      nginx.virtualHosts = {
        "acme-v02.api.letsencrypt.org".locations."/".extraConfig = ''
          rewrite /directory /acme/snakeoil/directory break;
        '';
        "api.buypass.com".locations."/".extraConfig = ''
          rewrite /acme/directory /acme/snakeoil/directory break;
        '';
      };
    };
  };

  defaults.environment.etc."dummy-secrets/acmeDnsApiKey".text = "ACME_DNS_DIRECT_STATIC_KEY=simulacrum";
  defaults.environment.etc."dummy-secrets/acmeDnsDirectKey".text = "ACME_DNS_DIRECT_STATIC_KEY=simulacrum";
  defaults.environment.etc."dummy-secrets/acmeDnsDbCredentials".text = "PGPASSWORD=simulacrum";
}

@@ -1,82 +0,0 @@
{ cluster, config, depot, lib, pkgs, ... }:

let
  authoritativeServers = map
    (node: cluster.config.hostLinks.${node}.dnsAuthoritative.tuple)
    cluster.config.services.dns.nodes.authoritative;

  execScript = pkgs.writeShellScript "acme-dns-exec" ''
    action="$1"
    subdomain="''${2%.${depot.lib.meta.domain}.}"
    key="$3"
    umask 77
    source "$EXEC_ENV_FILE"
    headersFile="$(mktemp)"
    echo "X-Direct-Key: $ACME_DNS_DIRECT_STATIC_KEY" > "$headersFile"
    case "$action" in
      present)
        for i in {1..5}; do
          ${pkgs.curl}/bin/curl -X POST -s -f -H "@$headersFile" \
            "${cluster.config.links.acmeDnsApi.url}/update" \
            --data '{"subdomain":"'"$subdomain"'","txt":"'"$key"'"}' && break
          sleep 5
        done
        ;;
    esac
  '';
in

{
  age.secrets.acmeDnsApiKey = {
    file = ../dns/acme-dns-direct-key.age;
    owner = "acme";
  };

  security.acme.acceptTerms = true;
  security.acme.maxConcurrentRenewals = 0;
  security.acme.defaults = {
    email = depot.lib.meta.adminEmail;
    extraLegoFlags = lib.flatten [
      (map (x: [ "--dns.resolvers" x ]) authoritativeServers)
      "--dns-timeout" "30"
    ];
    credentialsFile = pkgs.writeText "acme-exec-config" ''
      EXEC_PATH=${execScript}
      EXEC_ENV_FILE=${config.age.secrets.acmeDnsApiKey.path}
    '';
  };

  systemd.services = lib.mapAttrs' (name: value: {
    name = "acme-${name}";
    value = {
      distributed.enable = value.dnsProvider != null;
      preStart = let
        serverList = lib.pipe authoritativeServers [
          (map (x: "@${x}"))
          (map (lib.replaceStrings [":53"] [""]))
          lib.escapeShellArgs
        ];
        domainList = lib.pipe ([ value.domain ] ++ value.extraDomainNames) [
          (map (x: "${x}."))
          (map (lib.replaceStrings ["*"] ["x"]))
          lib.unique
          lib.escapeShellArgs
        ];
      in ''
        echo Testing availability of authoritative DNS servers
        for i in {1..60}; do
          ${pkgs.dig}/bin/dig +short ${serverList} ${domainList} >/dev/null && break
          echo Retry [$i/60]
          sleep 10
        done
        echo Available
      '';
      serviceConfig = {
        Restart = "on-failure";
        RestartMaxDelaySec = 30;
        RestartSteps = 5;
        RestartMode = "direct";
      };
    };
  }) config.security.acme.certs;
}

@@ -1,7 +0,0 @@
{
  services.acme-client = {
    nodes.client = [ "checkmate" "grail" "thunderskin" "VEGAS" "prophet" ];
    nixos.client = ./client.nix;
    simulacrum.augments = ./augment.nix;
  };
}

@@ -1,90 +0,0 @@
{ config, cluster, depot, lib, ... }:
with depot.lib.nginx;
{
  links = {
    atticNixStoreInternalRedirect.protocol = "http";
    garageNixStoreInternalRedirect.protocol = "http";
  };

  security.acme.certs."cache.${depot.lib.meta.domain}" = {
    dnsProvider = "exec";
    webroot = lib.mkForce null;
  };

  services.nginx.upstreams = {
    nar-serve.extraConfig = ''
      random;
      server ${config.links.nar-serve-self.tuple} fail_timeout=0;
      server ${config.links.nar-serve-nixos-org.tuple} fail_timeout=0;
    '';
    nix-store.servers = {
      "${config.links.garageNixStoreInternalRedirect.tuple}" = {
        fail_timeout = 0;
      };
      "${config.links.atticNixStoreInternalRedirect.tuple}" = {
        fail_timeout = 0;
      };
    };
  };
  services.nginx.appendHttpConfig = ''
    proxy_cache_path /var/cache/nginx/nixstore levels=1:2 keys_zone=nixstore:10m max_size=10g inactive=24h use_temp_path=off;
  '';
  services.nginx.virtualHosts = {
    "cache.${depot.lib.meta.domain}" = vhosts.basic // {
      locations = {
        "= /".return = "302 /404";
        "/" = {
          proxyPass = "http://nix-store";
          extraConfig = ''
            proxy_next_upstream error http_500 http_502 http_404;
          '';
        };
        "/nix/store" = {
          proxyPass = "http://nar-serve";
          extraConfig = ''
            proxy_next_upstream error http_500 http_404;
          '';
        };
      };
      extraConfig = ''
        proxy_cache nixstore;
        proxy_cache_use_stale error timeout http_500 http_502;
        proxy_cache_lock on;
        proxy_cache_key $request_uri;
        proxy_cache_valid 200 24h;
      '';
    };
    "garage-nix-store.internal.${depot.lib.meta.domain}" = {
      serverName = "127.0.0.1";
      listen = [
        {
          addr = "127.0.0.1";
          inherit (config.links.garageNixStoreInternalRedirect) port;
        }
      ];
      locations."/" = {
        proxyPass = with cluster.config.links.garageWeb; "${protocol}://nix-store.${hostname}";
        recommendedProxySettings = false;
        extraConfig = ''
          proxy_set_header Host "nix-store.${cluster.config.links.garageWeb.hostname}";
        '';
      };
    };
    "attic-nix-store.internal.${depot.lib.meta.domain}" = {
      serverName = "127.0.0.1";
      listen = [
        {
          addr = "127.0.0.1";
          inherit (config.links.atticNixStoreInternalRedirect) port;
        }
      ];
      locations."/" = {
        proxyPass = "https://cache-api.${depot.lib.meta.domain}/nix-store$request_uri";
        recommendedProxySettings = false;
        extraConfig = ''
          proxy_set_header Host "cache-api.${depot.lib.meta.domain}";
        '';
      };
    };
  };
}

@@ -1,60 +0,0 @@
{ config, depot, ... }:

{
  services.attic = {
    nodes = {
      monolith = [ "VEGAS" "prophet" ];
      server = [ "VEGAS" "grail" "prophet" ];
    };
    nixos = {
      monolith = [
        ./server.nix
      ];
      server = [
        ./server.nix
        ./binary-cache.nix
        ./nar-serve.nix
      ];
    };
    meshLinks.server.attic.link.protocol = "http";
    secrets = let
      inherit (config.services.attic) nodes;
    in {
      serverToken = {
        nodes = nodes.server;
      };
      dbCredentials = {
        nodes = nodes.server;
        owner = "atticd";
      };
    };
  };

  garage = config.lib.forService "attic" {
    keys.attic.locksmith = {
      nodes = config.services.attic.nodes.server;
      owner = "atticd";
      format = "aws";
    };
    buckets.attic = {
      allow.attic = [ "read" "write" ];
    };
  };

  dns.records = let
    serverAddrs = map
      (node: depot.hours.${node}.interfaces.primary.addrPublic)
      config.services.attic.nodes.server;
  in config.lib.forService "attic" {
    cache.target = serverAddrs;
  };

  ways = config.lib.forService "attic" {
    cache-api = {
      consulService = "atticd";
      extras.extraConfig = ''
        client_max_body_size 4G;
      '';
    };
  };
}

@@ -1,93 +0,0 @@
{ cluster, config, depot, lib, ... }:

let
  inherit (cluster.config.services.attic) secrets;

  link = cluster.config.hostLinks.${config.networking.hostName}.attic;

  isMonolith = lib.elem config.networking.hostName cluster.config.services.attic.nodes.monolith;
in

{
  services.locksmith.waitForSecrets.atticd = [ "garage-attic" ];

  services.atticd = {
    enable = true;
    package = depot.inputs.attic.packages.attic-server;

    environmentFile = secrets.serverToken.path;
    mode = if isMonolith then "monolithic" else "api-server";

    settings = {
      listen = link.tuple;

      chunking = {
        nar-size-threshold = 0;
        min-size = 0;
        avg-size = 0;
        max-size = 0;
      };

      compression.type = "none";

      database.url = "postgresql://attic@${cluster.config.links.patroni-pg-access.tuple}/attic";

      storage = {
        type = "s3";
        region = "us-east-1";
        endpoint = cluster.config.links.garageS3.url;
        bucket = "attic";
      };

      garbage-collection = {
        interval = "2 weeks";
        default-retention-period = "3 months";
      };
    };
  };

  users = {
    users.atticd = {
      isSystemUser = true;
      group = "atticd";
      home = "/var/lib/atticd";
      createHome = true;
    };
    groups.atticd = {};
  };

  systemd.services.atticd = {
    after = [ "postgresql.service" ];
    distributed = lib.mkIf isMonolith {
      enable = true;
      registerService = "atticd";
    };
    serviceConfig = {
      DynamicUser = lib.mkForce false;
      RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" "AF_NETLINK" ];
      SystemCallFilter = lib.mkAfter [ "@resources" ];
    };
    environment = {
      AWS_SHARED_CREDENTIALS_FILE = "/run/locksmith/garage-attic";
      PGPASSFILE = secrets.dbCredentials.path;
    };
  };

  consul.services.atticd = {
    mode = if isMonolith then "manual" else "direct";
    definition = {
      name = "atticd";
      id = "atticd-${config.services.atticd.mode}";
      address = link.ipv4;
      inherit (link) port;
      checks = [
        {
          name = "Attic Server";
          id = "service:atticd:backend";
          interval = "5s";
          http = link.url;
        }
      ];
    };
  };
}

@@ -1,10 +0,0 @@
{ depot, ... }:

{
  services.bitwarden = {
    nodes.host = [ "VEGAS" ];
    nixos.host = ./host.nix;
  };

  dns.records.keychain.target = [ depot.hours.VEGAS.interfaces.primary.addrPublic ];
}

@@ -1,9 +0,0 @@
{ cluster, depot, ... }:

{
  services.cachix-agent = {
    enable = true;
    credentialsFile = cluster.config.services.cachix-deploy-agent.secrets.token.path;
    package = depot.packages.cachix;
  };
}

@@ -1,10 +0,0 @@
{
  services.cachix-deploy-agent = { config, ... }: {
    nodes.agent = [ "checkmate" "grail" "prophet" "VEGAS" "thunderskin" ];
    nixos.agent = ./agent.nix;
    secrets.token = {
      nodes = config.nodes.agent;
      shared = false;
    };
  };
}

@@ -1,12 +0,0 @@
{ depot, ... }:

{
  dns.records = let
    cdnShieldAddr = [ depot.hours.VEGAS.interfaces.primary.addrPublic ];
  in {
    "fonts-googleapis-com.cdn-shield".target = cdnShieldAddr;
    "fonts-gstatic-com.cdn-shield".target = cdnShieldAddr;
    "cdnjs-cloudflare-com.cdn-shield".target = cdnShieldAddr;
    "wttr-in.cdn-shield".target = cdnShieldAddr;
  };
}

@@ -1,12 +0,0 @@
{
  services.certificates = {
    nodes = {
      internal-wildcard = [ "checkmate" "grail" "thunderskin" "VEGAS" "prophet" ];
    };
    nixos = {
      internal-wildcard = [
        ./internal-wildcard.nix
      ];
    };
  };
}

@@ -1,23 +0,0 @@
{ config, lib, pkgs, depot, ... }:

let
  inherit (depot.lib.meta) domain;

  extraGroups = [ "nginx" ]
    ++ lib.optional config.services.kanidm.enableServer "kanidm";
in

{
  security.acme.certs."internal.${domain}" = {
    domain = "*.internal.${domain}";
    extraDomainNames = [ "*.internal.${domain}" ];
    dnsProvider = "exec";
    group = "nginx";
    postRun = ''
      ${pkgs.acl}/bin/setfacl -Rb .
      ${lib.concatStringsSep "\n" (
        map (group: "${pkgs.acl}/bin/setfacl -Rm g:${group}:rX .") extraGroups
      )}
    '';
  };
}

@@ -1,11 +0,0 @@
{ config, ... }:

{
  services.chant = {
    nodes.listener = config.services.consul.nodes.agent;
    nixos.listener = [
      ./listener.nix
    ];
    simulacrum.deps = [ "consul" ];
  };
}

@@ -1,82 +0,0 @@
{ config, lib, pkgs, ... }:

let
  consul = config.links.consulAgent;

  validTargets = lib.pipe config.systemd.services [
    (lib.filterAttrs (name: value: value.chant.enable))
    lib.attrNames
  ];

  validTargetsJson = pkgs.writeText "chant-targets.json" (builtins.toJSON validTargets);

  eventHandler = pkgs.writers.writePython3 "chant-listener-event-handler" {
    flakeIgnore = [ "E501" ];
  } ''
    import json
    import sys
    import os
    import subprocess
    import base64

    validTargets = set()
    with open("${validTargetsJson}", "r") as f:
        validTargets = set(json.load(f))

    events = json.load(sys.stdin)

    cacheDir = os.getenv("CACHE_DIRECTORY", "/var/cache/chant")

    indexFile = f"{cacheDir}/index"

    oldIndex = "old-index"
    if os.path.isfile(indexFile):
        with open(indexFile, "r") as f:
            oldIndex = f.readline()

    newIndex = os.getenv("CONSUL_INDEX", "no-index")

    if oldIndex != newIndex:
        triggers = set()
        for event in events:
            if event["Name"].startswith("chant:"):
                target = event["Name"].removeprefix("chant:")
                if target not in validTargets:
                    print(f"Skipping invalid target: {target}")
                    continue
                with open(f"/run/chant/{target}", "wb") as f:
                    if event["Payload"] is not None:
                        f.write(base64.b64decode(event["Payload"]))
                triggers.add(target)

        for trigger in triggers:
            subprocess.run(["${config.systemd.package}/bin/systemctl", "start", f"{trigger}.service"])

        with open(indexFile, "w") as f:
            f.write(newIndex)
  '';
in
{
  systemd.services.chant-listener = {
    description = "Chant Listener";
    wantedBy = [ "multi-user.target" ];
    requires = [ "consul-ready.service" ];
    after = [ "consul-ready.service" ];
    serviceConfig = {
      ExecStart = "${config.services.consul.package}/bin/consul watch --type=event ${eventHandler}";

      RuntimeDirectory = "chant";
      RuntimeDirectoryMode = "0700";
      CacheDirectory = "chant";
      CacheDirectoryMode = "0700";

      RestartSec = 60;
      Restart = "always";
      IPAddressDeny = [ "any" ];
      IPAddressAllow = [ consul.ipv4 ];
    };
    environment = {
      CONSUL_HTTP_ADDR = consul.tuple;
    };
  };
}

@@ -1,39 +0,0 @@
{ config, cluster, depot, ... }:

let
  inherit (depot.lib.meta) domain;
  inherit (config.networking) hostName;
  inherit (cluster.config) hostLinks;
  cfg = cluster.config.services.consul;

  hl = hostLinks.${hostName}.consul;
in

{
  links.consulAgent.protocol = "http";

  services.consul = {
    enable = true;
    webUi = true;
    package = depot.packages.consul;
    extraConfig = {
      datacenter = "eu-central";
      domain = "sd-magic.${domain}.";
      recursors = [ "127.0.0.1" cluster.config.links.dnsResolver.ipv4 ];
      server = true;
      node_name = config.networking.hostName;
      bind_addr = hl.ipv4;
      ports.serf_lan = hl.port;
      retry_join = map (hostName: hostLinks.${hostName}.consul.tuple) (cfg.otherNodes.agent hostName);
      bootstrap_expect = builtins.length cfg.nodes.agent;
      addresses.http = config.links.consulAgent.ipv4;
      ports.http = config.links.consulAgent.port;
    };
  };

  services.grafana-agent.settings.integrations.consul_exporter = {
    enabled = true;
    instance = hostName;
    server = config.links.consulAgent.url;
  };
}

@@ -1,33 +0,0 @@
{ config, lib, ... }:

let
  cfg = config.services.consul;
in

{
  hostLinks = lib.genAttrs cfg.nodes.agent (hostName: {
    consul = {
      ipv4 = config.vars.mesh.${hostName}.meshIp;
    };
  });
  services.consul = {
    nodes = {
      agent = [ "checkmate" "grail" "thunderskin" "VEGAS" "prophet" ];
      ready = config.services.consul.nodes.agent;
    };
    nixos = {
      agent = [
        ./agent.nix
        ./remote-api.nix
      ];
      ready = ./ready.nix;
    };
    simulacrum = {
      enable = true;
      deps = [ "wireguard" ];
      settings = ./test.nix;
    };
  };

  dns.records."consul-remote.internal".consulService = "consul-remote";
}

@@ -1,54 +0,0 @@
{ config, lib, pkgs, ... }:

let
  consulReady = pkgs.writers.writeHaskellBin "consul-ready" {
    libraries = with pkgs.haskellPackages; [ aeson http-conduit watchdog ];
  } ''
    {-# LANGUAGE OverloadedStrings #-}
    import Control.Watchdog
    import Control.Exception
    import System.IO
    import Network.HTTP.Simple
    import Data.Aeson

    flushLogger :: WatchdogLogger String
    flushLogger taskErr delay = do
      defaultLogger taskErr delay
      hFlush stdout

    data ConsulHealth = ConsulHealth {
      healthy :: Bool
    }

    instance FromJSON ConsulHealth where
      parseJSON (Object v) = ConsulHealth <$> (v .: "Healthy")

    handleException ex = case ex of
      (SomeException _) -> return $ Left "Consul is not active"

    main :: IO ()
    main = watchdog $ do
      setInitialDelay 300_000
      setMaximumDelay 30_000_000
      setLoggingAction flushLogger
      watch $ handle handleException $ do
        res <- httpJSON "${config.links.consulAgent.url}/v1/operator/autopilot/health"
        case getResponseBody res of
          ConsulHealth True -> return $ Right ()
          ConsulHealth False -> return $ Left "Consul is unhealthy"
  '';
in

{
  systemd.services.consul-ready = {
    description = "Wait for Consul";
    requires = lib.mkIf config.services.consul.enable [ "consul.service" ];
    after = lib.mkIf config.services.consul.enable [ "consul.service" ];
    serviceConfig = {
      ExecStart = lib.getExe consulReady;
      DynamicUser = true;
      TimeoutStartSec = "5m";
      Type = "oneshot";
    };
  };
}

@@ -1,42 +0,0 @@
{ config, depot, lib, ... }:

let
  inherit (depot.lib.meta) domain;
  frontendDomain = "consul-remote.internal.${domain}";

  inherit (config.reflection.interfaces.vstub) addr;
in

{
  services.nginx.virtualHosts.${frontendDomain} = depot.lib.nginx.vhosts.proxy config.links.consulAgent.url // {
    listenAddresses = lib.singleton addr;
    enableACME = false;
    useACMEHost = "internal.${domain}";
  };

  consul.services.consul-remote = {
    unit = "consul";
    mode = "external";
    definition = {
      name = "consul-remote";
      address = addr;
      port = 443;
      checks = [
        {
          name = "Frontend";
          id = "service:consul-remote:frontend";
          http = "https://${addr}/v1/status/leader";
          tls_server_name = frontendDomain;
          header.Host = lib.singleton frontendDomain;
          interval = "60s";
        }
        {
          name = "Backend";
          id = "service:consul-remote:backend";
          http = "${config.links.consulAgent.url}/v1/status/leader";
          interval = "30s";
        }
      ];
    };
  };
}
|
@ -1,24 +0,0 @@
|
|||
{ lib, ... }:
|
||||
|
||||
{
|
||||
defaults.options.services.locksmith = lib.mkSinkUndeclaredOptions { };
|
||||
|
||||
testScript = ''
|
||||
import json
|
||||
|
||||
start_all()
|
||||
|
||||
with subtest("should form cluster"):
|
||||
nodes = [ n for n in machines if n != nowhere ]
|
||||
for machine in nodes:
|
||||
machine.succeed("systemctl start consul-ready.service")
|
||||
for machine in nodes:
|
||||
consulConfig = json.loads(machine.succeed("cat /etc/consul.json"))
|
||||
addr = consulConfig["addresses"]["http"]
|
||||
port = consulConfig["ports"]["http"]
|
||||
setEnv = f"CONSUL_HTTP_ADDR={addr}:{port}"
|
||||
memberList = machine.succeed(f"{setEnv} consul members --status=alive")
|
||||
for machine2 in nodes:
|
||||
assert machine2.name in memberList
|
||||
'';
|
||||
}
|
|

@@ -1,7 +0,0 @@
{
  garage = {
    buckets.content-delivery.web.enable = true;
  };

  ways.cdn.bucket = "content-delivery";
}

@@ -1,21 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 NO562A 9n5IirzhNBIPRj9Gir+/yQhFH830sgfezsqY5Ulzz3o
VItDDdgfTFcvSq/QpIqTHnfr1VHqfI6nPz+WWKYQjHw
-> ssh-ed25519 5/zT0w MfBZrd8wJjoProwdPqsS9CZ9aYNTXgrYviFDwuchQVM
8WKPYO+i1ZSkPYDrHVJ5Pclj2hEzqwAtf31Agzei444
-> ssh-ed25519 TCgorQ 3QYtSx/2eiFp54W60F8FlERfHx+DUfnXXfugiXNPECg
pBx3If3qihD//Aq8hDWCt+U1tiWoCLUDcg/RyVCD0D0
-> ssh-ed25519 P/nEqQ NImm+vKuL50G2kdD2svmfkwsovmryCSyKyhnZ0duDDo
U0PTKHiCj4SxomnJdgubo+3sStSE+YwvCnrRl7aAS1Q
-> ssh-ed25519 FfIUuQ SRgJoBIoW71SiXuHqlnGqRG5AKUrnQy0ecwznGEGTHA
a0IS3hjMln1tWEjo30A6gYtaV7TJSY4SZDarhahMoLk
-> ssh-ed25519 d3WGuA 0qVNcrYe53Wo46zFJs6UZtX0dq7TUy72WGdGpLqB3yo
jTHE9PfhRw5lbBlfznS+ThkSsab3ioearf91xyPBfdQ
-> ssh-ed25519 YIaSKQ CCcBlAOms2aSkB6pws6tN+4Gf551idI9Zq0rokd0P1c
/3oFp6hf+jggurbcuu0cXdDL8lr6m/LTHEeNgiJt2gg
-> K&wn-grease ,Ewz Jc+dQQRp NU~.
FvDOuTGNaLuCfDelsrRbthjuJT9fBZAQ+kz+7Stoc2wciXV1YpCcOYDHSF38OwRF
X/pyjVudbJKS0Mphda6phw
--- 3JFwCzeJsIgRkTpmy9MAvQ64BCZoa98kNKOuT57WI6Y
[binary ciphertext not shown]
@@ -1,144 +0,0 @@
{ cluster, config, depot, lib, pkgs, ... }:

let
  inherit (config.reflection) interfaces;
  inherit (depot.lib.meta) domain;
  inherit (config.networking) hostName;

  link = cluster.config.hostLinks.${hostName}.dnsAuthoritative;
  patroni = cluster.config.links.patroni-pg-access;
  inherit (cluster.config.hostLinks.${hostName}) acmeDnsApi;

  otherDnsServers = lib.pipe (cluster.config.services.dns.otherNodes.authoritative hostName) [
    (map (node: cluster.config.hostLinks.${node}.dnsAuthoritative.tuple))
    (lib.concatStringsSep " ")
  ];

  recordsList = lib.mapAttrsToList (lib.const lib.id) cluster.config.dns.records;
  recordsPartitioned = lib.partition (record: record.rewrite.target == null) recordsList;

  staticRecords = let
    escape = type: {
      TXT = builtins.toJSON;
    }.${type} or lib.id;

    recordName = record: {
      "@" = "${record.root}.";
    }.${record.name} or "${record.name}.${record.root}.";
  in lib.flatten (
    map (record: map (target: "${recordName record} ${record.type} ${escape record.type target}") record.target) recordsPartitioned.right
  );

  rewrites = map (record: let
    maybeEscapeRegex = str: if record.rewrite.type == "regex" then "${lib.escapeRegex str}$" else str;
  in "rewrite stop name ${record.rewrite.type} ${record.name}${maybeEscapeRegex ".${record.root}."} ${record.rewrite.target}. answer auto") recordsPartitioned.wrong;

  rewriteConf = pkgs.writeText "coredns-rewrites.conf" ''
    rewrite stop type DS DS
    rewrite stop type NS NS
    rewrite stop type SOA SOA
    ${lib.concatStringsSep "\n" rewrites}
  '';
in {
  links.localAuthoritativeDNS = {};

  age.secrets = {
    acmeDnsDirectKey = {
      file = ./acme-dns-direct-key.age;
    };
  };

  networking.firewall = {
    allowedTCPPorts = [ 53 ];
    allowedUDPPorts = [ 53 ];
  };

  services.acme-dns = {
    enable = true;
    package = depot.packages.acme-dns;
    settings = {
      general = {
        listen = config.links.localAuthoritativeDNS.tuple;
        inherit domain;
        nsadmin = "hostmaster.${domain}";
        nsname = "eu1.ns.${domain}";
        records = staticRecords;
      };
      api = {
        ip = acmeDnsApi.ipv4;
        inherit (acmeDnsApi) port;
      };
      database = {
        engine = "postgres";
        connection = "postgres://acmedns@${patroni.tuple}/acmedns?sslmode=disable";
      };
    };
  };

  services.locksmith.waitForSecrets.acme-dns = [
    "patroni-acmedns"
  ];

  systemd.services.acme-dns.serviceConfig.EnvironmentFile = with config.age.secrets; [
    "/run/locksmith/patroni-acmedns"
    acmeDnsDirectKey.path
  ];

  services.coredns = {
    enable = true;
    config = ''
      .:${link.portStr} {
        bind ${interfaces.primary.addr}
        chaos "Private Void DNS" info@privatevoid.net
        cache {
          success 4000 86400
          denial 0
          prefetch 3
          serve_stale 86400s verify
        }
        template ANY DS {
          rcode NXDOMAIN
        }
        forward service.eu-central.sd-magic.${domain} 127.0.0.1:8600
        forward addr.eu-central.sd-magic.${domain} 127.0.0.1:8600
        import ${rewriteConf}
        forward . ${config.links.localAuthoritativeDNS.tuple} ${otherDnsServers} {
          policy sequential
        }
      }
    '';
  };

  systemd.services.coredns = {
    after = [ "acme-dns.service" ];
    serviceConfig = {
      MemoryMax = "200M";
      MemorySwapMax = "50M";
      CPUQuota = "25%";
    };
  };

  consul.services = {
    authoritative-dns = {
      unit = "acme-dns";
      definition = {
        name = "authoritative-dns-backend";
        address = config.links.localAuthoritativeDNS.ipv4;
        port = config.links.localAuthoritativeDNS.port;
        checks = lib.singleton {
          interval = "60s";
          tcp = config.links.localAuthoritativeDNS.tuple;
        };
      };
    };
    acme-dns.definition = {
      name = "acme-dns";
      address = acmeDnsApi.ipv4;
      port = acmeDnsApi.port;
      checks = lib.singleton {
        interval = "60s";
        http = "${acmeDnsApi.url}/health";
      };
    };
  };
}
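To see what the lib.partition split above produces, here is a minimal, self-contained sketch, assuming a records attrset shaped like the dns.records option defined in options.nix further below (the record names and the Consul service path are hypothetical); it can be evaluated with nix-instantiate --eval --strict:

let
  lib = (import <nixpkgs> {}).lib;
  # Hypothetical records; rewrite.target == null marks a static record.
  records = {
    eu1 = { name = "eu1.ns"; rewrite.target = null; };
    securedns = { name = "securedns"; rewrite.target = "securedns.service.eu-central.sd-magic.example.com"; };
  };
  recordsList = lib.mapAttrsToList (lib.const lib.id) records;
in lib.partition (record: record.rewrite.target == null) recordsList
# => { right = [ /* eu1: served as a static acme-dns record */ ];
#      wrong = [ /* securedns: turned into a CoreDNS rewrite rule */ ]; }

The right partition feeds staticRecords (and thus acme-dns), while the wrong partition becomes the rewrite rules imported into CoreDNS.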
@@ -1,11 +0,0 @@
{ cluster, lib, ... }:

let
  recursors = lib.pipe (cluster.config.services.dns.nodes.coredns) [
    (map (node: cluster.config.hostLinks.${node}.dnsResolverBackend.ipv4))
  ];
in

{
  networking.nameservers = [ cluster.config.links.dnsResolver.ipv4 ] ++ recursors;
}
@@ -1,114 +0,0 @@
{ cluster, config, depot, lib, ... }:

let
  inherit (config.reflection) interfaces;
  inherit (depot.lib.meta) domain;
  inherit (config.networking) hostName;

  link = cluster.config.hostLinks.${hostName}.dnsResolver;
  backend = cluster.config.hostLinks.${hostName}.dnsResolverBackend;

  otherRecursors = lib.pipe (cluster.config.services.dns.otherNodes.coredns hostName) [
    (map (node: cluster.config.hostLinks.${node}.dnsResolverBackend.tuple))
    (lib.concatStringsSep " ")
  ];

  authoritativeServers = map
    (node: cluster.config.hostLinks.${node}.dnsAuthoritative.tuple)
    cluster.config.services.dns.nodes.authoritative;

  inherit (depot.packages) stevenblack-hosts;
  dot = config.security.acme.certs."securedns.${domain}";
in

{
  links.localRecursor = {};

  networking.firewall = {
    allowedTCPPorts = [ 853 ];
    allowedUDPPorts = [ 853 ];
  };

  systemd.services.coredns = {
    after = (lib.optional (interfaces ? vstub) "network-addresses-vstub.service") ++ [
      "acme-selfsigned-securedns.${domain}.service"
    ];
    before = [ "acme-securedns.${domain}.service" ];
    wants = [ "acme-finished-securedns.${domain}.target" ];
    serviceConfig = {
      LoadCredential = [
        "dot-cert.pem:${dot.directory}/fullchain.pem"
        "dot-key.pem:${dot.directory}/key.pem"
      ];
      ExecReload = lib.mkForce [];
    };
  };

  security.acme.certs."securedns.${domain}" = {
    dnsProvider = "exec";
    # using a different ACME provider because Android Private DNS is fucky
    server = "https://api.buypass.com/acme/directory";
    reloadServices = [
      "coredns.service"
    ];
  };

  services.coredns = {
    enable = true;
    config = ''
      (localresolver) {
        hosts ${stevenblack-hosts} {
          fallthrough
        }
        chaos "Private Void DNS" info@privatevoid.net
        forward hyprspace. 127.43.104.80:11355
        forward ${domain}. ${lib.concatStringsSep " " authoritativeServers} {
          policy random
        }
        forward . ${backend.tuple} ${otherRecursors} {
          policy sequential
        }
      }
      .:${link.portStr} {
        ${lib.optionalString (interfaces ? vstub) "bind ${interfaces.vstub.addr}"}
        bind 127.0.0.1
        bind ${link.ipv4}
        import localresolver
      }
      tls://.:853 {
        bind ${interfaces.primary.addr}
        tls {$CREDENTIALS_DIRECTORY}/dot-cert.pem {$CREDENTIALS_DIRECTORY}/dot-key.pem
        import localresolver
      }
    '';
  };

  services.pdns-recursor = {
    enable = true;
    dnssecValidation = "process";
    forwardZones = {
      # optimize queries against our own domain
      "${domain}" = lib.concatStringsSep ";" authoritativeServers;
    };
    dns = {
      inherit (backend) port;
      address = backend.ipv4;
      allowFrom = [ "127.0.0.1" cluster.config.vars.meshNet.cidr "10.100.3.0/24" ];
    };
  };

  consul.services.securedns = {
    unit = "coredns";
    mode = "external";
    definition = rec {
      name = "securedns";
      address = interfaces.primary.addrPublic;
      port = 853;
      checks = lib.singleton {
        name = "SecureDNS";
        tcp = "${address}:${toString port}";
        interval = "30s";
      };
    };
  };
}
@@ -1,80 +0,0 @@
{ config, depot, lib, ... }:

let
  inherit (depot) hours;
  cfg = config.services.dns;
in
{
  imports = [
    ./options.nix
    ./nodes.nix
    ./ns-records.nix
  ];

  links = {
    dnsResolver = {
      ipv4 = hours.VEGAS.interfaces.vstub.addr;
      port = 53;
    };
    acmeDnsApi = {
      hostname = "acme-dns-challenge.internal.${depot.lib.meta.domain}";
      protocol = "http";
    };
  };
  hostLinks = lib.mkMerge [
    (lib.genAttrs cfg.nodes.authoritative (node: {
      dnsAuthoritative = {
        ipv4 = hours.${node}.interfaces.primary.addrPublic;
        port = 53;
      };
      acmeDnsApi = {
        ipv4 = config.vars.mesh.${node}.meshIp;
        inherit (config.links.acmeDnsApi) port;
        protocol = "http";
      };
    }))
    (lib.genAttrs cfg.nodes.coredns (node: {
      dnsResolver = {
        ipv4 = config.vars.mesh.${node}.meshIp;
        port = 53;
      };
    }))
    (lib.genAttrs cfg.nodes.coredns (node: {
      dnsResolverBackend = {
        ipv4 = config.vars.mesh.${node}.meshIp;
      };
    }))
  ];
  services.dns = {
    nodes = {
      authoritative = [ "VEGAS" "checkmate" "prophet" ];
      coredns = [ "checkmate" "VEGAS" ];
      client = [ "checkmate" "grail" "thunderskin" "VEGAS" "prophet" ];
    };
    nixos = {
      authoritative = ./authoritative.nix;
      coredns = ./coredns.nix;
      client = ./client.nix;
    };
    simulacrum = {
      enable = true;
      deps = [ "consul" "acme-client" "patroni" ];
      settings = ./test.nix;
    };
  };

  patroni = {
    databases.acmedns = {};
    users.acmedns = {
      locksmith = {
        nodes = config.services.dns.nodes.authoritative;
        format = "envFile";
      };
    };
  };

  dns.records = {
    securedns.consulService = "securedns";
    "acme-dns-challenge.internal".consulService = "acme-dns";
  };
}
@@ -1,11 +0,0 @@
{ depot, lib, ... }:

{
  dns.records = lib.mapAttrs' (name: hour: {
    name = lib.toLower "${name}.${hour.enterprise.subdomain}";
    value = {
      type = "A";
      target = [ hour.interfaces.primary.addrPublic ];
    };
  }) depot.gods.fromLight;
}
@@ -1,26 +0,0 @@
{ config, depot, lib, ... }:

let
  cfg = config.services.dns;

  nsNodes = lib.imap1 (idx: node: {
    name = "eu${toString idx}.ns";
    value = {
      type = "A";
      target = [ depot.hours.${node}.interfaces.primary.addrPublic ];
    };
  }) cfg.nodes.authoritative;
in

{
  dns.records = lib.mkMerge [
    (lib.listToAttrs nsNodes)
    {
      NS = {
        name = "@";
        type = "NS";
        target = map (ns: "${ns.name}.${depot.lib.meta.domain}.") nsNodes;
      };
    }
  ];
}
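For the node set declared in default.nix above (authoritative = [ "VEGAS" "checkmate" "prophet" ]), lib.imap1 numbers the nameservers in order. A sketch of the evaluated record set, with addresses elided and example.com standing in for the real domain:

{
  "eu1.ns" = { type = "A"; target = [ /* VEGAS public addr */ ]; };
  "eu2.ns" = { type = "A"; target = [ /* checkmate public addr */ ]; };
  "eu3.ns" = { type = "A"; target = [ /* prophet public addr */ ]; };
  NS = {
    name = "@";
    type = "NS";
    target = [ "eu1.ns.example.com." "eu2.ns.example.com." "eu3.ns.example.com." ];
  };
}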
@@ -1,61 +0,0 @@
{ depot, lib, ... }:

with lib;

let
  recordType = types.submodule ({ config, name, ... }: {
    options = {
      root = mkOption {
        type = types.str;
        default = depot.lib.meta.domain;
      };
      consulServicesRoot = mkOption {
        type = types.str;
        default = "service.eu-central.sd-magic.${depot.lib.meta.domain}";
      };
      name = mkOption {
        type = types.str;
        default = name;
      };

      type = mkOption {
        type = types.enum [ "A" "CNAME" "AAAA" "NS" "MX" "SOA" "TXT" ];
        default = "A";
      };
      target = mkOption {
        type = with types; listOf str;
      };
      ttl = mkOption {
        type = types.ints.unsigned;
        default = 86400;
      };

      consulService = mkOption {
        type = with types; nullOr str;
        default = null;
      };
      rewrite = {
        target = mkOption {
          type = with types; nullOr str;
          default = null;
        };
        type = mkOption {
          type = types.enum [ "exact" "substring" "prefix" "suffix" "regex" ];
          default = "exact";
        };
      };
    };
    config = {
      rewrite.target = mkIf (config.consulService != null) "${config.consulService}.${config.consulServicesRoot}";
    };
  });
in

{
  options.dns = {
    records = mkOption {
      type = with types; attrsOf recordType;
      default = {};
    };
  };
}
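For illustration, a record set exercising these options might look like the sketch below; the names are hypothetical, and setting consulService fills in rewrite.target automatically via the config block above:

dns.records = {
  # Plain A record: the default type is "A", so only the target is needed.
  www = { target = [ "203.0.113.7" ]; };
  # TXT targets are JSON-escaped by the authoritative module's escape function.
  "_dmarc" = { type = "TXT"; target = [ "v=DMARC1; p=none" ]; };
  # Consul-backed: rewrite.target defaults to "myservice.service.eu-central.sd-magic.<domain>".
  myservice.consulService = "myservice";
};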
@@ -1,35 +0,0 @@
{ cluster, ... }:

let
  inherit (cluster._module.specialArgs.depot.lib.meta) domain;
in
{
  nodes.nowhere = { pkgs, ... }: {
    passthru = cluster;
    environment.systemPackages = [
      pkgs.knot-dns
      pkgs.openssl
    ];
  };

  testScript = ''
    import json
    nodeNames = json.loads('${builtins.toJSON cluster.config.services.dns.nodes.authoritative}')
    dotNames = json.loads('${builtins.toJSON cluster.config.services.dns.nodes.coredns}')
    nodes = [ n for n in machines if n.name in nodeNames ]
    dotServers = [ n for n in machines if n.name in dotNames ]

    start_all()

    with subtest("should allow external name resolution for own domain"):
        for node in nodes:
            node.wait_for_unit("coredns.service")
        nowhere.wait_until_succeeds("[[ $(kdig +short securedns.${domain} | wc -l) -ne 0 ]]", timeout=60)
        nowhere.fail("[[ $(kdig +short example.com | wc -l) -ne 0 ]]")

    with subtest("should have valid certificate on DoT endpoint"):
        for node in dotServers:
            node.wait_for_unit("acme-finished-securedns.${domain}.target")
        nowhere.wait_until_succeeds("openssl </dev/null s_client -connect securedns.${domain}:853 -verify_return_error -strict -verify_hostname securedns.${domain}", timeout=60)
  '';
}
@@ -1,17 +0,0 @@
{ depot, ... }:

{
  services.fbi = {
    nodes.host = [ "VEGAS" ];
    nixos.host = ./host.nix;
  };

  dns.records = let
    fbiAddr = [ depot.hours.VEGAS.interfaces.primary.addrPublic ];
  in {
    fbi-index.target = fbiAddr;
    fbi-requests.target = fbiAddr;
    radarr.target = fbiAddr;
    sonarr.target = fbiAddr;
  };
}
@@ -1,36 +0,0 @@
{ lib, ... }:

{
  ways.registry.static = { depot, pkgs, ... }: pkgs.writeTextDir "flake-registry.json" (let
    flakes = {
      depot = {
        type = "tarball";
        url = "https://forge.${depot.lib.meta.domain}/${depot.lib.meta.domain}/depot/archive/master.tar.gz";
      };
      depot-nixpkgs = {
        type = "github";
        owner = "NixOS";
        repo = "nixpkgs";
        inherit (depot.inputs.nixpkgs.sourceInfo) rev narHash lastModified;
      };
      blank = {
        type = "github";
        owner = "divnix";
        repo = "blank";
        inherit (depot.inputs.blank.sourceInfo) rev narHash lastModified;
      };
    } // import ./extra-flakes.nix;
  in builtins.toJSON {
    version = 2;
    flakes = lib.pipe flakes [
      (lib.attrsToList)
      (map (f: {
        from = {
          type = "indirect";
          id = f.name;
        };
        to = f.value;
      }))
    ];
  });
}
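The generated flake-registry.json follows the standard version-2 registry layout. Roughly, builtins.toJSON serializes a value of this shape (a sketch showing the depot entry only, with <domain> as a placeholder):

{
  version = 2;
  flakes = [
    {
      from = { type = "indirect"; id = "depot"; };
      to = { type = "tarball"; url = "https://forge.<domain>/<domain>/depot/archive/master.tar.gz"; };
    }
    # ... one entry per flake above, plus those from extra-flakes.nix
  ];
}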
@@ -1,18 +0,0 @@
let
  github = owner: repo: {
    type = "github";
    inherit owner repo;
  };
in {
  # own
  hyprspace = github "hyprspace" "hyprspace";
  ai = github "nixified-ai" "flake";
  nix-super = github "privatevoid-net" "nix-super";
  nixpak = github "nixpak" "nixpak";

  # other
  nix = github "NixOS" "nix";
  flake-parts = github "hercules-ci" "flake-parts";
  home-manager = github "nix-community" "home-manager";
  dream2nix = github "nix-community" "dream2nix";
}
@@ -1,45 +0,0 @@
{ config, depot, ... }:

{
  services.forge = {
    nodes.server = [ "VEGAS" ];
    nixos.server = ./server.nix;
    meshLinks.server.forge.link.protocol = "http";
    secrets = with config.services.forge.nodes; {
      oidcSecret = {
        nodes = server;
        owner = "forgejo";
      };
    };
  };

  ways = let
    host = builtins.head config.services.forge.nodes.server;
  in config.lib.forService "forge" {
    forge.target = config.hostLinks.${host}.forge.url;
  };

  patroni = config.lib.forService "forge" {
    databases.forge = {};
    users.forge.locksmith = {
      nodes = config.services.forge.nodes.server;
      format = "raw";
    };
  };

  garage = config.lib.forService "forge" {
    keys.forgejo.locksmith.nodes = config.services.forge.nodes.server;
    buckets.forgejo.allow.forgejo = [ "read" "write" ];
  };

  monitoring.blackbox.targets.forge = config.lib.forService "forge" {
    address = "https://forge.${depot.lib.meta.domain}/api/v1/version";
    module = "https2xx";
  };

  dns.records = config.lib.forService "forge" {
    "ssh.forge".target = map
      (node: depot.hours.${node}.interfaces.primary.addrPublic)
      config.services.forge.nodes.server;
  };
}
@@ -1,106 +0,0 @@
{ cluster, config, depot, lib, pkgs, ... }:

let
  inherit (depot.lib.meta) domain;
  inherit (cluster.config.services.forge) secrets;

  patroni = cluster.config.links.patroni-pg-access;

  host = "forge.${domain}";

  link = cluster.config.hostLinks.${config.networking.hostName}.forge;

  exe = lib.getExe config.services.forgejo.package;
in

{
  system.ascensions.forgejo = {
    requiredBy = [ "forgejo.service" ];
    before = [ "forgejo.service" ];
    incantations = i: [
      (i.execShell "chown -R forgejo:forgejo /srv/storage/private/forge")
      (i.execShell "rm -rf /srv/storage/private/forge/data/{attachments,lfs,avatars,repo-avatars,repo-archive,packages,actions_log,actions_artifacts}")
    ];
  };

  services.locksmith.waitForSecrets.forgejo = [
    "garage-forgejo-id"
    "garage-forgejo-secret"
    "patroni-forge"
  ];

  services.forgejo = {
    enable = true;
    package = depot.packages.forgejo;
    stateDir = "/srv/storage/private/forge";
    database = {
      createDatabase = false;
      type = "postgres";
      host = patroni.ipv4;
      inherit (patroni) port;
      name = "forge";
      user = "forge";
      passwordFile = "/run/locksmith/patroni-forge";
    };
    settings = {
      DEFAULT = {
        APP_NAME = "The Forge";
      };
      server = {
        DOMAIN = host;
        ROOT_URL = "https://${host}/";
        PROTOCOL = link.protocol;
        HTTP_ADDR = link.ipv4;
        HTTP_PORT = link.port;
        SSH_DOMAIN = "ssh.${host}";
      };
      oauth2_client = {
        REGISTER_EMAIL_CONFIRM = false;
        ENABLE_AUTO_REGISTRATION = true;
        ACCOUNT_LINKING = "auto";
        UPDATE_AVATAR = true;
      };
      session.COOKIE_SECURE = true;
      service = {
        DISABLE_REGISTRATION = false;
        ALLOW_ONLY_INTERNAL_REGISTRATION = false;
        ALLOW_ONLY_EXTERNAL_REGISTRATION = true;
      };
      storage = {
        STORAGE_TYPE = "minio";
        MINIO_ENDPOINT = cluster.config.links.garageS3.hostname;
        MINIO_BUCKET = "forgejo";
        MINIO_USE_SSL = true;
        MINIO_BUCKET_LOOKUP = "path";
      };
      log."logger.xorm.MODE" = "";
      # enabling this will leak secrets to the log
      database.LOG_SQL = false;
    };
    secrets = {
      storage = {
        MINIO_ACCESS_KEY_ID = "/run/locksmith/garage-forgejo-id";
        MINIO_SECRET_ACCESS_KEY = "/run/locksmith/garage-forgejo-secret";
      };
    };
  };

  systemd.services.forgejo.preStart = let
    providerName = "PrivateVoidAccount";
    args = lib.escapeShellArgs [
      "--name" providerName
      "--provider" "openidConnect"
      "--key" "net.privatevoid.forge1"
      "--auto-discover-url" "https://login.${domain}/auth/realms/master/.well-known/openid-configuration"
      "--group-claim-name" "groups"
      "--admin-group" "/forge_admins@${domain}"
    ];
  in lib.mkAfter /*bash*/ ''
    providerId="$(${exe} admin auth list | ${pkgs.gnugrep}/bin/grep -w '${providerName}' | cut -f1)"
    if [[ -z "$providerId" ]]; then
      FORGEJO_ADMIN_OAUTH2_SECRET="$(< ${secrets.oidcSecret.path})" ${exe} admin auth add-oauth ${args}
    else
      FORGEJO_ADMIN_OAUTH2_SECRET="$(< ${secrets.oidcSecret.path})" ${exe} admin auth update-oauth --id "$providerId" ${args}
    fi
  '';
}
@@ -1,58 +0,0 @@
{ cluster, config, depot, lib, ... }:

let
  inherit (cluster.config.services.hercules-ci-multi-agent) nodes secrets;

  mapAgents = lib.flip lib.mapAttrs nodes;

  mergeMap = f: let
    outputs = mapAgents f;
  in lib.pipe outputs [
    (lib.mapAttrs (basename: basevalue:
      lib.mapAttrs' (n: v:
        lib.nameValuePair "${n}-${basename}" v
      ) basevalue
    ))
    lib.attrValues
    (lib.foldl' (a: b: a // b) {})
  ];
in
{
  imports = [
    ./modules/multi-agent-refactored
  ];

  systemd.services = mergeMap (_: _: {
    hercules-ci-agent = {
      # hercules-ci-agent-restarter should take care of this
      restartIfChanged = false;
      environment = {
        AWS_SHARED_CREDENTIALS_FILE = secrets.cacheCredentials.path;
        AWS_EC2_METADATA_DISABLED = "true";
      };
      serviceConfig.Slice = "builder.slice";
    };
  });

  services.hercules-ci-agents = lib.genAttrs (lib.attrNames nodes) (org: {
    enable = true;
    package = depot.inputs.hercules-ci-agent.packages.hercules-ci-agent;
    settings = {
      clusterJoinTokenPath = secrets."clusterJoinToken-${org}".path;
      binaryCachesPath = secrets.cacheConfig.path;
      concurrentTasks = lib.pipe config.reflection.hardware.cpu.cores [
        (lib.flip builtins.div 2)
        builtins.floor
        (lib.max 2)
      ];
    };
  });

  nix.settings.cores = lib.pipe config.reflection.hardware.cpu.cores [
    (builtins.mul 0.75)
    builtins.floor
    (lib.max 1)
  ];

  users.groups.hercules-ci-agent.members = map (org: "hci-${org}") (lib.attrNames nodes);
}
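mergeMap flattens the per-org outputs into one attrset by suffixing each inner name with the org, which is how one definition yields a hercules-ci-agent-<org> unit per org. A minimal, self-contained sketch with hypothetical org names, evaluable via nix-instantiate --eval --strict:

let
  lib = (import <nixpkgs> {}).lib;
  nodes = { org1 = [ "VEGAS" ]; org2 = [ "prophet" ]; };
  mergeMap = f: lib.pipe (lib.mapAttrs f nodes) [
    (lib.mapAttrs (basename: basevalue:
      lib.mapAttrs' (n: v: lib.nameValuePair "${n}-${basename}" v) basevalue))
    lib.attrValues
    (lib.foldl' (a: b: a // b) {})
  ];
in mergeMap (_: _: { hercules-ci-agent.restartIfChanged = false; })
# => { hercules-ci-agent-org1 = { restartIfChanged = false; };
#      hercules-ci-agent-org2 = { restartIfChanged = false; }; }

As for the two arithmetic pipelines: on a hypothetical 12-core host they yield concurrentTasks = 6 (half the cores, floored, minimum 2) and nix.settings.cores = 9 (three quarters, floored, minimum 1).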
@@ -1,72 +0,0 @@
{ config, lib, ... }:

{
  services.hercules-ci-multi-agent = {
    nodes = {
      private-void = [ "VEGAS" "prophet" ];
      nixpak = [ "VEGAS" "prophet" ];
      max = [ "VEGAS" "prophet" ];
      hyprspace = [ "VEGAS" "prophet" ];
    };
    nixos = {
      private-void = [
        ./common.nix
        {
          services.hercules-ci-agents.private-void.settings = {
            secretsJsonPath = config.services.hercules-ci-multi-agent.secrets.effectsSecrets.path;
          };
        }
      ];
      nixpak = [
        ./common.nix
      ];
      max = [
        ./common.nix
      ];
      hyprspace = [
        ./common.nix
      ];
    };
    secrets = let
      inherit (config.services.hercules-ci-multi-agent) nodes;
      allNodes = lib.unique (lib.concatLists (lib.attrValues nodes));
    in {
      cacheConfig = {
        nodes = allNodes;
        mode = "0440";
        group = "hercules-ci-agent";
      };
      cacheCredentials = {
        nodes = allNodes;
        shared = false;
        mode = "0440";
        group = "hercules-ci-agent";
      };
      effectsSecrets = {
        nodes = nodes.private-void;
        owner = "hci-private-void";
      };
    } // lib.mapAttrs' (org: nodes: {
      name = "clusterJoinToken-${org}";
      value = {
        inherit nodes;
        shared = false;
        owner = "hci-${org}";
      };
    }) nodes;
  };
  garage = let
    hciAgentKeys = lib.pipe config.services.hercules-ci-multi-agent.nodes [
      (lib.collect lib.isList)
      lib.flatten
      lib.unique
      (map (x: "hci-agent-${x}"))
    ];
  in config.lib.forService "hercules-ci-multi-agent" {
    keys = lib.genAttrs hciAgentKeys (lib.const {});
    buckets.nix-store = {
      allow = lib.genAttrs hciAgentKeys (lib.const [ "read" "write" ]);
      web.enable = true;
    };
  };
}
@@ -1,143 +0,0 @@
{ config, lib, pkgs, ... }:

{
  options = {
    services.hercules-ci-agents = lib.mkOption {
      default = { };
      type = lib.types.attrsOf (lib.types.submodule (import ./options.nix { inherit config lib pkgs; }));
      description = lib.mdDoc "Hercules CI Agent instances.";
      example = {
        agent1.enable = true;

        agent2 = {
          enable = true;
          settings.labels.myMetadata = "agent2";
        };
      };
    };
  };

  config =
    let
      forAllAgents = f: lib.mkMerge (lib.mapAttrsToList (name: agent: lib.mkIf agent.enable (f name agent)) config.services.hercules-ci-agents);
    in
    {
      users = forAllAgents (name: agent: {
        users.${agent.user} = {
          inherit (agent) group;
          description = "Hercules CI Agent system user for ${name}";
          isSystemUser = true;
          home = agent.settings.baseDirectory;
          createHome = true;
        };
        groups.${agent.group} = { };
      });

      systemd = forAllAgents (name: agent:
        let
          command = "${agent.package}/bin/hercules-ci-agent --config ${agent.tomlFile}";
          testCommand = "${command} --test-configuration";
        in
        {
          tmpfiles.rules = [ "d ${agent.settings.workDirectory} 0700 ${agent.user} ${agent.group} - -" ];

          services."hercules-ci-agent-${name}" = {
            wantedBy = [ "multi-user.target" ];
            after = [ "network-online.target" ];
            wants = [ "network-online.target" ];
            startLimitBurst = 30 * 1000000; # practically infinite
            serviceConfig = {
              User = agent.user;
              Group = agent.group;
              ExecStart = command;
              ExecStartPre = testCommand;
              Restart = "on-failure";
              RestartSec = 120;

              # If a worker goes OOM, don't kill the main process. It needs to
              # report the failure and it's unlikely to be part of the problem.
              OOMPolicy = "continue";

              # Work around excessive stack use by libstdc++ regex
              # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86164
              # A 256 MiB stack allows between 400 KiB and 1.5 MiB file to be matched by ".*".
              LimitSTACK = 256 * 1024 * 1024;

              # Hardening.
              DeviceAllow = "";
              LockPersonality = true;
              NoNewPrivileges = true;
              PrivateDevices = true;
              PrivateMounts = true;
              ProtectControlGroups = true;
              ProtectHome = true;
              ProtectSystem = "full";
              RemoveIPC = true;
              RestrictRealtime = true;
              RestrictSUIDSGID = true;
              RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
              SystemCallArchitectures = "native";
              UMask = "077";
              WorkingDirectory = agent.settings.workDirectory;
            };
          };

          # Changes in the secrets do not affect the unit in any way that would cause
          # a restart, which is currently necessary to reload the secrets.
          paths."hercules-ci-agent-${name}-restart-files" = {
            wantedBy = [ "hercules-ci-agent-${name}.service" ];
            pathConfig = {
              Unit = "hercules-ci-agent-${name}-restarter.service";
              PathChanged = [ agent.settings.clusterJoinTokenPath agent.settings.binaryCachesPath ];
            };
          };

          services."hercules-ci-agent-restarter-${name}" = {
            serviceConfig.Type = "oneshot";
            script = ''
              # Wait a bit, with the effect of bundling up file changes into a single
              # run of this script and hopefully a single restart.
              sleep 10
              if systemctl is-active --quiet 'hercules-ci-agent-${name}.service'; then
                if ${testCommand}; then
                  systemctl restart 'hercules-ci-agent-${name}.service'
                else
                  echo 1>&2 'WARNING: Not restarting hercules-ci-agent-${name} because config is not valid at this time.'
                fi
              else
                echo 1>&2 'Not restarting hercules-ci-agent-${name} despite config file update, because it is not already active.'
              fi
            '';
          };
        });

      nix.settings = forAllAgents (_: agent: {
        trusted-users = [ agent.user ];
        # A store path that was missing at first may well have finished building,
        # even shortly after the previous lookup. This *also* applies to the daemon.
        narinfo-cache-negative-ttl = 0;
      });

      # Trusted user allows simplified configuration and better performance
      # when operating in a cluster.
      assertions = forAllAgents (_: agent: [
        {
          assertion = (agent.settings.nixUserIsTrusted or false) -> builtins.match ".*(^|\n)[ \t]*trusted-users[ \t]*=.*" config.nix.extraOptions == null;
          message = ''
            hercules-ci-agent: Please do not set `trusted-users` in `nix.extraOptions`.

            The hercules-ci-agent module by default relies on `nix.settings.trusted-users`
            to be effectful, but a line like `trusted-users = ...` in `nix.extraOptions`
            will override the value set in `nix.settings.trusted-users`.

            Instead of setting `trusted-users` in the `nix.extraOptions` string, you should
            set an option with additive semantics, such as
            - the NixOS option `nix.settings.trusted-users`, or
            - the Nix option in the `extraOptions` string, `extra-trusted-users`
          '';
        }
      ]);
    };

  meta.maintainers = with lib.maintainers; [ roberth kranzes ];
}
@@ -1,62 +0,0 @@
{ config, lib, pkgs, ... }:

let
  systemConfig = config;
in
{ config, name, ... }:
let
  inherit (lib) types;
in
{
  options = {
    enable = lib.mkEnableOption (lib.mdDoc ''
      Hercules CI Agent as a system service.

      [Hercules CI](https://hercules-ci.com) is a
      continuous integration service that is centered around Nix.

      Support is available at [help@hercules-ci.com](mailto:help@hercules-ci.com).
    '');

    package = lib.mkPackageOption pkgs "hercules-ci-agent" { };

    user = lib.mkOption {
      type = types.str;
      default = "hci-${name}";
      description = lib.mdDoc "User account under which hercules-ci-agent runs.";
      internal = true;
    };

    group = lib.mkOption {
      type = types.str;
      default = "hci-${name}";
      description = lib.mdDoc "Group account under which hercules-ci-agent runs.";
      internal = true;
    };

    settings = lib.mkOption {
      type = types.submodule (import ./settings.nix { inherit systemConfig lib name pkgs; agent = config; });
      default = { };
      description = lib.mdDoc ''
        These settings are written to the `agent.toml` file.

        Not all settings are listed as options; they can be set nonetheless.

        For the exhaustive list of settings, see <https://docs.hercules-ci.com/hercules-ci/reference/agent-config/>.
      '';
    };

    tomlFile = lib.mkOption {
      type = types.path;
      internal = true;
      defaultText = lib.literalMD "generated `hercules-ci-agent-${name}.toml`";
      description = lib.mdDoc ''
        The fully assembled config file.
      '';
    };
  };

  config = {
    tomlFile = (pkgs.formats.toml { }).generate "hercules-ci-agent-${name}.toml" config.settings;
  };
}
@@ -1,163 +0,0 @@
{ agent, systemConfig, lib, name, pkgs, ... }:

{ config, ... }:

let
  inherit (lib) types;
  format = pkgs.formats.toml { };
in
{
  freeformType = format.type;
  options = {
    apiBaseUrl = lib.mkOption {
      description = lib.mdDoc ''
        API base URL that the agent will connect to.

        When using Hercules CI Enterprise, set this to the URL where your
        Hercules CI server is reachable.
      '';
      type = types.str;
      default = "https://hercules-ci.com";
    };
    baseDirectory = lib.mkOption {
      type = types.path;
      default = "/var/lib/hercules-ci-agent-${name}";
      description = lib.mdDoc ''
        State directory (secrets, work directory, etc) for agent
      '';
    };
    concurrentTasks = lib.mkOption {
      description = lib.mdDoc ''
        Number of tasks to perform simultaneously.

        A task is a single derivation build, an evaluation or an effect run.
        At minimum, you need 2 concurrent tasks for `x86_64-linux`
        in your cluster, to allow for import from derivation.

        `concurrentTasks` can be around the CPU core count or lower if memory is
        the bottleneck.

        The optimal value depends on the resource consumption characteristics of your workload,
        including memory usage and in-task parallelism. This is typically determined empirically.

        When scaling, it is generally better to have a double-size machine than two machines,
        because each split of resources causes inefficiencies; particularly with regards
        to build latency because of extra downloads.
      '';
      type = types.either types.ints.positive (types.enum [ "auto" ]);
      default = "auto";
      defaultText = lib.literalMD ''
        `"auto"`, meaning equal to the number of CPU cores.
      '';
    };
    labels = lib.mkOption {
      description = lib.mdDoc ''
        A key-value map of user data.

        This data will be available to organization members in the dashboard and API.

        The values can be of any TOML type that corresponds to a JSON type, but arrays
        can not contain tables/objects due to limitations of the TOML library. Values
        involving arrays of non-primitive types may not be representable currently.
      '';
      type = format.type;
      defaultText = lib.literalExpression ''
        {
          agent.source = "..."; # One of "nixpkgs", "flake", "override"
          lib.version = "...";
          pkgs.version = "...";
        }
      '';
    };
    nixUserIsTrusted = lib.mkOption {
      internal = true;
      readOnly = true;
      description = lib.mdDoc ''
        Whether the agent's user should be considered trusted by Nix.
      '';
      type = types.bool;
      default = lib.elem agent.user systemConfig.nix.settings.trusted-users;
    };
    workDirectory = lib.mkOption {
      description = lib.mdDoc ''
        The directory in which temporary subdirectories are created for task state. This includes sources for Nix evaluation.
      '';
      type = types.path;
      default = config.baseDirectory + "/work";
      defaultText = lib.literalExpression ''baseDirectory + "/work"'';
    };
    staticSecretsDirectory = lib.mkOption {
      description = lib.mdDoc ''
        This is the default directory to look for statically configured secrets like `cluster-join-token.key`.

        See also `clusterJoinTokenPath` and `binaryCachesPath` for fine-grained configuration.
      '';
      type = types.path;
      default = config.baseDirectory + "/secrets";
      defaultText = lib.literalExpression ''baseDirectory + "/secrets"'';
    };
    clusterJoinTokenPath = lib.mkOption {
      description = lib.mdDoc ''
        Location of the cluster-join-token.key file.

        You can retrieve the contents of the file when creating a new agent via
        <https://hercules-ci.com/dashboard>.

        As this value is confidential, it should not be in the store, but
        installed using other means, such as agenix, NixOps
        `deployment.keys`, or manual installation.

        The contents of the file are used for authentication between the agent and the API.
      '';
      type = types.path;
      default = config.staticSecretsDirectory + "/cluster-join-token.key";
      defaultText = lib.literalExpression ''staticSecretsDirectory + "/cluster-join-token.key"'';
    };
    binaryCachesPath = lib.mkOption {
      description = lib.mdDoc ''
        Path to a JSON file containing binary cache secret keys.

        As these values are confidential, they should not be in the store, but
        copied over using other means, such as agenix, NixOps
        `deployment.keys`, or manual installation.

        The format is described on <https://docs.hercules-ci.com/hercules-ci-agent/binary-caches-json/>.
      '';
      type = types.path;
      default = config.staticSecretsDirectory + "/binary-caches.json";
      defaultText = lib.literalExpression ''staticSecretsDirectory + "/binary-caches.json"'';
    };
    secretsJsonPath = lib.mkOption {
      description = lib.mdDoc ''
        Path to a JSON file containing secrets for effects.

        As these values are confidential, they should not be in the store, but
        copied over using other means, such as agenix, NixOps
        `deployment.keys`, or manual installation.

        The format is described on <https://docs.hercules-ci.com/hercules-ci-agent/secrets-json/>.
      '';
      type = types.path;
      default = config.staticSecretsDirectory + "/secrets.json";
      defaultText = lib.literalExpression ''staticSecretsDirectory + "/secrets.json"'';
    };
  };

  config = {
    labels =
      let
        mkIfNotNull = x: lib.mkIf (x != null) x;
      in
      {
        nixos = {
          inherit (systemConfig.system.nixos)
            release
            codeName
            tags;
          configurationRevision = mkIfNotNull systemConfig.system.configurationRevision;
          label = mkIfNotNull systemConfig.system.nixos.label;
          systemName = mkIfNotNull systemConfig.system.name;
        };
      };
  };
}
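Putting the two modules together, a single extra agent instance could be declared as below; the org name is hypothetical, and all paths fall out of the defaults above:

services.hercules-ci-agents.myorg = {
  enable = true;
  settings = {
    concurrentTasks = 4;
    labels.myMetadata = "myorg";
    # Derived from baseDirectory = /var/lib/hercules-ci-agent-myorg:
    #   clusterJoinTokenPath -> .../secrets/cluster-join-token.key
    #   binaryCachesPath     -> .../secrets/binary-caches.json
  };
};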
@@ -1,70 +0,0 @@
{ cluster, config, lib, pkgs, utils, ... }:

let
  frontendLink = cluster.config.links.idm;
in

{
  systemd.services.kanidm-unixd.serviceConfig = {
    EnvironmentFile = cluster.config.services.idm.secrets.serviceAccountCredentials.path;
  };

  services.kanidm = {
    enableClient = true;
    clientSettings = {
      uri = frontendLink.url;
    };
    enablePam = true;
    unixSettings = {
      default_shell = utils.toShellPath config.users.defaultUserShell;
      home_alias = "name";
      uid_attr_map = "name";
      gid_attr_map = "name";
    };
  };

  environment.etc."ssh/authorized_keys_command_kanidm" = {
    mode = "0755";
    text = ''
      #!/bin/sh
      exec ${config.services.kanidm.package}/bin/kanidm_ssh_authorizedkeys "$@"
    '';
  };

  services.openssh = {
    authorizedKeysCommand = "/etc/ssh/authorized_keys_command_kanidm";
    authorizedKeysCommandUser = "nobody";
  };

  security = {
    pam.services.sudo = { config, ... }: {
      rules.auth.rssh = {
        enable = lib.mkForce true;
        order = config.rules.auth.unix.order - 10;
        settings = {
          authorized_keys_command = "/etc/ssh/authorized_keys_command_kanidm";
          authorized_keys_command_user = "nobody";
        };
      };
    };

    sudo.extraConfig = ''
      Defaults env_keep+=SSH_AUTH_SOCK
    '';
  };

  environment.systemPackages = let
    idmAlias = pkgs.runCommand "kanidm-idm-alias" {} ''
      mkdir -p $out/bin
      ln -s ${config.services.kanidm.package}/bin/kanidm $out/bin/idm
      mkdir -p $out/share/bash-completion/completions
      cat >$out/share/bash-completion/completions/idm.bash <<EOF
      source ${config.services.kanidm.package}/share/bash-completion/completions/kanidm.bash
      complete -F _kanidm -o bashdefault -o default idm
      EOF
    '';
  in [ idmAlias ];

  # i32 bug https://github.com/nix-community/nsncd/issues/6
  services.nscd.enableNsncd = false;
}
Some files were not shown because too many files have changed in this diff.