Compare commits

...

14 commits

17 changed files with 652 additions and 225 deletions

View file

@@ -6,5 +6,6 @@
nixos.listener = [
./listener.nix
];
simulacrum.deps = [ "consul" ];
};
}

View file

@@ -0,0 +1,25 @@
{ config, ... }:
{
imports = [
./options.nix
./simulacrum/test-data.nix
];
services.incandescence = {
nodes = {
provider = config.services.consul.nodes.agent;
};
nixos = {
provider = [
./provider.nix
./provider-options.nix
];
};
simulacrum = {
enable = true;
deps = [ "consul" "locksmith" ];
settings = ./simulacrum/test.nix;
};
};
}

View file

@@ -0,0 +1,22 @@
{ lib, ... }:
let
inherit (lib) mkOption;
inherit (lib.types) attrsOf listOf submodule str;
in
{
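# Cluster-level view of desired state: each provider lists, per formula, the
# objects that should exist. The node-level ignite-* services read this to
# decide what to create and what to garbage-collect.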
options.incandescence = {
providers = mkOption {
type = attrsOf (submodule ({ name, ... }: {
options = {
objects = mkOption {
type = attrsOf (listOf str);
default = { };
};
};
}));
default = { };
};
};
}

View file

@@ -0,0 +1,72 @@
{ lib, ... }:
let
inherit (lib) mkEnableOption mkOption;
inherit (lib.types) attrsOf functionTo ints listOf nullOr package submodule str;
in
{
options.services.incandescence = {
providers = mkOption {
type = attrsOf (submodule ({ name, ... }: {
options = {
locksmith = mkEnableOption "Locksmith integration";
wantedBy = mkOption {
type = listOf str;
};
partOf = mkOption {
type = listOf str;
};
wants = mkOption {
type = listOf str;
default = [ ];
};
after = mkOption {
type = listOf str;
default = [ ];
};
packages = mkOption {
type = listOf package;
default = [ ];
};
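# A formula describes one kind of object the provider manages: how to create it,
# optionally how to change it in place, how to destroy it, and how long removed
# objects linger (destroyAfterDays) before they are actually destroyed.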
formulae = mkOption {
type = attrsOf (submodule ({ ... }: {
options = {
deps = mkOption {
type = listOf str;
default = [ ];
};
create = mkOption {
type = functionTo str;
};
change = mkOption {
type = nullOr (functionTo str);
default = null;
};
destroy = mkOption {
type = str;
};
destroyAfterDays = mkOption {
type = ints.unsigned;
default = 0;
};
};
}));
default = { };
};
};
}));
default = { };
};
};
}

View file

@@ -0,0 +1,144 @@
{ cluster, config, lib, ... }:
let
inherit (lib) concatStringsSep escapeShellArg flatten filter filterAttrs length mapAttrs mapAttrs' mapAttrsToList mkIf mkMerge pipe stringToCharacters;
cfg = config.services.incandescence;
clusterCfg = cluster.config.incandescence;
in
{
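# For each provider/formula pair, generate three oneshot units
# (ignite-<provider>-<formula>-{create,change,destroy}), all pulled in by and
# ordered before the provider's incandescence-<provider>.target.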
systemd.services = pipe cfg.providers [
(mapAttrsToList (provider: providerConfig: pipe providerConfig.formulae [
(mapAttrsToList (formula: formulaConfig: let
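# kvRoot: Consul KV prefix holding each object's alive flag and destroyOn timestamp.
# time: shell command substitution, evaluated when the unit runs, not at Nix eval time.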
kvRoot = "services/incandescence/providers/${provider}/formulae/${formula}";
time = "$(date +%s)";
in {
"ignite-${provider}-${formula}-create" = {
description = "Ignite Creation: ${provider} - ${formula}";
wantedBy = [ "incandescence-${provider}.target" ];
before = [ "incandescence-${provider}.target" ];
wants = providerConfig.wants ++ map (dep: "ignite-${provider}-${dep}-create.service") formulaConfig.deps;
after = providerConfig.after ++ map (dep: "ignite-${provider}-${dep}-create.service") formulaConfig.deps;
serviceConfig.Type = "oneshot";
distributed.enable = true;
path = [ config.services.consul.package ] ++ providerConfig.packages;
script = pipe clusterCfg.providers.${provider}.objects.${formula} [
(map (object: ''
if ! consul kv get ${kvRoot}/${object}/alive >/dev/null; then
echo "Create ${formula}: ${object}"
if (
${formulaConfig.create object}
)
then
consul kv put ${kvRoot}/${object}/alive true
consul kv delete ${kvRoot}/${object}/destroyOn
else
echo "Creation failed: ${object}"
fi
fi
''))
(concatStringsSep "\n")
(script: if script == "" then ''
echo "Nothing to create"
'' else script)
];
};
"ignite-${provider}-${formula}-change" = mkIf (formulaConfig.change != null) {
description = "Ignite Change: ${provider} - ${formula}";
wantedBy = [ "incandescence-${provider}.target" ];
before = [ "incandescence-${provider}.target" ];
wants = providerConfig.wants ++ [ "ignite-${provider}-${formula}-create.service" ] ++ map (dep: "ignite-${provider}-${dep}-change.service") formulaConfig.deps;
after = providerConfig.after ++ [ "ignite-${provider}-${formula}-create.service" ] ++ map (dep: "ignite-${provider}-${dep}-change.service") formulaConfig.deps;
serviceConfig.Type = "oneshot";
distributed.enable = true;
path = [ config.services.consul.package ] ++ providerConfig.packages;
script = pipe clusterCfg.providers.${provider}.objects.${formula} [
(map (object: ''
echo "Change ${formula}: ${object}"
(
${formulaConfig.change object}
) || echo "Change failed: ${object}"
''))
(concatStringsSep "\n")
(script: if script == "" then ''
echo "Nothing to change"
'' else script)
];
};
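# Destruction is two-phase: objects found in Consul but no longer declared are first
# scheduled (destroyOn = now + destroyAfterDays), then destroyed once that deadline
# has passed; destroyAfterDays = 0 destroys immediately.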
"ignite-${provider}-${formula}-destroy" = {
description = "Ignite Destruction: ${provider} - ${formula}";
wantedBy = [ "incandescence-${provider}.target" ] ++ map (dep: "ignite-${provider}-${dep}-destroy.service") formulaConfig.deps;
before = [ "incandescence-${provider}.target" ] ++ map (dep: "ignite-${provider}-${dep}-destroy.service") formulaConfig.deps;
wants = providerConfig.wants ++ [ "ignite-${provider}-${formula}-change.service" ];
after = providerConfig.after ++ [ "ignite-${provider}-${formula}-change.service" ];
serviceConfig.Type = "oneshot";
distributed.enable = true;
path = [ config.services.consul.package ] ++ providerConfig.packages;
script = let
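# fieldNum: the `cut` field that holds the object name, i.e. the number of '/'
# characters in kvRoot plus two (assuming provider and formula names contain no slashes).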
fieldNum = pipe kvRoot [
stringToCharacters
(filter (x: x == "/"))
length
(builtins.add 2)
toString
];
keyFilter = pipe clusterCfg.providers.${provider}.objects.${formula} [
(map (x: escapeShellArg "^${x}$"))
(concatStringsSep " \\\n -e ")
];
destroyAfterDays = toString formulaConfig.destroyAfterDays;
in ''
consul kv get --keys ${kvRoot}/ | cut -d/ -f${fieldNum} | grep -v -e ${keyFilter} | while read object; do
if consul kv get ${kvRoot}/$object/alive >/dev/null; then
destroyOn="$(consul kv get ${kvRoot}/$object/destroyOn || true)"
if [[ -z "$destroyOn" && "${destroyAfterDays}" -ne 0 ]]; then
echo "Schedule ${formula} for destruction in ${destroyAfterDays} days: $object"
consul kv put ${kvRoot}/$object/destroyOn "$((${time} + 86400 * ${destroyAfterDays}))"
elif [[ "${destroyAfterDays}" -eq 0 || "${time}" -ge "$destroyOn" ]]; then
echo "Destroy ${formula}: $object"
export OBJECT="$object"
if (
${formulaConfig.destroy}
)
then
consul kv delete --recurse ${kvRoot}/$object
else
echo "Destruction failed: $object"
fi
else
echo "Scheduled for destruction on $destroyOn (now: ${time})"
fi
fi
done
'';
};
}))
]))
flatten
mkMerge
];
systemd.targets = mapAttrs' (provider: providerConfig: {
name = "incandescence-${provider}";
value = {
description = "An Incandescence | ${provider}";
inherit (providerConfig) wantedBy partOf;
};
}) cfg.providers;
services.locksmith.providers = mapAttrs (provider: providerConfig: {
wantedBy = [ "incandescence-${provider}.target" ];
after = [ "incandescence-${provider}.target" ];
}) (filterAttrs (_: providerConfig: providerConfig.locksmith) cfg.providers);
system.ascensions = mapAttrs' (provider: providerConfig: {
name = "incandescence-${provider}";
value = {
distributed = true;
requiredBy = map (formula: "ignite-${provider}-${formula}-create.service") (lib.attrNames providerConfig.formulae);
before = map (formula: "ignite-${provider}-${formula}-create.service") (lib.attrNames providerConfig.formulae);
incantations = lib.mkDefault (i: []);
};
}) cfg.providers;
}

View file

@@ -0,0 +1,8 @@
{ config, lib, ... }:
{
incandescence = lib.mkIf config.simulacrum {
providers = config.lib.forService "incandescence" {
test.objects.example = [ "example1" "example2" ];
};
};
}

View file

@@ -0,0 +1,47 @@
{ cluster, lib, ... }:
let
providers = lib.take 2 cluster.config.services.incandescence.nodes.provider;
in
{
nodes = lib.genAttrs providers (lib.const {
services.incandescence.providers.test = {
wantedBy = [ "multi-user.target" ];
partOf = [ ];
formulae.example = {
create = x: "consul kv put testData/${x} ${x}";
destroy = "consul kv delete testData/$OBJECT";
};
};
});
testScript = ''
import json

nodeNames = json.loads('${builtins.toJSON providers}')
nodes = [ n for n in machines if n.name in nodeNames ]

start_all()

consulConfig = json.loads(nodes[0].succeed("cat /etc/consul.json"))
addr = consulConfig["addresses"]["http"]
port = consulConfig["ports"]["http"]
setEnv = f"CONSUL_HTTP_ADDR={addr}:{port}"

with subtest("should create objects"):
    for node in nodes:
        node.wait_for_unit("incandescence-test.target")
    nodes[0].succeed(f"[[ $({setEnv} consul kv get testData/example1) == example1 ]]")
    nodes[0].succeed(f"[[ $({setEnv} consul kv get testData/example2) == example2 ]]")

with subtest("should destroy objects"):
    nodes[0].succeed(f"{setEnv} consul kv put testData/example3 example3")
    nodes[0].succeed(f"{setEnv} consul kv put services/incandescence/providers/test/formulae/example/example3/alive true")
    nodes[1].succeed(f"{setEnv} consul kv get testData/example3")
    for node in nodes:
        node.systemctl("isolate default")
    for node in nodes:
        node.wait_for_unit("incandescence-test.target")
    nodes[0].fail(f"{setEnv} consul kv get testData/example3")
'';
}

View file

@@ -14,5 +14,6 @@
./provider.nix
];
};
simulacrum.deps = [ "chant" "consul" ];
};
}

View file

@@ -28,6 +28,10 @@ in
command = mkOption {
type = types.coercedTo types.package (package: "${package}") types.str;
};
checkUpdate = mkOption {
type = types.coercedTo types.package (package: "${package}") types.str;
default = "true";
};
owner = mkOption {
type = types.str;
default = "root";
@@ -72,20 +76,27 @@
activeNodes = lib.unique (lib.flatten (lib.mapAttrsToList (_: secret: secret.nodes) activeSecrets));
secretNames = map (name: "${providerRoot}-${name}/") (lib.attrNames activeSecrets);
createSecret = { path, nodes, owner, mode, group, command }: ''
createSecret = { path, nodes, owner, mode, group, command, checkUpdate }: ''
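# checkUpdate gates regeneration: the secret is only regenerated and re-encrypted
# for each recipient node when this command exits 0.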
if (${checkUpdate}); then
consul kv put ${lib.escapeShellArg path}/mode ${lib.escapeShellArg mode}
consul kv put ${lib.escapeShellArg path}/owner ${lib.escapeShellArg owner}
consul kv put ${lib.escapeShellArg path}/group ${lib.escapeShellArg group}
secret="$(mktemp -ut)"
(${command}) > "$secret"
${lib.concatStringsSep "\n" (map (node: ''
consul kv put ${lib.escapeShellArg path}/recipient/${node} "$( (${command}) | age --encrypt --armor -r ${lib.escapeShellArg depot.hours.${node}.ssh.id.publicKey})"
consul kv put ${lib.escapeShellArg path}/recipient/${node} "$(age < "$secret" --encrypt --armor -r ${lib.escapeShellArg depot.hours.${node}.ssh.id.publicKey})"
'') nodes)}
else
echo Skipping update for ${lib.escapeShellArg path}
fi
'';
in ''
# create/update secrets
umask 77
${lib.pipe activeSecrets [
(lib.mapAttrsToList (secretName: secretConfig: createSecret {
path = "${providerRoot}-${secretName}";
inherit (secretConfig) nodes mode owner group command;
inherit (secretConfig) nodes mode owner group command checkUpdate;
}))
(lib.concatStringsSep "\n")
]}

View file

@@ -0,0 +1,91 @@
{ cluster, config, lib, pkgs, ... }:
let
inherit (cluster.config.services.patroni) secrets;
patroni = cluster.config.links.patroni-pg-access;
cfg = cluster.config.patroni;
writeQueryFile = pkgs.writeText "patroni-query.sql";
psqlRunFile = file: ''
export PGPASSWORD="$(< ${secrets.PATRONI_SUPERUSER_PASSWORD.path})"
while ! ${config.services.patroni.postgresqlPackage}/bin/psql 'host=${patroni.ipv4} port=${patroni.portStr} dbname=postgres user=postgres' --tuples-only --csv --file="${file}"; do
sleep 3
done
'';
psql = query: psqlRunFile (writeQueryFile query);
psqlSecret = getSecret: queryTemplate: let
queryTemplateFile = writeQueryFile queryTemplate;
in ''
umask 77
secretFile="$(mktemp -ut patroniSecret.XXXXXXXXXXXXXXXX)"
queryFile="$(mktemp -ut patroniQuery.XXXXXXXXXXXXXXXX)"
trap "rm -f $secretFile $queryFile" EXIT
${getSecret} > "$secretFile"
cp --no-preserve=mode ${queryTemplateFile} "$queryFile"
${pkgs.replace-secret}/bin/replace-secret '@SECRET@' "$secretFile" "$queryFile"
${psqlRunFile "$queryFile"}
'';
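# psqlSecret runs a SQL template with @SECRET@ filled in at runtime via replace-secret,
# so the secret value itself never lands in the Nix store.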
genPassword = pkgs.writeShellScript "patroni-generate-user-password" ''
umask 77
base64 -w0 /dev/urandom | tr -d /+ | head -c256 | tee "/run/keys/locksmith-provider-patroni-$1"
'';
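# genPassword emits a fresh random password and also drops it into /run/keys, where the
# locksmith provider below (checkUpdate/command) picks it up and then deletes it.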
in
{
services.incandescence.providers.patroni = lib.mkIf config.services.haproxy.enable {
locksmith = true;
wantedBy = [ "patroni.service" "multi-user.target" ];
partOf = [ "patroni.service" ];
wants = [ "postgresql.service" ];
after = [ "postgresql.service" ];
formulae = {
user = {
destroyAfterDays = 0;
create = user: psqlSecret "${genPassword} ${user}" ''
CREATE USER ${user} PASSWORD '@SECRET@';
'';
destroy = psqlSecret "printenv OBJECT" ''
DROP USER @SECRET@;
'';
};
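# Databases removed from the configuration are kept for 30 days before being dropped;
# users (above) are dropped immediately.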
database = {
destroyAfterDays = 30;
deps = [ "user" ];
create = db: psql ''
CREATE DATABASE ${db} OWNER ${cfg.databases.${db}.owner};
'';
destroy = psqlSecret "printenv OBJECT" ''
DROP DATABASE @SECRET@;
'';
};
};
};
services.locksmith.providers.patroni = lib.mkIf config.services.haproxy.enable {
secrets = lib.mapAttrs (user: userConfig: {
command = {
envFile = ''
echo "PGPASSWORD=$(cat /run/keys/locksmith-provider-patroni-${user})"
rm -f /run/keys/locksmith-provider-patroni-${user}
'';
pgpass = ''
echo "*:*:*:${user}:$(cat /run/keys/locksmith-provider-patroni-${user})"
rm -f /run/keys/locksmith-provider-patroni-${user}
'';
raw = ''
cat /run/keys/locksmith-provider-patroni-${user}
rm -f /run/keys/locksmith-provider-patroni-${user}
'';
}.${userConfig.locksmith.format};
checkUpdate = "test -e /run/keys/locksmith-provider-patroni-${user}";
inherit (userConfig.locksmith) nodes;
}) cfg.users;
};
}

View file

@@ -1,6 +1,11 @@
{ config, lib, ... }:
{ config, ... }:
{
imports = [
./options.nix
./incandescence.nix
];
links = {
patroni-pg-internal.ipv4 = "0.0.0.0";
patroni-api.ipv4 = "0.0.0.0";
@@ -15,6 +20,7 @@
worker = [
./worker.nix
./metrics.nix
./create-databases.nix
];
haproxy = ./haproxy.nix;
};
@@ -30,5 +36,10 @@
PATRONI_REWIND_PASSWORD = default;
metricsCredentials.nodes = nodes.worker;
};
simulacrum = {
enable = true;
deps = [ "consul" "incandescence" "locksmith" ];
settings = ./test.nix;
};
};
}

View file

@@ -0,0 +1,10 @@
{ config, lib, ... }:
{
incandescence.providers.patroni = {
objects = {
user = lib.attrNames config.patroni.users;
database = lib.attrNames config.patroni.databases;
};
};
}

View file

@@ -0,0 +1,39 @@
{ lib, ... }:
let
inherit (lib) mkOption;
inherit (lib.types) attrsOf enum listOf submodule str;
in
{
options.patroni = {
databases = mkOption {
type = attrsOf (submodule ({ name, ... }: {
options = {
owner = mkOption {
type = str;
default = name;
};
};
}));
default = {};
};
users = mkOption {
type = attrsOf (submodule ({ ... }: {
options = {
locksmith = {
nodes = mkOption {
type = listOf str;
default = [];
};
format = mkOption {
type = enum [ "pgpass" "envFile" "raw" ];
default = "pgpass";
};
};
};
}));
default = {};
};
};
}

View file

@@ -0,0 +1,155 @@
{ cluster, ... }:
let
createNode = index: { pkgs, ... }:
{
networking.firewall.allowedTCPPorts = [ 5432 8008 5010 ];
environment.systemPackages = [ pkgs.jq ];
services.patroni = {
enable = true;
softwareWatchdog = true;
settings = {
bootstrap = {
dcs = {
ttl = 30;
loop_wait = 10;
retry_timeout = 10;
maximum_lag_on_failover = 1048576;
};
initdb = [
{ encoding = "UTF8"; }
"data-checksums"
];
};
postgresql = {
use_pg_rewind = true;
use_slots = true;
authentication = {
replication = {
username = "replicator";
};
superuser = {
username = "postgres";
};
rewind = {
username = "rewind";
};
};
parameters = {
wal_level = "replica";
hot_standby_feedback = "on";
unix_socket_directories = "/tmp";
};
pg_hba = [
"host replication replicator 192.168.1.0/24 md5"
# Unsafe, do not use for anything other than tests
"host all all 0.0.0.0/0 trust"
];
};
etcd3 = {
host = "192.168.1.4:2379";
};
};
environmentFiles = {
PATRONI_REPLICATION_PASSWORD = pkgs.writeText "replication-password" "postgres";
PATRONI_SUPERUSER_PASSWORD = pkgs.writeText "superuser-password" "postgres";
PATRONI_REWIND_PASSWORD = pkgs.writeText "rewind-password" "postgres";
};
};
# We always want to restart so the tests never hang
systemd.services.patroni.serviceConfig.StartLimitIntervalSec = 0;
};
clusterName = "poseidon";
link = cluster.config.links.patroni-pg-access;
in
{
defaults = { depot, pkgs, ... }: {
environment.systemPackages = [
pkgs.jq
depot.packages.postgresql
];
services.patroni.settings.postgresql.pg_hba = [
"host all all 0.0.0.0/0 trust"
];
};
# taken from https://github.com/phfroidmont/nixpkgs/blob/patroni-module/nixos/tests/patroni.nix
testScript = ''
import json

nodeNames = json.loads('${builtins.toJSON cluster.config.services.patroni.nodes.worker}')
clientNames = json.loads('${builtins.toJSON cluster.config.services.patroni.nodes.haproxy}')
nodes = [ n for n in machines if n.name in nodeNames ]
clients = [ n for n in machines if n.name in clientNames ]

def booted(nodes):
    return filter(lambda node: node.booted, nodes)

def wait_for_all_nodes_ready(expected_replicas=2):
    booted_nodes = booted(nodes)
    for node in booted_nodes:
        node.wait_for_unit("patroni.service")
        print(node.succeed("patronictl list ${clusterName}"))
        node.wait_until_succeeds(f"[ $(patronictl list -f json ${clusterName} | jq 'length') == {expected_replicas + 1} ]")
        node.wait_until_succeeds("[ $(patronictl list -f json ${clusterName} | jq 'map(select(.Role | test(\"^Leader$\"))) | map(select(.State | test(\"^running$\"))) | length') == 1 ]")
        node.wait_until_succeeds(f"[ $(patronictl list -f json ${clusterName} | jq 'map(select(.Role | test(\"^Replica$\"))) | map(select(.State | test(\"^streaming$\"))) | length') == {expected_replicas} ]")
        print(node.succeed("patronictl list ${clusterName}"))
    for client in booted(clients):
        client.wait_until_succeeds("psql -h ${link.ipv4} -p ${link.portStr} -U postgres --command='select 1;'")

def run_dummy_queries():
    for client in booted(clients):
        client.succeed("psql -h ${link.ipv4} -p ${link.portStr} -U postgres --pset='pager=off' --tuples-only --command='insert into dummy(val) values (101);'")
        client.succeed("test $(psql -h ${link.ipv4} -p ${link.portStr} -U postgres --pset='pager=off' --tuples-only --command='select val from dummy where val = 101;') -eq 101")
        client.succeed("psql -h ${link.ipv4} -p ${link.portStr} -U postgres --pset='pager=off' --tuples-only --command='delete from dummy where val = 101;'")

start_all()

with subtest("should bootstrap a new patroni cluster"):
    wait_for_all_nodes_ready()

with subtest("should be able to insert and select"):
    booted_clients = list(booted(clients))
    booted_clients[0].succeed("psql -h ${link.ipv4} -p ${link.portStr} -U postgres --command='create table dummy as select * from generate_series(1, 100) as val;'")
    for client in booted_clients:
        client.succeed("test $(psql -h ${link.ipv4} -p ${link.portStr} -U postgres --pset='pager=off' --tuples-only --command='select count(distinct val) from dummy;') -eq 100")

with subtest("should restart after all nodes are crashed"):
    for node in nodes:
        node.crash()
    for node in nodes:
        node.start()
    wait_for_all_nodes_ready()

with subtest("should be able to run queries while any one node is crashed"):
    masterNodeName = nodes[0].succeed("patronictl list -f json ${clusterName} | jq '.[] | select(.Role | test(\"^Leader$\")) | .Member' -r").strip()
    masterNodeIndex = next((i for i, v in enumerate(nodes) if v.name == masterNodeName))

    # Move master node at the end of the list to avoid multiple failovers (makes the test faster and more consistent)
    nodes.append(nodes.pop(masterNodeIndex))

    for node in nodes:
        node.crash()
        wait_for_all_nodes_ready(1)

        # Execute some queries while a node is down.
        run_dummy_queries()

        # Restart crashed node.
        node.start()
        wait_for_all_nodes_ready()

        # Execute some queries with the node back up.
        run_dummy_queries()
'';
}

View file

@@ -25,6 +25,10 @@ in
"d '${baseDir}' 0700 patroni patroni - -"
"d '${walDir}' 0700 patroni patroni - -"
];
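# Start Patroni only once the local Consul agent is actually ready; presumably Consul
# serves as the DCS here, so starting earlier would just fail and flap.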
systemd.services.patroni = {
requires = [ "consul-ready.service" ];
after = [ "consul-ready.service" ];
};
services.patroni = {
enable = true;
name = hostName;
@@ -57,6 +61,7 @@
};
use_pg_rewind = true;
use_slots = true;
synchronous_mode = true;
authentication = {
replication.username = "patronirep";
rewind.username = "patronirew";
@@ -67,6 +72,7 @@
wal_level = "replica";
hot_standby_feedback = "on";
unix_socket_directories = "/tmp";
synchronous_commit = "on";
};
pg_hba = [
"host replication patronirep ${net} scram-sha-256"

View file

@@ -35,11 +35,6 @@ in
inherit (self'.packages) keycloak;
};
patroni = pkgs.callPackage ./patroni.nix {
inherit (self) nixosModules;
inherit (self'.packages) postgresql;
};
s3ql-upgrade = pkgs.callPackage ./s3ql-upgrade.nix {
inherit (self'.packages) s3ql;
inherit (self) nixosModules;

View file

@@ -1,211 +0,0 @@
{ nixosTest, nixosModules, postgresql }:
# taken from https://github.com/phfroidmont/nixpkgs/blob/patroni-module/nixos/tests/patroni.nix
nixosTest (
let
nodesIps = [
"192.168.1.1"
"192.168.1.2"
"192.168.1.3"
];
createNode = index: { pkgs, ... }:
let
ip = builtins.elemAt nodesIps index; # since we already use IPs to identify servers
in
{
imports = [
nixosModules.patroni
nixosModules.systemd-extras
];
networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
{ address = ip; prefixLength = 16; }
];
networking.firewall.allowedTCPPorts = [ 5432 8008 5010 ];
environment.systemPackages = [ pkgs.jq ];
services.patroni = {
enable = true;
postgresqlPackage = postgresql.withPackages (p: [ p.pg_safeupdate ]);
scope = "cluster1";
name = "node${toString(index + 1)}";
nodeIp = ip;
otherNodesIps = builtins.filter (h: h != ip) nodesIps;
softwareWatchdog = true;
settings = {
bootstrap = {
dcs = {
ttl = 30;
loop_wait = 10;
retry_timeout = 10;
maximum_lag_on_failover = 1048576;
};
initdb = [
{ encoding = "UTF8"; }
"data-checksums"
];
};
postgresql = {
use_pg_rewind = true;
use_slots = true;
authentication = {
replication = {
username = "replicator";
};
superuser = {
username = "postgres";
};
rewind = {
username = "rewind";
};
};
parameters = {
listen_addresses = "${ip}";
wal_level = "replica";
hot_standby_feedback = "on";
unix_socket_directories = "/tmp";
};
pg_hba = [
"host replication replicator 192.168.1.0/24 md5"
# Unsafe, do not use for anything other than tests
"host all all 0.0.0.0/0 trust"
];
};
etcd3 = {
host = "192.168.1.4:2379";
};
};
environmentFiles = {
PATRONI_REPLICATION_PASSWORD = pkgs.writeText "replication-password" "postgres";
PATRONI_SUPERUSER_PASSWORD = pkgs.writeText "superuser-password" "postgres";
PATRONI_REWIND_PASSWORD = pkgs.writeText "rewind-password" "postgres";
};
};
# We always want to restart so the tests never hang
systemd.services.patroni.serviceConfig.StartLimitIntervalSec = 0;
};
in
{
name = "patroni";
nodes = {
node1 = createNode 0;
node2 = createNode 1;
node3 = createNode 2;
etcd = { pkgs, ... }: {
networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
{ address = "192.168.1.4"; prefixLength = 16; }
];
services.etcd = {
enable = true;
listenClientUrls = [ "http://192.168.1.4:2379" ];
};
networking.firewall.allowedTCPPorts = [ 2379 ];
};
client = { pkgs, ... }: {
environment.systemPackages = [ postgresql ];
networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
{ address = "192.168.2.1"; prefixLength = 16; }
];
services.haproxy = {
enable = true;
config = ''
global
maxconn 100
defaults
log global
mode tcp
retries 2
timeout client 30m
timeout connect 4s
timeout server 30m
timeout check 5s
listen cluster1
bind 127.0.0.1:5432
option httpchk
http-check expect status 200
default-server inter 3s fall 3 rise 2 on-marked-down shutdown-sessions
${builtins.concatStringsSep "\n" (map (ip: "server postgresql_${ip}_5432 ${ip}:5432 maxconn 100 check port 8008") nodesIps)}
'';
};
};
};
testScript = ''
nodes = [node1, node2, node3]

def wait_for_all_nodes_ready(expected_replicas=2):
    booted_nodes = filter(lambda node: node.booted, nodes)
    for node in booted_nodes:
        print(node.succeed("patronictl list cluster1"))
        node.wait_until_succeeds(f"[ $(patronictl list -f json cluster1 | jq 'length') == {expected_replicas + 1} ]")
        node.wait_until_succeeds("[ $(patronictl list -f json cluster1 | jq 'map(select(.Role | test(\"^Leader$\"))) | map(select(.State | test(\"^running$\"))) | length') == 1 ]")
        node.wait_until_succeeds(f"[ $(patronictl list -f json cluster1 | jq 'map(select(.Role | test(\"^Replica$\"))) | map(select(.State | test(\"^streaming$\"))) | length') == {expected_replicas} ]")
        print(node.succeed("patronictl list cluster1"))
    client.wait_until_succeeds("psql -h 127.0.0.1 -U postgres --command='select 1;'")

def run_dummy_queries():
    client.succeed("psql -h 127.0.0.1 -U postgres --pset='pager=off' --tuples-only --command='insert into dummy(val) values (101);'")
    client.succeed("test $(psql -h 127.0.0.1 -U postgres --pset='pager=off' --tuples-only --command='select val from dummy where val = 101;') -eq 101")
    client.succeed("psql -h 127.0.0.1 -U postgres --pset='pager=off' --tuples-only --command='delete from dummy where val = 101;'")

start_all()

with subtest("should bootstrap a new patroni cluster"):
    wait_for_all_nodes_ready()

with subtest("should be able to insert and select"):
    client.succeed("psql -h 127.0.0.1 -U postgres --command='create table dummy as select * from generate_series(1, 100) as val;'")
    client.succeed("test $(psql -h 127.0.0.1 -U postgres --pset='pager=off' --tuples-only --command='select count(distinct val) from dummy;') -eq 100")

with subtest("should restart after all nodes are crashed"):
    for node in nodes:
        node.crash()
    for node in nodes:
        node.start()
    wait_for_all_nodes_ready()

with subtest("should be able to run queries while any one node is crashed"):
    masterNodeName = node1.succeed("patronictl list -f json cluster1 | jq '.[] | select(.Role | test(\"^Leader$\")) | .Member' -r").strip()
    masterNodeIndex = int(masterNodeName[len(masterNodeName)-1]) - 1

    # Move master node at the end of the list to avoid multiple failovers (makes the test faster and more consistent)
    nodes.append(nodes.pop(masterNodeIndex))

    for node in nodes:
        node.crash()
        wait_for_all_nodes_ready(1)

        # Execute some queries while a node is down.
        run_dummy_queries()

        # Restart crashed node.
        node.start()
        wait_for_all_nodes_ready()

        # Execute some queries with the node back up.
        run_dummy_queries()
'';
})