Compare commits
No commits in common. "43e1e0d1c3920ce1a1135ed52833a92fc4f7a68e" and "6ab5ca5abfba3e369d28f4fff666ff1fdeef4707" have entirely different histories.
43e1e0d1c3 ... 6ab5ca5abf
8 changed files with 219 additions and 171 deletions
@@ -39,9 +39,6 @@ in
           fi
         ''))
         (concatStringsSep "\n")
-        (script: if script == "" then ''
-          echo "Nothing to create"
-        '' else script)
       ];
     };
     "ignite-${provider}-${formula}-change" = mkIf (formulaConfig.change != null) {
@@ -61,9 +58,6 @@ in
           ) || echo "Change failed: ${object}"
         ''))
         (concatStringsSep "\n")
-        (script: if script == "" then ''
-          echo "Nothing to change"
-        '' else script)
       ];
     };
     "ignite-${provider}-${formula}-destroy" = {
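Both hunks above drop the same trailing stage from a lib.pipe chain: a fallback that swapped an empty concatenated script for a stub echo. A minimal sketch of the removed pattern, assuming lib's pipe and concatStringsSep (the fragments name is illustrative):

  # pipe threads a value through a list of functions left to right;
  # the final stage substituted a placeholder when nothing was generated.
  pipe fragments [
    (concatStringsSep "\n")
    (script: if script == "" then "echo nothing to do" else script)
  ]

With that stage removed, an empty pipeline now yields an empty script rather than the placeholder.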
@@ -14,10 +14,6 @@
         ./provider.nix
       ];
     };
-    simulacrum = {
-      enable = true;
-      deps = [ "chant" "consul" ];
-      settings = ./test.nix;
-    };
+    simulacrum.deps = [ "chant" "consul" ];
   };
 }
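The two simulacrum hunks (here and in the patroni module below) collapse a three-field attrset into a single dotted path, which in Nix is just shorthand for the nested form:

  # These two expressions denote the same attrset:
  { simulacrum.deps = [ "chant" "consul" ]; }
  { simulacrum = { deps = [ "chant" "consul" ]; }; }

So the change removes the explicit enable and settings fields; presumably the module now infers them, since the stub test.nix files deleted in this comparison would otherwise be dangling references.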
@@ -1,3 +0,0 @@
-{
-  testScript = '''';
-}
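For reference, '''' in the deleted stub is Nix's empty indented-string literal (two two-single-quote delimiters with nothing between them): this file declared an empty testScript and tested nothing.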
@@ -36,10 +36,6 @@
       PATRONI_REWIND_PASSWORD = default;
       metricsCredentials.nodes = nodes.worker;
     };
-    simulacrum = {
-      enable = true;
-      deps = [ "consul" "incandescence" "locksmith" ];
-      settings = ./test.nix;
-    };
+    simulacrum.deps = [ "consul" "incandescence" "locksmith" ];
   };
 }
@@ -1,150 +0,0 @@
-{ cluster, ... }:
-
-let
-  createNode = index: { pkgs, ... }:
-    {
-      networking.firewall.allowedTCPPorts = [ 5432 8008 5010 ];
-
-      environment.systemPackages = [ pkgs.jq ];
-
-      services.patroni = {
-        enable = true;
-
-        softwareWatchdog = true;
-
-        settings = {
-          bootstrap = {
-            dcs = {
-              ttl = 30;
-              loop_wait = 10;
-              retry_timeout = 10;
-              maximum_lag_on_failover = 1048576;
-            };
-            initdb = [
-              { encoding = "UTF8"; }
-              "data-checksums"
-            ];
-          };
-
-          postgresql = {
-            use_pg_rewind = true;
-            use_slots = true;
-            authentication = {
-              replication = {
-                username = "replicator";
-              };
-              superuser = {
-                username = "postgres";
-              };
-              rewind = {
-                username = "rewind";
-              };
-            };
-            parameters = {
-              wal_level = "replica";
-              hot_standby_feedback = "on";
-              unix_socket_directories = "/tmp";
-            };
-            pg_hba = [
-              "host replication replicator 192.168.1.0/24 md5"
-              # Unsafe, do not use for anything other than tests
-              "host all all 0.0.0.0/0 trust"
-            ];
-          };
-
-          etcd3 = {
-            host = "192.168.1.4:2379";
-          };
-        };
-
-        environmentFiles = {
-          PATRONI_REPLICATION_PASSWORD = pkgs.writeText "replication-password" "postgres";
-          PATRONI_SUPERUSER_PASSWORD = pkgs.writeText "superuser-password" "postgres";
-          PATRONI_REWIND_PASSWORD = pkgs.writeText "rewind-password" "postgres";
-        };
-      };
-
-      # We always want to restart so the tests never hang
-      systemd.services.patroni.serviceConfig.StartLimitIntervalSec = 0;
-    };
-
-  clusterName = "poseidon";
-  link = cluster.config.links.patroni-pg-access;
-in
-{
-  defaults = { depot, pkgs, ... }: {
-    environment.systemPackages = [
-      pkgs.jq
-      depot.packages.postgresql
-    ];
-    services.patroni.settings.postgresql.pg_hba = [
-      "host all all 0.0.0.0/0 trust"
-    ];
-  };
-
-  # taken from https://github.com/phfroidmont/nixpkgs/blob/patroni-module/nixos/tests/patroni.nix
-  testScript = ''
-    import json
-    nodeNames = json.loads('${builtins.toJSON cluster.config.services.patroni.nodes.worker}')
-    clientNames = json.loads('${builtins.toJSON cluster.config.services.patroni.nodes.haproxy}')
-    nodes = [ n for n in machines if n.name in nodeNames ]
-    clients = [ n for n in machines if n.name in clientNames ]
-
-    def wait_for_all_nodes_ready(expected_replicas=2):
-        booted_nodes = filter(lambda node: node.booted, nodes)
-        for node in booted_nodes:
-            print(node.succeed("patronictl list ${clusterName}"))
-            node.wait_until_succeeds(f"[ $(patronictl list -f json ${clusterName} | jq 'length') == {expected_replicas + 1} ]")
-            node.wait_until_succeeds("[ $(patronictl list -f json ${clusterName} | jq 'map(select(.Role | test(\"^Leader$\"))) | map(select(.State | test(\"^running$\"))) | length') == 1 ]")
-            node.wait_until_succeeds(f"[ $(patronictl list -f json ${clusterName} | jq 'map(select(.Role | test(\"^Replica$\"))) | map(select(.State | test(\"^streaming$\"))) | length') == {expected_replicas} ]")
-            print(node.succeed("patronictl list ${clusterName}"))
-        for client in clients:
-            client.wait_until_succeeds("psql -h ${link.ipv4} -p ${link.portStr} -U postgres --command='select 1;'")
-
-    def run_dummy_queries():
-        for client in clients:
-            client.succeed("psql -h ${link.ipv4} -p ${link.portStr} -U postgres --pset='pager=off' --tuples-only --command='insert into dummy(val) values (101);'")
-            client.succeed("test $(psql -h ${link.ipv4} -p ${link.portStr} -U postgres --pset='pager=off' --tuples-only --command='select val from dummy where val = 101;') -eq 101")
-            client.succeed("psql -h ${link.ipv4} -p ${link.portStr} -U postgres --pset='pager=off' --tuples-only --command='delete from dummy where val = 101;'")
-
-    start_all()
-
-    with subtest("should bootstrap a new patroni cluster"):
-        wait_for_all_nodes_ready()
-
-    with subtest("should be able to insert and select"):
-        clients[0].succeed("psql -h ${link.ipv4} -p ${link.portStr} -U postgres --command='create table dummy as select * from generate_series(1, 100) as val;'")
-        for client in clients:
-            client.succeed("test $(psql -h ${link.ipv4} -p ${link.portStr} -U postgres --pset='pager=off' --tuples-only --command='select count(distinct val) from dummy;') -eq 100")
-
-    with subtest("should restart after all nodes are crashed"):
-        for node in nodes:
-            node.crash()
-        for node in nodes:
-            node.start()
-        wait_for_all_nodes_ready()
-
-    with subtest("should be able to run queries while any one node is crashed"):
-        masterNodeName = nodes[0].succeed("patronictl list -f json ${clusterName} | jq '.[] | select(.Role | test(\"^Leader$\")) | .Member' -r").strip()
-        masterNodeIndex = next((i for i, v in enumerate(nodes) if v.name == masterNodeName), None)
-
-        # Move master node at the end of the list to avoid multiple failovers (makes the test faster and more consistent)
-        nodes.append(nodes.pop(masterNodeIndex))
-
-        for node in nodes:
-            node.crash()
-            wait_for_all_nodes_ready(1)
-
-            # Execute some queries while a node is down.
-            run_dummy_queries()
-
-            # Restart crashed node.
-            node.start()
-            wait_for_all_nodes_ready()
-
-            # Execute some queries with the node back up.
-            run_dummy_queries()
-  '';
-}
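This deleted file was the cluster-integrated version of the patroni test: it discovered node and client machines dynamically by splicing cluster metadata into the embedded Python, an idiom along these lines (names illustrative):

  # builtins.toJSON renders a Nix value as a JSON string, which the embedded
  # Python parses back; the machine list is thus resolved at eval time.
  testScript = ''
    import json
    names = json.loads('${builtins.toJSON [ "node1" "node2" ]}')
  '';

Its replacement, the standalone packages/checks/patroni.nix added below, hardcodes three nodes plus an etcd/haproxy topology instead.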
@@ -22,8 +22,7 @@ in
   config.systemd.packages = pipe config.systemd.services [
     (filterAttrs (_: v: v.distributed.enable))
     (mapAttrsToList (n: v: let
-      # inherit (v.serviceConfig) ExecStart;
-      ExecStart = builtins.trace "for service ${n}" v.serviceConfig.ExecStart;
+      inherit (v.serviceConfig) ExecStart;

       cfg = v.distributed;

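This hunk reverts a leftover debug probe. builtins.trace prints its first argument to stderr during evaluation and returns its second argument unchanged, so the removed binding was equivalent to the restored inherit apart from the logging:

  # Logs "for service <name>" when evaluated, then yields the value as-is.
  ExecStart = builtins.trace "for service ${n}" v.serviceConfig.ExecStart;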
@@ -35,6 +35,11 @@ in
     inherit (self'.packages) keycloak;
   };

+  patroni = pkgs.callPackage ./patroni.nix {
+    inherit (self) nixosModules;
+    inherit (self'.packages) postgresql;
+  };
+
   s3ql-upgrade = pkgs.callPackage ./s3ql-upgrade.nix {
     inherit (self'.packages) s3ql;
     inherit (self) nixosModules;
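The arguments passed here line up with the new file's function header, { nixosTest, nixosModules, postgresql }:. pkgs.callPackage fills any parameter it can from pkgs (here nixosTest) and takes the rest from the explicit attrset, so the check builds against this flake's own patroni and systemd-extras modules and its postgresql package rather than the nixpkgs defaults.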
packages/checks/patroni.nix (new file, 211 lines)
@@ -0,0 +1,211 @@
+{ nixosTest, nixosModules, postgresql }:
+
+# taken from https://github.com/phfroidmont/nixpkgs/blob/patroni-module/nixos/tests/patroni.nix
+nixosTest (
+  let
+    nodesIps = [
+      "192.168.1.1"
+      "192.168.1.2"
+      "192.168.1.3"
+    ];
+
+    createNode = index: { pkgs, ... }:
+      let
+        ip = builtins.elemAt nodesIps index; # since we already use IPs to identify servers
+      in
+      {
+        imports = [
+          nixosModules.patroni
+          nixosModules.systemd-extras
+        ];
+
+        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+          { address = ip; prefixLength = 16; }
+        ];
+
+        networking.firewall.allowedTCPPorts = [ 5432 8008 5010 ];
+
+        environment.systemPackages = [ pkgs.jq ];
+
+        services.patroni = {
+          enable = true;
+
+          postgresqlPackage = postgresql.withPackages (p: [ p.pg_safeupdate ]);
+
+          scope = "cluster1";
+          name = "node${toString(index + 1)}";
+          nodeIp = ip;
+          otherNodesIps = builtins.filter (h: h != ip) nodesIps;
+          softwareWatchdog = true;
+
+          settings = {
+            bootstrap = {
+              dcs = {
+                ttl = 30;
+                loop_wait = 10;
+                retry_timeout = 10;
+                maximum_lag_on_failover = 1048576;
+              };
+              initdb = [
+                { encoding = "UTF8"; }
+                "data-checksums"
+              ];
+            };
+
+            postgresql = {
+              use_pg_rewind = true;
+              use_slots = true;
+              authentication = {
+                replication = {
+                  username = "replicator";
+                };
+                superuser = {
+                  username = "postgres";
+                };
+                rewind = {
+                  username = "rewind";
+                };
+              };
+              parameters = {
+                listen_addresses = "${ip}";
+                wal_level = "replica";
+                hot_standby_feedback = "on";
+                unix_socket_directories = "/tmp";
+              };
+              pg_hba = [
+                "host replication replicator 192.168.1.0/24 md5"
+                # Unsafe, do not use for anything other than tests
+                "host all all 0.0.0.0/0 trust"
+              ];
+            };
+
+            etcd3 = {
+              host = "192.168.1.4:2379";
+            };
+          };
+
+          environmentFiles = {
+            PATRONI_REPLICATION_PASSWORD = pkgs.writeText "replication-password" "postgres";
+            PATRONI_SUPERUSER_PASSWORD = pkgs.writeText "superuser-password" "postgres";
+            PATRONI_REWIND_PASSWORD = pkgs.writeText "rewind-password" "postgres";
+          };
+        };
+
+        # We always want to restart so the tests never hang
+        systemd.services.patroni.serviceConfig.StartLimitIntervalSec = 0;
+      };
+  in
+  {
+    name = "patroni";
+
+    nodes = {
+      node1 = createNode 0;
+      node2 = createNode 1;
+      node3 = createNode 2;
+
+      etcd = { pkgs, ... }: {
+        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+          { address = "192.168.1.4"; prefixLength = 16; }
+        ];
+
+        services.etcd = {
+          enable = true;
+          listenClientUrls = [ "http://192.168.1.4:2379" ];
+        };
+
+        networking.firewall.allowedTCPPorts = [ 2379 ];
+      };
+
+      client = { pkgs, ... }: {
+        environment.systemPackages = [ postgresql ];
+
+        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+          { address = "192.168.2.1"; prefixLength = 16; }
+        ];
+
+        services.haproxy = {
+          enable = true;
+          config = ''
+            global
+                maxconn 100
+
+            defaults
+                log global
+                mode tcp
+                retries 2
+                timeout client 30m
+                timeout connect 4s
+                timeout server 30m
+                timeout check 5s
+
+            listen cluster1
+                bind 127.0.0.1:5432
+                option httpchk
+                http-check expect status 200
+                default-server inter 3s fall 3 rise 2 on-marked-down shutdown-sessions
+                ${builtins.concatStringsSep "\n" (map (ip: "server postgresql_${ip}_5432 ${ip}:5432 maxconn 100 check port 8008") nodesIps)}
+          '';
+        };
+      };
+    };
+
+    testScript = ''
+      nodes = [node1, node2, node3]
+
+      def wait_for_all_nodes_ready(expected_replicas=2):
+          booted_nodes = filter(lambda node: node.booted, nodes)
+          for node in booted_nodes:
+              print(node.succeed("patronictl list cluster1"))
+              node.wait_until_succeeds(f"[ $(patronictl list -f json cluster1 | jq 'length') == {expected_replicas + 1} ]")
+              node.wait_until_succeeds("[ $(patronictl list -f json cluster1 | jq 'map(select(.Role | test(\"^Leader$\"))) | map(select(.State | test(\"^running$\"))) | length') == 1 ]")
+              node.wait_until_succeeds(f"[ $(patronictl list -f json cluster1 | jq 'map(select(.Role | test(\"^Replica$\"))) | map(select(.State | test(\"^streaming$\"))) | length') == {expected_replicas} ]")
+              print(node.succeed("patronictl list cluster1"))
+          client.wait_until_succeeds("psql -h 127.0.0.1 -U postgres --command='select 1;'")
+
+      def run_dummy_queries():
+          client.succeed("psql -h 127.0.0.1 -U postgres --pset='pager=off' --tuples-only --command='insert into dummy(val) values (101);'")
+          client.succeed("test $(psql -h 127.0.0.1 -U postgres --pset='pager=off' --tuples-only --command='select val from dummy where val = 101;') -eq 101")
+          client.succeed("psql -h 127.0.0.1 -U postgres --pset='pager=off' --tuples-only --command='delete from dummy where val = 101;'")
+
+      start_all()
+
+      with subtest("should bootstrap a new patroni cluster"):
+          wait_for_all_nodes_ready()
+
+      with subtest("should be able to insert and select"):
+          client.succeed("psql -h 127.0.0.1 -U postgres --command='create table dummy as select * from generate_series(1, 100) as val;'")
+          client.succeed("test $(psql -h 127.0.0.1 -U postgres --pset='pager=off' --tuples-only --command='select count(distinct val) from dummy;') -eq 100")
+
+      with subtest("should restart after all nodes are crashed"):
+          for node in nodes:
+              node.crash()
+          for node in nodes:
+              node.start()
+          wait_for_all_nodes_ready()
+
+      with subtest("should be able to run queries while any one node is crashed"):
+          masterNodeName = node1.succeed("patronictl list -f json cluster1 | jq '.[] | select(.Role | test(\"^Leader$\")) | .Member' -r").strip()
+          masterNodeIndex = int(masterNodeName[len(masterNodeName)-1]) - 1

+          # Move master node at the end of the list to avoid multiple failovers (makes the test faster and more consistent)
+          nodes.append(nodes.pop(masterNodeIndex))
+
+          for node in nodes:
+              node.crash()
+              wait_for_all_nodes_ready(1)
+
+              # Execute some queries while a node is down.
+              run_dummy_queries()
+
+              # Restart crashed node.
+              node.start()
+              wait_for_all_nodes_ready()
+
+              # Execute some queries with the node back up.
+              run_dummy_queries()
+    '';
+  })
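For orientation: nixosTest boots each attribute of nodes as a NixOS VM and drives them from the Python testScript, where machine objects expose methods such as succeed, crash, start, and wait_until_succeeds, as used above. A minimal sketch of the same shape (names illustrative):

  nixosTest {
    name = "smoke";
    nodes.machine = { pkgs, ... }: { };
    testScript = ''
      start_all()
      machine.wait_for_unit("multi-user.target")
    '';
  }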