Merge branch 'updates-20220618'
commit 6c3b268306
32 changed files with 419 additions and 222 deletions
168
flake.lock
|
@ -45,11 +45,11 @@
|
|||
"crane": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1644785799,
|
||||
"narHash": "sha256-VpAJO1L0XeBvtCuNGK4IDKp6ENHIpTrlaZT7yfBCvwo=",
|
||||
"lastModified": 1654444508,
|
||||
"narHash": "sha256-4OBvQ4V7jyt7afs6iKUvRzJ1u/9eYnKzVQbeQdiamuY=",
|
||||
"owner": "ipetkov",
|
||||
"repo": "crane",
|
||||
"rev": "fc7a94f841347c88f2cb44217b2a3faa93e2a0b2",
|
||||
"rev": "db5482bf225acc3160899124a1df5a617cfa27b5",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -85,6 +85,22 @@
|
|||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1654858401,
|
||||
"narHash": "sha256-53bw34DtVJ2bnF6WEwy6Tym+qY0pNEiEwARUlvmTZjs=",
|
||||
"owner": "numtide",
|
||||
"repo": "devshell",
|
||||
"rev": "f55e05c6d3bbe9acc7363bc8fc739518b2f02976",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "devshell",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"devshell_2": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1653917170,
|
||||
"narHash": "sha256-FyxOnEE/V4PNEcMU62ikY4FfYPo349MOhMM97HS0XEo=",
|
||||
|
@ -103,6 +119,7 @@
|
|||
"inputs": {
|
||||
"alejandra": "alejandra",
|
||||
"crane": "crane",
|
||||
"devshell": "devshell_2",
|
||||
"flake-utils-pre-commit": "flake-utils-pre-commit",
|
||||
"gomod2nix": "gomod2nix",
|
||||
"mach-nix": "mach-nix",
|
||||
|
@ -114,11 +131,11 @@
|
|||
"pre-commit-hooks": "pre-commit-hooks"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1653944295,
|
||||
"narHash": "sha256-xoFmfL71JS/wP5SvkupqDB7SNhDFmb77dyiyniNAwYs=",
|
||||
"lastModified": 1655326915,
|
||||
"narHash": "sha256-jh8HXBycUQ6JljIqPN53Q4p4kmaYnL5ZL7fu3WHK9dk=",
|
||||
"owner": "nix-community",
|
||||
"repo": "dream2nix",
|
||||
"rev": "ca7f4d0a7fb79813b446ebce097c3db538b37b8c",
|
||||
"rev": "caa9c4b5ef1c2d6f81f2651927b01f246b3d78a9",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -144,6 +161,22 @@
|
|||
}
|
||||
},
|
||||
"flake-compat_2": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1650374568,
|
||||
"narHash": "sha256-Z+s0J8/r907g149rllvwhb4pKi8Wam5ij0st8PwAh+E=",
|
||||
"owner": "edolstra",
|
||||
"repo": "flake-compat",
|
||||
"rev": "b4a34015c698c7793d592d66adbab377907a2be8",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "edolstra",
|
||||
"repo": "flake-compat",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-compat_3": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1603796912,
|
||||
|
@ -295,11 +328,11 @@
|
|||
"pre-commit-hooks-nix": "pre-commit-hooks-nix"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1653571057,
|
||||
"narHash": "sha256-uh5R2O2qmQVDoFnUVVJnOO4amiEFjsShA6B58qzrmBI=",
|
||||
"lastModified": 1655108975,
|
||||
"narHash": "sha256-BVE61UMEhmXTCkMLoIyuOfGjV0Z4yHNtIiC5VYe02FM=",
|
||||
"owner": "hercules-ci",
|
||||
"repo": "hercules-ci-agent",
|
||||
"rev": "3822c49d81c2ccec4cffd2d1b2897dd86290bb14",
|
||||
"rev": "2ee7b49b01068d0fbd5bec61fdcd12b525dab5d7",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -313,11 +346,11 @@
|
|||
"nixpkgs": "nixpkgs_3"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1653841712,
|
||||
"narHash": "sha256-XBF4i1MuIRAEbFpj3Z3fVaYxzNEsYapyENtw3vG+q1I=",
|
||||
"lastModified": 1655158531,
|
||||
"narHash": "sha256-5LeaONqA6pgSNeA39gzu5XUipw3mXNZ04LUiy2TVImU=",
|
||||
"owner": "hercules-ci",
|
||||
"repo": "hercules-ci-effects",
|
||||
"rev": "e14d2131b7c81acca3904b584ac45fb72da64dd2",
|
||||
"rev": "bda248e06dc44cbba9f4db350abbb10c3fe3b6fd",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -328,16 +361,20 @@
|
|||
},
|
||||
"home-manager": {
|
||||
"inputs": {
|
||||
"flake-compat": "flake-compat_2",
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
]
|
||||
],
|
||||
"nmd": "nmd",
|
||||
"nmt": "nmt",
|
||||
"utils": "utils_2"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1653943687,
|
||||
"narHash": "sha256-xXW9t24HLf89+n/92kOqRRfOBE3KDna+9rAOefs5WSQ=",
|
||||
"lastModified": 1655381586,
|
||||
"narHash": "sha256-2IrSYYjxoT+iOihSiH0Elo9wzjbHjDSH+qPvI5BklCs=",
|
||||
"owner": "nix-community",
|
||||
"repo": "home-manager",
|
||||
"rev": "8f3e26705178cc8c1d982d37d881fc0d5b5b1837",
|
||||
"rev": "1de492f6f8e9937c822333739c5d5b20d93bf49f",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -396,7 +433,7 @@
|
|||
},
|
||||
"mms": {
|
||||
"inputs": {
|
||||
"flake-compat": "flake-compat_2",
|
||||
"flake-compat": "flake-compat_3",
|
||||
"flake-utils": "flake-utils_3",
|
||||
"nix": "nix",
|
||||
"nixpkgs": [
|
||||
|
@ -486,11 +523,11 @@
|
|||
"nixpkgs-regression": "nixpkgs-regression"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1653842047,
|
||||
"narHash": "sha256-rm8OIwU0+V9KMooDvj4Hdwio5MWjAn6CvdM3MU2tGhk=",
|
||||
"lastModified": 1655504882,
|
||||
"narHash": "sha256-R3pRcYsxpHuCI4Z/XeiBle6qYQWt8IriZP3vz58OpMk=",
|
||||
"ref": "refs/heads/master",
|
||||
"rev": "c6087c318fbc238269487ec3feee3d6ad762aee7",
|
||||
"revCount": 12253,
|
||||
"rev": "6281f78ce2059dbbcc98319cff773de5d71fd327",
|
||||
"revCount": 12340,
|
||||
"type": "git",
|
||||
"url": "https://git.privatevoid.net/max/nix-super-fork"
|
||||
},
|
||||
|
@ -525,9 +562,10 @@
|
|||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"id": "nixpkgs",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
|
||||
"type": "indirect"
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs_2": {
|
||||
|
@ -579,26 +617,11 @@
|
|||
},
|
||||
"nixpkgs_5": {
|
||||
"locked": {
|
||||
"lastModified": 1645296114,
|
||||
"narHash": "sha256-y53N7TyIkXsjMpOG7RhvqJFGDacLs9HlyHeSTBioqYU=",
|
||||
"lastModified": 1653988320,
|
||||
"narHash": "sha256-ZaqFFsSDipZ6KVqriwM34T739+KLYJvNmCWzErjAg7c=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "530a53dcbc9437363471167a5e4762c5fcfa34a1",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"id": "nixpkgs",
|
||||
"ref": "nixos-21.05-small",
|
||||
"type": "indirect"
|
||||
}
|
||||
},
|
||||
"nixpkgs_6": {
|
||||
"locked": {
|
||||
"lastModified": 1653948565,
|
||||
"narHash": "sha256-jYfs8TQw/xRKOGg7NV+hVEZfYAVnqk4yEKhw111N4h4=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "7c1e79e294fe1be3cacb6408e3983bf2836c818e",
|
||||
"rev": "2fa57ed190fd6c7c746319444f34b5917666e5c1",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
|
@ -608,6 +631,54 @@
|
|||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs_6": {
|
||||
"locked": {
|
||||
"lastModified": 1655421536,
|
||||
"narHash": "sha256-CjPYLRJj/aglDiY+755CYazTugGco0quzlTo1arVil0=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "87d9c84817d7be81850c07e8f6a362b1dfc30feb",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-22.05-small",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nmd": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1653339422,
|
||||
"narHash": "sha256-8nc7lcYOgih3YEmRMlBwZaLLJYpLPYKBlewqHqx8ieg=",
|
||||
"owner": "rycee",
|
||||
"repo": "nmd",
|
||||
"rev": "9e7a20e6ee3f6751f699f79c0b299390f81f7bcd",
|
||||
"type": "gitlab"
|
||||
},
|
||||
"original": {
|
||||
"owner": "rycee",
|
||||
"repo": "nmd",
|
||||
"type": "gitlab"
|
||||
}
|
||||
},
|
||||
"nmt": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1648075362,
|
||||
"narHash": "sha256-u36WgzoA84dMVsGXzml4wZ5ckGgfnvS0ryzo/3zn/Pc=",
|
||||
"owner": "rycee",
|
||||
"repo": "nmt",
|
||||
"rev": "d83601002c99b78c89ea80e5e6ba21addcfe12ae",
|
||||
"type": "gitlab"
|
||||
},
|
||||
"original": {
|
||||
"owner": "rycee",
|
||||
"repo": "nmt",
|
||||
"type": "gitlab"
|
||||
}
|
||||
},
|
||||
"node2nix": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
|
@ -718,6 +789,21 @@
|
|||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"utils_2": {
|
||||
"locked": {
|
||||
"lastModified": 1653893745,
|
||||
"narHash": "sha256-0jntwV3Z8//YwuOjzhV2sgJJPt+HY6KhU7VZUL0fKZQ=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "1ed9fb1935d260de5fe1c2f7ee0ebaae17ed2fa1",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
|
|
|
@ -2,19 +2,19 @@
|
|||
let
|
||||
inherit (tools.meta) domain;
|
||||
apiAddr = "api.${domain}";
|
||||
proxyTarget = "http://127.0.0.1:${config.portsStr.api}";
|
||||
proxyTarget = config.links.api.url;
|
||||
proxy = tools.nginx.vhosts.proxy proxyTarget;
|
||||
in
|
||||
{
|
||||
# n8n uses "Sustainable Use License"
|
||||
nixpkgs.config.allowUnfree = true;
|
||||
|
||||
reservePortsFor = [ "api" ];
|
||||
links.api.protocol = "http";
|
||||
|
||||
services.n8n = {
|
||||
enable = true;
|
||||
settings = {
|
||||
port = config.ports.api;
|
||||
inherit (config.links.api) port;
|
||||
};
|
||||
};
|
||||
|
||||
|
|
|
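The n8n module above is the first of many services in this commit migrated from the old reservePortsFor/ports/portsStr options to the new links mechanism. The recurring pattern, sketched below with placeholder names (myService, my.example.org and the settings.port option are illustrative, not from the repo), is: declare links.<name>.protocol, then consume the derived port and URL wherever an address used to be interpolated by hand.

{ config, tools, ... }:
{
  # Declare the link; the port itself is derived deterministically
  # (see modules/port-magic/link.nix further down in this diff).
  links.myService.protocol = "http";

  # Consume it instead of hand-written "http://127.0.0.1:${portsStr.*}" strings.
  services.myService.settings.port = config.links.myService.port;   # hypothetical option
  services.nginx.virtualHosts."my.example.org" =
    tools.nginx.vhosts.proxy config.links.myService.url;            # pattern as in the hunks above
}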
@ -1,17 +1,17 @@
|
|||
{ config, lib, tools, ... }:
|
||||
with tools.nginx;
|
||||
{
|
||||
reservePortsFor = [ "bitwarden" ];
|
||||
links.bitwarden.protocol = "http";
|
||||
|
||||
services.nginx.virtualHosts = mappers.mapSubdomains {
|
||||
keychain = vhosts.proxy "http://127.0.0.1:${config.portsStr.bitwarden}";
|
||||
keychain = vhosts.proxy config.links.bitwarden.url;
|
||||
};
|
||||
services.vaultwarden = {
|
||||
enable = true;
|
||||
backupDir = "/srv/storage/private/bitwarden/backups";
|
||||
config = {
|
||||
dataFolder = "/srv/storage/private/bitwarden/data";
|
||||
rocketPort = config.ports.bitwarden;
|
||||
rocketPort = config.links.bitwarden.port;
|
||||
};
|
||||
#environmentFile = ""; # TODO: agenix
|
||||
};
|
||||
|
|
|
@ -17,8 +17,6 @@ let
|
|||
(mapPaths config)
|
||||
);
|
||||
|
||||
port = config.portsStr.ghost;
|
||||
|
||||
contentPath = "/srv/storage/private/ghost";
|
||||
in
|
||||
|
||||
|
@ -29,7 +27,7 @@ in
|
|||
mode = "0400";
|
||||
};
|
||||
|
||||
reservePortsFor = [ "ghost" ];
|
||||
links.ghost.protocol = "http";
|
||||
|
||||
users.users.ghost = {
|
||||
isSystemUser = true;
|
||||
|
@ -99,7 +97,7 @@ in
|
|||
};
|
||||
server = {
|
||||
host = "127.0.0.1";
|
||||
inherit port;
|
||||
inherit (config.links.ghost) port;
|
||||
};
|
||||
|
||||
privacy.useTinfoil = true;
|
||||
|
@ -110,6 +108,6 @@ in
|
|||
};
|
||||
};
|
||||
|
||||
services.nginx.virtualHosts."blog.${domain}" = tools.nginx.vhosts.proxy "http://127.0.0.1:${port}";
|
||||
services.nginx.virtualHosts."blog.${domain}" = tools.nginx.vhosts.proxy config.links.ghost.url;
|
||||
|
||||
}
|
||||
|
|
|
@ -1,7 +1,21 @@
|
|||
{ config, lib, tools, ... }:
|
||||
with tools.nginx;
|
||||
{
|
||||
reservePortsFor = [ "ombi" ];
|
||||
links = {
|
||||
ombi.protocol = "http";
|
||||
radarr = {
|
||||
protocol = "http";
|
||||
port = 7878;
|
||||
};
|
||||
sonarr = {
|
||||
protocol = "http";
|
||||
port = 8989;
|
||||
};
|
||||
prowlarr = {
|
||||
protocol = "http";
|
||||
port = 9696;
|
||||
};
|
||||
};
|
||||
|
||||
services = {
|
||||
radarr = {
|
||||
|
@ -15,14 +29,14 @@ with tools.nginx;
|
|||
};
|
||||
ombi = {
|
||||
enable = true;
|
||||
port = config.ports.ombi;
|
||||
inherit (config.links.ombi) port;
|
||||
};
|
||||
|
||||
nginx.virtualHosts = mappers.mapSubdomains {
|
||||
radarr = vhosts.proxy "http://127.0.0.1:7878";
|
||||
sonarr = vhosts.proxy "http://127.0.0.1:8989";
|
||||
fbi-index = vhosts.proxy "http://127.0.0.1:9696";
|
||||
fbi-requests = vhosts.proxy "http://127.0.0.1:${config.portsStr.ombi}";
|
||||
nginx.virtualHosts = with config.links; mappers.mapSubdomains {
|
||||
radarr = vhosts.proxy radarr.url;
|
||||
sonarr = vhosts.proxy sonarr.url;
|
||||
fbi-index = vhosts.proxy prowlarr.url;
|
||||
fbi-requests = vhosts.proxy ombi.url;
|
||||
};
|
||||
};
|
||||
systemd.services.radarr.serviceConfig.Slice = "mediamanagement.slice";
|
||||
|
|
|
@ -31,15 +31,15 @@ in
|
|||
)
|
||||
);
|
||||
|
||||
reservePortsFor = [ "hydra" ];
|
||||
links.hydra.protocol = "http";
|
||||
|
||||
services.nginx.appendHttpConfig = ''
|
||||
limit_req_zone $binary_remote_addr zone=hydra_api_push_limiter:10m rate=1r/m;
|
||||
'';
|
||||
|
||||
services.nginx.virtualHosts."hydra.${domain}" = lib.recursiveUpdate (tools.nginx.vhosts.proxy "http://127.0.0.1:${config.portsStr.hydra}") {
|
||||
services.nginx.virtualHosts."hydra.${domain}" = lib.recursiveUpdate (tools.nginx.vhosts.proxy config.links.hydra.url) {
|
||||
locations."/api/push" = {
|
||||
proxyPass = "http://127.0.0.1:${config.portsStr.hydra}";
|
||||
proxyPass = config.links.hydra.url;
|
||||
extraConfig = ''
|
||||
auth_request off;
|
||||
proxy_method PUT;
|
||||
|
@ -54,7 +54,7 @@ in
|
|||
services.hydra = {
|
||||
enable = true;
|
||||
hydraURL = "https://hydra.${domain}";
|
||||
port = config.ports.hydra;
|
||||
inherit (config.links.hydra) port;
|
||||
notificationSender = "hydra@${domain}";
|
||||
buildMachinesFiles = [ "/etc/nix/hydra-machines" ];
|
||||
useSubstitutes = true;
|
||||
|
|
|
@ -3,7 +3,7 @@ with tools.nginx;
|
|||
let
|
||||
inherit (tools.meta) domain;
|
||||
cfg = config.services.ipfs;
|
||||
gwPort = config.portsStr.ipfsGateway;
|
||||
gw = config.links.ipfsGateway;
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
|
@ -31,7 +31,7 @@ in
|
|||
services.nginx.virtualHosts = {
|
||||
"top-level.${domain}".locations = {
|
||||
"~ ^/ip[fn]s" = {
|
||||
proxyPass = "http://127.0.0.1:${gwPort}";
|
||||
proxyPass = gw.url;
|
||||
extraConfig = ''
|
||||
add_header X-Content-Type-Options "";
|
||||
add_header Access-Control-Allow-Origin *;
|
||||
|
@ -43,7 +43,7 @@ in
|
|||
locations = {
|
||||
"= /".return = "404";
|
||||
"~ ^/ip[fn]s" = {
|
||||
proxyPass = "http://127.0.0.1:${gwPort}";
|
||||
proxyPass = gw.url;
|
||||
extraConfig = ''
|
||||
add_header X-Content-Type-Options "";
|
||||
add_header Access-Control-Allow-Origin *;
|
||||
|
@ -54,7 +54,7 @@ in
|
|||
};
|
||||
"ipfs.admin.${domain}" = vhosts.basic // {
|
||||
locations."/api".proxyPass = "http://unix:/run/ipfs/ipfs-api.sock:";
|
||||
locations."/ipns/webui.ipfs.${domain}".proxyPass = "http://127.0.0.1:${gwPort}/ipns/webui.ipfs.${domain}";
|
||||
locations."/ipns/webui.ipfs.${domain}".proxyPass = "${gw.url}/ipns/webui.ipfs.${domain}";
|
||||
locations."= /".return = "302 /ipns/webui.ipfs.${domain}";
|
||||
locations."/debug/metrics/prometheus" = {
|
||||
proxyPass = "http://unix:/run/ipfs/ipfs-api.sock:";
|
||||
|
@ -85,7 +85,7 @@ in
|
|||
useACMEHost = "ipfs.${domain}";
|
||||
locations = {
|
||||
"/" = {
|
||||
proxyPass = "http://127.0.0.1:${gwPort}";
|
||||
proxyPass = gw.url;
|
||||
extraConfig = ''
|
||||
add_header X-Content-Type-Options "";
|
||||
add_header Access-Control-Allow-Origin *;
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
{ config, pkgs, ... }:
|
||||
{
|
||||
reservePortsFor = [ "mc-num" ];
|
||||
links.mc-num = {};
|
||||
services.modded-minecraft-servers.instances.num = {
|
||||
enable = true;
|
||||
rsyncSSHKeys = [
|
||||
|
@ -10,7 +10,7 @@
|
|||
jvmInitialAllocation = "2G";
|
||||
jvmMaxAllocation = "8G";
|
||||
serverConfig = {
|
||||
server-port = config.ports.mc-num;
|
||||
server-port = config.links.mc-num.port;
|
||||
motd = "Welcome to num's minecraft server";
|
||||
};
|
||||
};
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
let
|
||||
inherit (tools.meta) domain;
|
||||
|
||||
inherit (config) ports portsStr;
|
||||
inherit (config) links;
|
||||
|
||||
cfg = { inherit (config.services) loki; };
|
||||
|
||||
|
@ -34,10 +34,21 @@ in
|
|||
file = ../../../../secrets/grafana-secrets.age;
|
||||
};
|
||||
|
||||
reservePortsFor = [ "grafana" "prometheus" "loki" "loki-grpc" ];
|
||||
links = {
|
||||
grafana.protocol = "http";
|
||||
prometheus.protocol = "http";
|
||||
loki = {
|
||||
protocol = "http";
|
||||
ipv4 = myNode.hypr.addr;
|
||||
};
|
||||
loki-grpc = {
|
||||
protocol = "grpc";
|
||||
ipv4 = myNode.hypr.addr;
|
||||
};
|
||||
};
|
||||
services.grafana = {
|
||||
enable = true;
|
||||
port = ports.grafana;
|
||||
inherit (links.grafana) port;
|
||||
rootUrl = "https://monitoring.${domain}/";
|
||||
dataDir = "/srv/storage/private/grafana";
|
||||
analytics.reporting.enable = false;
|
||||
|
@ -63,13 +74,13 @@ in
|
|||
datasources = [
|
||||
{
|
||||
name = "Prometheus";
|
||||
url = "http://127.0.0.1:${portsStr.prometheus}";
|
||||
inherit (links.prometheus) url;
|
||||
type = "prometheus";
|
||||
isDefault = true;
|
||||
}
|
||||
{
|
||||
name = "Loki";
|
||||
url = "http://${myNode.hypr.addr}:${portsStr.loki}";
|
||||
inherit (links.loki) url;
|
||||
type = "loki";
|
||||
}
|
||||
];
|
||||
|
@ -80,14 +91,14 @@ in
|
|||
EnvironmentFile = config.age.secrets.grafana-secrets.path;
|
||||
};
|
||||
|
||||
services.nginx.virtualHosts."monitoring.${domain}" = lib.recursiveUpdate (tools.nginx.vhosts.proxy "http://127.0.0.1:${portsStr.grafana}") {
|
||||
services.nginx.virtualHosts."monitoring.${domain}" = lib.recursiveUpdate (tools.nginx.vhosts.proxy links.grafana.url) {
|
||||
locations."/".proxyWebsockets = true;
|
||||
};
|
||||
|
||||
services.prometheus = {
|
||||
enable = true;
|
||||
listenAddress = "127.0.0.1";
|
||||
port = ports.prometheus;
|
||||
listenAddress = links.prometheus.ipv4;
|
||||
inherit (links.prometheus) port;
|
||||
globalConfig = {
|
||||
scrape_interval = "60s";
|
||||
};
|
||||
|
@ -130,12 +141,12 @@ in
|
|||
auth_enabled = false;
|
||||
server = {
|
||||
log_level = "warn";
|
||||
http_listen_address = myNode.hypr.addr;
|
||||
http_listen_port = ports.loki;
|
||||
grpc_listen_address = "127.0.0.1";
|
||||
grpc_listen_port = ports.loki-grpc;
|
||||
http_listen_address = links.loki.ipv4;
|
||||
http_listen_port = links.loki.port;
|
||||
grpc_listen_address = links.loki-grpc.ipv4;
|
||||
grpc_listen_port = links.loki-grpc.port;
|
||||
};
|
||||
frontend_worker.frontend_address = "127.0.0.1:${portsStr.loki-grpc}";
|
||||
frontend_worker.frontend_address = links.loki-grpc.tuple;
|
||||
ingester = {
|
||||
lifecycler = {
|
||||
address = "127.0.0.1";
|
||||
|
|
|
@ -10,8 +10,8 @@ in
|
|||
{
|
||||
services.nginx.upstreams.nar-serve.extraConfig = ''
|
||||
random;
|
||||
server 127.0.0.1:${config.portsStr.nar-serve-self} fail_timeout=0;
|
||||
server 127.0.0.1:${config.portsStr.nar-serve-nixos-org} fail_timeout=0;
|
||||
server ${config.links.nar-serve-self.tuple} fail_timeout=0;
|
||||
server ${config.links.nar-serve-nixos-org.tuple} fail_timeout=0;
|
||||
'';
|
||||
services.nginx.appendHttpConfig = ''
|
||||
proxy_cache_path /var/cache/nginx/nixstore levels=1:2 keys_zone=nixstore:10m max_size=10g inactive=24h use_temp_path=off;
|
||||
|
|
|
@ -12,11 +12,11 @@
|
|||
};
|
||||
in
|
||||
{
|
||||
reservePortsFor = [
|
||||
"nar-serve-self"
|
||||
"nar-serve-nixos-org"
|
||||
];
|
||||
links = {
|
||||
nar-serve-self.protocol = "http";
|
||||
nar-serve-nixos-org.protocol = "http";
|
||||
};
|
||||
|
||||
systemd.services.nar-serve-self = mkNarServe "https://cache.${tools.meta.domain}" config.portsStr.nar-serve-self;
|
||||
systemd.services.nar-serve-nixos-org = mkNarServe "https://cache.nixos.org" config.portsStr.nar-serve-nixos-org;
|
||||
systemd.services.nar-serve-self = mkNarServe "https://cache.${tools.meta.domain}" config.links.nar-serve-self.portStr;
|
||||
systemd.services.nar-serve-nixos-org = mkNarServe "https://cache.nixos.org" config.links.nar-serve-nixos-org.portStr;
|
||||
}
|
||||
|
|
|
@ -1,8 +1,7 @@
|
|||
{ config, inputs, lib, pkgs, tools, ... }:
|
||||
with tools.nginx;
|
||||
let
|
||||
minioPort = config.portsStr.minio;
|
||||
consolePort = config.portsStr.minioConsole;
|
||||
inherit (config) links;
|
||||
|
||||
mapPaths = lib.mapAttrsRecursive (
|
||||
path: value: lib.nameValuePair
|
||||
|
@ -17,7 +16,10 @@ let
|
|||
);
|
||||
in
|
||||
{
|
||||
reservePortsFor = [ "minio" "minioConsole" ];
|
||||
links = {
|
||||
minio.protocol = "http";
|
||||
minioConsole.protocol = "http";
|
||||
};
|
||||
|
||||
age.secrets.minio-root-credentials = {
|
||||
file = ../../../../secrets/minio-root-credentials.age;
|
||||
|
@ -30,8 +32,8 @@ in
|
|||
rootCredentialsFile = config.age.secrets.minio-root-credentials.path;
|
||||
dataDir = [ "/srv/storage/objects" ];
|
||||
browser = true;
|
||||
listenAddress = "127.0.0.1:${minioPort}";
|
||||
consoleAddress = "127.0.0.1:${consolePort}";
|
||||
listenAddress = links.minio.tuple;
|
||||
consoleAddress = links.minioConsole.tuple;
|
||||
};
|
||||
systemd.services.minio.serviceConfig = {
|
||||
Slice = "remotefshost.slice";
|
||||
|
@ -40,17 +42,17 @@ in
|
|||
# TODO: vhosts.proxy?
|
||||
"object-storage" = vhosts.basic // {
|
||||
locations = {
|
||||
"/".proxyPass = "http://127.0.0.1:${minioPort}";
|
||||
"= /dashboard".proxyPass = "http://127.0.0.1:${minioPort}";
|
||||
"/".proxyPass = links.minio.url;
|
||||
"= /dashboard".proxyPass = links.minio.url;
|
||||
};
|
||||
extraConfig = "client_max_body_size 4G;";
|
||||
};
|
||||
"console.object-storage" = vhosts.basic // {
|
||||
locations = {
|
||||
"/".proxyPass = "http://127.0.0.1:${consolePort}";
|
||||
"/".proxyPass = links.minioConsole.url;
|
||||
};
|
||||
};
|
||||
"cdn" = lib.recursiveUpdate (vhosts.proxy "http://127.0.0.1:${minioPort}/content-delivery$request_uri") {
|
||||
"cdn" = lib.recursiveUpdate (vhosts.proxy "${links.minio.url}/content-delivery$request_uri") {
|
||||
locations."= /".return = "302 /index.html";
|
||||
};
|
||||
};
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
{ config, inputs, lib, pkgs, tools, ... }:
|
||||
let
|
||||
port = config.portsStr.searxng;
|
||||
inherit (config) links;
|
||||
in
|
||||
{
|
||||
reservePortsFor = [ "searxng" ];
|
||||
links.searxng.protocol = "http";
|
||||
|
||||
age.secrets.searxng-secrets.file = ../../../../secrets/searxng-secrets.age;
|
||||
services.searx = {
|
||||
|
@ -45,14 +45,14 @@ in
|
|||
};
|
||||
};
|
||||
uwsgiConfig = {
|
||||
http = "127.0.0.1:${port}";
|
||||
http = links.searxng.tuple;
|
||||
cache2 = "name=searxcache,items=2000,blocks=2000,blocksize=65536,bitmap=1";
|
||||
buffer-size = 65536;
|
||||
env = ["SEARXNG_SETTINGS_PATH=/run/searx/settings.yml"];
|
||||
disable-logging = true;
|
||||
};
|
||||
};
|
||||
services.nginx.virtualHosts."search.${tools.meta.domain}" = lib.recursiveUpdate (tools.nginx.vhosts.proxy "http://127.0.0.1:${port}") {
|
||||
services.nginx.virtualHosts."search.${tools.meta.domain}" = lib.recursiveUpdate (tools.nginx.vhosts.proxy links.searxng.url) {
|
||||
extraConfig = "access_log off;";
|
||||
};
|
||||
systemd.services.uwsgi.after = [ "wireguard-wgmv-es7.service" "network-addresses-wgmv-es7.service" ];
|
||||
|
|
|
@ -3,6 +3,7 @@ let
|
|||
host = tools.identity.autoDomain "sips";
|
||||
|
||||
inherit (inputs.self.packages.${pkgs.system}) sips;
|
||||
inherit (config) links;
|
||||
|
||||
connStringNet = "host=127.0.0.1 sslmode=disable dbname=sips user=sips";
|
||||
connString = "host=/var/run/postgresql dbname=sips user=sips";
|
||||
|
@ -23,14 +24,18 @@ in
|
|||
mode = "0400";
|
||||
};
|
||||
|
||||
reservePortsFor = [ "sips" "sipsInternal" "sipsIpfsApiProxy" ];
|
||||
links = {
|
||||
sips.protocol = "http";
|
||||
sipsInternal.protocol = "http";
|
||||
sipsIpfsApiProxy.protocol = "http";
|
||||
};
|
||||
|
||||
systemd.services.sips = {
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" "postgresql.service" ];
|
||||
requires = [ "sips-ipfs-api-proxy.service" ];
|
||||
serviceConfig = {
|
||||
ExecStart = "${sips}/bin/sips --dbdriver postgres --db \"${connString}\" --addr 127.0.0.1:${config.portsStr.sipsInternal} --api http://127.0.0.1:${config.portsStr.sipsIpfsApiProxy} --apitimeout 604800s";
|
||||
ExecStart = "${sips}/bin/sips --dbdriver postgres --db \"${connString}\" --addr ${links.sipsInternal.tuple} --api ${links.sipsIpfsApiProxy.url} --apitimeout 604800s";
|
||||
PrivateNetwork = true;
|
||||
DynamicUser = true;
|
||||
};
|
||||
|
@ -41,7 +46,7 @@ in
|
|||
after = [ "network.target" "sips.service" ];
|
||||
bindsTo = [ "sips.service" ];
|
||||
serviceConfig = {
|
||||
ExecStart = "${pkgs.socat}/bin/socat tcp4-listen:${config.portsStr.sipsIpfsApiProxy},fork,reuseaddr,bind=127.0.0.1 unix-connect:/run/ipfs/ipfs-api.sock";
|
||||
ExecStart = "${pkgs.socat}/bin/socat tcp4-listen:${links.sipsIpfsApiProxy.portStr},fork,reuseaddr,bind=${links.sipsIpfsApiProxy.ipv4} unix-connect:/run/ipfs/ipfs-api.sock";
|
||||
PrivateNetwork = true;
|
||||
DynamicUser = true;
|
||||
SupplementaryGroups = "ipfs";
|
||||
|
@ -54,7 +59,7 @@ in
|
|||
bindsTo = [ "sips.service" ];
|
||||
requires = [ "sips-proxy.socket" ];
|
||||
serviceConfig = {
|
||||
ExecStart = "${config.systemd.package}/lib/systemd/systemd-socket-proxyd 127.0.0.1:${config.portsStr.sipsInternal}";
|
||||
ExecStart = "${config.systemd.package}/lib/systemd/systemd-socket-proxyd ${links.sipsInternal.tuple}";
|
||||
PrivateNetwork = true;
|
||||
DynamicUser = true;
|
||||
SupplementaryGroups = "ipfs";
|
||||
|
@ -66,11 +71,11 @@ in
|
|||
wantedBy = [ "sockets.target" ];
|
||||
after = [ "network.target" ];
|
||||
socketConfig = {
|
||||
ListenStream = "127.0.0.1:${config.portsStr.sips}";
|
||||
ListenStream = "${links.sips.tuple}";
|
||||
};
|
||||
};
|
||||
|
||||
environment.systemPackages = [ sipsctl ];
|
||||
|
||||
services.nginx.virtualHosts.${host} = tools.nginx.vhosts.proxy "http://127.0.0.1:${config.portsStr.sips}";
|
||||
services.nginx.virtualHosts.${host} = tools.nginx.vhosts.proxy links.sips.url;
|
||||
}
|
||||
|
|
|
@ -3,10 +3,11 @@ with tools.nginx;
|
|||
let
|
||||
login = "login.${tools.meta.domain}";
|
||||
cfg = config.services.keycloak;
|
||||
kc = config.links.keycloak;
|
||||
in
|
||||
{
|
||||
tested.requiredChecks = [ "keycloak" ];
|
||||
reservePortsFor = [ "keycloak" ];
|
||||
links.keycloak.protocol = "http";
|
||||
|
||||
imports = [
|
||||
./identity-management.nix
|
||||
|
@ -18,7 +19,7 @@ in
|
|||
mode = "0400";
|
||||
};
|
||||
services.nginx.virtualHosts = {
|
||||
"${login}" = lib.recursiveUpdate (vhosts.proxy "http://${cfg.settings.http-host}:${config.portsStr.keycloak}") {
|
||||
"${login}" = lib.recursiveUpdate (vhosts.proxy kc.url) {
|
||||
locations."= /".return = "302 /auth/realms/master/account/";
|
||||
};
|
||||
"account.${domain}" = vhosts.redirect "https://${login}/auth/realms/master/account/";
|
||||
|
@ -31,8 +32,8 @@ in
|
|||
passwordFile = config.age.secrets.keycloak-dbpass.path;
|
||||
};
|
||||
settings = {
|
||||
http-host = "127.0.0.1";
|
||||
http-port = config.ports.keycloak;
|
||||
http-host = kc.ipv4;
|
||||
http-port = kc.port;
|
||||
hostname = login;
|
||||
proxy = "edge";
|
||||
# for backcompat, TODO: remove
|
||||
|
|
|
@ -5,13 +5,13 @@ let
|
|||
|
||||
flakePkgs = inputs.self.packages.${pkgs.system};
|
||||
|
||||
port = config.portsStr.uptime-kuma;
|
||||
link = config.links.uptime-kuma;
|
||||
|
||||
dataDir = "/srv/storage/private/uptime-kuma";
|
||||
in
|
||||
|
||||
{
|
||||
reservePortsFor = [ "uptime-kuma" ];
|
||||
links.uptime-kuma.protocol = "http";
|
||||
|
||||
users.users.uptime-kuma = {
|
||||
isSystemUser = true;
|
||||
|
@ -62,8 +62,8 @@ in
|
|||
environment = {
|
||||
NODE_ENV = "production";
|
||||
DATA_DIR = dataDir;
|
||||
UPTIME_KUMA_HOST = "127.0.0.1";
|
||||
UPTIME_KUMA_PORT = port;
|
||||
UPTIME_KUMA_HOST = link.ipv4;
|
||||
UPTIME_KUMA_PORT = link.portStr;
|
||||
UPTIME_KUMA_HIDE_LOG = lib.concatStringsSep "," [
|
||||
"debug_monitor"
|
||||
"info_monitor"
|
||||
|
@ -71,7 +71,7 @@ in
|
|||
};
|
||||
};
|
||||
|
||||
services.nginx.virtualHosts."status.${domain}" = lib.recursiveUpdate (tools.nginx.vhosts.proxy "http://127.0.0.1:${port}") {
|
||||
services.nginx.virtualHosts."status.${domain}" = lib.recursiveUpdate (tools.nginx.vhosts.proxy link.url) {
|
||||
locations = {
|
||||
"/".proxyWebsockets = true;
|
||||
"=/".return = "302 /status/${builtins.replaceStrings ["."] ["-"] domain}";
|
||||
|
|
|
@ -1,9 +1,7 @@
|
|||
{ config, inputs, pkgs, tools, ... }:
|
||||
|
||||
let
|
||||
port = config.portsStr.nixIpfs;
|
||||
in {
|
||||
reservePortsFor = [ "nixIpfs" ];
|
||||
{
|
||||
links.nixIpfs.protocol = "http";
|
||||
|
||||
systemd.services.nix-ipfs-cache = {
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
|
@ -14,7 +12,7 @@ in {
|
|||
CacheDirectory = "nix-ipfs-cache";
|
||||
};
|
||||
environment = {
|
||||
REFLEX_PORT = port;
|
||||
REFLEX_PORT = config.links.nixIpfs.portStr;
|
||||
IPFS_API = config.services.ipfs.apiAddress;
|
||||
NIX_CACHES = toString [
|
||||
"https://cache.nixos.org"
|
||||
|
@ -24,5 +22,5 @@ in {
|
|||
};
|
||||
};
|
||||
|
||||
services.nginx.virtualHosts."reflex.${tools.meta.domain}" = tools.nginx.vhosts.proxy "http://127.0.0.1:${port}";
|
||||
services.nginx.virtualHosts."reflex.${tools.meta.domain}" = tools.nginx.vhosts.proxy config.links.nixIpfs.url;
|
||||
}
|
||||
|
|
|
@ -7,8 +7,6 @@
|
|||
patched = import ../../packages/patched-derivations.nix super;
|
||||
in {
|
||||
|
||||
ipfs = patched.lain-ipfs;
|
||||
|
||||
hydra-unstable = patched.hydra;
|
||||
|
||||
inherit (patched) sssd;
|
||||
|
|
|
@ -1,10 +1,10 @@
|
|||
{ config, lib, pkgs, tools, ... }:
|
||||
{ config, inputs, lib, pkgs, tools, ... }:
|
||||
let
|
||||
inherit (tools.meta) domain;
|
||||
cfg = config.services.ipfs;
|
||||
apiAddress = "/unix/run/ipfs/ipfs-api.sock";
|
||||
ipfsApi = pkgs.writeTextDir "api" apiAddress;
|
||||
gwPort = config.portsStr.ipfsGateway;
|
||||
gw = config.links.ipfsGateway;
|
||||
in
|
||||
{
|
||||
age.secrets.ipfs-swarm-key = {
|
||||
|
@ -14,7 +14,7 @@ in
|
|||
inherit (cfg) group;
|
||||
};
|
||||
|
||||
reservePortsFor = [ "ipfsGateway" ];
|
||||
links.ipfsGateway.protocol = "http";
|
||||
|
||||
networking.firewall = {
|
||||
allowedTCPPorts = [ 4001 ];
|
||||
|
@ -23,12 +23,13 @@ in
|
|||
|
||||
services.ipfs = {
|
||||
enable = true;
|
||||
package = inputs.self.packages.${pkgs.system}.ipfs;
|
||||
startWhenNeeded = false;
|
||||
autoMount = true;
|
||||
autoMigrate = false;
|
||||
|
||||
inherit apiAddress;
|
||||
gatewayAddress = "/ip4/127.0.0.1/tcp/${gwPort}";
|
||||
gatewayAddress = "/ip4/${gw.ipv4}/tcp/${gw.portStr}";
|
||||
dataDir = "/srv/storage/ipfs/repo";
|
||||
localDiscovery = false;
|
||||
|
||||
|
|
|
@ -4,16 +4,17 @@ let
|
|||
|
||||
writeJSON = filename: data: pkgs.writeText filename (builtins.toJSON data);
|
||||
|
||||
inherit (config) ports portsStr;
|
||||
|
||||
relabel = from: to: {
|
||||
source_labels = [ from ];
|
||||
target_label = to;
|
||||
};
|
||||
in
|
||||
{
|
||||
# same as remote loki port
|
||||
reservePortsFor = [ "loki" ];
|
||||
# remote loki
|
||||
links.loki = {
|
||||
protocol = "http";
|
||||
ipv4 = hosts.VEGAS.hypr.addr;
|
||||
};
|
||||
|
||||
services.journald.extraConfig = "Storage=volatile";
|
||||
|
||||
|
@ -41,7 +42,7 @@ in
|
|||
server.disable = true;
|
||||
positions.filename = "\${STATE_DIRECTORY:/tmp}/promtail-positions.yaml";
|
||||
clients = [
|
||||
{ url = "http://${hosts.VEGAS.hypr.addr}:${portsStr.loki}/loki/api/v1/push"; }
|
||||
{ url = "${config.links.loki.url}/loki/api/v1/push"; }
|
||||
];
|
||||
scrape_configs = [
|
||||
{
|
||||
|
|
|
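A subtle payoff of deriving ports from the link's "${hostname}:${name}" key (see link.nix further down) is that two machines can agree on a port without it ever being written down: the promtail clients above and the Loki server in the monitoring module declare the same links.loki with the same ipv4, so both derive the identical port, tuple and URL. A minimal sketch, assuming `hosts` reaches the module the same way it does in the promtail file above (the exact plumbing is repo-specific):

{ hosts, ... }:
{
  # Declared identically on the Loki host (VEGAS) and on every promtail client;
  # both sides hash "${hosts.VEGAS.hypr.addr}:loki" and land on the same port,
  # so a client can simply use "${config.links.loki.url}/loki/api/v1/push".
  links.loki = {
    protocol = "http";
    ipv4 = hosts.VEGAS.hypr.addr;
  };
}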
@ -5,7 +5,7 @@ let
|
|||
else x;
|
||||
in {
|
||||
nix = {
|
||||
package = inputs.nix-super.defaultPackage.${pkgs.system};
|
||||
package = inputs.nix-super.packages.${pkgs.system}.default;
|
||||
|
||||
trustedUsers = [ "root" "@wheel" "@admins" ];
|
||||
|
||||
|
|
|
@ -1,59 +1,11 @@
|
|||
{ config, lib, ... }:
|
||||
|
||||
with builtins;
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.reservedPorts;
|
||||
|
||||
portNames = config.reservePortsFor;
|
||||
|
||||
portHash = flip pipe [
|
||||
(hashString "sha512")
|
||||
stringToCharacters
|
||||
(filter (n: match "[0-9]" n == []))
|
||||
(map toInt)
|
||||
(foldl add 0)
|
||||
(mul 1009) # prime number
|
||||
(flip mod cfg.amount)
|
||||
(add cfg.start)
|
||||
];
|
||||
|
||||
ports = genAttrs portNames portHash;
|
||||
|
||||
portsEnd = cfg.start + cfg.amount;
|
||||
in {
|
||||
options = {
|
||||
reservedPorts = {
|
||||
amount = mkOption {
|
||||
type = types.int;
|
||||
default = 10000;
|
||||
description = "Amount of ports to reserve at most.";
|
||||
};
|
||||
start = mkOption {
|
||||
type = types.int;
|
||||
default = 30000;
|
||||
description = "Starting point for reserved ports.";
|
||||
};
|
||||
};
|
||||
reservePortsFor = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [];
|
||||
description = "List of application names for which to automatically reserve ports.";
|
||||
};
|
||||
ports = mkOption {
|
||||
type = types.attrsOf (types.ints.between cfg.start portsEnd);
|
||||
default = {};
|
||||
description = "Named network ports.";
|
||||
};
|
||||
portsStr = mkOption {
|
||||
readOnly = true;
|
||||
type = types.attrsOf types.str;
|
||||
description = "Named network ports, as strings.";
|
||||
};
|
||||
};
|
||||
config = lib.mkIf (config.reservePortsFor != []) {
|
||||
inherit ports;
|
||||
portsStr = mapAttrs (_: toString) ports;
|
||||
{
|
||||
options.links = mkOption {
|
||||
type = types.attrsOf (types.submodule ./link.nix);
|
||||
description = "Port Magic links.";
|
||||
default = {};
|
||||
};
|
||||
}
|
||||
|
|
76
modules/port-magic/link.nix
Normal file
|
@ -0,0 +1,76 @@
|
|||
{ config, lib, name, ... }:
|
||||
|
||||
with builtins;
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config;
|
||||
|
||||
portHash = flip pipe [
|
||||
(hashString "md5")
|
||||
(substring 0 7)
|
||||
(hash: (fromTOML "v=0x${hash}").v)
|
||||
(flip mod cfg.reservedPorts.amount)
|
||||
(add cfg.reservedPorts.start)
|
||||
];
|
||||
in
|
||||
|
||||
{
|
||||
options = {
|
||||
ipv4 = mkOption {
|
||||
type = types.str;
|
||||
default = "127.0.0.1";
|
||||
description = "The IPv4 address.";
|
||||
};
|
||||
hostname = mkOption {
|
||||
type = types.str;
|
||||
description = "The hostname.";
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = types.int;
|
||||
description = "The TCP or UDP port.";
|
||||
};
|
||||
portStr = mkOption {
|
||||
type = types.str;
|
||||
description = "The TCP or UDP port, as a string.";
|
||||
};
|
||||
reservedPorts = {
|
||||
amount = mkOption {
|
||||
type = types.int;
|
||||
default = 10000;
|
||||
description = "Amount of ports to reserve at most.";
|
||||
};
|
||||
start = mkOption {
|
||||
type = types.int;
|
||||
default = 30000;
|
||||
description = "Starting point for reserved ports.";
|
||||
};
|
||||
};
|
||||
|
||||
protocol = mkOption {
|
||||
type = types.str;
|
||||
description = "The protocol in URL scheme name format.";
|
||||
};
|
||||
path = mkOption {
|
||||
type = types.nullOr types.path;
|
||||
default = null;
|
||||
description = "The resource path.";
|
||||
};
|
||||
url = mkOption {
|
||||
type = types.str;
|
||||
description = "The URL.";
|
||||
};
|
||||
tuple = mkOption {
|
||||
type = types.str;
|
||||
description = "The hostname:port tuple.";
|
||||
};
|
||||
};
|
||||
config = mkIf true {
|
||||
hostname = mkDefault cfg.ipv4;
|
||||
port = mkDefault (portHash "${cfg.hostname}:${name}");
|
||||
portStr = toString cfg.port;
|
||||
tuple = "${cfg.hostname}:${cfg.portStr}";
|
||||
url = "${cfg.protocol}://${cfg.hostname}:${cfg.portStr}${if cfg.path == null then "" else cfg.path}";
|
||||
};
|
||||
}
|
|
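For reference, the deterministic port assignment in the new submodule boils down to: take the md5 of "${hostname}:${name}", parse the first seven hex digits as an integer (the fromTOML trick above), and fold the result into the reserved range. A standalone sketch of the same computation, evaluable with nix eval --expr; the key at the bottom is just an example:

let
  start = 30000;    # reservedPorts.start default
  amount = 10000;   # reservedPorts.amount default
  portFor = key:
    let
      hex = builtins.substring 0 7 (builtins.hashString "md5" key);
      n = (builtins.fromTOML "v=0x${hex}").v;   # hex prefix parsed as an integer
    in start + (n - amount * (n / amount));     # n mod amount, shifted into the range
in
  portFor "127.0.0.1:api"   # e.g. a link named "api" on the default host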
@ -10,24 +10,9 @@
|
|||
let
|
||||
versionGate = pkg: target:
|
||||
assert
|
||||
lib.assertMsg (lib.versionAtLeast target.version pkg.version)
|
||||
lib.assertMsg (lib.versionOlder pkg.version target.version)
|
||||
"${pkg.name} has reached the desired version upstream";
|
||||
target;
|
||||
|
||||
vips_8_12_2' = pkgs.vips.overrideAttrs (_: {
|
||||
version = "8.12.2";
|
||||
src = pkgs.fetchFromGitHub {
|
||||
owner = "libvips";
|
||||
repo = "libvips";
|
||||
rev = "v8.12.2";
|
||||
sha256 = "sha256-ffDJJWe/SzG+lppXEiyfXXL5KLdZgnMjv1SYnuYnh4c=";
|
||||
postFetch = ''
|
||||
rm -r $out/test/test-suite/images/
|
||||
'';
|
||||
};
|
||||
});
|
||||
|
||||
vips_8_12_2 = versionGate pkgs.vips vips_8_12_2';
|
||||
in
|
||||
|
||||
{
|
||||
|
@ -36,7 +21,7 @@ in
|
|||
pkg-config
|
||||
];
|
||||
buildInputs = old: old ++ [
|
||||
vips_8_12_2
|
||||
vips
|
||||
];
|
||||
};
|
||||
ghost.build = {
|
||||
|
|
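The versionGate change above flips the comparison: the assert now fails as soon as the package in nixpkgs reaches (not merely surpasses) the pinned version, which is the point at which an override like the vips 8.12.2 pin removed here becomes redundant. A quick sketch of the behaviour with made-up versions (assumption: lib is taken from a nixpkgs checkout on NIX_PATH):

let
  lib = import <nixpkgs/lib>;   # assumption: NIX_PATH provides nixpkgs
  versionGate = pkg: target:
    assert lib.assertMsg (lib.versionOlder pkg.version target.version)
      "${pkg.name} has reached the desired version upstream";
    target;
in
  # Passes while the pin is still ahead of upstream:
  (versionGate { name = "vips-8.12.1"; version = "8.12.1"; } { version = "8.12.2"; }).version
  # With version = "8.12.2" (or newer) on the left, evaluation now aborts with
  # the message above; the old versionAtLeast form still accepted equality.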
69
packages/networking/ipfs/default.nix
Normal file
|
@ -0,0 +1,69 @@
|
|||
{ lib, buildGoModule, fetchurl, nixosTests, openssl, pkg-config }:
|
||||
|
||||
buildGoModule rec {
|
||||
pname = "ipfs";
|
||||
version = "0.13.0"; # When updating, also check if the repo version changed and adjust repoVersion below
|
||||
rev = "v${version}";
|
||||
|
||||
repoVersion = "12"; # Also update ipfs-migrator when changing the repo version
|
||||
|
||||
# go-ipfs makes changes to its source tarball that don't match the git source.
|
||||
src = fetchurl {
|
||||
url = "https://github.com/ipfs/go-ipfs/releases/download/${rev}/go-ipfs-source.tar.gz";
|
||||
sha256 = "sha256-eEIHsmtD3vF48RVFHEz28gkVv7u50pMBE8Z+oaM6pLM=";
|
||||
};
|
||||
|
||||
# tarball contains multiple files/directories
|
||||
postUnpack = ''
|
||||
mkdir ipfs-src
|
||||
shopt -s extglob
|
||||
mv !(ipfs-src) ipfs-src || true
|
||||
cd ipfs-src
|
||||
'';
|
||||
|
||||
sourceRoot = ".";
|
||||
|
||||
subPackages = [ "cmd/ipfs" ];
|
||||
|
||||
buildInputs = [ openssl ];
|
||||
nativeBuildInputs = [ pkg-config ];
|
||||
tags = [ "openssl" ];
|
||||
|
||||
passthru.tests.ipfs = nixosTests.ipfs;
|
||||
|
||||
vendorSha256 = null;
|
||||
|
||||
outputs = [ "out" "systemd_unit" "systemd_unit_hardened" ];
|
||||
|
||||
patches = [
|
||||
./ipfs-allow-publish-with-ipns-mounted.patch
|
||||
./ipfs-fuse-nuke-getxattr.patch
|
||||
./ipfs-unsafe-allow-all-paths-for-filestore.patch
|
||||
./lain-webui-0.13.0.patch
|
||||
];
|
||||
|
||||
postPatch = ''
|
||||
substituteInPlace 'misc/systemd/ipfs.service' \
|
||||
--replace '/usr/bin/ipfs' "$out/bin/ipfs"
|
||||
substituteInPlace 'misc/systemd/ipfs-hardened.service' \
|
||||
--replace '/usr/bin/ipfs' "$out/bin/ipfs"
|
||||
'';
|
||||
|
||||
postInstall = ''
|
||||
install --mode=444 -D 'misc/systemd/ipfs-api.socket' "$systemd_unit/etc/systemd/system/ipfs-api.socket"
|
||||
install --mode=444 -D 'misc/systemd/ipfs-gateway.socket' "$systemd_unit/etc/systemd/system/ipfs-gateway.socket"
|
||||
install --mode=444 -D 'misc/systemd/ipfs.service' "$systemd_unit/etc/systemd/system/ipfs.service"
|
||||
|
||||
install --mode=444 -D 'misc/systemd/ipfs-api.socket' "$systemd_unit_hardened/etc/systemd/system/ipfs-api.socket"
|
||||
install --mode=444 -D 'misc/systemd/ipfs-gateway.socket' "$systemd_unit_hardened/etc/systemd/system/ipfs-gateway.socket"
|
||||
install --mode=444 -D 'misc/systemd/ipfs-hardened.service' "$systemd_unit_hardened/etc/systemd/system/ipfs.service"
|
||||
'';
|
||||
|
||||
meta = with lib; {
|
||||
description = "A global, versioned, peer-to-peer filesystem";
|
||||
homepage = "https://ipfs.io/";
|
||||
license = licenses.mit;
|
||||
platforms = platforms.unix;
|
||||
maintainers = with maintainers; [ fpletz ];
|
||||
};
|
||||
}
|
|
@ -4,7 +4,7 @@
|
|||
package corehttp
|
||||
|
||||
// TODO: move to IPNS
|
||||
-const WebUIPath = "/ipfs/bafybeihcyruaeza7uyjd6ugicbcrqumejf6uf353e5etdkhotqffwtguva" // v2.13.0
|
||||
-const WebUIPath = "/ipfs/bafybeiednzu62vskme5wpoj4bjjikeg3xovfpp4t7vxk5ty2jxdi4mv4bu" // v2.15.0
|
||||
+const WebUIPath = "/ipns/webui.ipfs.privatevoid.net"
|
||||
|
||||
// this is a list of all past webUI paths.
|
|
@ -3,8 +3,6 @@ in with tools;
|
|||
super: rec {
|
||||
hydra = (patch super.hydra-unstable "patches/base/hydra").override { nix = super.nixVersions.nix_2_8; };
|
||||
|
||||
lain-ipfs = patch-rename (super.ipfs_latest or super.ipfs) "lain-ipfs" "patches/base/ipfs";
|
||||
|
||||
sssd = (super.sssd.override { withSudo = true; }).overrideAttrs (old: {
|
||||
postFixup = (old.postFixup or "") + ''
|
||||
${super.removeReferencesTo}/bin/remove-references-to -t ${super.stdenv.cc.cc} $out/modules/ldb/memberof.so
|
||||
|
|
|
@ -6,7 +6,7 @@ let
|
|||
pins = import ./sources;
|
||||
|
||||
dream2nix = inputs.dream2nix.lib2.init {
|
||||
systems = [ system ];
|
||||
inherit pkgs;
|
||||
config = {
|
||||
projectRoot = ./.;
|
||||
overridesDirs = [ ./dream2nix-overrides ];
|
||||
|
@ -23,27 +23,29 @@ in
|
|||
packages = rec {
|
||||
ghost = let
|
||||
version = "4.41.3";
|
||||
dream = dream2nix.makeFlakeOutputs {
|
||||
dream = dream2nix.makeOutputs {
|
||||
source = pkgs.fetchzip {
|
||||
url = "https://github.com/TryGhost/Ghost/releases/download/v${version}/Ghost-${version}.zip";
|
||||
sha256 = "sha256-mqN43LSkd9MHoIHyGS1VsPvpqWqX4Bx5KHcp3KOHw5A=";
|
||||
stripRoot = false;
|
||||
};
|
||||
};
|
||||
inherit (dream.packages.${system}) ghost;
|
||||
inherit (dream.packages) ghost;
|
||||
in
|
||||
ghost;
|
||||
|
||||
uptime-kuma = let
|
||||
dream = dream2nix.makeFlakeOutputs {
|
||||
dream = dream2nix.makeOutputs {
|
||||
source = pins.uptime-kuma;
|
||||
};
|
||||
inherit (dream.packages.${system}) uptime-kuma;
|
||||
inherit (dream.packages) uptime-kuma;
|
||||
in
|
||||
uptime-kuma;
|
||||
|
||||
hyprspace = pkgs.callPackage ./networking/hyprspace { iproute2mac = null; };
|
||||
|
||||
ipfs = pkgs.callPackage ./networking/ipfs { };
|
||||
|
||||
npins = let
|
||||
inherit (inputs.self.packages.${system}) nix-super;
|
||||
in pkgs.callPackage ./tools/npins {
|
||||
|
|
|
@ -8,9 +8,9 @@
|
|||
"repo": "searxng"
|
||||
},
|
||||
"branch": "master",
|
||||
"revision": "51ba817e06bb15ca1768010d6873d1d7bf48b0b6",
|
||||
"url": "https://github.com/searxng/searxng/archive/51ba817e06bb15ca1768010d6873d1d7bf48b0b6.tar.gz",
|
||||
"hash": "0g2gy6pjd7w6dg1n4f3ci7k3qkwwnkd0kcww88rk12v2rlsrijm3"
|
||||
"revision": "8177bf3f0a4d4f22cf63812dc86a80535cd15d68",
|
||||
"url": "https://github.com/searxng/searxng/archive/8177bf3f0a4d4f22cf63812dc86a80535cd15d68.tar.gz",
|
||||
"hash": "0hbz28sldk56id721r6c0fc9bahs99i90wygsddiba6n7kcn49zm"
|
||||
},
|
||||
"stevenblack": {
|
||||
"type": "GitRelease",
|
||||
|
@ -21,10 +21,10 @@
|
|||
},
|
||||
"pre_releases": false,
|
||||
"version_upper_bound": null,
|
||||
"version": "3.10.3",
|
||||
"revision": "bbddccb8fe47ccc02edcf4866099daf921e3eb64",
|
||||
"url": "https://api.github.com/repos/StevenBlack/hosts/tarball/3.10.3",
|
||||
"hash": "0sy624j4q1xb1wn3s9wjfii6yrimwpm4j5qy4z9hjjbcmr39xb2g"
|
||||
"version": "3.10.9",
|
||||
"revision": "1ed8b1f54ba3ce2f6f8a1c329fc5726aa04d24c5",
|
||||
"url": "https://api.github.com/repos/StevenBlack/hosts/tarball/3.10.9",
|
||||
"hash": "1xskv09qrxly22vj32s6g8jnx4ijqy5pqmvwri8rx6w1vsklbg47"
|
||||
},
|
||||
"uptime-kuma": {
|
||||
"type": "GitRelease",
|
||||
|
|