treewide: implement port-magic v2
parent 76b5dddf81
commit 59d5f9d884
18 changed files with 122 additions and 92 deletions
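Context for review: every hunk below replaces the old reservePortsFor / config.ports.* / config.portsStr.* pattern with a config.links.<name> option set. The module that defines links ("port-magic v2") is not part of this diff; the sketch below is only a plausible reconstruction of the option shape implied by the call sites (.protocol, .ipv4, .port, .portStr, .tuple, .url). All names and defaults here are inferred, not taken from the actual module:

  { lib, ... }:
  {
    # Hypothetical shape of config.links.<name>, reconstructed from usage
    # in this commit. Assumes port allocation happens elsewhere (the role
    # the removed reservePortsFor option used to play).
    options.links = lib.mkOption {
      default = { };
      type = lib.types.attrsOf (lib.types.submodule ({ config, ... }: {
        options = {
          protocol = lib.mkOption { type = lib.types.str; default = "http"; };
          ipv4 = lib.mkOption { type = lib.types.str; default = "127.0.0.1"; };
          port = lib.mkOption { type = lib.types.port; };
          portStr = lib.mkOption {
            type = lib.types.str;
            default = toString config.port;
          };
          tuple = lib.mkOption {
            type = lib.types.str;
            default = "${config.ipv4}:${config.portStr}";
          };
          url = lib.mkOption {
            type = lib.types.str;
            default = "${config.protocol}://${config.tuple}";
          };
        };
      }));
    };
  }

Under that assumption, config.links.api.url evaluates to something like "http://127.0.0.1:<allocated-port>", which is why the call sites can drop their hand-assembled "http://127.0.0.1:${config.portsStr.api}" strings: listeners and proxies now derive their addresses from one shared definition.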
@@ -2,19 +2,19 @@
 let
   inherit (tools.meta) domain;
   apiAddr = "api.${domain}";
-  proxyTarget = "http://127.0.0.1:${config.portsStr.api}";
+  proxyTarget = config.links.api.url;
   proxy = tools.nginx.vhosts.proxy proxyTarget;
 in
 {
   # n8n uses "Sustainable Use License"
   nixpkgs.config.allowUnfree = true;

-  reservePortsFor = [ "api" ];
+  links.api.protocol = "http";

   services.n8n = {
     enable = true;
     settings = {
-      port = config.ports.api;
+      inherit (config.links.api) port;
     };
   };

@@ -1,17 +1,17 @@
 { config, lib, tools, ... }:
 with tools.nginx;
 {
-  reservePortsFor = [ "bitwarden" ];
+  links.bitwarden.protocol = "http";

   services.nginx.virtualHosts = mappers.mapSubdomains {
-    keychain = vhosts.proxy "http://127.0.0.1:${config.portsStr.bitwarden}";
+    keychain = vhosts.proxy config.links.bitwarden.url;
   };
   services.vaultwarden = {
     enable = true;
     backupDir = "/srv/storage/private/bitwarden/backups";
     config = {
       dataFolder = "/srv/storage/private/bitwarden/data";
-      rocketPort = config.ports.bitwarden;
+      rocketPort = config.links.bitwarden.port;
     };
     #environmentFile = ""; # TODO: agenix
   };

@@ -17,8 +17,6 @@ let
     (mapPaths config)
   );

-  port = config.portsStr.ghost;
-
   contentPath = "/srv/storage/private/ghost";
 in
@@ -29,7 +27,7 @@ in
     mode = "0400";
   };

-  reservePortsFor = [ "ghost" ];
+  links.ghost.protocol = "http";

   users.users.ghost = {
     isSystemUser = true;
@@ -99,7 +97,7 @@ in
     };
     server = {
       host = "127.0.0.1";
-      inherit port;
+      inherit (config.links.ghost) port;
     };

     privacy.useTinfoil = true;
@@ -110,6 +108,6 @@ in
     };
   };

-  services.nginx.virtualHosts."blog.${domain}" = tools.nginx.vhosts.proxy "http://127.0.0.1:${port}";
+  services.nginx.virtualHosts."blog.${domain}" = tools.nginx.vhosts.proxy config.links.ghost.url;

 }

@@ -1,7 +1,21 @@
 { config, lib, tools, ... }:
 with tools.nginx;
 {
-  reservePortsFor = [ "ombi" ];
+  links = {
+    ombi.protocol = "http";
+    radarr = {
+      protocol = "http";
+      port = 7878;
+    };
+    sonarr = {
+      protocol = "http";
+      port = 8989;
+    };
+    prowlarr = {
+      protocol = "http";
+      port = 9696;
+    };
+  };

   services = {
     radarr = {
@@ -15,14 +29,14 @@ with tools.nginx;
     };
     ombi = {
       enable = true;
-      port = config.ports.ombi;
+      inherit (config.links.ombi) port;
     };

-    nginx.virtualHosts = mappers.mapSubdomains {
-      radarr = vhosts.proxy "http://127.0.0.1:7878";
-      sonarr = vhosts.proxy "http://127.0.0.1:8989";
-      fbi-index = vhosts.proxy "http://127.0.0.1:9696";
-      fbi-requests = vhosts.proxy "http://127.0.0.1:${config.portsStr.ombi}";
+    nginx.virtualHosts = with config.links; mappers.mapSubdomains {
+      radarr = vhosts.proxy radarr.url;
+      sonarr = vhosts.proxy sonarr.url;
+      fbi-index = vhosts.proxy prowlarr.url;
+      fbi-requests = vhosts.proxy ombi.url;
     };
   };
   systemd.services.radarr.serviceConfig.Slice = "mediamanagement.slice";

@@ -31,15 +31,15 @@ in
     )
   );

-  reservePortsFor = [ "hydra" ];
+  links.hydra.protocol = "http";

   services.nginx.appendHttpConfig = ''
     limit_req_zone $binary_remote_addr zone=hydra_api_push_limiter:10m rate=1r/m;
   '';

-  services.nginx.virtualHosts."hydra.${domain}" = lib.recursiveUpdate (tools.nginx.vhosts.proxy "http://127.0.0.1:${config.portsStr.hydra}") {
+  services.nginx.virtualHosts."hydra.${domain}" = lib.recursiveUpdate (tools.nginx.vhosts.proxy config.links.hydra.url) {
     locations."/api/push" = {
-      proxyPass = "http://127.0.0.1:${config.portsStr.hydra}";
+      proxyPass = config.links.hydra.url;
       extraConfig = ''
         auth_request off;
         proxy_method PUT;
@@ -54,7 +54,7 @@ in
   services.hydra = {
     enable = true;
     hydraURL = "https://hydra.${domain}";
-    port = config.ports.hydra;
+    inherit (config.links.hydra) port;
     notificationSender = "hydra@${domain}";
     buildMachinesFiles = [ "/etc/nix/hydra-machines" ];
     useSubstitutes = true;

@@ -3,7 +3,7 @@ with tools.nginx;
 let
   inherit (tools.meta) domain;
   cfg = config.services.ipfs;
-  gwPort = config.portsStr.ipfsGateway;
+  gw = config.links.ipfsGateway;
 in
 {
   imports = [
@@ -31,7 +31,7 @@ in
   services.nginx.virtualHosts = {
     "top-level.${domain}".locations = {
       "~ ^/ip[fn]s" = {
-        proxyPass = "http://127.0.0.1:${gwPort}";
+        proxyPass = gw.url;
         extraConfig = ''
           add_header X-Content-Type-Options "";
           add_header Access-Control-Allow-Origin *;
@@ -43,7 +43,7 @@ in
       locations = {
         "= /".return = "404";
         "~ ^/ip[fn]s" = {
-          proxyPass = "http://127.0.0.1:${gwPort}";
+          proxyPass = gw.url;
           extraConfig = ''
             add_header X-Content-Type-Options "";
             add_header Access-Control-Allow-Origin *;
@@ -54,7 +54,7 @@ in
     };
     "ipfs.admin.${domain}" = vhosts.basic // {
       locations."/api".proxyPass = "http://unix:/run/ipfs/ipfs-api.sock:";
-      locations."/ipns/webui.ipfs.${domain}".proxyPass = "http://127.0.0.1:${gwPort}/ipns/webui.ipfs.${domain}";
+      locations."/ipns/webui.ipfs.${domain}".proxyPass = "${gw.url}/ipns/webui.ipfs.${domain}";
       locations."= /".return = "302 /ipns/webui.ipfs.${domain}";
       locations."/debug/metrics/prometheus" = {
         proxyPass = "http://unix:/run/ipfs/ipfs-api.sock:";
@@ -85,7 +85,7 @@ in
       useACMEHost = "ipfs.${domain}";
       locations = {
         "/" = {
-          proxyPass = "http://127.0.0.1:${gwPort}";
+          proxyPass = gw.url;
           extraConfig = ''
             add_header X-Content-Type-Options "";
             add_header Access-Control-Allow-Origin *;

@@ -1,6 +1,6 @@
 { config, pkgs, ... }:
 {
-  reservePortsFor = [ "mc-num" ];
+  links.mc-num = {};
   services.modded-minecraft-servers.instances.num = {
     enable = true;
     rsyncSSHKeys = [
@@ -10,7 +10,7 @@
     jvmInitialAllocation = "2G";
     jvmMaxAllocation = "8G";
     serverConfig = {
-      server-port = config.ports.mc-num;
+      server-port = config.links.mc-num.port;
       motd = "Welcome to num's minecraft server";
     };
   };

@@ -2,7 +2,7 @@
 let
   inherit (tools.meta) domain;

-  inherit (config) ports portsStr;
+  inherit (config) links;

   cfg = { inherit (config.services) loki; };

@@ -34,10 +34,21 @@ in
     file = ../../../../secrets/grafana-secrets.age;
   };

-  reservePortsFor = [ "grafana" "prometheus" "loki" "loki-grpc" ];
+  links = {
+    grafana.protocol = "http";
+    prometheus.protocol = "http";
+    loki = {
+      protocol = "http";
+      ipv4 = myNode.hypr.addr;
+    };
+    loki-grpc = {
+      protocol = "grpc";
+      ipv4 = myNode.hypr.addr;
+    };
+  };
   services.grafana = {
     enable = true;
-    port = ports.grafana;
+    inherit (links.grafana) port;
     rootUrl = "https://monitoring.${domain}/";
     dataDir = "/srv/storage/private/grafana";
     analytics.reporting.enable = false;
@@ -63,13 +74,13 @@ in
     datasources = [
       {
         name = "Prometheus";
-        url = "http://127.0.0.1:${portsStr.prometheus}";
+        inherit (links.prometheus) url;
         type = "prometheus";
         isDefault = true;
       }
       {
         name = "Loki";
-        url = "http://${myNode.hypr.addr}:${portsStr.loki}";
+        inherit (links.loki) url;
         type = "loki";
       }
     ];
@@ -80,14 +91,14 @@ in
     EnvironmentFile = config.age.secrets.grafana-secrets.path;
   };

-  services.nginx.virtualHosts."monitoring.${domain}" = lib.recursiveUpdate (tools.nginx.vhosts.proxy "http://127.0.0.1:${portsStr.grafana}") {
+  services.nginx.virtualHosts."monitoring.${domain}" = lib.recursiveUpdate (tools.nginx.vhosts.proxy links.grafana.url) {
     locations."/".proxyWebsockets = true;
   };

   services.prometheus = {
     enable = true;
-    listenAddress = "127.0.0.1";
-    port = ports.prometheus;
+    listenAddress = links.prometheus.ipv4;
+    inherit (links.prometheus) port;
     globalConfig = {
       scrape_interval = "60s";
     };
@@ -129,12 +140,12 @@ in
     auth_enabled = false;
     server = {
       log_level = "warn";
-      http_listen_address = myNode.hypr.addr;
-      http_listen_port = ports.loki;
-      grpc_listen_address = "127.0.0.1";
-      grpc_listen_port = ports.loki-grpc;
+      http_listen_address = links.loki.ipv4;
+      http_listen_port = links.loki.port;
+      grpc_listen_address = links.loki-grpc.ipv4;
+      grpc_listen_port = links.loki-grpc.port;
     };
-    frontend_worker.frontend_address = "127.0.0.1:${portsStr.loki-grpc}";
+    frontend_worker.frontend_address = links.loki-grpc.tuple;
     ingester = {
       lifecycler = {
         address = "127.0.0.1";

@@ -10,8 +10,8 @@ in
 {
   services.nginx.upstreams.nar-serve.extraConfig = ''
     random;
-    server 127.0.0.1:${config.portsStr.nar-serve-self} fail_timeout=0;
-    server 127.0.0.1:${config.portsStr.nar-serve-nixos-org} fail_timeout=0;
+    server ${config.links.nar-serve-self.tuple} fail_timeout=0;
+    server ${config.links.nar-serve-nixos-org.tuple} fail_timeout=0;
   '';
   services.nginx.appendHttpConfig = ''
     proxy_cache_path /var/cache/nginx/nixstore levels=1:2 keys_zone=nixstore:10m max_size=10g inactive=24h use_temp_path=off;

@@ -12,11 +12,11 @@
   };
 in
 {
-  reservePortsFor = [
-    "nar-serve-self"
-    "nar-serve-nixos-org"
-  ];
+  links = {
+    nar-serve-self.protocol = "http";
+    nar-serve-nixos-org.protocol = "http";
+  };

-  systemd.services.nar-serve-self = mkNarServe "https://cache.${tools.meta.domain}" config.portsStr.nar-serve-self;
-  systemd.services.nar-serve-nixos-org = mkNarServe "https://cache.nixos.org" config.portsStr.nar-serve-nixos-org;
+  systemd.services.nar-serve-self = mkNarServe "https://cache.${tools.meta.domain}" config.links.nar-serve-self.portStr;
+  systemd.services.nar-serve-nixos-org = mkNarServe "https://cache.nixos.org" config.links.nar-serve-nixos-org.portStr;
 }

@@ -1,8 +1,7 @@
 { config, inputs, lib, pkgs, tools, ... }:
 with tools.nginx;
 let
-  minioPort = config.portsStr.minio;
-  consolePort = config.portsStr.minioConsole;
+  inherit (config) links;

   mapPaths = lib.mapAttrsRecursive (
     path: value: lib.nameValuePair
@@ -17,7 +16,10 @@ let
   );
 in
 {
-  reservePortsFor = [ "minio" "minioConsole" ];
+  links = {
+    minio.protocol = "http";
+    minioConsole.protocol = "http";
+  };

   age.secrets.minio-root-credentials = {
     file = ../../../../secrets/minio-root-credentials.age;
@@ -30,8 +32,8 @@ in
     rootCredentialsFile = config.age.secrets.minio-root-credentials.path;
     dataDir = [ "/srv/storage/objects" ];
     browser = true;
-    listenAddress = "127.0.0.1:${minioPort}";
-    consoleAddress = "127.0.0.1:${consolePort}";
+    listenAddress = links.minio.tuple;
+    consoleAddress = links.minioConsole.tuple;
   };
   systemd.services.minio.serviceConfig = {
     Slice = "remotefshost.slice";
@@ -40,17 +42,17 @@ in
     # TODO: vhosts.proxy?
     "object-storage" = vhosts.basic // {
       locations = {
-        "/".proxyPass = "http://127.0.0.1:${minioPort}";
-        "= /dashboard".proxyPass = "http://127.0.0.1:${minioPort}";
+        "/".proxyPass = links.minio.url;
+        "= /dashboard".proxyPass = links.minio.url;
       };
       extraConfig = "client_max_body_size 4G;";
     };
     "console.object-storage" = vhosts.basic // {
       locations = {
-        "/".proxyPass = "http://127.0.0.1:${consolePort}";
+        "/".proxyPass = links.minioConsole.url;
       };
     };
-    "cdn" = lib.recursiveUpdate (vhosts.proxy "http://127.0.0.1:${minioPort}/content-delivery$request_uri") {
+    "cdn" = lib.recursiveUpdate (vhosts.proxy "${links.minio.url}/content-delivery$request_uri") {
       locations."= /".return = "302 /index.html";
     };
   };

@@ -1,9 +1,9 @@
 { config, inputs, lib, pkgs, tools, ... }:
 let
-  port = config.portsStr.searxng;
+  inherit (config) links;
 in
 {
-  reservePortsFor = [ "searxng" ];
+  links.searxng.protocol = "http";

   age.secrets.searxng-secrets.file = ../../../../secrets/searxng-secrets.age;
   services.searx = {
@@ -45,14 +45,14 @@ in
       };
     };
     uwsgiConfig = {
-      http = "127.0.0.1:${port}";
+      http = links.searxng.tuple;
       cache2 = "name=searxcache,items=2000,blocks=2000,blocksize=65536,bitmap=1";
       buffer-size = 65536;
       env = ["SEARXNG_SETTINGS_PATH=/run/searx/settings.yml"];
       disable-logging = true;
     };
   };
-  services.nginx.virtualHosts."search.${tools.meta.domain}" = lib.recursiveUpdate (tools.nginx.vhosts.proxy "http://127.0.0.1:${port}") {
+  services.nginx.virtualHosts."search.${tools.meta.domain}" = lib.recursiveUpdate (tools.nginx.vhosts.proxy links.searxng.url) {
     extraConfig = "access_log off;";
   };
   systemd.services.uwsgi.after = [ "wireguard-wgmv-es7.service" "network-addresses-wgmv-es7.service" ];

@@ -3,6 +3,7 @@ let
   host = tools.identity.autoDomain "sips";

   inherit (inputs.self.packages.${pkgs.system}) sips;
+  inherit (config) links;

   connStringNet = "host=127.0.0.1 sslmode=disable dbname=sips user=sips";
   connString = "host=/var/run/postgresql dbname=sips user=sips";
@@ -23,14 +24,18 @@ in
     mode = "0400";
   };

-  reservePortsFor = [ "sips" "sipsInternal" "sipsIpfsApiProxy" ];
+  links = {
+    sips.protocol = "http";
+    sipsInternal.protocol = "http";
+    sipsIpfsApiProxy.protocol = "http";
+  };

   systemd.services.sips = {
     wantedBy = [ "multi-user.target" ];
     after = [ "network.target" "postgresql.service" ];
     requires = [ "sips-ipfs-api-proxy.service" ];
     serviceConfig = {
-      ExecStart = "${sips}/bin/sips --dbdriver postgres --db \"${connString}\" --addr 127.0.0.1:${config.portsStr.sipsInternal} --api http://127.0.0.1:${config.portsStr.sipsIpfsApiProxy} --apitimeout 604800s";
+      ExecStart = "${sips}/bin/sips --dbdriver postgres --db \"${connString}\" --addr ${links.sipsInternal.tuple} --api ${links.sipsIpfsApiProxy.url} --apitimeout 604800s";
       PrivateNetwork = true;
       DynamicUser = true;
     };
@@ -41,7 +46,7 @@ in
     after = [ "network.target" "sips.service" ];
     bindsTo = [ "sips.service" ];
     serviceConfig = {
-      ExecStart = "${pkgs.socat}/bin/socat tcp4-listen:${config.portsStr.sipsIpfsApiProxy},fork,reuseaddr,bind=127.0.0.1 unix-connect:/run/ipfs/ipfs-api.sock";
+      ExecStart = "${pkgs.socat}/bin/socat tcp4-listen:${links.sipsIpfsApiProxy.portStr},fork,reuseaddr,bind=${links.sipsIpfsApiProxy.ipv4} unix-connect:/run/ipfs/ipfs-api.sock";
       PrivateNetwork = true;
       DynamicUser = true;
       SupplementaryGroups = "ipfs";
@@ -54,7 +59,7 @@ in
     bindsTo = [ "sips.service" ];
     requires = [ "sips-proxy.socket" ];
     serviceConfig = {
-      ExecStart = "${config.systemd.package}/lib/systemd/systemd-socket-proxyd 127.0.0.1:${config.portsStr.sipsInternal}";
+      ExecStart = "${config.systemd.package}/lib/systemd/systemd-socket-proxyd ${links.sipsInternal.tuple}";
       PrivateNetwork = true;
       DynamicUser = true;
       SupplementaryGroups = "ipfs";
@@ -66,11 +71,11 @@ in
     wantedBy = [ "sockets.target" ];
     after = [ "network.target" ];
     socketConfig = {
-      ListenStream = "127.0.0.1:${config.portsStr.sips}";
+      ListenStream = "${links.sips.tuple}";
     };
   };

   environment.systemPackages = [ sipsctl ];

-  services.nginx.virtualHosts.${host} = tools.nginx.vhosts.proxy "http://127.0.0.1:${config.portsStr.sips}";
+  services.nginx.virtualHosts.${host} = tools.nginx.vhosts.proxy links.sips.url;
 }

@@ -3,10 +3,11 @@ with tools.nginx;
 let
   login = "login.${tools.meta.domain}";
   cfg = config.services.keycloak;
+  kc = config.links.keycloak;
 in
 {
   tested.requiredChecks = [ "keycloak" ];
-  reservePortsFor = [ "keycloak" ];
+  links.keycloak.protocol = "http";

   imports = [
     ./identity-management.nix
@@ -18,7 +19,7 @@ in
     mode = "0400";
   };
   services.nginx.virtualHosts = {
-    "${login}" = lib.recursiveUpdate (vhosts.proxy "http://${cfg.settings.http-host}:${config.portsStr.keycloak}") {
+    "${login}" = lib.recursiveUpdate (vhosts.proxy kc.url) {
       locations."= /".return = "302 /auth/realms/master/account/";
     };
     "account.${domain}" = vhosts.redirect "https://${login}/auth/realms/master/account/";
@@ -31,8 +32,8 @@ in
     passwordFile = config.age.secrets.keycloak-dbpass.path;
   };
   settings = {
-    http-host = "127.0.0.1";
-    http-port = config.ports.keycloak;
+    http-host = kc.ipv4;
+    http-port = kc.port;
     hostname = login;
     proxy = "edge";
     # for backcompat, TODO: remove

@@ -5,13 +5,13 @@ let

   flakePkgs = inputs.self.packages.${pkgs.system};

-  port = config.portsStr.uptime-kuma;
+  link = config.links.uptime-kuma;

   dataDir = "/srv/storage/private/uptime-kuma";
 in

 {
-  reservePortsFor = [ "uptime-kuma" ];
+  links.uptime-kuma.protocol = "http";

   users.users.uptime-kuma = {
     isSystemUser = true;
@@ -62,8 +62,8 @@ in
     environment = {
       NODE_ENV = "production";
       DATA_DIR = dataDir;
-      UPTIME_KUMA_HOST = "127.0.0.1";
-      UPTIME_KUMA_PORT = port;
+      UPTIME_KUMA_HOST = link.ipv4;
+      UPTIME_KUMA_PORT = link.portStr;
       UPTIME_KUMA_HIDE_LOG = lib.concatStringsSep "," [
         "debug_monitor"
         "info_monitor"
@@ -71,7 +71,7 @@ in
     };
   };

-  services.nginx.virtualHosts."status.${domain}" = lib.recursiveUpdate (tools.nginx.vhosts.proxy "http://127.0.0.1:${port}") {
+  services.nginx.virtualHosts."status.${domain}" = lib.recursiveUpdate (tools.nginx.vhosts.proxy link.url) {
     locations = {
       "/".proxyWebsockets = true;
       "=/".return = "302 /status/${builtins.replaceStrings ["."] ["-"] domain}";

@@ -1,9 +1,7 @@
 { config, inputs, pkgs, tools, ... }:

-let
-  port = config.portsStr.nixIpfs;
-in {
-  reservePortsFor = [ "nixIpfs" ];
+{
+  links.nixIpfs.protocol = "http";

   systemd.services.nix-ipfs-cache = {
     wantedBy = [ "multi-user.target" ];
@@ -14,7 +12,7 @@ in {
       CacheDirectory = "nix-ipfs-cache";
     };
     environment = {
-      REFLEX_PORT = port;
+      REFLEX_PORT = config.links.nixIpfs.portStr;
       IPFS_API = config.services.ipfs.apiAddress;
       NIX_CACHES = toString [
         "https://cache.nixos.org"
@@ -24,5 +22,5 @@ in {
     };
   };

-  services.nginx.virtualHosts."reflex.${tools.meta.domain}" = tools.nginx.vhosts.proxy "http://127.0.0.1:${port}";
+  services.nginx.virtualHosts."reflex.${tools.meta.domain}" = tools.nginx.vhosts.proxy config.links.nixIpfs.url;
 }

@@ -4,7 +4,7 @@ let
   cfg = config.services.ipfs;
   apiAddress = "/unix/run/ipfs/ipfs-api.sock";
   ipfsApi = pkgs.writeTextDir "api" apiAddress;
-  gwPort = config.portsStr.ipfsGateway;
+  gw = config.links.ipfsGateway;
 in
 {
   age.secrets.ipfs-swarm-key = {
@@ -14,7 +14,7 @@ in
     inherit (cfg) group;
   };

-  reservePortsFor = [ "ipfsGateway" ];
+  links.ipfsGateway.protocol = "http";

   networking.firewall = {
     allowedTCPPorts = [ 4001 ];
@@ -28,7 +28,7 @@ in
     autoMigrate = false;

     inherit apiAddress;
-    gatewayAddress = "/ip4/127.0.0.1/tcp/${gwPort}";
+    gatewayAddress = "/ip4/${gw.ipv4}/tcp/${gw.portStr}";
     dataDir = "/srv/storage/ipfs/repo";
     localDiscovery = false;

@@ -4,16 +4,17 @@ let

   writeJSON = filename: data: pkgs.writeText filename (builtins.toJSON data);

-  inherit (config) ports portsStr;
-
   relabel = from: to: {
     source_labels = [ from ];
     target_label = to;
   };
 in
 {
-  # same as remote loki port
-  reservePortsFor = [ "loki" ];
+  # remote loki
+  links.loki = {
+    protocol = "http";
+    ipv4 = hosts.VEGAS.hypr.addr;
+  };

   services.journald.extraConfig = "Storage=volatile";

@@ -41,7 +42,7 @@ in
       server.disable = true;
       positions.filename = "\${STATE_DIRECTORY:/tmp}/promtail-positions.yaml";
       clients = [
-        { url = "http://${hosts.VEGAS.hypr.addr}:${portsStr.loki}/loki/api/v1/push"; }
+        { url = "${config.links.loki.url}/loki/api/v1/push"; }
       ];
       scrape_configs = [
         {