diff --git a/cluster/services/consul/agent.nix b/cluster/services/consul/agent.nix index b0b65bd..e45ea47 100644 --- a/cluster/services/consul/agent.nix +++ b/cluster/services/consul/agent.nix @@ -13,6 +13,7 @@ in services.consul = { enable = true; webUi = true; + package = depot.packages.consul; extraConfig = { datacenter = "eu-central"; domain = "sd-magic.${domain}."; diff --git a/cluster/services/forge/server.nix b/cluster/services/forge/server.nix index f093bfb..2de93b3 100644 --- a/cluster/services/forge/server.nix +++ b/cluster/services/forge/server.nix @@ -11,32 +11,34 @@ let link = config.links.forge; - exe = lib.getExe config.services.gitea.package; + exe = lib.getExe config.services.forgejo.package; in { system.ascensions.forgejo = { - requiredBy = [ "gitea.service" ]; - incantations = i: [ ]; + requiredBy = [ "forgejo.service" ]; + before = [ "forgejo.service" ]; + incantations = i: [ + (i.execShell "chown -R forgejo:forgejo /srv/storage/private/forge") + ]; }; age.secrets = { forgejoOidcSecret = { file = ./credentials/forgejo-oidc-secret.age; - owner = "gitea"; + owner = "forgejo"; }; forgejoDbCredentials = { file = ./credentials/forgejo-db-credentials.age; - owner = "gitea"; + owner = "forgejo"; }; }; links.forge.protocol = "http"; - services.gitea = { + services.forgejo = { enable = true; package = depot.packages.forgejo; - appName = "The Forge"; stateDir = "/srv/storage/private/forge"; database = { createDatabase = false; @@ -48,6 +50,9 @@ in passwordFile = secrets.forgejoDbCredentials.path; }; settings = { + DEFAULT = { + APP_NAME = "The Forge"; + }; server = { DOMAIN = host; ROOT_URL = "https://${host}/"; @@ -75,7 +80,7 @@ in services.nginx.virtualHosts."${host}" = vhosts.proxy link.url; - systemd.services.gitea.preStart = let + systemd.services.forgejo.preStart = let providerName = "PrivateVoidAccount"; args = lib.escapeShellArgs [ "--name" providerName diff --git a/cluster/services/ipfs/cluster.nix b/cluster/services/ipfs/cluster.nix index df7b9bb..2e03c27 100644 --- a/cluster/services/ipfs/cluster.nix +++ b/cluster/services/ipfs/cluster.nix @@ -29,7 +29,6 @@ in { services.ipfs-cluster = { enable = true; - package = depot.packages.ipfs-cluster; consensus = "crdt"; dataDir = "/srv/storage/ipfs/cluster"; secretFile = config.age.secrets.ipfs-cluster-secret.path; diff --git a/cluster/services/ipfs/remote-api.nix b/cluster/services/ipfs/remote-api.nix index 378392c..75e0ede 100644 --- a/cluster/services/ipfs/remote-api.nix +++ b/cluster/services/ipfs/remote-api.nix @@ -16,5 +16,5 @@ in }; }; - services.oauth2_proxy.nginx.virtualHosts = [ "ipfs.admin.${domain}" ]; + services.oauth2-proxy.nginx.virtualHosts."ipfs.admin.${domain}" = { }; } diff --git a/cluster/services/nextcloud/host.nix b/cluster/services/nextcloud/host.nix index dd53b76..f92d1f9 100644 --- a/cluster/services/nextcloud/host.nix +++ b/cluster/services/nextcloud/host.nix @@ -41,11 +41,13 @@ in dbuser = "storage"; dbpassFile = config.age.secrets.nextcloud-dbpass.path; - overwriteProtocol = "https"; - adminuser = "sa"; adminpassFile = config.age.secrets.nextcloud-adminpass.path; }; + + settings = { + overwriteprotocol = "https"; + }; }; services.nginx.virtualHosts."${config.services.nextcloud.hostName}" = { addSSL = true; diff --git a/cluster/services/nginx/nginx.nix b/cluster/services/nginx/nginx.nix index 087f20f..dc0fccc 100644 --- a/cluster/services/nginx/nginx.nix +++ b/cluster/services/nginx/nginx.nix @@ -25,5 +25,8 @@ in { ''; }; networking.firewall.allowedTCPPorts = [ 80 443 ]; - 
systemd.services.nginx.after = [ "network-online.target" ]; + systemd.services.nginx = { + after = [ "network-online.target" ]; + wants = [ "network-online.target" ]; + }; } diff --git a/cluster/services/storage/external.nix b/cluster/services/storage/external.nix index 2f26961..971d9a6 100644 --- a/cluster/services/storage/external.nix +++ b/cluster/services/storage/external.nix @@ -9,7 +9,7 @@ in fileSystems.external = { mountpoint = "/srv/storage"; authFile = ./secrets/external-storage-auth-${hostName}.age; - backend = "s3c://${cluster.config.links.garageS3.hostname}/storage-${hostName}"; + backend = "s3c4://${cluster.config.links.garageS3.hostname}/storage-${hostName}"; backendOptions = [ "disable-expect100" ]; }; }; diff --git a/cluster/services/storage/garage.nix b/cluster/services/storage/garage.nix index a8f1cbf..8997613 100644 --- a/cluster/services/storage/garage.nix +++ b/cluster/services/storage/garage.nix @@ -30,7 +30,7 @@ in enable = true; package = depot.packages.garage; settings = { - replication_mode = 3; + replication_mode = "3"; block_size = 16 * 1024 * 1024; db_engine = "lmdb"; metadata_dir = "/var/lib/garage-metadata"; diff --git a/flake.lock b/flake.lock index c0752c7..738e42c 100644 --- a/flake.lock +++ b/flake.lock @@ -40,11 +40,11 @@ ] }, "locked": { - "lastModified": 1711742460, - "narHash": "sha256-0O4v6e4a1toxXZ2gf5INhg4WPE5C5T+SVvsBt+45Mcc=", + "lastModified": 1717279440, + "narHash": "sha256-kH04ReTjxOpQumgWnqy40vvQLSnLGxWP6RF3nq5Esrk=", "owner": "zhaofengli", "repo": "attic", - "rev": "4dbdbee45728d8ce5788db6461aaaa89d98081f0", + "rev": "717cc95983cdc357bc347d70be20ced21f935843", "type": "github" }, "original": { @@ -76,11 +76,11 @@ ] }, "locked": { - "lastModified": 1702918879, - "narHash": "sha256-tWJqzajIvYcaRWxn+cLUB9L9Pv4dQ3Bfit/YjU5ze3g=", + "lastModified": 1717025063, + "narHash": "sha256-dIubLa56W9sNNz0e8jGxrX3CAkPXsq7snuFA/Ie6dn8=", "owner": "ipetkov", "repo": "crane", - "rev": "7195c00c272fdd92fc74e7d5a0a2844b9fadb2fb", + "rev": "480dff0be03dac0e51a8dfc26e882b0d123a450e", "type": "github" }, "original": { @@ -121,11 +121,11 @@ ] }, "locked": { - "lastModified": 1713532798, - "narHash": "sha256-wtBhsdMJA3Wa32Wtm1eeo84GejtI43pMrFrmwLXrsEc=", + "lastModified": 1717408969, + "narHash": "sha256-Q0OEFqe35fZbbRPPRdrjTUUChKVhhWXz3T9ZSKmaoVY=", "owner": "numtide", "repo": "devshell", - "rev": "12e914740a25ea1891ec619bb53cf5e6ca922e40", + "rev": "1ebbe68d57457c8cae98145410b164b5477761f4", "type": "github" }, "original": { @@ -197,11 +197,11 @@ ] }, "locked": { - "lastModified": 1715865404, - "narHash": "sha256-/GJvTdTpuDjNn84j82cU6bXztE0MSkdnTWClUCRub78=", + "lastModified": 1717285511, + "narHash": "sha256-iKzJcpdXih14qYVcZ9QC9XuZYnPc6T8YImb6dX166kw=", "owner": "hercules-ci", "repo": "flake-parts", - "rev": "8dc45382d5206bd292f9c2768b8058a8fd8311d9", + "rev": "2a55567fcf15b1b1c7ed712a2c6fadaec7412ea8", "type": "github" }, "original": { @@ -404,16 +404,15 @@ ] }, "locked": { - "lastModified": 1626443268, - "narHash": "sha256-LAsxgaWKTxOVZVpNrUG9ZrHMnzNMKKxKciVitxdgylE=", + "lastModified": 1716758395, + "narHash": "sha256-yM/ICgmMxUAk/feKojy/Jul8jh4OaVBhQoIChA6Vvq8=", "owner": "numtide", "repo": "nar-serve", - "rev": "84a77d8ab3ddec9d8090d2f0bc6718484e2d94ea", + "rev": "a1458804bb1ab9f1a44101e56a010ca95b8e8309", "type": "github" }, "original": { "owner": "numtide", - "ref": "v0.5.0", "repo": "nar-serve", "type": "github" } @@ -438,9 +437,7 @@ "flake-compat": "flake-compat_2", "flake-parts": "flake-parts_2", "libgit2": "libgit2", - "nixpkgs": [ - "nixpkgs" - ], 
+ "nixpkgs": "nixpkgs_3", "nixpkgs-regression": [ "blank" ], @@ -496,16 +493,32 @@ }, "nixpkgs_3": { "locked": { - "lastModified": 1717072704, - "narHash": "sha256-CDrqjliWZePpUb++X27U1IP0oYoGB4NdCpUezEk9FzM=", + "lastModified": 1709083642, + "narHash": "sha256-7kkJQd4rZ+vFrzWu8sTRtta5D1kBG0LSRYAfhtmMlSo=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "4da08daf9eafaafe9a23a1154f87e51c15f99806", + "rev": "b550fe4b4776908ac2a861124307045f8e717c8e", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixos-23.11-small", + "ref": "release-23.11", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_4": { + "locked": { + "lastModified": 1717653235, + "narHash": "sha256-wODpVx0FtLHnyKIOnm4V7fE9P8Pg12u/8ytY++VYMK0=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "844ccd07fb2aa17250952aee34a6fefd914b4638", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-unstable-small", "repo": "nixpkgs", "type": "github" } @@ -577,7 +590,7 @@ "nar-serve": "nar-serve", "nix-filter": "nix-filter", "nix-super": "nix-super", - "nixpkgs": "nixpkgs_3", + "nixpkgs": "nixpkgs_4", "repin-flake-utils": "repin-flake-utils", "systems": "systems_2" } diff --git a/flake.nix b/flake.nix index c20e137..581b5ae 100644 --- a/flake.nix +++ b/flake.nix @@ -26,12 +26,11 @@ inputs = { systems.url = "github:privatevoid-net/nix-systems-default-linux"; - nixpkgs.url = "github:NixOS/nixpkgs/nixos-23.11-small"; + nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable-small"; nix-super = { url = "gitlab:max/nix-super?host=git.privatevoid.net"; inputs = { - nixpkgs.follows = "nixpkgs"; nixpkgs-regression.follows = "blank"; }; }; @@ -59,7 +58,7 @@ }; nar-serve = { - url = "github:numtide/nar-serve/v0.5.0"; + url = "github:numtide/nar-serve"; inputs = { nixpkgs.follows = "nixpkgs"; flake-utils.follows = "repin-flake-utils"; diff --git a/hosts/VEGAS/modules/oauth2-proxy/default.nix b/hosts/VEGAS/modules/oauth2-proxy/default.nix index 5db4f39..faa1e8f 100644 --- a/hosts/VEGAS/modules/oauth2-proxy/default.nix +++ b/hosts/VEGAS/modules/oauth2-proxy/default.nix @@ -1,8 +1,7 @@ -{ config, lib, depot, ... }: +{ config, depot, ... 
}: let inherit (depot.lib.meta) domain; login = x: "https://login.${domain}/auth/realms/master/protocol/openid-connect/${x}"; - cfg = config.services.oauth2_proxy; in { age.secrets.oauth2_proxy-secrets = { @@ -12,11 +11,9 @@ in mode = "0400"; }; - users.users.oauth2_proxy.group = "oauth2_proxy"; - users.groups.oauth2_proxy = {}; - - services.oauth2_proxy = { + services.oauth2-proxy = { enable = true; + nginx.domain = config.services.keycloak.settings.hostname; approvalPrompt = "auto"; provider = "keycloak"; scope = "openid"; @@ -35,24 +32,4 @@ in skip-provider-button = true; }; }; - services.nginx.virtualHosts = lib.genAttrs cfg.nginx.virtualHosts (_vhost: { - # apply protection to the whole vhost, not just / - extraConfig = '' - auth_request /oauth2/auth; - error_page 401 = /oauth2/sign_in; - - # pass information via X-User and X-Email headers to backend, - # requires running with --set-xauthrequest flag - auth_request_set $user $upstream_http_x_auth_request_user; - auth_request_set $email $upstream_http_x_auth_request_email; - proxy_set_header X-User $user; - proxy_set_header X-Email $email; - - # if you enabled --cookie-refresh, this is needed for it to work with auth_request - auth_request_set $auth_cookie $upstream_http_set_cookie; - add_header Set-Cookie $auth_cookie; - ''; - locations."/oauth2/".extraConfig = "auth_request off;"; - locations."/oauth2/auth".extraConfig = "auth_request off;"; - }); } diff --git a/hosts/VEGAS/services/api/default.nix b/hosts/VEGAS/services/api/default.nix index ca7d25c..897acb7 100644 --- a/hosts/VEGAS/services/api/default.nix +++ b/hosts/VEGAS/services/api/default.nix @@ -36,6 +36,5 @@ in }; }; - - services.oauth2_proxy.nginx.virtualHosts = [ apiAddr ]; + services.oauth2-proxy.nginx.virtualHosts.${apiAddr} = { }; } diff --git a/hosts/VEGAS/services/mail/imap.nix b/hosts/VEGAS/services/mail/imap.nix index fc247bc..2beb14e 100644 --- a/hosts/VEGAS/services/mail/imap.nix +++ b/hosts/VEGAS/services/mail/imap.nix @@ -48,7 +48,7 @@ in { modules = [ pkgs.dovecot_pigeonhole ]; - sieveScripts.after = ./sieve; + sieve.scripts.after = ./sieve; extraConfig = with config.services.dovecot2; '' auth_username_format = %n diff --git a/hosts/VEGAS/services/mail/postfix.nix b/hosts/VEGAS/services/mail/postfix.nix index 43f0ca6..077fe94 100644 --- a/hosts/VEGAS/services/mail/postfix.nix +++ b/hosts/VEGAS/services/mail/postfix.nix @@ -90,8 +90,14 @@ in }; }; - systemd.services.postfix.after = [ "network-online.target" "network-addresses-${interfaces.primary.link}.service" "network-addresses-vstub.service" ]; - systemd.services.postfix-setup.after = [ "network-online.target" "network-addresses-${interfaces.primary.link}.service" "network-addresses-vstub.service" ]; + systemd.services.postfix = { + after = [ "network-online.target" "network-addresses-${interfaces.primary.link}.service" "network-addresses-vstub.service" ]; + wants = [ "network-online.target" ]; + }; + systemd.services.postfix-setup = { + after = [ "network-online.target" "network-addresses-${interfaces.primary.link}.service" "network-addresses-vstub.service" ]; + wants = [ "network-online.target" ]; + }; services.fail2ban.jails.postfix.settings = { mode = "extra"; diff --git a/hosts/deploy.nix b/hosts/deploy.nix index 1442fd9..830c75b 100644 --- a/hosts/deploy.nix +++ b/hosts/deploy.nix @@ -23,18 +23,18 @@ in async = true; deploy = { agents = callUpon hour; - rollbackScript = genAttrs systems (flip withSystem ({ pkgs, ... }: + rollbackScript = genAttrs systems (flip withSystem ({ config, pkgs, ... 
}: let scheduleReboot = pkgs.writeShellScript "schedule-reboot.sh" '' - export PATH="${pkgs.consul}/bin:${pkgs.systemd}/bin:${pkgs.coreutils}/bin" + export PATH="${config.packages.consul}/bin:${pkgs.systemd}/bin:${pkgs.coreutils}/bin" currentTime=$(date +%s) lastScheduledTime=$(consul kv get system/coordinated-reboot/last) if [[ $? -ne 0 ]]; then - lastScheduledTime=$((currentTime - 300)) + lastScheduledTime=$((currentTime - 900)) fi nextScheduledTime=$((lastScheduledTime + 3600)) - if [[ $nextScheduledTime -lt $((currentTime + 300)) ]]; then - nextScheduledTime=$((currentTime + 300)) + if [[ $nextScheduledTime -lt $((currentTime + 900)) ]]; then + nextScheduledTime=$((currentTime + 900)) fi consul kv put system/coordinated-reboot/last $nextScheduledTime echo "Scheduling reboot for $nextScheduledTime" @@ -46,7 +46,7 @@ in ScheduleShutdown st reboot ''${nextScheduledTime}000000 ''; in pkgs.writeShellScript "post-effect.sh" '' - export PATH="${pkgs.consul}/bin:${pkgs.coreutils}/bin" + export PATH="${config.packages.consul}/bin:${pkgs.coreutils}/bin" if [[ "$(realpath /run/booted-system/kernel)" != "$(realpath /nix/var/nix/profiles/system/kernel)" ]]; then echo "Scheduling reboot for kernel upgrade" if ! consul members >/dev/null; then diff --git a/modules/system-recovery/default.nix b/modules/system-recovery/default.nix index ece57ea..1f9185a 100644 --- a/modules/system-recovery/default.nix +++ b/modules/system-recovery/default.nix @@ -2,32 +2,25 @@ users.users = { sa = { isNormalUser = true; - initialHashedPassword = "$6$/WpFHuBXPJHZx$nq0YnOvSTSqu2B3OkPITSPCKUPVfPK04wbPpK/Ntla2MRWJb5eRzKxIK.ASBq0lKay7xpZW0PnQ58qnDTBkf8/"; hashedPassword = "$6$/WpFHuBXPJHZx$nq0YnOvSTSqu2B3OkPITSPCKUPVfPK04wbPpK/Ntla2MRWJb5eRzKxIK.ASBq0lKay7xpZW0PnQ58qnDTBkf8/"; extraGroups = [ "wheel" ]; openssh.authorizedKeys.keys = [ "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMmdWfmAs/0rno8zJlhBFMY2SumnHbTNdZUXJqxgd9ON max@jericho" "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIL5C7mC5S2gM0K6x0L/jNwAeQYbFSzs16Q73lONUlIkL max@TITAN" + "sk-ecdsa-sha2-nistp256@openssh.com AAAAInNrLWVjZHNhLXNoYTItbmlzdHAyNTZAb3BlbnNzaC5jb20AAAAIbmlzdHAyNTYAAABBBDHyIQ7AWXUKlmNCFDCsl9u/k0cTd9PCXLdx3/oQJ9oLMfwor2HCP6f+Pi5JuEx7D5Guzn1pj7hq8eQh0cpB418AAAAEc3NoOg== max@jericho" + "sk-ecdsa-sha2-nistp256@openssh.com AAAAInNrLWVjZHNhLXNoYTItbmlzdHAyNTZAb3BlbnNzaC5jb20AAAAIbmlzdHAyNTYAAABBBEV+hYUnt5DnPGuZUsFXi8+YHYPsTxR/Rm96AA9ny8TxauBrLiZfErQgkXfQc3UcVXc/6sBL8AdzMw0Fqs8ISokAAAAEc3NoOg== max@TITAN" ]; }; sa_max = { isNormalUser = true; uid = 2000; - initialHashedPassword = "$6$/WpFHuBXPJHZx$nq0YnOvSTSqu2B3OkPITSPCKUPVfPK04wbPpK/Ntla2MRWJb5eRzKxIK.ASBq0lKay7xpZW0PnQ58qnDTBkf8/"; hashedPassword = "$6$/WpFHuBXPJHZx$nq0YnOvSTSqu2B3OkPITSPCKUPVfPK04wbPpK/Ntla2MRWJb5eRzKxIK.ASBq0lKay7xpZW0PnQ58qnDTBkf8/"; group = "wheel"; openssh.authorizedKeys.keys = [ "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMmdWfmAs/0rno8zJlhBFMY2SumnHbTNdZUXJqxgd9ON max@jericho" "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIL5C7mC5S2gM0K6x0L/jNwAeQYbFSzs16Q73lONUlIkL max@TITAN" - ]; - }; - sa_alex = { - isNormalUser = true; - uid = 2001; - initialHashedPassword = "$6$/WpFHuBXPJHZx$nq0YnOvSTSqu2B3OkPITSPCKUPVfPK04wbPpK/Ntla2MRWJb5eRzKxIK.ASBq0lKay7xpZW0PnQ58qnDTBkf8/"; - hashedPassword = "$6$/WpFHuBXPJHZx$nq0YnOvSTSqu2B3OkPITSPCKUPVfPK04wbPpK/Ntla2MRWJb5eRzKxIK.ASBq0lKay7xpZW0PnQ58qnDTBkf8/"; - group = "wheel"; - openssh.authorizedKeys.keys = [ + "sk-ecdsa-sha2-nistp256@openssh.com 
AAAAInNrLWVjZHNhLXNoYTItbmlzdHAyNTZAb3BlbnNzaC5jb20AAAAIbmlzdHAyNTYAAABBBDHyIQ7AWXUKlmNCFDCsl9u/k0cTd9PCXLdx3/oQJ9oLMfwor2HCP6f+Pi5JuEx7D5Guzn1pj7hq8eQh0cpB418AAAAEc3NoOg== max@jericho" + "sk-ecdsa-sha2-nistp256@openssh.com AAAAInNrLWVjZHNhLXNoYTItbmlzdHAyNTZAb3BlbnNzaC5jb20AAAAIbmlzdHAyNTYAAABBBEV+hYUnt5DnPGuZUsFXi8+YHYPsTxR/Rm96AA9ny8TxauBrLiZfErQgkXfQc3UcVXc/6sBL8AdzMw0Fqs8ISokAAAAEc3NoOg== max@TITAN" ]; }; }; diff --git a/packages/checks/ascensions.nix b/packages/checks/ascensions.nix index a7dc52b..b09f67e 100644 --- a/packages/checks/ascensions.nix +++ b/packages/checks/ascensions.nix @@ -1,4 +1,4 @@ -{ testers, nixosModules }: +{ testers, nixosModules, consul }: let dataDir = { @@ -18,6 +18,8 @@ testers.runNixOSTest { ./modules/consul.nix ]; + extraBaseModules.services.consul.package = consul; + nodes = let common = { config, lib, ... }: let inherit (config.networking) hostName; diff --git a/packages/checks/default.nix b/packages/checks/default.nix index 7879949..f39e12d 100644 --- a/packages/checks/default.nix +++ b/packages/checks/default.nix @@ -1,18 +1,30 @@ { config, lib, self, ... }: +let + timeMachine = { + preUnstable = config.lib.timeTravel "637f048ee36d5052e2e7938bf9039e418accde66"; + }; +in + { perSystem = { filters, pkgs, self', system, ... }: { checks = lib.mkIf (system == "x86_64-linux") { ascensions = pkgs.callPackage ./ascensions.nix { + inherit (self'.packages) consul; inherit (self) nixosModules; }; garage = pkgs.callPackage ./garage.nix { - inherit (self'.packages) garage; + inherit (self'.packages) garage consul; inherit (self) nixosModules; inherit (config) cluster; }; + ipfs-cluster-upgrade = pkgs.callPackage ./ipfs-cluster-upgrade.nix { + inherit (self) nixosModules; + previous = timeMachine.preUnstable; + }; + jellyfin-stateless = pkgs.callPackage ./jellyfin-stateless.nix { inherit (self'.packages) jellyfin; inherit (config) cluster; @@ -26,6 +38,13 @@ inherit (self) nixosModules; inherit (self'.packages) postgresql; }; + + s3ql-upgrade = pkgs.callPackage ./s3ql-upgrade.nix { + inherit (self'.packages) s3ql; + inherit (self) nixosModules; + previous = timeMachine.preUnstable; + }; + searxng = pkgs.callPackage ./searxng.nix { inherit (self'.packages) searxng; }; diff --git a/packages/checks/garage.nix b/packages/checks/garage.nix index dea41d4..1856a97 100644 --- a/packages/checks/garage.nix +++ b/packages/checks/garage.nix @@ -1,4 +1,4 @@ -{ testers, nixosModules, cluster, garage }: +{ testers, nixosModules, cluster, garage, consul }: testers.runNixOSTest { name = "garage"; @@ -7,6 +7,8 @@ testers.runNixOSTest { ./modules/consul.nix ]; + extraBaseModules.services.consul.package = consul; + nodes = let common = { config, lib, ... 
}: let inherit (config.networking) hostName primaryIPAddress; diff --git a/packages/checks/ipfs-cluster-upgrade.nix b/packages/checks/ipfs-cluster-upgrade.nix new file mode 100644 index 0000000..ee536ae --- /dev/null +++ b/packages/checks/ipfs-cluster-upgrade.nix @@ -0,0 +1,45 @@ +{ testers, nixosModules, lib, ipfs-cluster, previous, system }: + +testers.runNixOSTest { + name = "ipfs-cluster-upgrade"; + + extraBaseModules = { + imports = [ + nixosModules.ipfs + nixosModules.ipfs-cluster + nixosModules.systemd-extras + ]; + + services.ipfs = { + enable = true; + apiAddress = "/ip4/127.0.0.1/tcp/5001"; + }; + services.ipfs-cluster = { + enable = true; + openSwarmPort = true; + consensus = "crdt"; + package = previous.packages.${system}.ipfs-cluster; + }; + specialisation.upgrade = { + inheritParentConfig = true; + configuration = { + services.ipfs-cluster.package = lib.mkForce ipfs-cluster; + }; + }; + }; + + nodes.machine = {}; + + testScript = /*python*/ '' + machine.wait_for_unit("ipfs.service") + machine.wait_for_unit("ipfs-cluster.service") + machine.succeed("ipfs-cluster-ctl add -r -n TestPin123 /var/empty") + + machine.succeed("systemctl stop ipfs-cluster.service") + machine.succeed("/run/current-system/specialisation/upgrade/bin/switch-to-configuration test") + + machine.wait_for_unit("ipfs-cluster.service") + machine.succeed("systemctl is-active ipfs-cluster.service") + machine.succeed("ipfs-cluster-ctl pin ls | grep TestPin123") + ''; +} diff --git a/packages/checks/patroni.nix b/packages/checks/patroni.nix index bd29fd7..dd24f33 100644 --- a/packages/checks/patroni.nix +++ b/packages/checks/patroni.nix @@ -162,7 +162,7 @@ nixosTest ( print(node.succeed("patronictl list cluster1")) node.wait_until_succeeds(f"[ $(patronictl list -f json cluster1 | jq 'length') == {expected_replicas + 1} ]") node.wait_until_succeeds("[ $(patronictl list -f json cluster1 | jq 'map(select(.Role | test(\"^Leader$\"))) | map(select(.State | test(\"^running$\"))) | length') == 1 ]") - node.wait_until_succeeds(f"[ $(patronictl list -f json cluster1 | jq 'map(select(.Role | test(\"^Replica$\"))) | map(select(.State | test(\"^running$\"))) | length') == {expected_replicas} ]") + node.wait_until_succeeds(f"[ $(patronictl list -f json cluster1 | jq 'map(select(.Role | test(\"^Replica$\"))) | map(select(.State | test(\"^streaming$\"))) | length') == {expected_replicas} ]") print(node.succeed("patronictl list cluster1")) client.wait_until_succeeds("psql -h 127.0.0.1 -U postgres --command='select 1;'") diff --git a/packages/checks/s3ql-upgrade.nix b/packages/checks/s3ql-upgrade.nix new file mode 100644 index 0000000..f7616f6 --- /dev/null +++ b/packages/checks/s3ql-upgrade.nix @@ -0,0 +1,67 @@ +{ testers, nixosModules, lib, s3ql, previous, system }: + +testers.runNixOSTest { + name = "s3ql-upgrade"; + + nodes.machine = { + imports = [ + nixosModules.ascensions + nixosModules.external-storage + nixosModules.systemd-extras + ./modules/nixos/age-dummy-secrets.nix + ]; + + _module.args.depot.packages = { inherit (previous.packages.${system}) s3ql; }; + + services.external-storage = { + fileSystems.test = { + mountpoint = "/srv/test"; + backend = "local:///mnt/backend"; + }; + }; + + environment.etc."dummy-secrets/storageAuth-test".text = '' + [local] + storage-url: local:// + ''; + + systemd.tmpfiles.settings.s3ql-storage."/mnt/backend".d.mode = "0700"; + + system.ascensions.s3ql-test = { + requiredBy = [ "remote-storage-test.service" ]; + before = [ "remote-storage-test.service" ]; + incantations = i: []; + }; + + 
specialisation.upgrade = { + inheritParentConfig = true; + configuration = { + _module.args.depot = lib.mkForce { packages = { inherit s3ql; }; }; + system.ascensions.s3ql-test = { + incantations = lib.mkForce (i: [ + (i.runS3qlUpgrade "test") + ]); + }; + }; + }; + }; + + testScript = /*python*/ '' + machine.wait_for_unit("remote-storage-test.service") + machine.succeed("mkdir /srv/test/hello") + machine.succeed("echo HelloWorld > /srv/test/hello/world.txt") + + with subtest("should upgrade"): + machine.succeed("systemctl stop remote-storage-test.service") + machine.succeed("/run/current-system/specialisation/upgrade/bin/switch-to-configuration test") + machine.wait_for_unit("remote-storage-test.service") + machine.succeed("systemctl is-active remote-storage-test.service") + machine.succeed("test \"$(cat /srv/test/hello/world.txt)\" == HelloWorld") + + with subtest("should survive a restart"): + machine.succeed("systemctl restart remote-storage-test.service") + machine.wait_for_unit("remote-storage-test.service") + machine.succeed("systemctl is-active remote-storage-test.service") + machine.succeed("test \"$(cat /srv/test/hello/world.txt)\" == HelloWorld") + ''; +} diff --git a/packages/networking/ipfs-cluster/.codeclimate.yml b/packages/networking/ipfs-cluster/.codeclimate.yml deleted file mode 100644 index fc7d4a1..0000000 --- a/packages/networking/ipfs-cluster/.codeclimate.yml +++ /dev/null @@ -1,38 +0,0 @@ -ratings: - paths: - - "**/*.go" - -checks: - file-lines: - config: - threshold: 500 - method-complexity: - config: - threshold: 15 - method-lines: - config: - threshold: 80 - similar-code: - enabled: false - return-statements: - config: - threshold: 10 - argument-count: - config: - threshold: 6 - -engines: - fixme: - enabled: true - config: - strings: - - FIXME - - HACK - - XXX - - BUG - golint: - enabled: true - govet: - enabled: true - gofmt: - enabled: true \ No newline at end of file diff --git a/packages/networking/ipfs-cluster/.codecov.yml b/packages/networking/ipfs-cluster/.codecov.yml deleted file mode 100644 index b0bb2c7..0000000 --- a/packages/networking/ipfs-cluster/.codecov.yml +++ /dev/null @@ -1,31 +0,0 @@ -coverage: - status: - project: - default: - # basic - target: auto - threshold: 50 - base: auto - # advanced - branches: null - if_no_uploads: error - if_not_found: success - if_ci_failed: error - only_pulls: false - flags: null - paths: null - patch: - default: - # basic - target: auto - threshold: 50 - base: auto - # advanced - branches: null - if_no_uploads: error - if_not_found: success - if_ci_failed: error - only_pulls: false - flags: null - paths: null -comment: false diff --git a/packages/networking/ipfs-cluster/.envrc b/packages/networking/ipfs-cluster/.envrc deleted file mode 100644 index 5d84e98..0000000 --- a/packages/networking/ipfs-cluster/.envrc +++ /dev/null @@ -1,2 +0,0 @@ -source ../../build-support/activate-shell -nix_direnv_watch_file project.nix diff --git a/packages/networking/ipfs-cluster/.gitignore b/packages/networking/ipfs-cluster/.gitignore deleted file mode 100644 index 70f5ca3..0000000 --- a/packages/networking/ipfs-cluster/.gitignore +++ /dev/null @@ -1,45 +0,0 @@ -tag_annotation -coverage.out -cmd/ipfs-cluster-service/ipfs-cluster-service -cmd/ipfs-cluster-ctl/ipfs-cluster-ctl -cmd/ipfs-cluster-follow/ipfs-cluster-follow -sharness/lib/sharness -sharness/test-results -sharness/trash* -vendor/ - - -raftFolderFromTest* -peerstore -shardTesting -compose - -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so 
- -# Folders -_obj -_test -test/sharness/test-results -test/sharness/trash* -test/sharness/lib/sharness -test/sharness/.test_config -test/sharness/.test_ipfs - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/packages/networking/ipfs-cluster/CHANGELOG.md b/packages/networking/ipfs-cluster/CHANGELOG.md deleted file mode 100644 index 4a552f6..0000000 --- a/packages/networking/ipfs-cluster/CHANGELOG.md +++ /dev/null @@ -1,2571 +0,0 @@ -# IPFS Cluster Changelog - -### v1.0.2 - 2022-07-06 - -IPFS Cluster v1.0.2 is a maintenance release with bug fixes and another -iteration of the experimental support for the Pinning Services API that was -introduced on v1.0.0, including Bearer token authorization support for both -the REST and the Pinning Service APIs. - -**This release includes a - [security fix in the go-car library](https://github.com/ipld/go-car/security/advisories/GHSA-9x4h-8wgm-8xfg)**. The - security issue allows an attacker to crash a cluster peer or cause excessive - memory usage when uploading CAR files via the REST API (`POST - /add?format=car` endpoint). - -This is also the first release after moving the project from the "ipfs" to the -"ipfs-cluster" Github organization, which means the project Go modules -have new paths (everything is redirected though). The Docker builds remain -inside the "ipfs" namespace (i.e. `docker pull ipfs/ipfs-cluster`). - -IPFS Cluster is also ready to work with go-ipfs v0.13.0+. We recommend upgrading. - -#### List of changes - -##### Breaking changes - -##### Features - -* REST/PinSVC API: support JWT bearer token authorization | [ipfs/ipfs-cluster#1703](https://github.com/ipfs/ipfs-cluster/issues/1703) -* crdt: commit pending batched pins on shutdown | [ipfs/ipfs-cluster#1697](https://github.com/ipfs/ipfs-cluster/issues/1697) | 1719 -* Export a prometheus metric with the current disk informer value | [ipfs/ipfs-cluster#1725](https://github.com/ipfs/ipfs-cluster/issues/1725) - -##### Bug fixes - -* Fix adding large directories | [ipfs/ipfs-cluster#1691](https://github.com/ipfs/ipfs-cluster/issues/1691) | [ipfs/ipfs-cluster#1700](https://github.com/ipfs/ipfs-cluster/issues/1700) -* PinSVC API: fix compliance errors and bugs | [ipfs/ipfs-cluster#1704](https://github.com/ipfs/ipfs-cluster/issues/1704) -* Pintracker: fix missing and wrong values in PinStatus object fields for - recovered operations | [ipfs/ipfs-cluster#1705](https://github.com/ipfs/ipfs-cluster/issues/1705) -* ctl: fix "Exp" label showing the pin timestamp instead of the expiration date | [ipfs/ipfs-cluster#1666](https://github.com/ipfs/ipfs-cluster/issues/1666) | [ipfs/ipfs-cluster#1716](https://github.com/ipfs/ipfs-cluster/issues/1716) -* Pintracker: fix races causing wrong counts in metrics | [ipfs/ipfs-cluster#1717](https://github.com/ipfs/ipfs-cluster/issues/1717) | [ipfs/ipfs-cluster#1729](https://github.com/ipfs/ipfs-cluster/issues/1729) -* Update go-car to v0.4.0 (security fixes) | [ipfs/ipfs-cluster#1730](https://github.com/ipfs/ipfs-cluster/issues/1730) - -##### Other changes - -* Improve language, fix typos to changelog | [ipfs/ipfs-cluster#1667](https://github.com/ipfs/ipfs-cluster/issues/1667) -* Update comment in docker-compose | [ipfs/ipfs-cluster#1689](https://github.com/ipfs/ipfs-cluster/issues/1689) -* Migrate from ipfs/ipfs-cluster to ipfs-cluster/ipfs-cluster |
[ipfs/ipfs-cluster#1694](https://github.com/ipfs/ipfs-cluster/issues/1694) -* Enable spell-checking and fix spelling errors (US locale) | [ipfs/ipfs-cluster#1695](https://github.com/ipfs/ipfs-cluster/issues/1695) -* Enable CodeQL analysis and fix security warnings | [ipfs/ipfs-cluster#1696](https://github.com/ipfs/ipfs-cluster/issues/1696) -* Dependency upgrades: libp2p-0.20.1 etc. | [ipfs/ipfs-cluster#1711](https://github.com/ipfs/ipfs-cluster/issues/1711) | [ipfs/ipfs-cluster#1712](https://github.com/ipfs/ipfs-cluster/issues/1712) | [ipfs/ipfs-cluster#1724](https://github.com/ipfs/ipfs-cluster/issues/1724) -* API: improve debug logging during tls setup | [ipfs/ipfs-cluster#1715](https://github.com/ipfs/ipfs-cluster/issues/1715) - -#### Upgrading notices - -##### Configuration changes - -There are no configuration changes for this release. - -##### REST API - -The REST API has a new `POST /token` endpoint, which returns a JSON object -with a JWT token (when correctly authenticated). - -This token can be used to authenticate using `Authorization: Bearer ` -header on subsequent requests. - -The token is tied and verified against a basic authentication user and -password, as configured in the `basic_auth_credentials` field. - -At the moment we do not support revocation, expiration and other token -options. - -##### Pinning Service API - -The Pinning Service API has a new `POST /token` endpoint, which returns a JSON object -with a JWT token (when correctly authenticated). See the REST API section above. - -##### IPFS Proxy API - -No changes to IPFS Proxy API. - -##### Go APIs - -All cluster modules have new paths: every instance of "ipfs/ipfs-cluster" should now be "ipfs-cluster/ipfs-cluster". - -##### Other - -go-ipfs v0.13.0 introduced some changes to the Block/Put API. IPFS Cluster now -uses the `cid-format` option when performing Block-Puts. We believe the change -does not affect adding blocks and that it should still work with previous -go-ipfs versions, yet we recommend upgrading to go-ipfs v0.13.1 or later. - - ---- - -### v1.0.1 - 2022-05-06 - -IPFS Cluster v1.0.1 is a maintenance release ironing out some issues and -bringing a couple of improvements around observability of cluster performance: - -* We have fixed the `ipfscluster_pins` metric and added a few new ones that - help determine how fast the cluster can pin and add blocks. -* We have added a new Informer that broadcasts current pinning-queue size, - which means we can take this information into account when making - allocations, essentially allowing peers with big pinning queues to be - relieved by peers with smaller pinning queues. - -Please read below for a list of changes and things to watch out for. - -#### List of changes - -##### Breaking changes - -Peers running IPFS Cluster v1.0.0 will not be able to read the pin's user-set -metadata fields for pins submitted by peers in later versions, since metadata -is now stored on a different protobuf field. If this is an issue, all peers in -the cluster should upgrade. 
- -##### Features - -* Pinqueue Informer: let pinning queue size inform allocation selection | [ipfs-cluster/ipfs-cluster#1649](https://github.com/ipfs-cluster/ipfs-cluster/issues/1649) | [ipfs-cluster/ipfs-cluster#1657](https://github.com/ipfs-cluster/ipfs-cluster/issues/1657) -* Metrics: add additional Prometheus metrics | [ipfs-cluster/ipfs-cluster#1650](https://github.com/ipfs-cluster/ipfs-cluster/issues/1650) | [ipfs-cluster/ipfs-cluster#1659](https://github.com/ipfs-cluster/ipfs-cluster/issues/1659) - -##### Bug fixes - -* Fix: state import can result in different CRDT-heads | [ipfs-cluster/ipfs-cluster#1547](https://github.com/ipfs-cluster/ipfs-cluster/issues/1547) | [ipfs-cluster/ipfs-cluster#1664](https://github.com/ipfs-cluster/ipfs-cluster/issues/1664) -* Fix: `ipfs-cluster-ctl pin ls` hangs | [ipfs-cluster/ipfs-cluster#1663](https://github.com/ipfs-cluster/ipfs-cluster/issues/1663) -* Fix: restapi client panics on retry | [ipfs-cluster/ipfs-cluster#1655](https://github.com/ipfs-cluster/ipfs-cluster/issues/1655) | [ipfs-cluster/ipfs-cluster#1662](https://github.com/ipfs-cluster/ipfs-cluster/issues/1662) -* Fix: bad behavior while adding and ipfs is down | [ipfs-cluster/ipfs-cluster#1646](https://github.com/ipfs-cluster/ipfs-cluster/issues/1646) -* Fix: `ipfscluster_pins` metric issues bad values | [ipfs-cluster/ipfs-cluster#1645](https://github.com/ipfs-cluster/ipfs-cluster/issues/1645) - -##### Other changes - -* Dependency upgrades (includes go-libp2p v0.19.1) | [ipfs-cluster/ipfs-cluster#1660](https://github.com/ipfs-cluster/ipfs-cluster/issues/1660) -* Build with go1.18 | [ipfs-cluster/ipfs-cluster#1661](https://github.com/ipfs-cluster/ipfs-cluster/issues/1661) -* Do not issue freespace metrics when freespace is 0 | [ipfs-cluster/ipfs-cluster#1656](https://github.com/ipfs-cluster/ipfs-cluster/issues/1656) -* Convert pinning/queued/error metrics to gauges | [ipfs-cluster/ipfs-cluster#1647](https://github.com/ipfs-cluster/ipfs-cluster/issues/1647) | [ipfs-cluster/ipfs-cluster#1651](https://github.com/ipfs-cluster/ipfs-cluster/issues/1651) - -#### Upgrading notices - -##### Configuration changes - -There is a new `pinqueue` configuration object inside the `informer` section on newly initialized configurations: - -``` - "informer": { - ... - "pinqueue": { - "metric_ttl": "30s", - "weight_bucket_size": 100000 - }, - ... -``` - -This enables the Pinqueue Informer, which broadcasts metrics containing the size of the pinqueue with the metric weight divided by `weight_bucket_size`. The new metric is not used for allocations by default, and it needs to be manually added to the `allocate_by` option in the allocator, usually like: - -``` -"allocator": { - "balanced": { - "allocate_by": [ - "tag:group", - "pinqueue", - "freespace" - ] - } -``` - - -##### REST API - -No changes to REST API. - -##### IPFS Proxy API - -No changes to IPFS Proxy API. - -##### Go APIs - -No relevant changes to Go APIs, other than the PinTracker interface now requiring a `PinQueueSize` method.
- -##### Other - -The following metrics are now available in the Prometheus endpoint when enabled: - -``` -ipfscluster_pins_ipfs_pins gauge -ipfscluster_pins_pin_add counter -ipfscluster_pins_pin_add_errors counter -ipfscluster_blocks_put counter -ipfscluster_blocks_added_size counter -ipfscluster_blocks_added counter -ipfscluster_blocks_put_error counter -``` - -The following metrics were converted from `counter` to `gauge`: - -``` -ipfscluster_pins_pin_queued -ipfscluster_pins_pinning -ipfscluster_pins_pin_error -``` - -Peers that are reporting `freespace` as 0 and which use this metric to -allocate pins will no longer be available for allocations (they stop -broadcasting this metric). This means setting `StorageMax` on IPFS to 0 -effectively prevents any pins from being explicitly allocated to a peer -(that is, when replication_factor != *everywhere*). - ---- - -### v1.0.0 - 2022-04-22 - -IPFS Cluster v1.0.0 is a major release that represents that this project has -reached maturity and is able to perform and scale in production environments -(50+ million pins and 20 nodes). - -This is a breaking release: v1.0.0 cluster peers are not compatible with -previous cluster peers as we have bumped the RPC protocol version (which had -remained unchanged since 0.12.0). - -This release's major change is the switch to using streaming RPC endpoints for -several RPC methods (listing pins, listing statuses, listing peers, adding -blocks), which we added support for in go-libp2p-gorpc. - -This has a major impact on two areas: - -- Memory consumption with very large pinsets: before, listing all the pins on - the HTTP API required loading all the pins in the pinset into memory, then - responding with a json-array containing the full pinset. When working at - large scale with multimillion pinsets, this caused large memory usage spikes - (whenever the full pinset was needed anywhere). Streaming RPC means - components no longer need to send requests or responses in a single large - collection (a json array), but can individually stream items end-to-end, - without having to load-all and store in memory while the request is being - handled. - -- Adding via cluster peers: before, when adding content to IPFS through a - Cluster peer, it would chunk and send every individual chunk to the cluster - peers supposed to store the content, and then they would send it to IPFS - individually, which resulted in a separate `block/put` request against the - IPFS HTTP API. Files with a dozen chunks already showed that performance was - not great. With streaming RPC, we can set up a single libp2p stream from the - adding node to the destinations, and they can stream the blocks with a - single `block/put` multipart-request directly into IPFS. We recommend using - go-ipfs >= 0.12.0 for this. - -These changes affect how cluster peers talk to each other and also how API -endpoints that responded with array collections behave (they now stream json -objects). - -This release additionally includes the first version of the experimental -[IPFS Pinning Service API](https://ipfs.github.io/pinning-services-api-spec/) -for IPFS Cluster. This API runs along the existing HTTP REST API and IPFS -Proxy API and allows sending and querying pins from Cluster using standard -Pinning-service clients (works well with go-ipfs's `ipfs pin remote`). Note -that it does not support authentication nor tracking different requests for -the same CID (request ID is the CID). - -The full list of additional features and bug fixes can be found below.
- -#### List of changes - -##### Features - -* restapi/adder: Add `?no-pin=true/false` option to `/add` endpoint | [ipfs-cluster/ipfs-cluster#1590](https://github.com/ipfs-cluster/ipfs-cluster/issues/1590) -* cluster: add `pin_only_on_trusted_peers` config option | [ipfs-cluster/ipfs-cluster#1585](https://github.com/ipfs-cluster/ipfs-cluster/issues/1585) | [ipfs-cluster/ipfs-cluster#1591](https://github.com/ipfs-cluster/ipfs-cluster/issues/1591) -* restapi/client: support querying status for multiple CIDs | [ipfs-cluster/ipfs-cluster#1564](https://github.com/ipfs-cluster/ipfs-cluster/issues/1564) | [ipfs-cluster/ipfs-cluster#1592](https://github.com/ipfs-cluster/ipfs-cluster/issues/1592) -* Pinning Services API | [ipfs-cluster/ipfs-cluster#1213](https://github.com/ipfs-cluster/ipfs-cluster/issues/1213) | [ipfs-cluster/ipfs-cluster#1483](https://github.com/ipfs-cluster/ipfs-cluster/issues/1483) -* restapi/adder: Return pin allocations on add output | [ipfs-cluster/ipfs-cluster#1598](https://github.com/ipfs-cluster/ipfs-cluster/issues/1598) | [ipfs-cluster/ipfs-cluster#1599](https://github.com/ipfs-cluster/ipfs-cluster/issues/1599) -* RPC Streaming | [ipfs-cluster/ipfs-cluster#1602](https://github.com/ipfs-cluster/ipfs-cluster/issues/1602) | [ipfs-cluster/ipfs-cluster#1607](https://github.com/ipfs-cluster/ipfs-cluster/issues/1607) | [ipfs-cluster/ipfs-cluster#1611](https://github.com/ipfs-cluster/ipfs-cluster/issues/1611) | [ipfs-cluster/ipfs-cluster#810](https://github.com/ipfs-cluster/ipfs-cluster/issues/810) | [ipfs-cluster/ipfs-cluster#1437](https://github.com/ipfs-cluster/ipfs-cluster/issues/1437) | [ipfs-cluster/ipfs-cluster#1616](https://github.com/ipfs-cluster/ipfs-cluster/issues/1616) | [ipfs-cluster/ipfs-cluster#1621](https://github.com/ipfs-cluster/ipfs-cluster/issues/1621) | [ipfs-cluster/ipfs-cluster#1631](https://github.com/ipfs-cluster/ipfs-cluster/issues/1631) | [ipfs-cluster/ipfs-cluster#1632](https://github.com/ipfs-cluster/ipfs-cluster/issues/1632) - -##### Bug fixes - -##### Other changes - -* pubsubmon: Remove accrual failure detection | [ipfs-cluster/ipfs-cluster#939](https://github.com/ipfs-cluster/ipfs-cluster/issues/939) | [ipfs-cluster/ipfs-cluster#1586](https://github.com/ipfs-cluster/ipfs-cluster/issues/1586) | [ipfs-cluster/ipfs-cluster#1589](https://github.com/ipfs-cluster/ipfs-cluster/issues/1589) -* crdt: log with INFO when batches are committed | [ipfs-cluster/ipfs-cluster#1596](https://github.com/ipfs-cluster/ipfs-cluster/issues/1596) -* Dependency upgrades | [ipfs-cluster/ipfs-cluster#1613](https://github.com/ipfs-cluster/ipfs-cluster/issues/1613) | [ipfs-cluster/ipfs-cluster#1617](https://github.com/ipfs-cluster/ipfs-cluster/issues/1617) | [ipfs-cluster/ipfs-cluster#1627](https://github.com/ipfs-cluster/ipfs-cluster/issues/1627) -* Bump RPC protocol version | [ipfs-cluster/ipfs-cluster#1615](https://github.com/ipfs-cluster/ipfs-cluster/issues/1615) -* Replace cid.Cid with api.Cid wrapper type | [ipfs-cluster/ipfs-cluster#1626](https://github.com/ipfs-cluster/ipfs-cluster/issues/1626) -* Provide string JSON marshaling for PinType | [ipfs-cluster/ipfs-cluster#1628](https://github.com/ipfs-cluster/ipfs-cluster/issues/1628) -* ipfs-cluster-ctl should exit with status 1 when an argument error happens | [ipfs-cluster/ipfs-cluster#1633](https://github.com/ipfs-cluster/ipfs-cluster/issues/1633) | [ipfs-cluster/ipfs-cluster#1634](https://github.com/ipfs-cluster/ipfs-cluster/issues/1634) -* Revamp and fix basic exported metrics: pins, queued, pinning, pin errors | 
[ipfs-cluster/ipfs-cluster#1187](https://github.com/ipfs-cluster/ipfs-cluster/issues/1187) | [ipfs-cluster/ipfs-cluster#1470](https://github.com/ipfs-cluster/ipfs-cluster/issues/1470) | [ipfs-cluster/ipfs-cluster#1637](https://github.com/ipfs-cluster/ipfs-cluster/issues/1637) - -#### Upgrading notices - -As mentioned, all peers in the cluster should upgrade and things will heavily break otherwise. - -##### Configuration changes - -There are no breaking configuration changes. Other than that: - -* A `pin_only_on_trusted_peers` boolean option that defaults to `false` has - been added to the `cluster` configuration section. When enabled, only - trusted peers will be considered when allocating pins. -* A new `pinsvcapi` section is now added to the `api` configuration section - for newly-initialized configurations. When this section is present, the - experimental Pinning Services API is launched. See the docs for the - different options. Most of the code/options are similar to the `restapi` - section as both share most of the code. - -##### REST API - -###### Streaming responses - -The following endpoint responses have changed: - -* `/allocations` returned a json array of api.Pin objects and now it will stream them. -* `/pins` returned a json array of api.PinInfo objects and now it will stream them. -* `/recover` returned a json array of api.PinInfo objects and now it will stream them. - -Failures on streaming endpoints are captured in request Trailer headers (same -as `/add`), in particular with a `X-Stream-Error` trailer. Note that the -`X-Stream-Error` trailer may appear even if no error happened (empty value in -this case). - -###### JSON-encoding of CIDs - -As of v1.0.0, every "cid" as returned inside any REST API object will no -longer encode as: - -``` -{ "/" : "" } -``` - -but instead just as `"cid"`. - -###### Add endpoint changes - -There are two small backwards compatible changes to the `/add` endpoint: - -* A `?no-pin` query option has been added. In this case, cluster will not pin -the content after having added it. -* The output objects returned when adding (i.e. the ones containing the CIDs - of the files) now include an `Allocations` field, with an array of peer IDs - corresponding to the peers on which the blocks were added. - -###### Pin object changes - -`Pin` objects (returned from `/allocations`, `POST /pins` etc.) will now -encode the Type as a human-readable string and not as a number, as previously -happened. - -###### PinInfo object changes - -`PinInfo`/`GlobalPinInfo` objects (returned from `/pins` and `/recover` endpoints), now -include additional fields (which before were only accessible via `/allocations`): - -- `allocations`: an array of peer IDs indicating the pin allocations. -- `origins`: the list of origins associated to this pin. -- `metadata`: an object with pin metadata. -- `created`: date when the pin was added to the cluster. -- `ipfs_peer_id`: IPFS peer ID to which the object is pinned (when known). -- `ipfs_peer_addresses`: IPFS addresses of the IPFS daemon to which the object is pinned (when known). - -##### Pinning Services API - -This API now exists. It does not support Authentication and is experimental. - -##### IPFS Proxy API - -The `/add?pin=false` call will no longer trigger a cluster pin followed by an unpin. - -The `/pin/ls?stream=true` query option is now supported. - -##### Go APIs - -There have been many changes to different interfaces (i.e. to stream out -collections over channels rather than return slices).
- -We have also taken the opportunity to get rid of pointers to objects in many -places. This was a bad step, which makes cluster perform many more allocations -than it should, and as a result causes more GC pressure. In any case, it was -not a good Go development practice to use referenced types all around for -objects that are not supposed to be mutated. - -##### Other - -The following metrics are now available in the Prometheus endpoint when enabled: - -``` -ipfscluster_pins -ipfscluster_pins_pin_queued -ipfscluster_pins_pin_error -ipfscluster_pins_pinning -``` - - ---- - -### v0.14.5 - 2022-02-16 - -This is a minor IPFS Cluster release. The main feature is the upgrade of the -go-ds-crdt library which now supports resuming the processing of CRDT-DAGs -that were not fully synced. - -On first start on an updated node, the CRDT library will have to re-walk the -full CRDT-DAG. This happens in the background. - -For the full list of features and bug fixes, see the list below. - -#### List of changes - -##### Features - -* CRDT: update with RepairInterval option and more workers | [ipfs-cluster/ipfs-cluster#1561](https://github.com/ipfs-cluster/ipfs-cluster/issues/1561) | [ipfs-cluster/ipfs-cluster#1576](https://github.com/ipfs-cluster/ipfs-cluster/issues/1576) -* Add `?cids` query parameter to /pins: limit status request to several CIDs | [ipfs-cluster/ipfs-cluster#1562](https://github.com/ipfs-cluster/ipfs-cluster/issues/1562) -* Pintracker improvements | [ipfs-cluster/ipfs-cluster#1556](https://github.com/ipfs-cluster/ipfs-cluster/issues/1556) | [ipfs-cluster/ipfs-cluster#1554](https://github.com/ipfs-cluster/ipfs-cluster/issues/1554) | [ipfs-cluster/ipfs-cluster#1212](https://github.com/ipfs-cluster/ipfs-cluster/issues/1212) - * Status information shows peer ID of IPFS peer pinning the content - * Peernames correctly set for remote peers on status objects - * Pin names not set for in-flight pin status objects - -##### Bug fixes - -* Fix: logging was too noisy | [ipfs-cluster/ipfs-cluster#1581](https://github.com/ipfs-cluster/ipfs-cluster/issues/1581) | [ipfs-cluster/ipfs-cluster#1579](https://github.com/ipfs-cluster/ipfs-cluster/issues/1579) -* Remove warning message about informer metrics | [ipfs-cluster/ipfs-cluster#1543](https://github.com/ipfs-cluster/ipfs-cluster/issues/1543) -* Fix: IPFS repo/stat gets hammered on busy peers | [ipfs-cluster/ipfs-cluster#1559](https://github.com/ipfs-cluster/ipfs-cluster/issues/1559) -* Fix: faster shutdown by aborting state list on context cancellation | [ipfs-cluster/ipfs-cluster#1555](https://github.com/ipfs-cluster/ipfs-cluster/issues/1555) - -##### Other changes - -* Leave peername empty when unknown on status response | [ipfs-cluster/ipfs-cluster#1569](https://github.com/ipfs-cluster/ipfs-cluster/issues/1569) | [ipfs-cluster/ipfs-cluster#1575](https://github.com/ipfs-cluster/ipfs-cluster/issues/1575) -* Fix comment in graphs.go | [ipfs-cluster/ipfs-cluster#1570](https://github.com/ipfs-cluster/ipfs-cluster/issues/1570) | [ipfs-cluster/ipfs-cluster#1574](https://github.com/ipfs-cluster/ipfs-cluster/issues/1574) -* Make `/add?local=true` requests forcefully allocate to local peer | [ipfs-cluster/ipfs-cluster#1560](https://github.com/ipfs-cluster/ipfs-cluster/issues/1560) -* Dependency upgrades | [ipfs-cluster/ipfs-cluster#1580](https://github.com/ipfs-cluster/ipfs-cluster/issues/1580) - -#### Upgrading notices - -##### Configuration changes - -Configuration is backwards compatible with previous versions.
- -The `consensus/crdt` section has a new option `repair_interval` which is set -by default to `1h` and controls how often we check if the crdt DAG needs to be -reprocessed (i.e. when it becomes marked dirty due to an error). Setting it to -`0` disables repairs. - -The `ipfs_connector/ipfshttp` section has a new option -`informer_trigger_interval` which defaults to `0` (disabled). This controls -whether clusters issue a metrics update every certain number of pins (i.e. for -fine-grain control of freespace after a pin happens). - -The `monitor/pubsubmon/failure_threshold` option no longer has any effect. - -##### REST API - -The `/pins` (StatusAll) endpoint now takes a `?cid=cid1,cid2` option which -allows filtering the resulting list to specific CIDs. - -##### Go APIs - -We added a `LatestForPeer()` method to the PeerMonitor interface which returns -the latest metric of a certain type received by a peer. - -##### Other - -Before, adding content using the `local=true` option would add the blocks to -the peer receiving the request and then allocate the pin normally (i.e. to the -peers with most free space available, which may or may not be the local peer). Now, -"local add" requests will always allocate the pin to the local peer since it -already has the content. - -Before, we would send a freespace metric update every 10 pins. After: we don't -do it anymore and rely on the normal metric interval, unless -`informer_trigger_interval` is configured. - -The CRDT library will create a database of processed DAG blocks during the -first start on an upgraded node. This happens in the background and should -only happen once. Peers with very large CRDT-DAGs may experience increased -disk usage during this time. - ---- - - -### v0.14.4 - 2022-01-11 - -This is a minor IPFS Cluster release with additional performance improvements. - -On one side, we have improved branch pruning when syncing CRDT dags. This -should improve the time it takes for a peer to sync the pinset when joining a -high-activity cluster, where branching happens often. - -On the other side, we have improved how Cluster finds and re-triggers pinning -operations for items that failed to pin previously, heavily reducing the -pressure on the IPFS daemon and speeding up the operation. - - -#### List of changes - -##### Features - -No new features. - -##### Bug fixes - -* Improved pruning on crdt-sync | [ipfs-cluster/ipfs-cluster#1541](https://github.com/ipfs-cluster/ipfs-cluster/issues/1541) -* Pintracker: avoid pin/ls for every item | [ipfs-cluster/ipfs-cluster#1538](https://github.com/ipfs-cluster/ipfs-cluster/issues/1538) -* Pintracker: set unexpectedly_unpinned status correctly | [ipfs-cluster/ipfs-cluster#1537](https://github.com/ipfs-cluster/ipfs-cluster/issues/1537) -* Tags informer: TTL should be default when not provided | [ipfs-cluster/ipfs-cluster#1519](https://github.com/ipfs-cluster/ipfs-cluster/issues/1519) - -##### Other changes - -* ipfs-cluster-service: buffered i/o on state import/export | [ipfs-cluster/ipfs-cluster#1517](https://github.com/ipfs-cluster/ipfs-cluster/issues/1517) -* Dependency upgrades, go-libp2p v0.17.0 | 1540 - -#### Upgrading notices - -##### Configuration changes - -No changes. - -##### REST API - -The `/pins/recover` (RecoverAll) endpoint now only returns items that have -been re-queued for pinning (because they were in error). Before, it returned -all items in the state (similar to the `/pins` endpoint, but at a huge perf -impact with large pinsets). - -##### Go APIs - -No changes.
- -##### Other - -`ipfs-cluster-ctl recover` only returns items that have been re-queued (see -REST APIs above). - ---- - -### v0.14.3 - 2022-01-03 - -This is a minor IPFS Cluster release with some performance improvements and -bug fixes. - -First, we have improved the speed at which the pinset can be listed (around -3x). This is important for very large clusters with millions of items on the -pinset. Cluster peers regularly check on all items in the pinset (i.e. to -re-pin failed items or remove expired pins), so this means these operations -will consume fewer resources and complete faster. - -Second, we have added additional options to the `state import` command to -provide more flexibility when migrating content to a new cluster. For example, -allocations and replication factors for all pins can be replaced on -import. One use case is to convert a cluster with "replicate-everywhere" pins -into one cluster with pins allocated to a particular set of peers (as a prior -step to scaling up the cluster by adding more peers). - -Among the bugs fixed, the worst was one causing errors when deserializing some -pins from their JSON representation. This happened when pins had the `Origins` -property set. - - -#### List of changes - -##### Features - -* State import: allow replication factor and allocations overwrite | [ipfs-cluster/ipfs-cluster#1508](https://github.com/ipfs-cluster/ipfs-cluster/issues/1508) - -##### Bug fixes - -* Fix state deserialization | [ipfs-cluster/ipfs-cluster#1507](https://github.com/ipfs-cluster/ipfs-cluster/issues/1507) -* Fix pintracker shutdown errors | [ipfs-cluster/ipfs-cluster#1510](https://github.com/ipfs-cluster/ipfs-cluster/issues/1510) -* API: CORS pre-flight (OPTIONS) requests should bypass authentication | [ipfs-cluster/ipfs-cluster#1512](https://github.com/ipfs-cluster/ipfs-cluster/issues/1512) | [ipfs-cluster/ipfs-cluster#1513](https://github.com/ipfs-cluster/ipfs-cluster/issues/1513) | [ipfs-cluster/ipfs-cluster#1514](https://github.com/ipfs-cluster/ipfs-cluster/issues/1514) -* Monitor: avoid sending invalid metrics | [ipfs-cluster/ipfs-cluster#1511](https://github.com/ipfs-cluster/ipfs-cluster/issues/1511) - -##### Other changes - -* Performance improvements to state list and logging for large states | [ipfs-cluster/ipfs-cluster#1510](https://github.com/ipfs-cluster/ipfs-cluster/issues/1510) - -#### Upgrading notices - -##### Configuration changes - -No changes. - -##### REST API - -No changes. - -##### Go APIs - -No changes. - -##### Other - -`ipfs-cluster-service state import` has new `rmin`, `rmax` and `allocations` -flags. See `ipfs-cluster-service state import --help` for more information. - ---- - -### v0.14.2 - 2021-12-09 - -This is a minor IPFS Cluster release focused on providing features for -production Cluster deployments with very high pin ingestion rates. - -It addresses two important questions from our users: - - * How to ensure that my pins are automatically pinned on my cluster peers - around the world in a balanced fashion. - * How to ensure that items that cannot be pinned do not delay the pinning - of items that are available. - -We address the first of the questions by introducing an improved allocator and -user-defined "tag" metrics. Each cluster peer can now be tagged, and the -allocator can be configured to pin items in a way that they are distributed -among tags. For example, a cluster peer can be tagged with `region: us, -availability-zone: us-west` and so on.
Assuming a cluster made of 6 peers, 2 -per region, and one per availability zone, the allocator would ensure that a -pin with replication factor = 3 lands in the 3 different regions and in the -availability zones with most available space of the two. - -The second question is addressed by enriching pin metadata. Pins will now -store the time that they were added to the cluster. The pin tracker will -additionally keep track of how many times an operation has been retried. Using -these two items, we can prioritize pinning of items that are new and have not -repeatedly failed to pin. The max age and max number of retries used to -prioritize a pin can be controlled in the configuration. - -Please see the information below for more details about how to make use of and -configure these new features. - -#### List of changes - -##### Features - - * Tags informer and partition-based allocations | [ipfs-cluster/ipfs-cluster#159](https://github.com/ipfs-cluster/ipfs-cluster/issues/159) | [ipfs-cluster/ipfs-cluster#1468](https://github.com/ipfs-cluster/ipfs-cluster/issues/1468) | [ipfs-cluster/ipfs-cluster#1485](https://github.com/ipfs-cluster/ipfs-cluster/issues/1485) - * Add timestamps to pin objects | [ipfs-cluster/ipfs-cluster#1484](https://github.com/ipfs-cluster/ipfs-cluster/issues/1484) | [ipfs-cluster/ipfs-cluster#989](https://github.com/ipfs-cluster/ipfs-cluster/issues/989) - * Support priority pinning for recent pins with small number of retries | [ipfs-cluster/ipfs-cluster#1469](https://github.com/ipfs-cluster/ipfs-cluster/issues/1469) | [ipfs-cluster/ipfs-cluster#1490](https://github.com/ipfs-cluster/ipfs-cluster/issues/1490) - -##### Bug fixes - - * Fix flaky adder test | [ipfs-cluster/ipfs-cluster#1461](https://github.com/ipfs-cluster/ipfs-cluster/issues/1461) | [ipfs-cluster/ipfs-cluster#1462](https://github.com/ipfs-cluster/ipfs-cluster/issues/1462) - -##### Other changes - - * Refactor API to facilitate re-use of functionality | [ipfs-cluster/ipfs-cluster#1471](https://github.com/ipfs-cluster/ipfs-cluster/issues/1471) - * Move testing to Github Actions | [ipfs-cluster/ipfs-cluster#1486](https://github.com/ipfs-cluster/ipfs-cluster/issues/1486) - * Dependency upgrades (go-libp2p v0.16.0 etc.) | [ipfs-cluster/ipfs-cluster#1491](https://github.com/ipfs-cluster/ipfs-cluster/issues/1491) | [ipfs-cluster/ipfs-cluster#1501](https://github.com/ipfs-cluster/ipfs-cluster/issues/1501) | [ipfs-cluster/ipfs-cluster#1504](https://github.com/ipfs-cluster/ipfs-cluster/issues/1504) - * Improve `health metrics ` output in ipfs-cluster-ctl | [ipfs-cluster/ipfs-cluster#1506](https://github.com/ipfs-cluster/ipfs-cluster/issues/1506) - -#### Upgrading notices - -Despite the new features, cluster peers should behave exactly as before -when using the previous configuration and should interact well with peers in -the previous version. However, for the new features to take full effect, all -peers should be upgraded to this release. - -##### Configuration changes - -The `pintracker/stateless` configuration section gets 2 new options, which will take defaults when unset: - - * `priority_pin_max_age`, with a default of `24h`, and - * `priority_pin_max_retries`, with a default of `5`. - -A new informer type called "tags" now exists. By default, it has a subsection -in the `informer` configuration section with the following defaults: - -```json - "informer": { - "disk": {...} - }, - "tags": { - "metric_ttl": "30s", - "tags": { - "group": "default" - } - } - }, -``` - -This enables the use of the "tags" informer.
-
-When the allocator is NOT defined (legacy configurations), the `allocate_by`
-option is only set to `["freespace"]`, to keep backwards compatibility (the
-tags allocator with a "group:default" tag will not be present).
-
-##### REST API
-
-The objects returned by the `/pins` endpoints ("GlobalPinInfo" types) now
-include an additional `attempt_count` property, which counts how many times
-the pin or unpin operation was retried, and a `priority_pin` boolean property,
-which indicates whether the ongoing pin operation was last queued in the
-priority queue or not.
-
-The objects returned by the `/allocations` endpoints ("Pin" types) now
-include an additional `timestamp` property.
-
-The objects returned by the `/monitor/metrics/` endpoint now include a
-`weight` property, which is used to sort metrics (before, they were sorted by
-parsing the value as a decimal number).
-
-The REST API client now supports QUIC for libp2p requests whenever not
-using private networks.
-
-##### Go APIs
-
-There are no relevant changes other than the additional fields in the objects
-mentioned in the section right above.
-
-##### Other
-
-Nothing.
-
---
-
-### v0.14.1 - 2021-08-16
-
-This is an IPFS Cluster maintenance release addressing some issues and
-bringing a couple of tweaks. The main fix addresses an issue that would
-prevent cluster peers with very large pinsets (in the millions of objects)
-from completing their startup quickly.
-
-This release is fully compatible with the previous release.
-
-#### List of changes
-
-##### Features
-
-* Improve support for pre-0.14.0 peers | [ipfs-cluster/ipfs-cluster#1409](https://github.com/ipfs-cluster/ipfs-cluster/issues/1409) | [ipfs-cluster/ipfs-cluster#1446](https://github.com/ipfs-cluster/ipfs-cluster/issues/1446)
-* Improve log-level handling | [ipfs-cluster/ipfs-cluster#1439](https://github.com/ipfs-cluster/ipfs-cluster/issues/1439)
-* ctl: --wait returns as soon as replication-factor-min is reached | [ipfs-cluster/ipfs-cluster#1427](https://github.com/ipfs-cluster/ipfs-cluster/issues/1427) | [ipfs-cluster/ipfs-cluster#1444](https://github.com/ipfs-cluster/ipfs-cluster/issues/1444)
-
-##### Bug fixes
-
-* Fix some data races in tests | [ipfs-cluster/ipfs-cluster#1428](https://github.com/ipfs-cluster/ipfs-cluster/issues/1428)
-* Do not block peer startup while waiting for RecoverAll | [ipfs-cluster/ipfs-cluster#1436](https://github.com/ipfs-cluster/ipfs-cluster/issues/1436) | [ipfs-cluster/ipfs-cluster#1438](https://github.com/ipfs-cluster/ipfs-cluster/issues/1438)
-* Use HTTP 307-redirects on restapi paths ending with "/" | [ipfs-cluster/ipfs-cluster#1415](https://github.com/ipfs-cluster/ipfs-cluster/issues/1415) | [ipfs-cluster/ipfs-cluster#1445](https://github.com/ipfs-cluster/ipfs-cluster/issues/1445)
-
-##### Other changes
-
-* Dependency upgrades | [ipfs-cluster/ipfs-cluster#1451](https://github.com/ipfs-cluster/ipfs-cluster/issues/1451)
-
-#### Upgrading notices
-
-##### Configuration changes
-
-No changes. Configurations are fully backwards compatible.
-
-##### REST API
-
-Paths ending with a `/` (slash) were being automatically redirected to the
-path without the slash using a 301 code (permanent redirect). However, most
-clients do not respect the method name when following 301-redirects, thus a
-POST request to `/allocations/` would become a GET request to `/allocations`.
-
-We have now set these redirects to use 307 instead (temporary
-redirect). Clients do keep the HTTP method when following 307 redirects.
-
-##### Go APIs
-
-The parameters object for the RestAPI client `WaitFor` function now has a
-`Limit` field. This allows returning as soon as a given number of peers has
-reached the target status. When unset, the previous behavior is maintained.
-
-##### Other
-
-Per the `WaitFor` modification above, `ipfs-cluster-ctl` now sets the limit to
-the replication-factor-min value on pin/add commands when using the `--wait`
-flag. These commands will potentially return earlier.
-
---
-
-### v0.14.0 - 2021-07-09
-
-This IPFS Cluster release brings a few features to improve cluster operations
-at scale (pinsets over 100k items), along with some bug fixes.
-
-This release is not fully compatible with previous ones. Nodes on different
-versions will be unable to parse metrics from each other (thus `peers ls`
-will not report peers on different versions) and the StatusAll RPC method
-(a.k.a. `ipfs-cluster-ctl status` or the `/pins` API endpoint) will not work.
-Hence the minor version bump. **Please upgrade all of your cluster peers**.
-
-This release brings a few key improvements to the cluster state storage:
-badger will automatically perform garbage collection at regular intervals,
-resolving a long-standing issue of badger using up to 100x the space it
-actually needs. Badger GC will automatically be enabled with defaults, which
-will result in increased disk I/O if there is a lot to GC 15 minutes after
-starting the peer. **If increased disk I/O during GC may affect your service,
-make sure to disable GC manually before upgrading**. In our tests the impact
-was mild enough to consider this a safe default, though in environments with
-very constrained disk I/O it will surely be noticed, at least during the
-first GC cycle, since the datastore was never GC'ed before.
-
-Badger is the datastore we are most familiar with and the most scalable
-choice (chosen by both IPFS and Filecoin). However, it may be that badger's
-behavior and GC needs are not well suited or not preferred, or that more
-downsides are discovered in the future. For those cases, we have added the
-option to run with a LevelDB backend as an alternative. LevelDB does not need
-GC and auto-compacts. It should also scale pretty well for most cases, though
-we have not tested or compared it against badger with very large pinsets. The
-backend can be configured during the daemon `init`, along with the consensus
-component, using a new `--datastore` flag. Like the default Badger backend,
-the new LevelDB backend exposes all LevelDB internal configuration options.
-
-Additionally, operators handling very large clusters may have noticed that
-checking the status of pinning/queued items (`ipfs-cluster-ctl status --filter
-pinning,queued`) took very long, as it listed and iterated over the full ipfs
-pinset. We have added fixes so that no time is wasted listing the full state
-when a filter does not require it.
-
-Finally, cluster pins now have an `origins` option, which allows submitters to
-provide hints for providers of the content. Cluster will instruct IPFS to
-connect to the `origins` of a pin before pinning. Note that for the moment
-[ipfs will keep connected to those peers permanently](https://github.com/ipfs-cluster/ipfs-cluster/issues/1376).
-
-Please read carefully through the notes below, as the release includes subtle
-changes in configuration, defaults and behaviors which may in some cases
-affect you (although they probably will not).
-
-#### List of changes
-
-##### Features
-
-* Set disable_repinning to true by default, for new configurations | [ipfs-cluster/ipfs-cluster#1398](https://github.com/ipfs-cluster/ipfs-cluster/issues/1398)
-* Efficient status queries with filters | [ipfs-cluster/ipfs-cluster#1360](https://github.com/ipfs-cluster/ipfs-cluster/issues/1360) | [ipfs-cluster/ipfs-cluster#1377](https://github.com/ipfs-cluster/ipfs-cluster/issues/1377) | [ipfs-cluster/ipfs-cluster#1399](https://github.com/ipfs-cluster/ipfs-cluster/issues/1399)
-* User-provided pin "origins" | [ipfs-cluster/ipfs-cluster#1374](https://github.com/ipfs-cluster/ipfs-cluster/issues/1374) | [ipfs-cluster/ipfs-cluster#1375](https://github.com/ipfs-cluster/ipfs-cluster/issues/1375)
-* Provide darwin/arm64 binaries (Apple M1). Needs testing!
| [ipfs-cluster/ipfs-cluster#1369](https://github.com/ipfs-cluster/ipfs-cluster/issues/1369) -* Set the "size" field in the response when adding CARs when the archive contains a single unixfs file | [ipfs-cluster/ipfs-cluster#1362](https://github.com/ipfs-cluster/ipfs-cluster/issues/1362) | [ipfs-cluster/ipfs-cluster#1372](https://github.com/ipfs-cluster/ipfs-cluster/issues/1372) -* Support a leveldb-datastore backend | [ipfs-cluster/ipfs-cluster#1364](https://github.com/ipfs-cluster/ipfs-cluster/issues/1364) | [ipfs-cluster/ipfs-cluster#1373](https://github.com/ipfs-cluster/ipfs-cluster/issues/1373) -* Speed up pin/ls by not filtering when not needed | [ipfs-cluster/ipfs-cluster#1405](https://github.com/ipfs-cluster/ipfs-cluster/issues/1405) - -##### Bug fixes - -* Badger datastore takes too much size | [ipfs-cluster/ipfs-cluster#1320](https://github.com/ipfs-cluster/ipfs-cluster/issues/1320) | [ipfs-cluster/ipfs-cluster#1370](https://github.com/ipfs-cluster/ipfs-cluster/issues/1370) -* Fix: error-type responses from the IPFS proxy not understood by ipfs | [ipfs-cluster/ipfs-cluster#1366](https://github.com/ipfs-cluster/ipfs-cluster/issues/1366) | [ipfs-cluster/ipfs-cluster#1371](https://github.com/ipfs-cluster/ipfs-cluster/issues/1371) -* Fix: adding with cid-version=1 does not automagically set raw-leaves | [ipfs-cluster/ipfs-cluster#1358](https://github.com/ipfs-cluster/ipfs-cluster/issues/1358) | [ipfs-cluster/ipfs-cluster#1359](https://github.com/ipfs-cluster/ipfs-cluster/issues/1359) -* Tests: close datastore on test node shutdown | [ipfs-cluster/ipfs-cluster#1389](https://github.com/ipfs-cluster/ipfs-cluster/issues/1389) -* Fix ipfs-cluster-ctl not using dns name when talking to remote https endpoints | [ipfs-cluster/ipfs-cluster#1403](https://github.com/ipfs-cluster/ipfs-cluster/issues/1403) | [ipfs-cluster/ipfs-cluster#1404](https://github.com/ipfs-cluster/ipfs-cluster/issues/1404) - - -##### Other changes - -* Dependency upgrades | [ipfs-cluster/ipfs-cluster#1378](https://github.com/ipfs-cluster/ipfs-cluster/issues/1378) | [ipfs-cluster/ipfs-cluster#1395](https://github.com/ipfs-cluster/ipfs-cluster/issues/1395) -* Update compose to use the latest go-ipfs | [ipfs-cluster/ipfs-cluster#1363](https://github.com/ipfs-cluster/ipfs-cluster/issues/1363) -* Update IRC links to point to new Matrix channel | [ipfs-cluster/ipfs-cluster#1361](https://github.com/ipfs-cluster/ipfs-cluster/issues/1361) - -#### Upgrading notices - -##### Configuration changes - -Configurations are fully backwards compatible. - -The `cluster.disable_repinning` setting now defaults to true on new generated configurations. - -The `datastore.badger` section now includes settings to control (and disable) automatic GC: - -```json - "badger": { - "gc_discard_ratio": 0.2, - "gc_interval": "15m0s", - "gc_sleep": "10s", - ... - } -``` - -**When not present, these settings take their defaults**, so GC will -automatically be enabled on nodes that upgrade keeping their previous -configurations. - -GC can be disabled by setting `gc_interval` to `"0s"`. A GC cycle is made by -multiple GC rounds. Setting `gc_sleep` to `"0s"` will result in a single GC -round. - -Finally, nodes initializing with `--datastore leveldb` will obtain a -`datastore.leveldb` section (instead of a `badger` one). Configurations can -only include one datastore section, either `badger` or `leveldb`. Currently we -offer no way to convert states between the two datastore backends. 
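-
-For example (an illustrative sketch, not a recommendation), automatic GC can
-be switched off by zeroing `gc_interval` while keeping the other values shown
-earlier:
-
-```json
-  "badger": {
-    "gc_discard_ratio": 0.2,
-    "gc_interval": "0s",
-    "gc_sleep": "10s",
-    ...
-  }
-```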
-
-##### REST API
-
-Pin options (`POST /add` and `POST /pins` endpoints) now take an `origins`
-query parameter as an additional pin option. It can be set to a
-comma-separated list of full peer multiaddresses to which IPFS can connect to
-fetch the content. Only the first 10 multiaddresses will be taken into
-account.
-
-The response of the `POST /add?format=car` endpoint when adding a CAR file (a
-single pin progress object) always had the "size" field set to 0. This is now
-set to the unixfs FileSize property when the root of the added CAR
-corresponds to a unixfs node of type File. In any other case, it stays at 0.
-
-The `GET /pins` endpoint reports pin status for all pins in the pinset by
-default and optionally takes a `filter` query param. Before, it would include
-a full GlobalPinInfo object for a pin as long as the status of the CID in one
-of the peers matched the filter, so the object could include statuses for
-other cluster peers for that CID which did not match the filter. Starting
-with this version, the returned statuses will be strictly limited to those of
-the peers matching the filter.
-
-On the same endpoint, a new `unexpectedly_unpinned` pin status has been
-added, which can also be used as a filter. Previously, pins in this state
-were reported as `pin_error`. Note that the `error` filter no longer matches
-the `unexpectedly_unpinned` status, which should be queried directly (or
-without any filter).
-
-##### Go APIs
-
-The PinTracker interface has been updated so that the `StatusAll` method
-takes a TrackerStatus filter. The stateless pintracker implementation has
-been updated accordingly.
-
-##### Other
-
-Docker containers now support `IPFS_CLUSTER_DATASTORE` to set the datastore
-type during initialization (similar to `IPFS_CLUSTER_CONSENSUS`).
-
-Due to the deprecation of the multicodecs repository, we no longer serialize
-metrics by prepending the msgpack multicodec code to the bytes; instead we
-encode the metrics directly. This means older peers will not know how to
-deserialize metrics from newer peers, and vice versa. While peers will keep
-working (particularly, follower peers will keep tracking content etc.), peers
-will not include other peers with different versions in their peerset, and
-many operations that rely on this will not work as intended or will show
-partial views.
-
---
-
-### v0.13.3 - 2021-05-14
-
-IPFS Cluster v0.13.3 brings two new features: CAR file imports and
-crdt-commit batching.
-
-The first one allows uploading CAR files directly to the Cluster using the
-existing Add endpoint with a new option set: `/add?format=car`. The endpoint
-remains fully backwards compatible. CAR files are a simple wrapper around a
-collection of IPFS blocks making up a DAG. Thus, this enables arbitrary DAG
-imports directly through the Cluster REST API, taking advantage of the rest
-of its features like basic-auth access control, the libp2p endpoint and
-multipeer block-put when adding.
-
-The second feature unlocks large scalability improvements for pin ingestion
-with the crdt "consensus" component. By default, each pin or unpin request
-results in an insertion into the crdt-datastore-DAG that maintains and syncs
-the state between nodes, creating a new root. Batching allows grouping
-multiple updates in a single crdt DAG-node. This reduces the number of
-broadcasts, the depth and breadth of the DAG, and the syncing times when the
-Cluster is ingesting many pins, removing most of the overhead in the
-process.
-The batches are automatically committed when they reach a certain age or a
-certain size, both configurable.
-
-Additionally, improvements to timeout behaviors have been introduced.
-
-For more details, check the list below and the latest documentation on the
-[website](https://ipfscluster.io).
-
-#### List of changes
-
-##### Features
-
-* Support adding CAR files | [ipfs-cluster/ipfs-cluster#1343](https://github.com/ipfs-cluster/ipfs-cluster/issues/1343)
-* CRDT batching support | [ipfs-cluster/ipfs-cluster#1008](https://github.com/ipfs-cluster/ipfs-cluster/issues/1008) | [ipfs-cluster/ipfs-cluster#1346](https://github.com/ipfs-cluster/ipfs-cluster/issues/1346) | [ipfs-cluster/ipfs-cluster#1356](https://github.com/ipfs-cluster/ipfs-cluster/issues/1356)
-
-##### Bug fixes
-
-* Improve timeouts and timeout faster when dialing | [ipfs-cluster/ipfs-cluster#1350](https://github.com/ipfs-cluster/ipfs-cluster/issues/1350) | [ipfs-cluster/ipfs-cluster#1351](https://github.com/ipfs-cluster/ipfs-cluster/issues/1351)
-
-##### Other changes
-
-* Dependency upgrades | [ipfs-cluster/ipfs-cluster#1357](https://github.com/ipfs-cluster/ipfs-cluster/issues/1357)
-
-#### Upgrading notices
-
-##### Configuration changes
-
-The `crdt` section of the configuration now has a `batching` subsection which
-controls batching settings:
-
-```json
-"batching": {
-    "max_batch_size": 0,
-    "max_batch_age": "0s"
-}
-```
-
-An additional, hidden `max_queue_size` option exists, with a default of
-`50000`. The meanings of these options are documented in the reference (on
-the website) and in the code.
-
-Batching is disabled by default. To enable it, both `max_batch_size` and
-`max_batch_age` need to be set to positive values.
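-
-For example, to commit a batch as soon as it accumulates 50 updates, or after
-10 seconds, whichever happens first (the values here are illustrative, not
-recommended defaults):
-
-```json
-"batching": {
-    "max_batch_size": 50,
-    "max_batch_age": "10s"
-}
-```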
-
-The `cluster` section of the configuration has a new `dial_peer_timeout`
-option, which defaults to "3s". It controls the default dial timeout when
-libp2p is attempting to open a connection to a peer.
-
-##### REST API
-
-The `/add` endpoint now understands a new query parameter `?format=`, which
-can be set to `unixfs` (default), or `car` (when uploading a CAR file). CAR
-files should have a single root. Additional parts in multipart uploads for
-CAR files are ignored.
-
-##### Go APIs
-
-The `AddParams` object that controls API options for the Add endpoint has
-been updated with the new `Format` option.
-
-##### Other
-
-Nothing.
-
---
-
-### v0.13.2 - 2021-04-06
-
-IPFS Cluster v0.13.2 is a maintenance release addressing bugs and adding a
-couple of small features. It is fully compatible with the previous release.
-
-#### List of changes
-
-##### Features
-
-* Make mDNS failures non-fatal | [ipfs-cluster/ipfs-cluster#1193](https://github.com/ipfs-cluster/ipfs-cluster/issues/1193) | [ipfs-cluster/ipfs-cluster#1310](https://github.com/ipfs-cluster/ipfs-cluster/issues/1310)
-* Add `--wait` flag to `ipfs-cluster-ctl add` command | [ipfs-cluster/ipfs-cluster#1285](https://github.com/ipfs-cluster/ipfs-cluster/issues/1285) | [ipfs-cluster/ipfs-cluster#1301](https://github.com/ipfs-cluster/ipfs-cluster/issues/1301)
-
-##### Bug fixes
-
-* Stop using secio in REST API libp2p server and client | [ipfs-cluster/ipfs-cluster#1315](https://github.com/ipfs-cluster/ipfs-cluster/issues/1315) | [ipfs-cluster/ipfs-cluster#1316](https://github.com/ipfs-cluster/ipfs-cluster/issues/1316)
-* CID status wrongly reported as REMOTE | [ipfs-cluster/ipfs-cluster#1319](https://github.com/ipfs-cluster/ipfs-cluster/issues/1319) | [ipfs-cluster/ipfs-cluster#1331](https://github.com/ipfs-cluster/ipfs-cluster/issues/1331)
-
-##### Other changes
-
-* Dependency upgrades | [ipfs-cluster/ipfs-cluster#1335](https://github.com/ipfs-cluster/ipfs-cluster/issues/1335)
-* Use cid.Cid as map keys in Pintracker | [ipfs-cluster/ipfs-cluster#1322](https://github.com/ipfs-cluster/ipfs-cluster/issues/1322)
-
-#### Upgrading notices
-
-##### Configuration changes
-
-No configuration changes in this release.
-
-##### REST API
-
-The REST API server and clients will no longer negotiate the secio security
-transport. This transport was already the lowest-priority one and should not
-have been used. This, however, may break third-party clients which only
-support secio.
-
-##### Go APIs
-
-Nothing.
-
-##### Other
-
-Nothing.
-
---
-
-### v0.13.1 - 2021-01-14
-
-IPFS Cluster v0.13.1 is a maintenance release with some bugfixes and updated
-dependencies. It should be fully backwards compatible.
-
-This release deprecates `secio` (as required by libp2p), but this was already
-the lowest-priority security transport and `tls` would have been used by
-default. The new `noise` transport becomes the preferred option.
-
-#### List of changes
-
-##### Features
-
-* Support for multiple architectures added to the Docker container | [ipfs-cluster/ipfs-cluster#1085](https://github.com/ipfs-cluster/ipfs-cluster/issues/1085) | [ipfs-cluster/ipfs-cluster#1196](https://github.com/ipfs-cluster/ipfs-cluster/issues/1196)
-* Add `--name` and `--expire` to `ipfs-cluster-ctl pin update` | [ipfs-cluster/ipfs-cluster#1184](https://github.com/ipfs-cluster/ipfs-cluster/issues/1184) | [ipfs-cluster/ipfs-cluster#1195](https://github.com/ipfs-cluster/ipfs-cluster/issues/1195)
-* Failover client integrated in `ipfs-cluster-ctl` | [ipfs-cluster/ipfs-cluster#1222](https://github.com/ipfs-cluster/ipfs-cluster/issues/1222) | [ipfs-cluster/ipfs-cluster#1250](https://github.com/ipfs-cluster/ipfs-cluster/issues/1250)
-* `ipfs-cluster-ctl health alerts` lists the last expired metrics seen by the peer | [ipfs-cluster/ipfs-cluster#165](https://github.com/ipfs-cluster/ipfs-cluster/issues/165) | [ipfs-cluster/ipfs-cluster#978](https://github.com/ipfs-cluster/ipfs-cluster/issues/978)
-
-##### Bug fixes
-
-* IPFS Proxy: pin progress objects wrongly include a non-empty `Hash` key | [ipfs-cluster/ipfs-cluster#1286](https://github.com/ipfs-cluster/ipfs-cluster/issues/1286) | [ipfs-cluster/ipfs-cluster#1287](https://github.com/ipfs-cluster/ipfs-cluster/issues/1287)
-* CRDT: Fix pubsub peer validation check | [ipfs-cluster/ipfs-cluster#1288](https://github.com/ipfs-cluster/ipfs-cluster/issues/1288)
-
-##### Other changes
-
-* Typos | [ipfs-cluster/ipfs-cluster#1181](https://github.com/ipfs-cluster/ipfs-cluster/issues/1181) | [ipfs-cluster/ipfs-cluster#1183](https://github.com/ipfs-cluster/ipfs-cluster/issues/1183)
-* Reduce default pin_timeout to 2 minutes | [ipfs-cluster/ipfs-cluster#1160](https://github.com/ipfs-cluster/ipfs-cluster/issues/1160)
-* Dependency upgrades | [ipfs-cluster/ipfs-cluster#1125](https://github.com/ipfs-cluster/ipfs-cluster/issues/1125) | [ipfs-cluster/ipfs-cluster#1238](https://github.com/ipfs-cluster/ipfs-cluster/issues/1238)
-* Remove `secio` security transport | [ipfs-cluster/ipfs-cluster#1214](https://github.com/ipfs-cluster/ipfs-cluster/issues/1214) | [ipfs-cluster/ipfs-cluster#1227](https://github.com/ipfs-cluster/ipfs-cluster/issues/1227)
-
-#### Upgrading notices
-
-##### Configuration changes
-
-The new default for `ipfs_http.pin_timeout` is `2m`. This is the time that
-needs to pass for a pin operation to error, and it starts counting from the
-last block pinned.
-
-##### REST API
-
-A new `/health/alerts` endpoint exists to support `ipfs-cluster-ctl health alerts`.
-
-##### Go APIs
-
-The definition of `types.Alert` has changed. This type was not exposed to the
-outside before. The affected RPC endpoints are only used locally.
-
-##### Other
-
-Nothing.
-
---
-
-### v0.13.0 - 2020-05-19
-
-IPFS Cluster v0.13.0 provides many improvements and bugfixes on multiple
-fronts.
-
-First, this release takes advantage of all the major features that have
-landed in libp2p and IPFS land (via ipfs-lite) during the last few months,
-including the dual-DHT and faster block exchange with Bitswap. On the
-downside, **QUIC support for private networks has been temporarily dropped**,
-which means we cannot use that transport for Cluster peers anymore. We have
-disabled QUIC for the time being, until private network support is re-added.
-
-Secondly, `go-ds-crdt` has received major improvements since the last
-version, resolving some bugs and increasing performance.
Because of this, **cluster -peers in CRDT mode running older versions will be unable to process updates -sent by peers running the newer versions**. This means, for example, that -followers on v0.12.1 and earlier will be unable to receive updates from -trusted peers on v0.13.0 and later. However, peers running v0.13.0 will still -understand updates sent from older peers. - -Finally, we have resolved some bugs and added a few very useful features, -which are detailed in the list below. We recommend everyone to upgrade as soon -as possible for a swifter experience with IPFS Cluster. - -#### List of changes - -##### Features - -* Support multiple listen interfaces | [ipfs-cluster/ipfs-cluster#1000](https://github.com/ipfs-cluster/ipfs-cluster/issues/1000) | [ipfs-cluster/ipfs-cluster#1010](https://github.com/ipfs-cluster/ipfs-cluster/issues/1010) | [ipfs-cluster/ipfs-cluster#1002](https://github.com/ipfs-cluster/ipfs-cluster/issues/1002) -* Show expiration information in `ipfs-cluster-ctl pin ls` | [ipfs-cluster/ipfs-cluster#998](https://github.com/ipfs-cluster/ipfs-cluster/issues/998) | [ipfs-cluster/ipfs-cluster#1024](https://github.com/ipfs-cluster/ipfs-cluster/issues/1024) | [ipfs-cluster/ipfs-cluster#1066](https://github.com/ipfs-cluster/ipfs-cluster/issues/1066) -* Show pin names in `ipfs-cluster-ctl status` (and API endpoint) | [ipfs-cluster/ipfs-cluster#1129](https://github.com/ipfs-cluster/ipfs-cluster/issues/1129) -* Allow updating expiration when doing `pin update` | [ipfs-cluster/ipfs-cluster#996](https://github.com/ipfs-cluster/ipfs-cluster/issues/996) | [ipfs-cluster/ipfs-cluster#1065](https://github.com/ipfs-cluster/ipfs-cluster/issues/1065) | [ipfs-cluster/ipfs-cluster#1013](https://github.com/ipfs-cluster/ipfs-cluster/issues/1013) -* Add "direct" pin mode. 
Cluster supports direct pins | [ipfs-cluster/ipfs-cluster#1009](https://github.com/ipfs-cluster/ipfs-cluster/issues/1009) | [ipfs-cluster/ipfs-cluster#1083](https://github.com/ipfs-cluster/ipfs-cluster/issues/1083) -* Better badger defaults for less memory usage | [ipfs-cluster/ipfs-cluster#1027](https://github.com/ipfs-cluster/ipfs-cluster/issues/1027) -* Print configuration (without sensitive values) when enabling debug for `ipfs-cluster-service` | [ipfs-cluster/ipfs-cluster#937](https://github.com/ipfs-cluster/ipfs-cluster/issues/937) | [ipfs-cluster/ipfs-cluster#959](https://github.com/ipfs-cluster/ipfs-cluster/issues/959) -* `ipfs-cluster-follow list` works fully offline (without needing IPFS to run) | [ipfs-cluster/ipfs-cluster#1129](https://github.com/ipfs-cluster/ipfs-cluster/issues/1129) - -##### Bug fixes - -* Fix adding when using CidV1 | [ipfs-cluster/ipfs-cluster#1016](https://github.com/ipfs-cluster/ipfs-cluster/issues/1016) | [ipfs-cluster/ipfs-cluster#1006](https://github.com/ipfs-cluster/ipfs-cluster/issues/1006) -* Fix too many requests error on `ipfs-cluster-follow list` | [ipfs-cluster/ipfs-cluster#1013](https://github.com/ipfs-cluster/ipfs-cluster/issues/1013) | [ipfs-cluster/ipfs-cluster#1129](https://github.com/ipfs-cluster/ipfs-cluster/issues/1129) -* Fix repinning not working reliably on collaborative clusters with replication factors set | [ipfs-cluster/ipfs-cluster#1064](https://github.com/ipfs-cluster/ipfs-cluster/issues/1064) | [ipfs-cluster/ipfs-cluster#1127](https://github.com/ipfs-cluster/ipfs-cluster/issues/1127) -* Fix underflow in repo size metric | [ipfs-cluster/ipfs-cluster#1120](https://github.com/ipfs-cluster/ipfs-cluster/issues/1120) | [ipfs-cluster/ipfs-cluster#1121](https://github.com/ipfs-cluster/ipfs-cluster/issues/1121) -* Fix adding keeps going if all BlockPut failed | [ipfs-cluster/ipfs-cluster#1131](https://github.com/ipfs-cluster/ipfs-cluster/issues/1131) - -##### Other changes - -* Update license files | [ipfs-cluster/ipfs-cluster#1014](https://github.com/ipfs-cluster/ipfs-cluster/issues/1014) -* Fix typos | [ipfs-cluster/ipfs-cluster#999](https://github.com/ipfs-cluster/ipfs-cluster/issues/999) | [ipfs-cluster/ipfs-cluster#1001](https://github.com/ipfs-cluster/ipfs-cluster/issues/1001) | [ipfs-cluster/ipfs-cluster#1075](https://github.com/ipfs-cluster/ipfs-cluster/issues/1075) -* Lots of dependency upgrades | [ipfs-cluster/ipfs-cluster#1020](https://github.com/ipfs-cluster/ipfs-cluster/issues/1020) | [ipfs-cluster/ipfs-cluster#1051](https://github.com/ipfs-cluster/ipfs-cluster/issues/1051) | [ipfs-cluster/ipfs-cluster#1073](https://github.com/ipfs-cluster/ipfs-cluster/issues/1073) | [ipfs-cluster/ipfs-cluster#1074](https://github.com/ipfs-cluster/ipfs-cluster/issues/1074) -* Adjust codecov thresholds | [ipfs-cluster/ipfs-cluster#1022](https://github.com/ipfs-cluster/ipfs-cluster/issues/1022) -* Fix all staticcheck warnings | [ipfs-cluster/ipfs-cluster#1071](https://github.com/ipfs-cluster/ipfs-cluster/issues/1071) | [ipfs-cluster/ipfs-cluster#1128](https://github.com/ipfs-cluster/ipfs-cluster/issues/1128) -* Detach RPC protocol version from Cluster releases | [ipfs-cluster/ipfs-cluster#1093](https://github.com/ipfs-cluster/ipfs-cluster/issues/1093) -* Trim paths on Makefile build command | [ipfs-cluster/ipfs-cluster#1012](https://github.com/ipfs-cluster/ipfs-cluster/issues/1012) | [ipfs-cluster/ipfs-cluster#1015](https://github.com/ipfs-cluster/ipfs-cluster/issues/1015) -* Add contexts to HTTP requests in the client | 
[ipfs-cluster/ipfs-cluster#1019](https://github.com/ipfs-cluster/ipfs-cluster/issues/1019)
-
-
-#### Upgrading notices
-
-##### Configuration changes
-
-* The default options in `datastore/badger/badger_options` have changed
-  and should reduce memory usage significantly:
-  * `truncate` is set to `true`.
-  * `value_log_loading_mode` is set to `0` (FileIO).
-  * `max_table_size` is set to `16777216`.
-* `api/ipfsproxy/listen_multiaddress`, `api/rest/http_listen_multiaddress` and
-  `api/rest/libp2p_listen_multiaddress` now support an array of multiaddresses
-  rather than a single one (a single one still works). This allows, for
-  example, listening on both IPv6 and IPv4 interfaces.
-
-##### REST API
-
-The `POST /pins/{hash}` endpoint (`pin add`) now supports a `mode` query
-parameter that can be set to `recursive` or `direct`. The responses including
-Pin objects (`GET /allocations`, `pin ls`) include a `mode` field set
-accordingly.
-
-The IPFS proxy `/pin/add` endpoint now supports `recursive=false` for direct pins.
-
-The `/pins` endpoint now returns `GlobalPinInfo` objects that include a `name`
-field for the pin name. The same objects no longer embed redundant
-information for each peer in the `peer_map`: `cid` and `peer` are omitted.
-
-##### Go APIs
-
-The `ipfscluster.IPFSConnector` component signature for `PinLsCid` has changed
-and receives a full `api.Pin` object, rather than a Cid. The RPC endpoint has
-changed accordingly, but since this is a private endpoint, it does not affect
-interoperability between peers.
-
-The `api.GlobalPinInfo` type now maps every peer to a new `api.PinInfoShort`
-type, which does not include any redundant information (Cid, Peer), as the
-`PinInfo` type did. The `Cid` is available as a top-level field. The `Peer`
-corresponds to the map key. A new `Name` top-level field contains the Pin
-Name.
-
-The `api.PinInfo` type also includes a new `Name` field.
-
-##### Other
-
-From this release, IPFS Cluster peers running different minor versions will
-remain compatible at the RPC layer (before, all cluster peers had to run
-precisely the same minor version to be able to communicate). This means that
-v0.13.0 peers are still compatible with v0.12.x peers (with the caveat for
-CRDT-peers mentioned at the top). `ipfs-cluster-ctl --enc=json id` shows
-information about the RPC protocol used.
-
-Since the QUIC libp2p transport does not support private networks at this
-point, it has been disabled, even though we keep the QUIC endpoint among the
-default listeners.
-
---
-
-### v0.12.1 - 2019-12-24
-
-IPFS Cluster v0.12.1 is a maintenance release fixing issues in `ipfs-cluster-follow`.
-
-#### List of changes
-
-##### Bug fixes
-
-* follow: the `info` command panics when ipfs is offline | [ipfs-cluster/ipfs-cluster#991](https://github.com/ipfs-cluster/ipfs-cluster/issues/991) | [ipfs-cluster/ipfs-cluster#993](https://github.com/ipfs-cluster/ipfs-cluster/issues/993)
-* follow: the gateway url is not set on Run&Init command | [ipfs-cluster/ipfs-cluster#992](https://github.com/ipfs-cluster/ipfs-cluster/issues/992) | [ipfs-cluster/ipfs-cluster#993](https://github.com/ipfs-cluster/ipfs-cluster/issues/993)
-* follow: disallow trusted peers for RepoGCLocal operation | [ipfs-cluster/ipfs-cluster#993](https://github.com/ipfs-cluster/ipfs-cluster/issues/993)
-
---
-
-### v0.12.0 - 2019-12-20
-
-IPFS Cluster v0.12.0 brings many useful features and makes it very easy to
-create and participate in collaborative clusters.
-
-The new `ipfs-cluster-follow` command provides a very simple way of joining
-one or several clusters as a follower (a peer without permissions to pin or
-unpin anything). `ipfs-cluster-follow` peers are initialized using a
-configuration "template" distributed over IPFS or HTTP, which is then
-optimized and secured.
-
-`ipfs-cluster-follow` is limited in scope and attempts to be very
-straightforward to use. `ipfs-cluster-service` continues to offer power users
-the full set of options for running peers of all kinds (followers or not).
-
-We have additionally added many new features: pinning with an expiration
-date, the ability to trigger garbage collection on IPFS daemons, improvements
-to NAT traversal and connectivity, etc.
-
-Users planning to set up public collaborative clusters should upgrade to this
-release, which improves the user experience and comes with documentation on
-how to set up and join these clusters
-(https://ipfscluster.io/documentation/collaborative).
-
-
-#### List of changes
-
-##### Features
-
-* cluster: `--local` flag for add: adds only to the local peer instead of multiple destinations | [ipfs-cluster/ipfs-cluster#848](https://github.com/ipfs-cluster/ipfs-cluster/issues/848) | [ipfs-cluster/ipfs-cluster#907](https://github.com/ipfs-cluster/ipfs-cluster/issues/907)
-* cluster: the `RecoverAll` operation can trigger the recover operation in all peers.
-* ipfsproxy: log HTTP requests | [ipfs-cluster/ipfs-cluster#574](https://github.com/ipfs-cluster/ipfs-cluster/issues/574) | [ipfs-cluster/ipfs-cluster#915](https://github.com/ipfs-cluster/ipfs-cluster/issues/915)
-* api: `health/metrics` returns the list of available metrics | [ipfs-cluster/ipfs-cluster#374](https://github.com/ipfs-cluster/ipfs-cluster/issues/374) | [ipfs-cluster/ipfs-cluster#924](https://github.com/ipfs-cluster/ipfs-cluster/issues/924)
-* service: `init --randomports` sets random, unused ports on initialization | [ipfs-cluster/ipfs-cluster#794](https://github.com/ipfs-cluster/ipfs-cluster/issues/794) | [ipfs-cluster/ipfs-cluster#926](https://github.com/ipfs-cluster/ipfs-cluster/issues/926)
-* cluster: support pin expiration | [ipfs-cluster/ipfs-cluster#481](https://github.com/ipfs-cluster/ipfs-cluster/issues/481) | [ipfs-cluster/ipfs-cluster#923](https://github.com/ipfs-cluster/ipfs-cluster/issues/923)
-* cluster: quic, autorelay, autonat, TLS handshake support | [ipfs-cluster/ipfs-cluster#614](https://github.com/ipfs-cluster/ipfs-cluster/issues/614) | [ipfs-cluster/ipfs-cluster#932](https://github.com/ipfs-cluster/ipfs-cluster/issues/932) | [ipfs-cluster/ipfs-cluster#973](https://github.com/ipfs-cluster/ipfs-cluster/issues/973) | [ipfs-cluster/ipfs-cluster#975](https://github.com/ipfs-cluster/ipfs-cluster/issues/975)
-* cluster: `health/graph` improvements | [ipfs-cluster/ipfs-cluster#800](https://github.com/ipfs-cluster/ipfs-cluster/issues/800) | [ipfs-cluster/ipfs-cluster#925](https://github.com/ipfs-cluster/ipfs-cluster/issues/925) | [ipfs-cluster/ipfs-cluster#954](https://github.com/ipfs-cluster/ipfs-cluster/issues/954)
-* cluster: `ipfs-cluster-ctl ipfs gc` triggers GC on cluster peers | [ipfs-cluster/ipfs-cluster#628](https://github.com/ipfs-cluster/ipfs-cluster/issues/628) | [ipfs-cluster/ipfs-cluster#777](https://github.com/ipfs-cluster/ipfs-cluster/issues/777) | [ipfs-cluster/ipfs-cluster#739](https://github.com/ipfs-cluster/ipfs-cluster/issues/739) | [ipfs-cluster/ipfs-cluster#945](https://github.com/ipfs-cluster/ipfs-cluster/issues/945) |
[ipfs-cluster/ipfs-cluster#961](https://github.com/ipfs-cluster/ipfs-cluster/issues/961)
-* cluster: advertise external addresses as soon as known | [ipfs-cluster/ipfs-cluster#949](https://github.com/ipfs-cluster/ipfs-cluster/issues/949) | [ipfs-cluster/ipfs-cluster#950](https://github.com/ipfs-cluster/ipfs-cluster/issues/950)
-* cluster: skip contacting remote-allocations (peers) for recover/status operations | [ipfs-cluster/ipfs-cluster#935](https://github.com/ipfs-cluster/ipfs-cluster/issues/935) | [ipfs-cluster/ipfs-cluster#947](https://github.com/ipfs-cluster/ipfs-cluster/issues/947)
-* restapi: support listening on a unix socket | [ipfs-cluster/ipfs-cluster#969](https://github.com/ipfs-cluster/ipfs-cluster/issues/969)
-* config: support `peer_addresses` | [ipfs-cluster/ipfs-cluster#791](https://github.com/ipfs-cluster/ipfs-cluster/issues/791)
-* pintracker: remove `mappintracker`. Upgrade `stateless` for prime time | [ipfs-cluster/ipfs-cluster#944](https://github.com/ipfs-cluster/ipfs-cluster/issues/944) | [ipfs-cluster/ipfs-cluster#929](https://github.com/ipfs-cluster/ipfs-cluster/issues/929)
-* service: `--loglevel` supports specifying levels for multiple components | [ipfs-cluster/ipfs-cluster#938](https://github.com/ipfs-cluster/ipfs-cluster/issues/938) | [ipfs-cluster/ipfs-cluster#960](https://github.com/ipfs-cluster/ipfs-cluster/issues/960)
-* ipfs-cluster-follow: a new CLI tool to run follower cluster peers | [ipfs-cluster/ipfs-cluster#976](https://github.com/ipfs-cluster/ipfs-cluster/issues/976)
-
-##### Bug fixes
-
-* restapi/client: Fix out-of-bounds error on load-balanced client | [ipfs-cluster/ipfs-cluster#951](https://github.com/ipfs-cluster/ipfs-cluster/issues/951)
-* service: disable libp2p restapi on CRDT clusters | [ipfs-cluster/ipfs-cluster#968](https://github.com/ipfs-cluster/ipfs-cluster/issues/968)
-* observations: Fix pprof index links | [ipfs-cluster/ipfs-cluster#965](https://github.com/ipfs-cluster/ipfs-cluster/issues/965)
-
-##### Other changes
-
-* Spelling fix in changelog | [ipfs-cluster/ipfs-cluster#920](https://github.com/ipfs-cluster/ipfs-cluster/issues/920)
-* Tests: multiple fixes | [ipfs-cluster/ipfs-cluster#919](https://github.com/ipfs-cluster/ipfs-cluster/issues/919) | [ipfs-cluster/ipfs-cluster#943](https://github.com/ipfs-cluster/ipfs-cluster/issues/943) | [ipfs-cluster/ipfs-cluster#953](https://github.com/ipfs-cluster/ipfs-cluster/issues/953) | [ipfs-cluster/ipfs-cluster#956](https://github.com/ipfs-cluster/ipfs-cluster/issues/956)
-* Stateless tracker: increase default queue size | [ipfs-cluster/ipfs-cluster#377](https://github.com/ipfs-cluster/ipfs-cluster/issues/377) | [ipfs-cluster/ipfs-cluster#917](https://github.com/ipfs-cluster/ipfs-cluster/issues/917)
-* Upgrade to Go 1.13 | [ipfs-cluster/ipfs-cluster#934](https://github.com/ipfs-cluster/ipfs-cluster/issues/934)
-* Dockerfiles: improvements | [ipfs-cluster/ipfs-cluster#946](https://github.com/ipfs-cluster/ipfs-cluster/issues/946)
-* cluster: support multiple informers on initialization | [ipfs-cluster/ipfs-cluster#940](https://github.com/ipfs-cluster/ipfs-cluster/issues/940) | [ipfs-cluster/ipfs-cluster#962](https://github.com/ipfs-cluster/ipfs-cluster/issues/962)
-* cmdutils: move some methods to cmdutils | [ipfs-cluster/ipfs-cluster#970](https://github.com/ipfs-cluster/ipfs-cluster/issues/970)
-
-
-#### Upgrading notices
-
-
-##### Configuration changes
-
-* `cluster` section:
-  * A new `peer_addresses` key allows specifying additional peer addresses in
-    the configuration (similar to the `peerstore` file).
-    These are treated as libp2p bootstrap addresses (not to be confused with
-    the Raft bootstrap process). This setting is mostly useful for CRDT
-    collaborative clusters, as template configurations can be distributed
-    including bootstrap peers (usually the same as the trusted peers). The
-    values are the full multiaddresses of these peers:
-    `/ip4/x.x.x.x/tcp/1234/p2p/Qmxxx...` (see the sketch after this list).
-  * `listen_multiaddress` can now be set to an array providing multiple
-    listen multiaddresses, the new defaults being `/tcp/9096` and
-    `/udp/9096/quic`.
-  * `enable_relay_hop` (true by default) lets the cluster peer act as a relay
-    for other cluster peers behind NATs. This is only for the Cluster
-    network. As a reminder, while this setting is problematic on IPFS (due to
-    the amount of traffic the HOP peers start relaying), cluster peer
-    networks are smaller and do not move huge amounts of content around.
-  * The `ipfs_sync_interval` option disappears, as the stateless tracker does
-    not keep a state that can lose synchronization with IPFS.
-* `ipfshttp` section:
-  * A new `repogc_timeout` key specifies the timeout for garbage collection
-    operations on IPFS. It is set to 24h by default.
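-
-As a sketch (the address below is the placeholder from the bullet above, not
-a real peer), a template configuration could ship its bootstrap peers like
-this:
-
-```json
-"cluster": {
-  "peer_addresses": [
-    "/ip4/x.x.x.x/tcp/1234/p2p/Qmxxx..."
-  ]
-}
-```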
-
-
-##### REST API
-
-The `pin/add` and `add` endpoints support two new query parameters to indicate
-pin expirations: `expire-at` (with an expected value in RFC3339 format) and
-`expire-in` (with an expected value in Go's time format, i.e. `12h`).
-`expire-at` takes preference.
-
-A new `/ipfs/gc` endpoint has been added to trigger GC in the IPFS daemons
-attached to Cluster peers. It supports the `local` parameter to limit the
-operation to the local peer.
-
-
-##### Go APIs
-
-There are a few changes to the Go APIs. The `RepoGC` and `RepoGCLocal`
-methods have been added, the `mappintracker` module has been removed and the
-`stateless` module has changed the signature of its constructor.
-
-##### Other
-
-The IPFS Proxy now intercepts the `/repo/gc` endpoint and triggers a
-cluster-wide GC operation.
-
-The `ipfs-cluster-follow` application is an easy-to-use way to run one or
-several cluster peers in follower mode using remote configuration
-templates. It is fully independent from `ipfs-cluster-service` and
-`ipfs-cluster-ctl` and acts as both a peer (`run` subcommand) and a client
-(`list` subcommand). The purpose is to facilitate IPFS Cluster usage without
-having to deal with configuration, flags, etc.
-
-That said, the configuration layout and folder are the same for both
-`ipfs-cluster-service` and `ipfs-cluster-follow`, and they can be run one in
-place of the other. In the same way, the remote-source configurations usually
-used for `ipfs-cluster-follow` can be replaced with the local ones usually
-used by `ipfs-cluster-service`.
-
-The removal of the `map pintracker` has resulted in a simplification of some
-operations. `StateSync` (regularly run every `state_sync_interval`) no longer
-triggers repinnings, but only checks for pin expirations. `RecoverAllLocal`
-(regularly run every `pin_recover_interval`) will now trigger repinnings when
-necessary (i.e. when things that were expected to be on IPFS are not). On
-very large pinsets, this operation can trigger a memory spike as the full
-recursive pinset from IPFS is requested and loaded in memory (before, this
-happened on `StateSync`).
-
---
-
-### v0.11.0 - 2019-09-13
-
-#### Summary
-
-IPFS Cluster v0.11.0 is the biggest release in the project's history. Its
-main feature is the introduction of the new CRDT "consensus"
-component. Leveraging Pubsub, Bitswap and the DHT, and using CRDTs, cluster
-peers can track the global pinset without needing to be online or worry
-about the rest of the peers, as happens with the original Raft approach.
-
-The CRDT component brings a lot of features with it, like RPC authorization,
-which effectively lets cluster peers run in clusters where only a trusted
-subset of nodes can access peer endpoints and make modifications to the
-pinset.
-
-We have additionally taken many steps to improve configuration management of
-peers, separating the peer identity from the rest of the configuration and
-allowing the use of remote configurations fetched from an HTTP URL (which may
-well be the local IPFS gateway). This allows cluster administrators to
-provide the configurations needed for any peers to join a cluster as
-followers.
-
-The CRDT arrival incorporates a large number of improvements in peerset
-management, bootstrapping, connection management and auto-recovery of peers
-after network disconnections. We have improved the peer monitoring system,
-added support for efficient Pin-Update-based pinning, reworked timeout control
-for pinning and fixed a number of annoying bugs.
-
-This release is mostly backwards compatible with the previous one and
-clusters should keep working with the same configurations, but users should
-have a look at the sections below and read the updated documentation, as a
-number of changes have been introduced to support both consensus components.
-
-Consensus selection happens during initialization of the configuration (see
-the configuration changes below). Migrating the pinset is done with
-`state export` (with Raft configured), followed by `state import` (with CRDT
-configured). Note that all peers should be configured with the same consensus
-type.
- - -#### List of changes - -##### Features - - -* crdt: introduce crdt-based consensus component | [ipfs-cluster/ipfs-cluster#685](https://github.com/ipfs-cluster/ipfs-cluster/issues/685) | [ipfs-cluster/ipfs-cluster#804](https://github.com/ipfs-cluster/ipfs-cluster/issues/804) | [ipfs-cluster/ipfs-cluster#787](https://github.com/ipfs-cluster/ipfs-cluster/issues/787) | [ipfs-cluster/ipfs-cluster#798](https://github.com/ipfs-cluster/ipfs-cluster/issues/798) | [ipfs-cluster/ipfs-cluster#805](https://github.com/ipfs-cluster/ipfs-cluster/issues/805) | [ipfs-cluster/ipfs-cluster#811](https://github.com/ipfs-cluster/ipfs-cluster/issues/811) | [ipfs-cluster/ipfs-cluster#816](https://github.com/ipfs-cluster/ipfs-cluster/issues/816) | [ipfs-cluster/ipfs-cluster#820](https://github.com/ipfs-cluster/ipfs-cluster/issues/820) | [ipfs-cluster/ipfs-cluster#856](https://github.com/ipfs-cluster/ipfs-cluster/issues/856) | [ipfs-cluster/ipfs-cluster#857](https://github.com/ipfs-cluster/ipfs-cluster/issues/857) | [ipfs-cluster/ipfs-cluster#834](https://github.com/ipfs-cluster/ipfs-cluster/issues/834) | [ipfs-cluster/ipfs-cluster#856](https://github.com/ipfs-cluster/ipfs-cluster/issues/856) | [ipfs-cluster/ipfs-cluster#867](https://github.com/ipfs-cluster/ipfs-cluster/issues/867) | [ipfs-cluster/ipfs-cluster#874](https://github.com/ipfs-cluster/ipfs-cluster/issues/874) | [ipfs-cluster/ipfs-cluster#885](https://github.com/ipfs-cluster/ipfs-cluster/issues/885) | [ipfs-cluster/ipfs-cluster#899](https://github.com/ipfs-cluster/ipfs-cluster/issues/899) | [ipfs-cluster/ipfs-cluster#906](https://github.com/ipfs-cluster/ipfs-cluster/issues/906) | [ipfs-cluster/ipfs-cluster#918](https://github.com/ipfs-cluster/ipfs-cluster/issues/918) -* configs: separate identity and configuration | [ipfs-cluster/ipfs-cluster#760](https://github.com/ipfs-cluster/ipfs-cluster/issues/760) | [ipfs-cluster/ipfs-cluster#766](https://github.com/ipfs-cluster/ipfs-cluster/issues/766) | [ipfs-cluster/ipfs-cluster#780](https://github.com/ipfs-cluster/ipfs-cluster/issues/780) -* configs: support running with a remote `service.json` (http) | [ipfs-cluster/ipfs-cluster#868](https://github.com/ipfs-cluster/ipfs-cluster/issues/868) -* configs: support a `follower_mode` option | [ipfs-cluster/ipfs-cluster#803](https://github.com/ipfs-cluster/ipfs-cluster/issues/803) | [ipfs-cluster/ipfs-cluster#864](https://github.com/ipfs-cluster/ipfs-cluster/issues/864) -* service/configs: do not load API components if no config present | [ipfs-cluster/ipfs-cluster#452](https://github.com/ipfs-cluster/ipfs-cluster/issues/452) | [ipfs-cluster/ipfs-cluster#836](https://github.com/ipfs-cluster/ipfs-cluster/issues/836) -* service: add `ipfs-cluster-service init --peers` flag to initialize with given peers | [ipfs-cluster/ipfs-cluster#835](https://github.com/ipfs-cluster/ipfs-cluster/issues/835) | [ipfs-cluster/ipfs-cluster#839](https://github.com/ipfs-cluster/ipfs-cluster/issues/839) | [ipfs-cluster/ipfs-cluster#870](https://github.com/ipfs-cluster/ipfs-cluster/issues/870) -* cluster: RPC auth: block rpc endpoints for non trusted peers | [ipfs-cluster/ipfs-cluster#775](https://github.com/ipfs-cluster/ipfs-cluster/issues/775) | [ipfs-cluster/ipfs-cluster#710](https://github.com/ipfs-cluster/ipfs-cluster/issues/710) | [ipfs-cluster/ipfs-cluster#666](https://github.com/ipfs-cluster/ipfs-cluster/issues/666) | [ipfs-cluster/ipfs-cluster#773](https://github.com/ipfs-cluster/ipfs-cluster/issues/773) | 
[ipfs-cluster/ipfs-cluster#905](https://github.com/ipfs-cluster/ipfs-cluster/issues/905) -* cluster: introduce connection manager | [ipfs-cluster/ipfs-cluster#791](https://github.com/ipfs-cluster/ipfs-cluster/issues/791) -* cluster: support new `PinUpdate` option for new pins | [ipfs-cluster/ipfs-cluster#869](https://github.com/ipfs-cluster/ipfs-cluster/issues/869) | [ipfs-cluster/ipfs-cluster#732](https://github.com/ipfs-cluster/ipfs-cluster/issues/732) -* cluster: trigger `Recover` automatically on a configurable interval | [ipfs-cluster/ipfs-cluster#831](https://github.com/ipfs-cluster/ipfs-cluster/issues/831) | [ipfs-cluster/ipfs-cluster#887](https://github.com/ipfs-cluster/ipfs-cluster/issues/887) -* cluster: enable mDNS discovery for peers | [ipfs-cluster/ipfs-cluster#882](https://github.com/ipfs-cluster/ipfs-cluster/issues/882) | [ipfs-cluster/ipfs-cluster#900](https://github.com/ipfs-cluster/ipfs-cluster/issues/900) -* IPFS Proxy: Support `pin/update` | [ipfs-cluster/ipfs-cluster#732](https://github.com/ipfs-cluster/ipfs-cluster/issues/732) | [ipfs-cluster/ipfs-cluster#768](https://github.com/ipfs-cluster/ipfs-cluster/issues/768) | [ipfs-cluster/ipfs-cluster#887](https://github.com/ipfs-cluster/ipfs-cluster/issues/887) -* monitor: Accrual failure detection. Leaderless re-pinning | [ipfs-cluster/ipfs-cluster#413](https://github.com/ipfs-cluster/ipfs-cluster/issues/413) | [ipfs-cluster/ipfs-cluster#713](https://github.com/ipfs-cluster/ipfs-cluster/issues/713) | [ipfs-cluster/ipfs-cluster#714](https://github.com/ipfs-cluster/ipfs-cluster/issues/714) | [ipfs-cluster/ipfs-cluster#812](https://github.com/ipfs-cluster/ipfs-cluster/issues/812) | [ipfs-cluster/ipfs-cluster#813](https://github.com/ipfs-cluster/ipfs-cluster/issues/813) | [ipfs-cluster/ipfs-cluster#814](https://github.com/ipfs-cluster/ipfs-cluster/issues/814) | [ipfs-cluster/ipfs-cluster#815](https://github.com/ipfs-cluster/ipfs-cluster/issues/815) -* datastore: Expose badger configuration | [ipfs-cluster/ipfs-cluster#771](https://github.com/ipfs-cluster/ipfs-cluster/issues/771) | [ipfs-cluster/ipfs-cluster#776](https://github.com/ipfs-cluster/ipfs-cluster/issues/776) -* IPFSConnector: pin timeout start counting from last received block | [ipfs-cluster/ipfs-cluster#497](https://github.com/ipfs-cluster/ipfs-cluster/issues/497) | [ipfs-cluster/ipfs-cluster#738](https://github.com/ipfs-cluster/ipfs-cluster/issues/738) -* IPFSConnector: remove pin method options | [ipfs-cluster/ipfs-cluster#875](https://github.com/ipfs-cluster/ipfs-cluster/issues/875) -* IPFSConnector: `unpin_disable` removes the ability to unpin anything from ipfs (experimental) | [ipfs-cluster/ipfs-cluster#793](https://github.com/ipfs-cluster/ipfs-cluster/issues/793) | [ipfs-cluster/ipfs-cluster#832](https://github.com/ipfs-cluster/ipfs-cluster/issues/832) -* REST API Client: Load-balancing Go client | [ipfs-cluster/ipfs-cluster#448](https://github.com/ipfs-cluster/ipfs-cluster/issues/448) | [ipfs-cluster/ipfs-cluster#737](https://github.com/ipfs-cluster/ipfs-cluster/issues/737) -* REST API: Return allocation objects on pin/unpin | [ipfs-cluster/ipfs-cluster#843](https://github.com/ipfs-cluster/ipfs-cluster/issues/843) -* REST API: Support request logging | [ipfs-cluster/ipfs-cluster#574](https://github.com/ipfs-cluster/ipfs-cluster/issues/574) | [ipfs-cluster/ipfs-cluster#894](https://github.com/ipfs-cluster/ipfs-cluster/issues/894) -* Adder: improve error handling. 
Keep adding while at least one allocation works | [ipfs-cluster/ipfs-cluster#852](https://github.com/ipfs-cluster/ipfs-cluster/issues/852) | [ipfs-cluster/ipfs-cluster#871](https://github.com/ipfs-cluster/ipfs-cluster/issues/871) -* Adder: support user-given allocations for the `Add` operation | [ipfs-cluster/ipfs-cluster#761](https://github.com/ipfs-cluster/ipfs-cluster/issues/761) | [ipfs-cluster/ipfs-cluster#890](https://github.com/ipfs-cluster/ipfs-cluster/issues/890) -* ctl: support adding pin metadata | [ipfs-cluster/ipfs-cluster#670](https://github.com/ipfs-cluster/ipfs-cluster/issues/670) | [ipfs-cluster/ipfs-cluster#891](https://github.com/ipfs-cluster/ipfs-cluster/issues/891) - - -##### Bug fixes - -* REST API: Fix `/allocations` when filter unset | [ipfs-cluster/ipfs-cluster#762](https://github.com/ipfs-cluster/ipfs-cluster/issues/762) -* REST API: Fix DELETE returning 500 when pin does not exist | [ipfs-cluster/ipfs-cluster#742](https://github.com/ipfs-cluster/ipfs-cluster/issues/742) | [ipfs-cluster/ipfs-cluster#854](https://github.com/ipfs-cluster/ipfs-cluster/issues/854) -* REST API: Return JSON body on 404s | [ipfs-cluster/ipfs-cluster#657](https://github.com/ipfs-cluster/ipfs-cluster/issues/657) | [ipfs-cluster/ipfs-cluster#879](https://github.com/ipfs-cluster/ipfs-cluster/issues/879) -* service: connectivity fixes | [ipfs-cluster/ipfs-cluster#787](https://github.com/ipfs-cluster/ipfs-cluster/issues/787) | [ipfs-cluster/ipfs-cluster#792](https://github.com/ipfs-cluster/ipfs-cluster/issues/792) -* service: fix using `/dnsaddr` peers | [ipfs-cluster/ipfs-cluster#818](https://github.com/ipfs-cluster/ipfs-cluster/issues/818) -* service: reading empty lines on peerstore panics | [ipfs-cluster/ipfs-cluster#886](https://github.com/ipfs-cluster/ipfs-cluster/issues/886) -* service/ctl: fix parsing string lists | [ipfs-cluster/ipfs-cluster#876](https://github.com/ipfs-cluster/ipfs-cluster/issues/876) | [ipfs-cluster/ipfs-cluster#841](https://github.com/ipfs-cluster/ipfs-cluster/issues/841) -* IPFSConnector: `pin/ls` does handle base32 and base58 cids properly | [ipfs-cluster/ipfs-cluster#808](https://github.com/ipfs-cluster/ipfs-cluster/issues/808) [ipfs-cluster/ipfs-cluster#809](https://github.com/ipfs-cluster/ipfs-cluster/issues/809) -* configs: some config keys not matching ENV vars names | [ipfs-cluster/ipfs-cluster#837](https://github.com/ipfs-cluster/ipfs-cluster/issues/837) | [ipfs-cluster/ipfs-cluster#778](https://github.com/ipfs-cluster/ipfs-cluster/issues/778) -* raft: delete removed raft peers from peerstore | [ipfs-cluster/ipfs-cluster#840](https://github.com/ipfs-cluster/ipfs-cluster/issues/840) | [ipfs-cluster/ipfs-cluster#846](https://github.com/ipfs-cluster/ipfs-cluster/issues/846) -* cluster: peers forgotten after being down | [ipfs-cluster/ipfs-cluster#648](https://github.com/ipfs-cluster/ipfs-cluster/issues/648) | [ipfs-cluster/ipfs-cluster#860](https://github.com/ipfs-cluster/ipfs-cluster/issues/860) -* cluster: State sync should not keep tracking when queue is full | [ipfs-cluster/ipfs-cluster#377](https://github.com/ipfs-cluster/ipfs-cluster/issues/377) | [ipfs-cluster/ipfs-cluster#901](https://github.com/ipfs-cluster/ipfs-cluster/issues/901) -* cluster: avoid random order on peer lists and listen multiaddresses | [ipfs-cluster/ipfs-cluster#327](https://github.com/ipfs-cluster/ipfs-cluster/issues/327) | [ipfs-cluster/ipfs-cluster#878](https://github.com/ipfs-cluster/ipfs-cluster/issues/878) -* cluster: fix recover and allocation re-assignment to existing pins | 
[ipfs-cluster/ipfs-cluster#912](https://github.com/ipfs-cluster/ipfs-cluster/issues/912) | [ipfs-cluster/ipfs-cluster#888](https://github.com/ipfs-cluster/ipfs-cluster/issues/888) - -##### Other changes - -* cluster: Dependency updates | [ipfs-cluster/ipfs-cluster#769](https://github.com/ipfs-cluster/ipfs-cluster/issues/769) | [ipfs-cluster/ipfs-cluster#789](https://github.com/ipfs-cluster/ipfs-cluster/issues/789) | [ipfs-cluster/ipfs-cluster#795](https://github.com/ipfs-cluster/ipfs-cluster/issues/795) | [ipfs-cluster/ipfs-cluster#822](https://github.com/ipfs-cluster/ipfs-cluster/issues/822) | [ipfs-cluster/ipfs-cluster#823](https://github.com/ipfs-cluster/ipfs-cluster/issues/823) | [ipfs-cluster/ipfs-cluster#828](https://github.com/ipfs-cluster/ipfs-cluster/issues/828) | [ipfs-cluster/ipfs-cluster#830](https://github.com/ipfs-cluster/ipfs-cluster/issues/830) | [ipfs-cluster/ipfs-cluster#853](https://github.com/ipfs-cluster/ipfs-cluster/issues/853) | [ipfs-cluster/ipfs-cluster#839](https://github.com/ipfs-cluster/ipfs-cluster/issues/839) -* cluster: Set `[]peer.ID` as type for user allocations | [ipfs-cluster/ipfs-cluster#767](https://github.com/ipfs-cluster/ipfs-cluster/issues/767) -* cluster: RPC: Split services among components | [ipfs-cluster/ipfs-cluster#773](https://github.com/ipfs-cluster/ipfs-cluster/issues/773) -* cluster: Multiple improvements to tests | [ipfs-cluster/ipfs-cluster#360](https://github.com/ipfs-cluster/ipfs-cluster/issues/360) | [ipfs-cluster/ipfs-cluster#502](https://github.com/ipfs-cluster/ipfs-cluster/issues/502) | [ipfs-cluster/ipfs-cluster#779](https://github.com/ipfs-cluster/ipfs-cluster/issues/779) | [ipfs-cluster/ipfs-cluster#833](https://github.com/ipfs-cluster/ipfs-cluster/issues/833) | [ipfs-cluster/ipfs-cluster#863](https://github.com/ipfs-cluster/ipfs-cluster/issues/863) | [ipfs-cluster/ipfs-cluster#883](https://github.com/ipfs-cluster/ipfs-cluster/issues/883) | [ipfs-cluster/ipfs-cluster#884](https://github.com/ipfs-cluster/ipfs-cluster/issues/884) | [ipfs-cluster/ipfs-cluster#797](https://github.com/ipfs-cluster/ipfs-cluster/issues/797) | [ipfs-cluster/ipfs-cluster#892](https://github.com/ipfs-cluster/ipfs-cluster/issues/892) -* cluster: Remove Gx | [ipfs-cluster/ipfs-cluster#765](https://github.com/ipfs-cluster/ipfs-cluster/issues/765) | [ipfs-cluster/ipfs-cluster#781](https://github.com/ipfs-cluster/ipfs-cluster/issues/781) -* cluster: Use `/p2p/` instead of `/ipfs/` in multiaddresses | [ipfs-cluster/ipfs-cluster#431](https://github.com/ipfs-cluster/ipfs-cluster/issues/431) | [ipfs-cluster/ipfs-cluster#877](https://github.com/ipfs-cluster/ipfs-cluster/issues/877) -* cluster: consolidate parsing of pin options | [ipfs-cluster/ipfs-cluster#913](https://github.com/ipfs-cluster/ipfs-cluster/issues/913) -* REST API: Replace regexps with `strings.HasPrefix` | [ipfs-cluster/ipfs-cluster#806](https://github.com/ipfs-cluster/ipfs-cluster/issues/806) | [ipfs-cluster/ipfs-cluster#807](https://github.com/ipfs-cluster/ipfs-cluster/issues/807) -* docker: use GOPROXY to build containers | [ipfs-cluster/ipfs-cluster#872](https://github.com/ipfs-cluster/ipfs-cluster/issues/872) -* docker: support `IPFS_CLUSTER_CONSENSUS` flag and other improvements | [ipfs-cluster/ipfs-cluster#882](https://github.com/ipfs-cluster/ipfs-cluster/issues/882) -* ctl: increase space for peernames | [ipfs-cluster/ipfs-cluster#887](https://github.com/ipfs-cluster/ipfs-cluster/issues/887) -* ctl: improve replication factor 0 explanation | 
* `cluster` section:
  * A new, hidden `follower_mode` option has been introduced in the main `cluster` configuration section. When set, the cluster peer will provide clear errors when pinning or unpinning. This is a UI feature. The capacity of a cluster peer to pin/unpin depends on whether it is trusted by other peers, not on setting this hidden option.
  * A new `pin_recover_interval` option controls how often pins in error states are retried.
  * A new `mdns_interval` controls the time between mDNS broadcasts to discover other peers in the network. Setting it to 0 disables mDNS altogether (default is 10 seconds).
  * A new `connection_manager` object can be used to limit the number of connections kept by the libp2p host:

```js
"connection_manager": {
    "high_water": 400,
    "low_water": 100,
    "grace_period": "2m0s"
},
```

* `consensus` section:
  * Only one configuration object is allowed inside the `consensus` section, and it must be either the `crdt` or the `raft` one. The presence of one or another is used to autoselect the consensus component to be used when running the daemon or performing `ipfs-cluster-service state` operations. `ipfs-cluster-service init` receives an optional `--consensus` flag to select which one to produce. By default it is the `crdt`.
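As a minimal sketch, a CRDT-based `service.json` therefore carries only the `crdt` object under `consensus` (its specific options are omitted here):

```js
"consensus": {
  "crdt": {
    // crdt-specific options go here; a "raft" object must not be present alongside it
  }
}
```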
* `ipfs_connector/ipfshttp` section:
  * The `pin_timeout` in the `ipfshttp` section is now starting from the last block received. Thus it allows more flexibility for things which are pinning very slowly, but still pinning.
  * The `pin_method` option has been removed, as go-ipfs does not do a pin-global-lock anymore. Therefore `pin add` will be called directly, can be called multiple times in parallel and should be faster than the deprecated `refs -r` way.
  * The `ipfshttp` section has a new (hidden) `unpin_disable` option (boolean). The component will refuse to unpin anything from IPFS when enabled. It can be used as a failsafe option to make sure cluster peers never unpin content.

* `datastore` section:
  * The configuration has a new `datastore/badger` section, which is relevant when using the `crdt` consensus component. It allows full control of the [Badger configuration](https://godoc.org/github.com/dgraph-io/badger#Options), which is particularly important when running on systems with low memory:

```
 "datastore": {
   "badger": {
     "badger_options": {
       "dir": "",
       "value_dir": "",
       "sync_writes": true,
       "table_loading_mode": 2,
       "value_log_loading_mode": 2,
       "num_versions_to_keep": 1,
       "max_table_size": 67108864,
       "level_size_multiplier": 10,
       "max_levels": 7,
       "value_threshold": 32,
       "num_memtables": 5,
       "num_level_zero_tables": 5,
       "num_level_zero_tables_stall": 10,
       "level_one_size": 268435456,
       "value_log_file_size": 1073741823,
       "value_log_max_entries": 1000000,
       "num_compactors": 2,
       "compact_l_0_on_close": true,
       "read_only": false,
       "truncate": false
     }
   }
 }
```

* `pin_tracker/maptracker` section:
  * The `max_pin_queue_size` parameter has been hidden for default configurations and the default has been set to 1000000.

* `api/restapi` section:
  * A new `http_log_file` option allows redirecting the REST API logging to a file. Otherwise, it is logged as part of the regular log. Lines follow the Apache Common Log Format (CLF).

##### REST API

The `POST /pins/{cid}` and `DELETE /pins/{cid}` endpoints now return a pin object with `200 Success` rather than an empty `204 Accepted` response.

Using a nonexistent route will now correctly return a JSON object along with the 404 HTTP code, rather than text.

##### Go APIs

There have been some changes to Go APIs. Applications integrating Cluster directly will be affected by the new signatures of Pin/Unpin:

* The `Pin` and `Unpin` methods now return an object of `api.Pin` type, along with an error.
* The `Pin` method takes a CID and `PinOptions` rather than an `api.Pin` object wrapping those.
* A new `PinUpdate` method has been introduced.

Additionally:

* The Consensus Component interface has changed to accommodate peer-trust operations.
* The IPFSConnector Component interface `Pin` method has changed to take an `api.Pin` type.

##### Other

* The IPFS Proxy now hijacks the `/api/v0/pin/update` endpoint and makes a Cluster PinUpdate.
* `ipfs-cluster-service init` now takes a `--consensus` flag to select between `crdt` (default) and `raft`. Depending on the value, the generated configuration will have the relevant sections for each.
* The Dockerfiles have been updated to:
  * Support the `IPFS_CLUSTER_CONSENSUS` flag to determine which consensus to use for the automatic `init`.
  * No longer use the `IPFS_API` environment variable to do a `sed` replacement on the config, as `CLUSTER_IPFSHTTP_NODEMULTIADDRESS` is the canonical one to use.
  * No longer use `sed` replacement to set the APIs' listen IPs to `0.0.0.0` automatically, as this can be achieved with environment variables (`CLUSTER_RESTAPI_HTTPLISTENMULTIADDRESS` and `CLUSTER_IPFSPROXY_LISTENMULTIADDRESS`) and can be dangerous for containers running in `net=host` mode.
  * The `docker-compose.yml` has been updated and simplified to launch a CRDT 3-peer TEST cluster.
* Cluster now uses `/p2p/` instead of `/ipfs/` for libp2p multiaddresses by default, but both protocol IDs are equivalent and interchangeable.
* Pinning an already existing pin will re-submit it to the consensus layer in all cases, meaning that pins in error states will start pinning again (before, sometimes this was only possible using recover). Recover stays as a broadcast/sync operation to trigger pinning on errored items. As a reminder, pin is a consensus/async operation.
---

### v0.10.1 - 2019-04-10

#### Summary

This release is a maintenance release with a number of bug fixes and a couple of small features.

#### List of changes

##### Features

* Switch to go.mod | [ipfs-cluster/ipfs-cluster#706](https://github.com/ipfs-cluster/ipfs-cluster/issues/706) | [ipfs-cluster/ipfs-cluster#707](https://github.com/ipfs-cluster/ipfs-cluster/issues/707) | [ipfs-cluster/ipfs-cluster#708](https://github.com/ipfs-cluster/ipfs-cluster/issues/708)
* Remove basic monitor | [ipfs-cluster/ipfs-cluster#689](https://github.com/ipfs-cluster/ipfs-cluster/issues/689) | [ipfs-cluster/ipfs-cluster#726](https://github.com/ipfs-cluster/ipfs-cluster/issues/726)
* Support `nocopy` when adding URLs | [ipfs-cluster/ipfs-cluster#735](https://github.com/ipfs-cluster/ipfs-cluster/issues/735)

##### Bug fixes

* Mitigate long header attack | [ipfs-cluster/ipfs-cluster#636](https://github.com/ipfs-cluster/ipfs-cluster/issues/636) | [ipfs-cluster/ipfs-cluster#712](https://github.com/ipfs-cluster/ipfs-cluster/issues/712)
* Fix download link in README | [ipfs-cluster/ipfs-cluster#723](https://github.com/ipfs-cluster/ipfs-cluster/issues/723)
* Fix `peers ls` error when peers down | [ipfs-cluster/ipfs-cluster#715](https://github.com/ipfs-cluster/ipfs-cluster/issues/715) | [ipfs-cluster/ipfs-cluster#719](https://github.com/ipfs-cluster/ipfs-cluster/issues/719)
* Nil pointer panic on `ipfs-cluster-ctl add` | [ipfs-cluster/ipfs-cluster#727](https://github.com/ipfs-cluster/ipfs-cluster/issues/727) | [ipfs-cluster/ipfs-cluster#728](https://github.com/ipfs-cluster/ipfs-cluster/issues/728)
* Fix `enc=json` output on `ipfs-cluster-ctl add` | [ipfs-cluster/ipfs-cluster#729](https://github.com/ipfs-cluster/ipfs-cluster/issues/729)
* Add SSL CAs to Docker container | [ipfs-cluster/ipfs-cluster#730](https://github.com/ipfs-cluster/ipfs-cluster/issues/730) | [ipfs-cluster/ipfs-cluster#731](https://github.com/ipfs-cluster/ipfs-cluster/issues/731)
* Remove duplicate import | [ipfs-cluster/ipfs-cluster#734](https://github.com/ipfs-cluster/ipfs-cluster/issues/734)
* Fix version json object | [ipfs-cluster/ipfs-cluster#743](https://github.com/ipfs-cluster/ipfs-cluster/issues/743) | [ipfs-cluster/ipfs-cluster#752](https://github.com/ipfs-cluster/ipfs-cluster/issues/752)

#### Upgrading notices

##### Configuration changes

There are no configuration changes on this release.

##### REST API

The `/version` endpoint now returns a version object with *lowercase* `version` key.
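For example, a response now looks roughly like this (the value is illustrative):

```js
{ "version": "0.10.1" } // note the lowercase "version" key
```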
##### Go APIs

There are no changes to the Go APIs.

##### Other

Since we have switched to Go modules for dependency management, `gx` is no longer used and the maintenance of Gx dependencies has been dropped. The `Makefile` has been updated accordingly, but now a simple `go install ./cmd/...` works.

---

### v0.10.0 - 2019-03-07

#### Summary

As we get ready to introduce a new CRDT-based "consensus" component to replace Raft, IPFS Cluster 0.10.0 prepares the ground with substantial under-the-hood changes, many performance improvements and a few very useful features.

First of all, this release **requires** users to run `state upgrade` (or start their daemons with `ipfs-cluster-service daemon --upgrade`). This is the last upgrade in this fashion as we turn to go-datastore-based storage. The next release of IPFS Cluster will not understand or be able to upgrade anything below 0.10.0.

Secondly, we have made some changes to internal types that should greatly improve performance, particularly calls involving large collections of items (`pin ls` or `status`). There are also changes on how the state is serialized, avoiding unnecessary in-memory copies. We have also upgraded the dependency stack, incorporating many fixes from libp2p.

Thirdly, our great new features:

* `ipfs-cluster-ctl pin add/rm` now supports IPFS paths (`/ipfs/Qmxx.../...`, `/ipns/Qmxx.../...`, `/ipld/Qm.../...`) which are resolved automatically before pinning.
* All our configuration values can now be set via environment variables, and these will be reflected when initializing a new configuration file.
* Pins can now specify a list of "priority allocations". This allows pinning items to specific Cluster peers, overriding the default allocation policy.
* Finally, the REST API supports adding custom metadata entries as `key=value` (we will soon add support in `ipfs-cluster-ctl`). Metadata can be added as query arguments to the Pin or PinPath endpoints: `POST /pins/?meta-key1=value1&meta-key2=value2...`

Note that on this release we have also removed a lot of backwards-compatibility code for things older than version 0.8.0, which kept things working but printed respective warnings. If you're upgrading from an old release, consider comparing your configuration with the new default one.

#### List of changes

##### Features

* Add full support for environment variables in configurations and initialization | [ipfs-cluster/ipfs-cluster#656](https://github.com/ipfs-cluster/ipfs-cluster/issues/656) | [ipfs-cluster/ipfs-cluster#663](https://github.com/ipfs-cluster/ipfs-cluster/issues/663) | [ipfs-cluster/ipfs-cluster#667](https://github.com/ipfs-cluster/ipfs-cluster/issues/667)
* Switch to codecov | [ipfs-cluster/ipfs-cluster#683](https://github.com/ipfs-cluster/ipfs-cluster/issues/683)
* Add auto-resolving IPFS paths | [ipfs-cluster/ipfs-cluster#450](https://github.com/ipfs-cluster/ipfs-cluster/issues/450) | [ipfs-cluster/ipfs-cluster#634](https://github.com/ipfs-cluster/ipfs-cluster/issues/634)
* Support user-defined allocations | [ipfs-cluster/ipfs-cluster#646](https://github.com/ipfs-cluster/ipfs-cluster/issues/646) | [ipfs-cluster/ipfs-cluster#647](https://github.com/ipfs-cluster/ipfs-cluster/issues/647)
* Support user-defined metadata in pin objects | [ipfs-cluster/ipfs-cluster#681](https://github.com/ipfs-cluster/ipfs-cluster/issues/681)
* Make normal types serializable and remove `*Serial` types | [ipfs-cluster/ipfs-cluster#654](https://github.com/ipfs-cluster/ipfs-cluster/issues/654) | [ipfs-cluster/ipfs-cluster#688](https://github.com/ipfs-cluster/ipfs-cluster/issues/688) | [ipfs-cluster/ipfs-cluster#700](https://github.com/ipfs-cluster/ipfs-cluster/issues/700)
* Support IPFS paths in the IPFS proxy | [ipfs-cluster/ipfs-cluster#480](https://github.com/ipfs-cluster/ipfs-cluster/issues/480) | [ipfs-cluster/ipfs-cluster#690](https://github.com/ipfs-cluster/ipfs-cluster/issues/690)
* Use go-datastore as backend for the cluster state | [ipfs-cluster/ipfs-cluster#655](https://github.com/ipfs-cluster/ipfs-cluster/issues/655)
* Upgrade dependencies | [ipfs-cluster/ipfs-cluster#675](https://github.com/ipfs-cluster/ipfs-cluster/issues/675) | [ipfs-cluster/ipfs-cluster#679](https://github.com/ipfs-cluster/ipfs-cluster/issues/679) | [ipfs-cluster/ipfs-cluster#686](https://github.com/ipfs-cluster/ipfs-cluster/issues/686) | [ipfs-cluster/ipfs-cluster#687](https://github.com/ipfs-cluster/ipfs-cluster/issues/687)
* Adopt MIT+Apache 2 License (no more sign-off required) | [ipfs-cluster/ipfs-cluster#692](https://github.com/ipfs-cluster/ipfs-cluster/issues/692)
* Add codecov configuration file | [ipfs-cluster/ipfs-cluster#693](https://github.com/ipfs-cluster/ipfs-cluster/issues/693)
* Additional tests for basic auth | [ipfs-cluster/ipfs-cluster#645](https://github.com/ipfs-cluster/ipfs-cluster/issues/645) | [ipfs-cluster/ipfs-cluster#694](https://github.com/ipfs-cluster/ipfs-cluster/issues/694)

##### Bug fixes

* Fix docker compose tests | [ipfs-cluster/ipfs-cluster#696](https://github.com/ipfs-cluster/ipfs-cluster/issues/696)
* Hide `ipfsproxy.extract_headers_ttl` and `ipfsproxy.extract_headers_path` options by default | [ipfs-cluster/ipfs-cluster#699](https://github.com/ipfs-cluster/ipfs-cluster/issues/699)

#### Upgrading notices

This release needs a state upgrade before starting the Cluster daemon. Run `ipfs-cluster-service state upgrade` or run it as `ipfs-cluster-service daemon --upgrade`. We recommend backing up the `~/.ipfs-cluster` folder or exporting the pinset with `ipfs-cluster-service state export`.

##### Configuration changes

Configurations now respect environment variables for all sections. They are in the form:

`CLUSTER_COMPONENTNAME_KEYNAMEWITHOUTSPACES=value`

Environment variables will override `service.json` configuration options when defined and the Cluster peer is started. `ipfs-cluster-service init` will reflect the value of any existing environment variables in the new `service.json` file.

##### REST API

The main breaking change to the REST API corresponds to the JSON representation of CIDs in response objects:

* Before: `"cid": "Qm...."`
* Now: `"cid": { "/": "Qm...."}`

The new CID encoding is the default as defined by the `cid` library. Unfortunately, there is no good solution to keep the previous representation without copying all the objects (an inefficient technique we just removed). The new CID encoding is otherwise aligned with the rest of the stack.

The API also gets two new "Path" endpoints:

* `POST /pins/{ipfs|ipns|ipld}/<path>` and
* `DELETE /pins/{ipfs|ipns|ipld}/<path>`

Thus, it is equivalent to pin a CID with `POST /pins/<cid>` (as before) or with `POST /pins/ipfs/<cid>`.

The calls will however fail when a non-compliant IPFS path is provided: `POST /pins/<cid>/my/path` will fail because all paths must start with the `/ipfs`, `/ipns` or `/ipld` components.

##### Go APIs

This release introduces lots of changes to the Go APIs, including the Go REST API client, as we have started returning pointers to objects rather than the objects directly. The `Pin` call will now take `api.PinOptions` instead of different arguments corresponding to the options. It is aligned with the new `PinPath` and `UnpinPath`.

##### Other

As pointed above, 0.10.0's state migration is a required step to be able to use future versions of IPFS Cluster.

---

### v0.9.0 - 2019-02-18

#### Summary

IPFS Cluster version 0.9.0 comes with one big new feature, [OpenCensus](https://opencensus.io) support! This allows for the collection of distributed traces and metrics from the IPFS Cluster application as well as supporting libraries. Currently, we support the use of [Jaeger](https://jaegertracing.io) as the tracing backend and [Prometheus](https://prometheus.io) as the metrics backend.
Support for other [OpenCensus backends](https://opencensus.io/exporters/) will be added as requested by the community.

#### List of changes

##### Features

* Integrate [OpenCensus](https://opencensus.io) tracing and metrics into IPFS Cluster codebase | [ipfs-cluster/ipfs-cluster#486](https://github.com/ipfs-cluster/ipfs-cluster/issues/486) | [ipfs-cluster/ipfs-cluster#658](https://github.com/ipfs-cluster/ipfs-cluster/issues/658) | [ipfs-cluster/ipfs-cluster#659](https://github.com/ipfs-cluster/ipfs-cluster/issues/659) | [ipfs-cluster/ipfs-cluster#676](https://github.com/ipfs-cluster/ipfs-cluster/issues/676) | [ipfs-cluster/ipfs-cluster#671](https://github.com/ipfs-cluster/ipfs-cluster/issues/671) | [ipfs-cluster/ipfs-cluster#674](https://github.com/ipfs-cluster/ipfs-cluster/issues/674)

##### Bug fixes

No bugs were fixed from the previous release.

##### Deprecated

* The snap distribution of IPFS Cluster has been removed | [ipfs-cluster/ipfs-cluster#593](https://github.com/ipfs-cluster/ipfs-cluster/issues/593) | [ipfs-cluster/ipfs-cluster#649](https://github.com/ipfs-cluster/ipfs-cluster/issues/649).

#### Upgrading notices

##### Configuration changes

No changes to the existing configuration.

There are two new configuration sections with this release:

###### `tracing` section

The `tracing` section configures the use of Jaeger as a tracing backend.

```js
 "tracing": {
   "enable_tracing": false,
   "jaeger_agent_endpoint": "/ip4/0.0.0.0/udp/6831",
   "sampling_prob": 0.3,
   "service_name": "cluster-daemon"
 }
```

###### `metrics` section

The `metrics` section configures the use of Prometheus as a metrics collector.

```js
 "metrics": {
   "enable_stats": false,
   "prometheus_endpoint": "/ip4/0.0.0.0/tcp/8888",
   "reporting_interval": "2s"
 }
```

##### REST API

No changes to the REST API.

##### Go APIs

The Go APIs had the minor change of having a `context.Context` parameter added as the first argument to those that didn't already have it. This was to enable the propagation of tracing and metric values.

The following is a list of interfaces and their methods that were affected by this change:

 - Component
   - Shutdown
 - Consensus
   - Ready
   - LogPin
   - LogUnpin
   - AddPeer
   - RmPeer
   - State
   - Leader
   - WaitForSync
   - Clean
   - Peers
 - IpfsConnector
   - ID
   - ConnectSwarm
   - SwarmPeers
   - RepoStat
   - BlockPut
   - BlockGet
 - Peered
   - AddPeer
   - RmPeer
 - PinTracker
   - Track
   - Untrack
   - StatusAll
   - Status
   - SyncAll
   - Sync
   - RecoverAll
   - Recover
 - Informer
   - GetMetric
 - PinAllocator
   - Allocate
 - PeerMonitor
   - LogMetric
   - PublishMetric
   - LatestMetrics
 - state.State
   - Add
   - Rm
   - List
   - Has
   - Get
   - Migrate
 - rest.Client
   - ID
   - Peers
   - PeerAdd
   - PeerRm
   - Add
   - AddMultiFile
   - Pin
   - Unpin
   - Allocations
   - Allocation
   - Status
   - StatusAll
   - Sync
   - SyncAll
   - Recover
   - RecoverAll
   - Version
   - IPFS
   - GetConnectGraph
   - Metrics

These interface changes were also made in the respective implementations. All exported methods of the Cluster type also had these changes made.

##### Other

No other things.

---

### v0.8.0 - 2019-01-16

#### Summary

IPFS Cluster version 0.8.0 comes with a few useful features and some bugfixes. A significant amount of work has been put to correctly handle CORS in both the REST API and the IPFS Proxy endpoint, fixing some long-standing issues (we hope once and for all).
There has also been heavy work under the hood to separate the IPFS HTTP Connector (the HTTP client to the IPFS daemon) from the IPFS proxy, which is essentially an additional Cluster API. Check the configuration changes section below for more information about how this affects the configuration file.

Finally, we have some useful small features:

* The `ipfs-cluster-ctl status --filter` option allows listing only those items which are still `pinning` or `queued` or `error` etc. You can combine multiple filters. This translates to a new `filter` query parameter in the `/pins` API endpoint.
* The `stream-channels=false` query parameter for the `/add` endpoint will let the API buffer the output when adding and return a valid JSON array once done, making this API endpoint behave like a regular, non-streaming one. `ipfs-cluster-ctl add --no-stream` acts similarly, but buffering on the client side. Note that this will cause in-memory buffering of potentially very large responses when the number of added files is very large, but should be perfectly fine for regular usage.
* The `ipfs-cluster-ctl add --quieter` flag now applies to the JSON output too, allowing the user to just get the last added entry JSON object when adding a file, which is always the root hash.

#### List of changes

##### Features

* IPFS Proxy extraction to its own `API` component: `ipfsproxy` | [ipfs-cluster/ipfs-cluster#453](https://github.com/ipfs-cluster/ipfs-cluster/issues/453) | [ipfs-cluster/ipfs-cluster#576](https://github.com/ipfs-cluster/ipfs-cluster/issues/576) | [ipfs-cluster/ipfs-cluster#616](https://github.com/ipfs-cluster/ipfs-cluster/issues/616) | [ipfs-cluster/ipfs-cluster#617](https://github.com/ipfs-cluster/ipfs-cluster/issues/617)
* Add full CORS handling to `restapi` | [ipfs-cluster/ipfs-cluster#639](https://github.com/ipfs-cluster/ipfs-cluster/issues/639) | [ipfs-cluster/ipfs-cluster#640](https://github.com/ipfs-cluster/ipfs-cluster/issues/640)
* `restapi` configuration section entries can be overridden from environment variables | [ipfs-cluster/ipfs-cluster#609](https://github.com/ipfs-cluster/ipfs-cluster/issues/609)
* Update to `go-ipfs-files` 2.0 | [ipfs-cluster/ipfs-cluster#613](https://github.com/ipfs-cluster/ipfs-cluster/issues/613)
* Tests for the `/monitor/metrics` endpoint | [ipfs-cluster/ipfs-cluster#587](https://github.com/ipfs-cluster/ipfs-cluster/issues/587) | [ipfs-cluster/ipfs-cluster#622](https://github.com/ipfs-cluster/ipfs-cluster/issues/622)
* Support `stream-channels=false` query parameter in `/add` | [ipfs-cluster/ipfs-cluster#632](https://github.com/ipfs-cluster/ipfs-cluster/issues/632) | [ipfs-cluster/ipfs-cluster#633](https://github.com/ipfs-cluster/ipfs-cluster/issues/633)
* Support server side `/pins` filtering | [ipfs-cluster/ipfs-cluster#445](https://github.com/ipfs-cluster/ipfs-cluster/issues/445) | [ipfs-cluster/ipfs-cluster#478](https://github.com/ipfs-cluster/ipfs-cluster/issues/478) | [ipfs-cluster/ipfs-cluster#627](https://github.com/ipfs-cluster/ipfs-cluster/issues/627)
* `ipfs-cluster-ctl add --no-stream` option | [ipfs-cluster/ipfs-cluster#632](https://github.com/ipfs-cluster/ipfs-cluster/issues/632) | [ipfs-cluster/ipfs-cluster#637](https://github.com/ipfs-cluster/ipfs-cluster/issues/637)
* Upgrade dependencies and libp2p to version 6.0.29 | [ipfs-cluster/ipfs-cluster#624](https://github.com/ipfs-cluster/ipfs-cluster/issues/624)

##### Bug fixes

* Respect IPFS daemon response headers on non-proxied calls | [ipfs-cluster/ipfs-cluster#382](https://github.com/ipfs-cluster/ipfs-cluster/issues/382) | [ipfs-cluster/ipfs-cluster#623](https://github.com/ipfs-cluster/ipfs-cluster/issues/623) | [ipfs-cluster/ipfs-cluster#638](https://github.com/ipfs-cluster/ipfs-cluster/issues/638)
* Fix `ipfs-cluster-ctl` usage with HTTPS and `/dns*` hostnames | [ipfs-cluster/ipfs-cluster#626](https://github.com/ipfs-cluster/ipfs-cluster/issues/626)
* Minor fixes in sharness | [ipfs-cluster/ipfs-cluster#641](https://github.com/ipfs-cluster/ipfs-cluster/issues/641) | [ipfs-cluster/ipfs-cluster#643](https://github.com/ipfs-cluster/ipfs-cluster/issues/643)
* Fix error handling when parsing the configuration | [ipfs-cluster/ipfs-cluster#642](https://github.com/ipfs-cluster/ipfs-cluster/issues/642)

#### Upgrading notices

This release comes with some configuration changes that are important to notice, even though the peers will start with the same configurations as before.

##### Configuration changes

###### `ipfsproxy` section

This version introduces a separate `ipfsproxy` API component. This is reflected in the `service.json` configuration, which now includes a new `ipfsproxy` subsection under the `api` section. By default it looks like:

```js
"ipfsproxy": {
   "node_multiaddress": "/ip4/127.0.0.1/tcp/5001",
   "listen_multiaddress": "/ip4/127.0.0.1/tcp/9095",
   "read_timeout": "0s",
   "read_header_timeout": "5s",
   "write_timeout": "0s",
   "idle_timeout": "1m0s"
 }
```

We have however added the necessary safeguards to keep backwards compatibility for this release. If the `ipfsproxy` section is empty, it will be picked up from the `ipfshttp` section as before. An ugly warning will be printed in this case.

Based on the above, the `ipfshttp` configuration section loses the proxy-related options. Note that `node_multiaddress` stays in both component configurations and should likely be the same in most cases, but you can now potentially proxy requests to a different daemon than the one used by the cluster peer.

Additional hidden configuration options to manage custom header extraction from the IPFS daemon (for power users) have been added to the `ipfsproxy` section but are not shown by default when initializing empty configurations. See the documentation for more details.

###### `restapi` section

The introduction of proper CORS handling in the `restapi` component introduces a number of new keys:

```js
"cors_allowed_origins": [
  "*"
],
"cors_allowed_methods": [
  "GET"
],
"cors_allowed_headers": [],
"cors_exposed_headers": [
  "Content-Type",
  "X-Stream-Output",
  "X-Chunked-Output",
  "X-Content-Length"
],
"cors_allow_credentials": true,
"cors_max_age": "0s"
```

Note that CORS will be essentially unconfigured when these keys are not defined.

The `headers` key, which was used before to add some CORS related headers manually, takes a new empty default. **We recommend emptying `headers` from any CORS-related value.**

##### REST API

The REST API is fully backwards compatible:

* The `GET /pins` endpoint takes a new `?filter=` option. See `ipfs-cluster-ctl status --help` for acceptable values.
* The `POST /add` endpoint accepts a new `?stream-channels=` option. By default it is set to `true`.

##### Go APIs

The signature for the `StatusAll` method in the REST `client` module has changed to include a `filter` parameter.

There may have been other minimal changes to internal exported Go APIs, but they should not affect users.
##### Other

Proxy requests which are handled by the Cluster peer (`/pin/ls`, `/pin/add`, `/pin/rm`, `/repo/stat` and `/add`) will now attempt to fully mimic ipfs responses to the header level. This is done by triggering CORS pre-flight for every hijacked request along with an occasional regular request to `/version` to extract other headers (and possibly custom ones).

The practical result is that the proxy now behaves correctly when dropped in place of IPFS in CORS-aware contexts (like the browser).

---

### v0.7.0 - 2018-11-01

#### Summary

IPFS Cluster version 0.7.0 is a maintenance release that includes a few bugfixes and some small features.

Note that the REST API response format for the `/add` endpoint has changed. Thus all clients need to be upgraded to deal with the new format. The `rest/api/client` has been accordingly updated.

#### List of changes

##### Features

* Clean (rotate) the state when running `init` | [ipfs-cluster/ipfs-cluster#532](https://github.com/ipfs-cluster/ipfs-cluster/issues/532) | [ipfs-cluster/ipfs-cluster#553](https://github.com/ipfs-cluster/ipfs-cluster/issues/553)
* Configurable REST API headers and CORS defaults | [ipfs-cluster/ipfs-cluster#578](https://github.com/ipfs-cluster/ipfs-cluster/issues/578)
* Upgrade libp2p and other deps | [ipfs-cluster/ipfs-cluster#580](https://github.com/ipfs-cluster/ipfs-cluster/issues/580) | [ipfs-cluster/ipfs-cluster#590](https://github.com/ipfs-cluster/ipfs-cluster/issues/590) | [ipfs-cluster/ipfs-cluster#592](https://github.com/ipfs-cluster/ipfs-cluster/issues/592) | [ipfs-cluster/ipfs-cluster#598](https://github.com/ipfs-cluster/ipfs-cluster/issues/598) | [ipfs-cluster/ipfs-cluster#599](https://github.com/ipfs-cluster/ipfs-cluster/issues/599)
* Use `gossipsub` to broadcast metrics | [ipfs-cluster/ipfs-cluster#573](https://github.com/ipfs-cluster/ipfs-cluster/issues/573)
* Download gx and gx-go from IPFS preferentially | [ipfs-cluster/ipfs-cluster#577](https://github.com/ipfs-cluster/ipfs-cluster/issues/577) | [ipfs-cluster/ipfs-cluster#581](https://github.com/ipfs-cluster/ipfs-cluster/issues/581)
* Expose peer metrics in the API + ctl commands | [ipfs-cluster/ipfs-cluster#449](https://github.com/ipfs-cluster/ipfs-cluster/issues/449) | [ipfs-cluster/ipfs-cluster#572](https://github.com/ipfs-cluster/ipfs-cluster/issues/572) | [ipfs-cluster/ipfs-cluster#589](https://github.com/ipfs-cluster/ipfs-cluster/issues/589) | [ipfs-cluster/ipfs-cluster#587](https://github.com/ipfs-cluster/ipfs-cluster/issues/587)
* Add a `docker-compose.yml` template, which creates a two-peer cluster | [ipfs-cluster/ipfs-cluster#585](https://github.com/ipfs-cluster/ipfs-cluster/issues/585) | [ipfs-cluster/ipfs-cluster#588](https://github.com/ipfs-cluster/ipfs-cluster/issues/588)
* Support overwriting configuration values in the `cluster` section with environment variables | [ipfs-cluster/ipfs-cluster#575](https://github.com/ipfs-cluster/ipfs-cluster/issues/575) | [ipfs-cluster/ipfs-cluster#596](https://github.com/ipfs-cluster/ipfs-cluster/issues/596)
* Set snaps to `classic` confinement mode and revert it since approval never arrived | [ipfs-cluster/ipfs-cluster#579](https://github.com/ipfs-cluster/ipfs-cluster/issues/579) | [ipfs-cluster/ipfs-cluster#594](https://github.com/ipfs-cluster/ipfs-cluster/issues/594)
* Use Go's reverse proxy library in the proxy endpoint | [ipfs-cluster/ipfs-cluster#570](https://github.com/ipfs-cluster/ipfs-cluster/issues/570) | [ipfs-cluster/ipfs-cluster#605](https://github.com/ipfs-cluster/ipfs-cluster/issues/605)
##### Bug fixes

* `/add` endpoint improvements and IPFS Companion compatibility | [ipfs-cluster/ipfs-cluster#582](https://github.com/ipfs-cluster/ipfs-cluster/issues/582) | [ipfs-cluster/ipfs-cluster#569](https://github.com/ipfs-cluster/ipfs-cluster/issues/569)
* Fix adding with spaces in the name parameter | [ipfs-cluster/ipfs-cluster#583](https://github.com/ipfs-cluster/ipfs-cluster/issues/583)
* Escape filter query parameter | [ipfs-cluster/ipfs-cluster#586](https://github.com/ipfs-cluster/ipfs-cluster/issues/586)
* Fix some race conditions | [ipfs-cluster/ipfs-cluster#597](https://github.com/ipfs-cluster/ipfs-cluster/issues/597)
* Improve pin deserialization efficiency | [ipfs-cluster/ipfs-cluster#601](https://github.com/ipfs-cluster/ipfs-cluster/issues/601)
* Do not error remote pins | [ipfs-cluster/ipfs-cluster#600](https://github.com/ipfs-cluster/ipfs-cluster/issues/600) | [ipfs-cluster/ipfs-cluster#603](https://github.com/ipfs-cluster/ipfs-cluster/issues/603)
* Clean up testing folders in `rest` and `rest/client` after tests | [ipfs-cluster/ipfs-cluster#607](https://github.com/ipfs-cluster/ipfs-cluster/issues/607)

#### Upgrading notices

##### Configuration changes

The configurations from previous versions are compatible, but a new `headers` key has been added to the `restapi` section. By default it gets CORS headers which will allow read-only interaction from any origin.

Additionally, all fields from the main `cluster` configuration section can now be overwritten with environment variables, e.g. `CLUSTER_SECRET` or `CLUSTER_DISABLEREPINNING`.

##### REST API

The `/add` endpoint stream now returns different objects, in line with the rest of the API types.

Before:

```
type AddedOutput struct {
	Error
	Name  string
	Hash  string `json:",omitempty"`
	Bytes int64  `json:",omitempty"`
	Size  string `json:",omitempty"`
}
```

Now:

```
type AddedOutput struct {
	Name  string `json:"name"`
	Cid   string `json:"cid,omitempty"`
	Bytes uint64 `json:"bytes,omitempty"`
	Size  uint64 `json:"size,omitempty"`
}
```

The `/add` endpoint no longer reports errors as part of an AddedOutput object, but instead it uses trailer headers (same as `go-ipfs`). They are handled in the `client`.

##### Go APIs

The `AddedOutput` object has changed, thus the `api/rest/client` from older versions will not work with this one.

##### Other

No other things.

---

### v0.6.0 - 2018-10-03

#### Summary

IPFS Cluster version 0.6.0 is a new minor release.

We have increased the minor release number to signal changes to the Go APIs after upgrading to the new `cid` package, but, other than that, this release does not include any major changes.

It brings a number of small fixes and features of which we can highlight two useful ones:

* the first is the support for multiple cluster daemon versions in the same cluster, as long as they share the same major/minor release. That means, all releases in the `0.6` series (`0.6.0`, `0.6.1` and so on...) will be able to speak to each other, allowing partial cluster upgrades.
* the second is the inclusion of a `PeerName` key in the status (`PinInfo`) objects. `ipfs-cluster-ctl status` will now show peer names instead of peer IDs, making it easy to identify the status for each peer.
Many thanks to all the contributors to this release: @lanzafame, @meiqimichelle, @kishansagathiya, @cannium, @jglukasik and @mike-ngu.

#### List of changes

##### Features

* Move commands to the `cmd/` folder | [ipfs-cluster/ipfs-cluster#485](https://github.com/ipfs-cluster/ipfs-cluster/issues/485) | [ipfs-cluster/ipfs-cluster#521](https://github.com/ipfs-cluster/ipfs-cluster/issues/521) | [ipfs-cluster/ipfs-cluster#556](https://github.com/ipfs-cluster/ipfs-cluster/issues/556)
* Dependency upgrades: `go-dot`, `go-libp2p`, `cid` | [ipfs-cluster/ipfs-cluster#533](https://github.com/ipfs-cluster/ipfs-cluster/issues/533) | [ipfs-cluster/ipfs-cluster#537](https://github.com/ipfs-cluster/ipfs-cluster/issues/537) | [ipfs-cluster/ipfs-cluster#535](https://github.com/ipfs-cluster/ipfs-cluster/issues/535) | [ipfs-cluster/ipfs-cluster#544](https://github.com/ipfs-cluster/ipfs-cluster/issues/544) | [ipfs-cluster/ipfs-cluster#561](https://github.com/ipfs-cluster/ipfs-cluster/issues/561)
* Build with go-1.11 | [ipfs-cluster/ipfs-cluster#558](https://github.com/ipfs-cluster/ipfs-cluster/issues/558)
* Peer names in `PinInfo` | [ipfs-cluster/ipfs-cluster#446](https://github.com/ipfs-cluster/ipfs-cluster/issues/446) | [ipfs-cluster/ipfs-cluster#531](https://github.com/ipfs-cluster/ipfs-cluster/issues/531)
* Wrap API client in an interface | [ipfs-cluster/ipfs-cluster#447](https://github.com/ipfs-cluster/ipfs-cluster/issues/447) | [ipfs-cluster/ipfs-cluster#523](https://github.com/ipfs-cluster/ipfs-cluster/issues/523) | [ipfs-cluster/ipfs-cluster#564](https://github.com/ipfs-cluster/ipfs-cluster/issues/564)
* `Makefile`: add `prcheck` target and fix `make all` | [ipfs-cluster/ipfs-cluster#536](https://github.com/ipfs-cluster/ipfs-cluster/issues/536) | [ipfs-cluster/ipfs-cluster#542](https://github.com/ipfs-cluster/ipfs-cluster/issues/542) | [ipfs-cluster/ipfs-cluster#539](https://github.com/ipfs-cluster/ipfs-cluster/issues/539)
* Docker: speed up [re]builds | [ipfs-cluster/ipfs-cluster#529](https://github.com/ipfs-cluster/ipfs-cluster/issues/529)
* Re-enable keep-alives on servers | [ipfs-cluster/ipfs-cluster#548](https://github.com/ipfs-cluster/ipfs-cluster/issues/548) | [ipfs-cluster/ipfs-cluster#560](https://github.com/ipfs-cluster/ipfs-cluster/issues/560)

##### Bugfixes

* Fix adding to cluster with unhealthy peers | [ipfs-cluster/ipfs-cluster#543](https://github.com/ipfs-cluster/ipfs-cluster/issues/543) | [ipfs-cluster/ipfs-cluster#549](https://github.com/ipfs-cluster/ipfs-cluster/issues/549)
* Fix Snap builds and pushes: multiple architectures re-enabled | [ipfs-cluster/ipfs-cluster#520](https://github.com/ipfs-cluster/ipfs-cluster/issues/520) | [ipfs-cluster/ipfs-cluster#554](https://github.com/ipfs-cluster/ipfs-cluster/issues/554) | [ipfs-cluster/ipfs-cluster#557](https://github.com/ipfs-cluster/ipfs-cluster/issues/557) | [ipfs-cluster/ipfs-cluster#562](https://github.com/ipfs-cluster/ipfs-cluster/issues/562) | [ipfs-cluster/ipfs-cluster#565](https://github.com/ipfs-cluster/ipfs-cluster/issues/565)
* Docs: Typos in Readme and some improvements | [ipfs-cluster/ipfs-cluster#547](https://github.com/ipfs-cluster/ipfs-cluster/issues/547) | [ipfs-cluster/ipfs-cluster#567](https://github.com/ipfs-cluster/ipfs-cluster/issues/567)
* Fix tests in `stateless` PinTracker | [ipfs-cluster/ipfs-cluster#552](https://github.com/ipfs-cluster/ipfs-cluster/issues/552) | [ipfs-cluster/ipfs-cluster#563](https://github.com/ipfs-cluster/ipfs-cluster/issues/563)

#### Upgrading notices
##### Configuration changes

There are no changes to the configuration file on this release.

##### REST API

There are no changes to the REST API.

##### Go APIs

We have upgraded to the new version of the `cid` package. This means all `*cid.Cid` arguments are now `cid.Cid`.

##### Other

We are now using `go-1.11` to build and test cluster. We recommend using this version as well when building from source.

---

### v0.5.0 - 2018-08-23

#### Summary

IPFS Cluster version 0.5.0 is a minor release which includes a major feature: **adding content to IPFS directly through Cluster**.

This functionality is provided by `ipfs-cluster-ctl add` and by the API endpoint `/add`. The upload format (multipart) is similar to the IPFS `/add` endpoint, as well as the options (chunker, layout...). Cluster `add` generates the same DAG as `ipfs add` would, but it sends the added blocks directly to their allocations, pinning them on completion. The pin happens very quickly, as content is already locally available in the allocated peers.

The release also includes most of the needed code for the [Sharding feature](https://ipfscluster.io/developer/rfcs/dag-sharding-rfc/), but it is not yet usable/enabled, pending features from go-ipfs.

The 0.5.0 release additionally includes a new experimental PinTracker implementation: the `stateless` pin tracker. The stateless pin tracker relies on the IPFS pinset and the cluster state to keep track of pins, rather than keeping an in-memory copy of the cluster pinset, thus reducing the memory usage with huge pinsets. It can be enabled with `ipfs-cluster-service daemon --pintracker stateless`.

The last major feature is the use of a DHT as a routing layer for cluster peers. This means that peers should be able to discover each other as long as they are connected to one cluster peer. This simplifies the setup requirements for starting a cluster and helps avoid situations which make the cluster unhealthy.

This release requires a state upgrade migration. It can be performed with `ipfs-cluster-service state upgrade` or simply launching the daemon with `ipfs-cluster-service daemon --upgrade`.
#### List of changes

##### Features

* Libp2p upgrades (up to v6) | [ipfs-cluster/ipfs-cluster#456](https://github.com/ipfs-cluster/ipfs-cluster/issues/456) | [ipfs-cluster/ipfs-cluster#482](https://github.com/ipfs-cluster/ipfs-cluster/issues/482)
* Support `/dns` multiaddresses for `node_multiaddress` | [ipfs-cluster/ipfs-cluster#462](https://github.com/ipfs-cluster/ipfs-cluster/issues/462) | [ipfs-cluster/ipfs-cluster#463](https://github.com/ipfs-cluster/ipfs-cluster/issues/463)
* Increase `state_sync_interval` to 10 minutes | [ipfs-cluster/ipfs-cluster#468](https://github.com/ipfs-cluster/ipfs-cluster/issues/468) | [ipfs-cluster/ipfs-cluster#469](https://github.com/ipfs-cluster/ipfs-cluster/issues/469)
* Auto-interpret libp2p addresses in `rest/client`'s `APIAddr` configuration option | [ipfs-cluster/ipfs-cluster#498](https://github.com/ipfs-cluster/ipfs-cluster/issues/498)
* Resolve `APIAddr` (for `/dnsaddr` usage) in `rest/client` | [ipfs-cluster/ipfs-cluster#498](https://github.com/ipfs-cluster/ipfs-cluster/issues/498)
* Support for adding content to Cluster and sharding (sharding is disabled) | [ipfs-cluster/ipfs-cluster#484](https://github.com/ipfs-cluster/ipfs-cluster/issues/484) | [ipfs-cluster/ipfs-cluster#503](https://github.com/ipfs-cluster/ipfs-cluster/issues/503) | [ipfs-cluster/ipfs-cluster#495](https://github.com/ipfs-cluster/ipfs-cluster/issues/495) | [ipfs-cluster/ipfs-cluster#504](https://github.com/ipfs-cluster/ipfs-cluster/issues/504) | [ipfs-cluster/ipfs-cluster#509](https://github.com/ipfs-cluster/ipfs-cluster/issues/509) | [ipfs-cluster/ipfs-cluster#511](https://github.com/ipfs-cluster/ipfs-cluster/issues/511) | [ipfs-cluster/ipfs-cluster#518](https://github.com/ipfs-cluster/ipfs-cluster/issues/518)
* `stateless` PinTracker | [ipfs-cluster/ipfs-cluster#308](https://github.com/ipfs-cluster/ipfs-cluster/issues/308) | [ipfs-cluster/ipfs-cluster#460](https://github.com/ipfs-cluster/ipfs-cluster/issues/460)
* Add `size-only=true` to `repo/stat` calls | [ipfs-cluster/ipfs-cluster#507](https://github.com/ipfs-cluster/ipfs-cluster/issues/507)
* Enable DHT-based peer discovery and routing for cluster peers | [ipfs-cluster/ipfs-cluster#489](https://github.com/ipfs-cluster/ipfs-cluster/issues/489) | [ipfs-cluster/ipfs-cluster#508](https://github.com/ipfs-cluster/ipfs-cluster/issues/508)
* Gx-go upgrade | [ipfs-cluster/ipfs-cluster#517](https://github.com/ipfs-cluster/ipfs-cluster/issues/517)

##### Bugfixes

* Fix type for constants | [ipfs-cluster/ipfs-cluster#455](https://github.com/ipfs-cluster/ipfs-cluster/issues/455)
* Gofmt fix | [ipfs-cluster/ipfs-cluster#464](https://github.com/ipfs-cluster/ipfs-cluster/issues/464)
* Fix tests for forked repositories | [ipfs-cluster/ipfs-cluster#465](https://github.com/ipfs-cluster/ipfs-cluster/issues/465) | [ipfs-cluster/ipfs-cluster#472](https://github.com/ipfs-cluster/ipfs-cluster/issues/472)
* Fix resolve panic on `rest/client` | [ipfs-cluster/ipfs-cluster#498](https://github.com/ipfs-cluster/ipfs-cluster/issues/498)
* Fix remote pins stuck in error state | [ipfs-cluster/ipfs-cluster#500](https://github.com/ipfs-cluster/ipfs-cluster/issues/500) | [ipfs-cluster/ipfs-cluster#460](https://github.com/ipfs-cluster/ipfs-cluster/issues/460)
* Fix running some tests with `-race` | [ipfs-cluster/ipfs-cluster#340](https://github.com/ipfs-cluster/ipfs-cluster/issues/340) | [ipfs-cluster/ipfs-cluster#458](https://github.com/ipfs-cluster/ipfs-cluster/issues/458)
* Fix ipfs proxy `/add` endpoint | [ipfs-cluster/ipfs-cluster#495](https://github.com/ipfs-cluster/ipfs-cluster/issues/495) | [ipfs-cluster/ipfs-cluster#81](https://github.com/ipfs-cluster/ipfs-cluster/issues/81) | [ipfs-cluster/ipfs-cluster#505](https://github.com/ipfs-cluster/ipfs-cluster/issues/505)
* Fix ipfs proxy not hijacking `repo/stat` | [ipfs-cluster/ipfs-cluster#466](https://github.com/ipfs-cluster/ipfs-cluster/issues/466) | [ipfs-cluster/ipfs-cluster#514](https://github.com/ipfs-cluster/ipfs-cluster/issues/514)
* Fix some godoc comments | [ipfs-cluster/ipfs-cluster#519](https://github.com/ipfs-cluster/ipfs-cluster/issues/519)

#### Upgrading notices

##### Configuration files

**IMPORTANT**: `0s` is the new default for the `read_timeout` and `write_timeout` values in the `restapi` configuration section, as well as `proxy_read_timeout` and `proxy_write_timeout` options in the `ipfshttp` section. Adding files to cluster (via the REST api or the proxy) is likely to timeout otherwise.

The `peerstore` file (in the configuration folder) no longer requires listing the multiaddresses for all cluster peers when initializing the cluster with a fixed peerset. It only requires the multiaddresses for one other cluster peer. The rest will be inferred using the DHT. The peerstore file is updated only on clean shutdown, and will store all known multiaddresses, even if not pertaining to cluster peers.

The new `stateless` PinTracker implementation uses a new configuration subsection in the `pin_tracker` key. This is only generated with `ipfs-cluster-service init`. When not present, a default configuration will be used (and a warning printed).

The `state_sync_interval` default has been increased to 10 minutes, as frequent syncing is not needed with the improvements in the PinTracker. Users are welcome to update this setting.

##### REST API

The `/add` endpoint has been added. The `replication_factor_min` and `replication_factor_max` options (in `POST allocations/`) have been deprecated and substituted by `replication-min` and `replication-max`, although backwards compatibility is kept.

Keep Alive has been disabled for the HTTP servers, as a bug in Go's HTTP client implementation may result in adding corrupted content (and getting corrupted DAGs). However, while the libp2p API endpoint also suffers this, it will only close libp2p streams. Thus the performance impact on the libp2p-http endpoint should be minimal.

##### Go APIs

The `Config.PeerAddr` key in the `rest/client` module is deprecated. `APIAddr` should be used for both HTTP and LibP2P API endpoints. The type of address is automatically detected.

The IPFSConnector `Pin` call now receives an integer instead of a `Recursive` flag. It indicates the maximum depth to which something should be pinned. The only supported value is `-1` (meaning recursive). `BlockGet` and `BlockPut` calls have been added to the IPFSConnector component.

##### Other

As noted above, upgrade to `state` format version 5 is needed before starting the cluster service.

---

### v0.4.0 - 2018-05-30

#### Summary

The IPFS Cluster version 0.4.0 includes breaking changes and a considerable number of new features that cause them. The documentation (particularly that affecting the configuration and startup of peers) has been updated accordingly in https://ipfscluster.io . Be sure to also read it if you are upgrading.
There are four main developments in this release:

* Refactorings around the `consensus` component, removing dependencies on the main component and allowing separate initialization: this prompted us to re-approach how we handle the peerset, the peer addresses and the peer's startup when using bootstrap. We have gained finer control of Raft, which has allowed us to provide a clearer configuration and a better start-up procedure, especially when bootstrapping. The configuration file no longer mutates while cluster is running.
* Improvements to the `pintracker`: our pin tracker is now able to cancel ongoing pins when receiving an unpin request for the same CID, and vice-versa. It will also optimize multiple pin requests (by only queuing and triggering them once) and can now report whether an item is pinning (a request to ipfs is ongoing) vs. pin-queued (waiting for a worker to perform the request to ipfs).
* Broadcasting of monitoring metrics using PubSub: we have added a new `monitor` implementation that uses PubSub (rather than RPC broadcasting). With the upcoming improvements to PubSub this means that we can do efficient broadcasting of metrics while at the same time not requiring peers to have RPC permissions, which is preparing the ground for collaborative clusters.
* We have launched the IPFS Cluster website: https://ipfscluster.io . We moved most of the documentation over there, expanded it and updated it.

#### List of changes

##### Features

* Consensus refactorings | [ipfs-cluster/ipfs-cluster#398](https://github.com/ipfs-cluster/ipfs-cluster/issues/398) | [ipfs-cluster/ipfs-cluster#371](https://github.com/ipfs-cluster/ipfs-cluster/issues/371)
* Pintracker revamp | [ipfs-cluster/ipfs-cluster#308](https://github.com/ipfs-cluster/ipfs-cluster/issues/308) | [ipfs-cluster/ipfs-cluster#383](https://github.com/ipfs-cluster/ipfs-cluster/issues/383) | [ipfs-cluster/ipfs-cluster#408](https://github.com/ipfs-cluster/ipfs-cluster/issues/408) | [ipfs-cluster/ipfs-cluster#415](https://github.com/ipfs-cluster/ipfs-cluster/issues/415) | [ipfs-cluster/ipfs-cluster#421](https://github.com/ipfs-cluster/ipfs-cluster/issues/421) | [ipfs-cluster/ipfs-cluster#427](https://github.com/ipfs-cluster/ipfs-cluster/issues/427) | [ipfs-cluster/ipfs-cluster#432](https://github.com/ipfs-cluster/ipfs-cluster/issues/432)
* Pubsub monitoring | [ipfs-cluster/ipfs-cluster#400](https://github.com/ipfs-cluster/ipfs-cluster/issues/400)
* Force killing cluster with double CTRL-C | [ipfs-cluster/ipfs-cluster#258](https://github.com/ipfs-cluster/ipfs-cluster/issues/258) | [ipfs-cluster/ipfs-cluster#358](https://github.com/ipfs-cluster/ipfs-cluster/issues/358)
* 3x faster testsuite | [ipfs-cluster/ipfs-cluster#339](https://github.com/ipfs-cluster/ipfs-cluster/issues/339) | [ipfs-cluster/ipfs-cluster#350](https://github.com/ipfs-cluster/ipfs-cluster/issues/350)
* Introduce `disable_repinning` option | [ipfs-cluster/ipfs-cluster#369](https://github.com/ipfs-cluster/ipfs-cluster/issues/369) | [ipfs-cluster/ipfs-cluster#387](https://github.com/ipfs-cluster/ipfs-cluster/issues/387)
* Documentation moved to website and fixes | [ipfs-cluster/ipfs-cluster#390](https://github.com/ipfs-cluster/ipfs-cluster/issues/390) | [ipfs-cluster/ipfs-cluster#391](https://github.com/ipfs-cluster/ipfs-cluster/issues/391) | [ipfs-cluster/ipfs-cluster#393](https://github.com/ipfs-cluster/ipfs-cluster/issues/393) | [ipfs-cluster/ipfs-cluster#347](https://github.com/ipfs-cluster/ipfs-cluster/issues/347)
* Run Docker container with `daemon --upgrade` by default | [ipfs-cluster/ipfs-cluster#394](https://github.com/ipfs-cluster/ipfs-cluster/issues/394)
* Remove the `ipfs-cluster-ctl peers add` command (bootstrap should be used to add peers) | [ipfs-cluster/ipfs-cluster#397](https://github.com/ipfs-cluster/ipfs-cluster/issues/397)
* Add tests using HTTPS endpoints | [ipfs-cluster/ipfs-cluster#191](https://github.com/ipfs-cluster/ipfs-cluster/issues/191) | [ipfs-cluster/ipfs-cluster#403](https://github.com/ipfs-cluster/ipfs-cluster/issues/403)
* Set `refs` as default `pin_method` and `10` as default `concurrent_pins` | [ipfs-cluster/ipfs-cluster#420](https://github.com/ipfs-cluster/ipfs-cluster/issues/420)
* Use latest `gx` and `gx-go`. Be more verbose when installing | [ipfs-cluster/ipfs-cluster#418](https://github.com/ipfs-cluster/ipfs-cluster/issues/418)
* Makefile: Properly retrigger builds on source change | [ipfs-cluster/ipfs-cluster#426](https://github.com/ipfs-cluster/ipfs-cluster/issues/426)
* Improvements to StateSync() | [ipfs-cluster/ipfs-cluster#429](https://github.com/ipfs-cluster/ipfs-cluster/issues/429)
* Rename `ipfs-cluster-data` folder to `raft` | [ipfs-cluster/ipfs-cluster#430](https://github.com/ipfs-cluster/ipfs-cluster/issues/430)
* Officially support go 1.10 | [ipfs-cluster/ipfs-cluster#439](https://github.com/ipfs-cluster/ipfs-cluster/issues/439)
* Update to libp2p 5.0.17 | [ipfs-cluster/ipfs-cluster#440](https://github.com/ipfs-cluster/ipfs-cluster/issues/440)

##### Bugfixes

* Don't keep peers' `/ip*/` addresses if we know DNS addresses for them | [ipfs-cluster/ipfs-cluster#381](https://github.com/ipfs-cluster/ipfs-cluster/issues/381)
* Running cluster with wrong configuration path gives misleading error | [ipfs-cluster/ipfs-cluster#343](https://github.com/ipfs-cluster/ipfs-cluster/issues/343) | [ipfs-cluster/ipfs-cluster#370](https://github.com/ipfs-cluster/ipfs-cluster/issues/370) | [ipfs-cluster/ipfs-cluster#373](https://github.com/ipfs-cluster/ipfs-cluster/issues/373)
* Do not fail when running with `daemon --upgrade` and no state is present | [ipfs-cluster/ipfs-cluster#395](https://github.com/ipfs-cluster/ipfs-cluster/issues/395)
* IPFS Proxy: handle arguments passed as part of the url | [ipfs-cluster/ipfs-cluster#380](https://github.com/ipfs-cluster/ipfs-cluster/issues/380) | [ipfs-cluster/ipfs-cluster#392](https://github.com/ipfs-cluster/ipfs-cluster/issues/392)
* WaitForUpdates() may return before state is fully synced | [ipfs-cluster/ipfs-cluster#378](https://github.com/ipfs-cluster/ipfs-cluster/issues/378)
* Configuration mutates no more and shadowing is no longer necessary | [ipfs-cluster/ipfs-cluster#235](https://github.com/ipfs-cluster/ipfs-cluster/issues/235)
* Govet fixes | [ipfs-cluster/ipfs-cluster#417](https://github.com/ipfs-cluster/ipfs-cluster/issues/417)
* Fix release changelog when having RC tags
* Fix lock file not being removed on cluster force-kill | [ipfs-cluster/ipfs-cluster#423](https://github.com/ipfs-cluster/ipfs-cluster/issues/423) | [ipfs-cluster/ipfs-cluster#437](https://github.com/ipfs-cluster/ipfs-cluster/issues/437)
* Fix indirect pins not being correctly parsed | [ipfs-cluster/ipfs-cluster#428](https://github.com/ipfs-cluster/ipfs-cluster/issues/428) | [ipfs-cluster/ipfs-cluster#436](https://github.com/ipfs-cluster/ipfs-cluster/issues/436)
* Enable NAT support in libp2p host | [ipfs-cluster/ipfs-cluster#346](https://github.com/ipfs-cluster/ipfs-cluster/issues/346) | [ipfs-cluster/ipfs-cluster#441](https://github.com/ipfs-cluster/ipfs-cluster/issues/441)
* Fix pubsub monitor not working on ARM | [ipfs-cluster/ipfs-cluster#433](https://github.com/ipfs-cluster/ipfs-cluster/issues/433) | [ipfs-cluster/ipfs-cluster#443](https://github.com/ipfs-cluster/ipfs-cluster/issues/443)

#### Upgrading notices

##### Configuration file

This release introduces **breaking changes to the configuration file**. An error will be displayed if `ipfs-cluster-service` is started with an old configuration file. We recommend re-initing the configuration file altogether. The changes are listed below, with a sketch of the new options after the list:

* The `peers` and `bootstrap` keys have been removed from the main section of the configuration.
* You might need to provide Peer multiaddresses in a text file named `peerstore`, in your `~/.ipfs-cluster` folder (one per line). This tells your peers how to contact other peers.
* A `disable_repinning` option has been added to the main configuration section. Defaults to `false`.
* An `init_peerset` option has been added to the `raft` configuration section. It should be used to define the starting set of peers when a cluster starts for the first time and is not bootstrapping to an existing running peer (otherwise it is ignored). The value is an array of peer IDs.
* A `backups_rotate` option has been added to the `raft` section and specifies how many copies of the Raft state to keep as backups when the state is cleaned up.
* An `ipfs_request_timeout` option has been introduced to the `ipfshttp` configuration section, and controls the timeout of general requests to the ipfs daemon. Defaults to 5 minutes.
* A `pin_timeout` option has been introduced to the `ipfshttp` section. It controls the timeout for Pin requests to ipfs. Defaults to 24 hours.
* An `unpin_timeout` option has been introduced to the `ipfshttp` section. It controls the timeout for Unpin requests to ipfs. Defaults to 3h.
* Both `pinning_timeout` and `unpinning_timeout` options have been removed from the `maptracker` section.
* A `monitor/pubsubmon` section configures the new PubSub monitoring component. The section is identical to the existing `monbasic`, its only option being `check_interval` (defaults to 15 seconds).
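As a rough sketch, several of the options described above would appear in `service.json` like this (peer IDs and the `backups_rotate` value are illustrative placeholders; the timeout strings spell out the defaults mentioned in the list):

```js
"raft": {
  "init_peerset": ["<peer ID 1>", "<peer ID 2>"], // placeholders, not real peer IDs
  "backups_rotate": 5 // illustrative value
},
"ipfshttp": {
  "ipfs_request_timeout": "5m0s",
  "pin_timeout": "24h0m0s",
  "unpin_timeout": "3h0m0s"
},
"monitor": {
  "pubsubmon": {
    "check_interval": "15s"
  }
}
```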
The `ipfs-cluster-data` folder has been renamed to `raft`. Upon `ipfs-cluster-service daemon` start, the renaming will happen automatically if it exists. Otherwise it will be created with the new name.

##### REST API

There are no changes to REST APIs in this release.

##### Go APIs

Several component APIs have changed: `Consensus`, `PeerMonitor` and `IPFSConnector` have added new methods or changed method signatures.

##### Other

Calling `ipfs-cluster-service` without subcommands no longer runs the peer. It is necessary to call `ipfs-cluster-service daemon`. Several daemon-specific flags have been made subcommand flags: `--bootstrap` and `--alloc`.

The `--bootstrap` flag can now take a list of comma-separated multiaddresses. Using `--bootstrap` will automatically run `state clean`.

The `ipfs-cluster-ctl` no longer has a `peers add` subcommand. Peers should not be added this way, but rather bootstrapped to an existing running peer.

---

### v0.3.5 - 2018-03-29

This release comes packed with new features. The biggest ones are the support for parallel pinning (using `refs -r` rather than `pin add` to pin things in IPFS), and the exposing of the HTTP endpoints through libp2p. This allows users to securely interact with the HTTP API without having to set up SSL certificates.
-
-* Features
-  * `--no-status` for `ipfs-cluster-ctl pin add/rm` allows speeding up adding and removing by not fetching the status one second afterwards. Useful for ingesting pinsets into a cluster | [ipfs-cluster/ipfs-cluster#286](https://github.com/ipfs-cluster/ipfs-cluster/issues/286) | [ipfs-cluster/ipfs-cluster#329](https://github.com/ipfs-cluster/ipfs-cluster/issues/329)
-  * The `--wait` flag for `ipfs-cluster-ctl pin add/rm` allows waiting until a CID is fully pinned or unpinned | [ipfs-cluster/ipfs-cluster#338](https://github.com/ipfs-cluster/ipfs-cluster/issues/338) | [ipfs-cluster/ipfs-cluster#348](https://github.com/ipfs-cluster/ipfs-cluster/issues/348) | [ipfs-cluster/ipfs-cluster#363](https://github.com/ipfs-cluster/ipfs-cluster/issues/363)
-  * Support `refs` pinning method. Parallel pinning | [ipfs-cluster/ipfs-cluster#326](https://github.com/ipfs-cluster/ipfs-cluster/issues/326) | [ipfs-cluster/ipfs-cluster#331](https://github.com/ipfs-cluster/ipfs-cluster/issues/331)
-  * Double default timeouts for `ipfs-cluster-ctl` | [ipfs-cluster/ipfs-cluster#323](https://github.com/ipfs-cluster/ipfs-cluster/issues/323) | [ipfs-cluster/ipfs-cluster#334](https://github.com/ipfs-cluster/ipfs-cluster/issues/334)
-  * Better error messages during startup | [ipfs-cluster/ipfs-cluster#167](https://github.com/ipfs-cluster/ipfs-cluster/issues/167) | [ipfs-cluster/ipfs-cluster#344](https://github.com/ipfs-cluster/ipfs-cluster/issues/344) | [ipfs-cluster/ipfs-cluster#353](https://github.com/ipfs-cluster/ipfs-cluster/issues/353)
-  * The REST API client now provides an `IPFS()` method which returns a `go-ipfs-api` shell instance pointing to the proxy endpoint | [ipfs-cluster/ipfs-cluster#269](https://github.com/ipfs-cluster/ipfs-cluster/issues/269) | [ipfs-cluster/ipfs-cluster#356](https://github.com/ipfs-cluster/ipfs-cluster/issues/356)
-  * REST http-api-over-libp2p. Server, client and `ipfs-cluster-ctl` support added | [ipfs-cluster/ipfs-cluster#305](https://github.com/ipfs-cluster/ipfs-cluster/issues/305) | [ipfs-cluster/ipfs-cluster#349](https://github.com/ipfs-cluster/ipfs-cluster/issues/349)
-  * Added support for priority pins and non-recursive pins (sharding-related) | [ipfs-cluster/ipfs-cluster#341](https://github.com/ipfs-cluster/ipfs-cluster/issues/341) | [ipfs-cluster/ipfs-cluster#342](https://github.com/ipfs-cluster/ipfs-cluster/issues/342)
-  * Documentation fixes | [ipfs-cluster/ipfs-cluster#328](https://github.com/ipfs-cluster/ipfs-cluster/issues/328) | [ipfs-cluster/ipfs-cluster#357](https://github.com/ipfs-cluster/ipfs-cluster/issues/357)
-
-* Bugfixes
-  * Print lock path in logs | [ipfs-cluster/ipfs-cluster#332](https://github.com/ipfs-cluster/ipfs-cluster/issues/332) | [ipfs-cluster/ipfs-cluster#333](https://github.com/ipfs-cluster/ipfs-cluster/issues/333)
-
-There are no breaking API changes and all configurations should be backwards compatible. The `api/rest/client` provides a new `IPFS()` method.
-
-We recommend updating the `service.json` configurations to include all the new configuration options:
-
-* The `pin_method` option has been added to the `ipfshttp` section. It supports `refs` and `pin` (default) values. Use `refs` for parallel pinning, but only if you don't run automatic GC on your ipfs nodes.
-* The `concurrent_pins` option has been added to the `maptracker` section. It is only useful when `pin_method` is set to `refs`.
-* The `listen_multiaddress` option in the `restapi` section should be renamed to `http_listen_multiaddress`.
-
-This release will require a **state upgrade**. Run `ipfs-cluster-service state upgrade` on all your peers, or start the cluster with `ipfs-cluster-service daemon --upgrade`.
-
----
-
-### v0.3.4 - 2018-02-20
-
-This release fixes the pre-built binaries.
-
-* Bugfixes
-  * Pre-built binaries panic on start | [ipfs-cluster/ipfs-cluster#320](https://github.com/ipfs-cluster/ipfs-cluster/issues/320)
-
----
-
-### v0.3.3 - 2018-02-12
-
-This release includes additional `ipfs-cluster-service state` subcommands and the connectivity graph feature.
-
-* Features
-  * `ipfs-cluster-service daemon --upgrade` allows automatically running migrations before starting | [ipfs-cluster/ipfs-cluster#300](https://github.com/ipfs-cluster/ipfs-cluster/issues/300) | [ipfs-cluster/ipfs-cluster#307](https://github.com/ipfs-cluster/ipfs-cluster/issues/307)
-  * `ipfs-cluster-service state version` reports the shared state format version | [ipfs-cluster/ipfs-cluster#298](https://github.com/ipfs-cluster/ipfs-cluster/issues/298) | [ipfs-cluster/ipfs-cluster#307](https://github.com/ipfs-cluster/ipfs-cluster/issues/307)
-  * `ipfs-cluster-service health graph` generates a .dot graph file of cluster connectivity | [ipfs-cluster/ipfs-cluster#17](https://github.com/ipfs-cluster/ipfs-cluster/issues/17) | [ipfs-cluster/ipfs-cluster#291](https://github.com/ipfs-cluster/ipfs-cluster/issues/291) | [ipfs-cluster/ipfs-cluster#311](https://github.com/ipfs-cluster/ipfs-cluster/issues/311)
-
-* Bugfixes
-  * Do not upgrade state if it is already up to date | [ipfs-cluster/ipfs-cluster#296](https://github.com/ipfs-cluster/ipfs-cluster/issues/296) | [ipfs-cluster/ipfs-cluster#307](https://github.com/ipfs-cluster/ipfs-cluster/issues/307)
-  * Fix `ipfs-cluster-service daemon` failing with an `unknown allocation strategy` error | [ipfs-cluster/ipfs-cluster#314](https://github.com/ipfs-cluster/ipfs-cluster/issues/314) | [ipfs-cluster/ipfs-cluster#315](https://github.com/ipfs-cluster/ipfs-cluster/issues/315)
-
-APIs have not changed in this release. The `/health/graph` endpoint has been added.
-
----
-
-### v0.3.2 - 2018-01-25
-
-This release includes a number of bugfixes regarding the upgrade and import of state, along with two important features:
- * Commands to export and import the internal cluster state: these make it easy to perform human-readable dumps of the shared cluster state while offline, and to eventually restore it in a different peer or cluster (see the sketch below).
- * The introduction of `replication_factor_min` and `replication_factor_max` parameters for every Pin (along with the deprecation of `replication_factor`). The defaults are specified in the configuration. For more information on the usage and behavior of these new options, check the IPFS Cluster guide.
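-As a rough sketch, the offline dump/restore flow could look as follows (the command names are from this release, but exact flags and defaults may differ between versions; the peer should be stopped while doing this):
-
-```
-ipfs-cluster-service state export > cluster-state.json
-ipfs-cluster-service state import cluster-state.json
-```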
-
-* Features
-  * New `ipfs-cluster-service state export/import/cleanup` commands | [ipfs-cluster/ipfs-cluster#240](https://github.com/ipfs-cluster/ipfs-cluster/issues/240) | [ipfs-cluster/ipfs-cluster#290](https://github.com/ipfs-cluster/ipfs-cluster/issues/290)
-  * New min/max replication factor control | [ipfs-cluster/ipfs-cluster#277](https://github.com/ipfs-cluster/ipfs-cluster/issues/277) | [ipfs-cluster/ipfs-cluster#292](https://github.com/ipfs-cluster/ipfs-cluster/issues/292)
-  * Improved migration code | [ipfs-cluster/ipfs-cluster#283](https://github.com/ipfs-cluster/ipfs-cluster/issues/283)
-  * `ipfs-cluster-service version` output simplified (see below) | [ipfs-cluster/ipfs-cluster#274](https://github.com/ipfs-cluster/ipfs-cluster/issues/274)
-  * Testing improvements:
-    * Added tests for Dockerfiles | [ipfs-cluster/ipfs-cluster#200](https://github.com/ipfs-cluster/ipfs-cluster/issues/200) | [ipfs-cluster/ipfs-cluster#282](https://github.com/ipfs-cluster/ipfs-cluster/issues/282)
-    * Enabled Jenkins testing and made it work | [ipfs-cluster/ipfs-cluster#256](https://github.com/ipfs-cluster/ipfs-cluster/issues/256) | [ipfs-cluster/ipfs-cluster#294](https://github.com/ipfs-cluster/ipfs-cluster/issues/294)
-  * Documentation improvements:
-    * The guide contains more details on state upgrade procedures | [ipfs-cluster/ipfs-cluster#270](https://github.com/ipfs-cluster/ipfs-cluster/issues/270)
-    * `ipfs-cluster-ctl` exit statuses are documented in the README | [ipfs-cluster/ipfs-cluster#178](https://github.com/ipfs-cluster/ipfs-cluster/issues/178)
-
-* Bugfixes
-  * Force cleanup after sharness tests | [ipfs-cluster/ipfs-cluster#181](https://github.com/ipfs-cluster/ipfs-cluster/issues/181) | [ipfs-cluster/ipfs-cluster#288](https://github.com/ipfs-cluster/ipfs-cluster/issues/288)
-  * Fix state version validation on start | [ipfs-cluster/ipfs-cluster#293](https://github.com/ipfs-cluster/ipfs-cluster/issues/293)
-  * Wait until the last index is applied before attempting a snapshot on shutdown | [ipfs-cluster/ipfs-cluster#275](https://github.com/ipfs-cluster/ipfs-cluster/issues/275)
-  * Snaps from master not pushed due to bad credentials
-  * Fix overpinning or underpinning of CIDs after re-join | [ipfs-cluster/ipfs-cluster#222](https://github.com/ipfs-cluster/ipfs-cluster/issues/222)
-  * Fix unmarshaling state on top of an existing one | [ipfs-cluster/ipfs-cluster#297](https://github.com/ipfs-cluster/ipfs-cluster/issues/297)
-  * Fix catching up on imported state | [ipfs-cluster/ipfs-cluster#297](https://github.com/ipfs-cluster/ipfs-cluster/issues/297)
-
-This release is compatible with previous versions of ipfs-cluster on the API level, with the exception of the `ipfs-cluster-service version` command, which returns `x.x.x-shortcommit` rather than `ipfs-cluster-service version 0.3.1`. The previous output format is still available via `ipfs-cluster-service --version`.
-
-The `replication_factor` option is deprecated, but still supported, and will serve as a shortcut to set both `replication_factor_min` and `replication_factor_max` to the same value. This affects the configuration file, the REST API and the `ipfs-cluster-ctl pin add` command.
-
----
-
-### v0.3.1 - 2017-12-11
-
-This release includes changes around the consensus state management, so that upgrades can be performed when the internal format changes. It also comes with several features and changes to support a live deployment and integration with the IPFS pin-bot, including a REST API client for Go.
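-For a feel of what integrating against the Go client looks like, here is a minimal, hypothetical sketch (the `NewDefaultClient` constructor and `Config` struct shown here are assumptions borrowed from later versions of the `api/rest/client` module; the exact API of this release differed):
-
-```
-package main
-
-import (
-	"context"
-	"fmt"
-
-	client "github.com/ipfs-cluster/ipfs-cluster/api/rest/client"
-)
-
-func main() {
-	// Connect to the REST API on its default local address.
-	// Constructor and method names are assumptions, not this release's exact API.
-	c, err := client.NewDefaultClient(&client.Config{})
-	if err != nil {
-		panic(err)
-	}
-	// Ask the cluster peer for its identity.
-	id, err := c.ID(context.Background())
-	if err != nil {
-		panic(err)
-	}
-	fmt.Println("connected to cluster peer:", id.ID)
-}
-```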
-
-* Features
-  * `ipfs-cluster-service state upgrade` | [ipfs-cluster/ipfs-cluster#194](https://github.com/ipfs-cluster/ipfs-cluster/issues/194)
-  * The `ipfs-cluster-test` Docker image runs with `ipfs:master` | [ipfs-cluster/ipfs-cluster#155](https://github.com/ipfs-cluster/ipfs-cluster/issues/155) | [ipfs-cluster/ipfs-cluster#259](https://github.com/ipfs-cluster/ipfs-cluster/issues/259)
-  * The `ipfs-cluster` Docker image only runs `ipfs-cluster-service` (and no longer the ipfs daemon) | [ipfs-cluster/ipfs-cluster#197](https://github.com/ipfs-cluster/ipfs-cluster/issues/197) | [ipfs-cluster/ipfs-cluster#155](https://github.com/ipfs-cluster/ipfs-cluster/issues/155) | [ipfs-cluster/ipfs-cluster#259](https://github.com/ipfs-cluster/ipfs-cluster/issues/259)
-  * Support for DNS multiaddresses for cluster peers | [ipfs-cluster/ipfs-cluster#155](https://github.com/ipfs-cluster/ipfs-cluster/issues/155) | [ipfs-cluster/ipfs-cluster#259](https://github.com/ipfs-cluster/ipfs-cluster/issues/259)
-  * Add configuration section and options for `pin_tracker` | [ipfs-cluster/ipfs-cluster#155](https://github.com/ipfs-cluster/ipfs-cluster/issues/155) | [ipfs-cluster/ipfs-cluster#259](https://github.com/ipfs-cluster/ipfs-cluster/issues/259)
-  * Add a `local` flag to the Status, Sync and Recover endpoints, which allows running these operations only on the peer receiving the request | [ipfs-cluster/ipfs-cluster#155](https://github.com/ipfs-cluster/ipfs-cluster/issues/155) | [ipfs-cluster/ipfs-cluster#259](https://github.com/ipfs-cluster/ipfs-cluster/issues/259)
-  * Add Pin names | [ipfs-cluster/ipfs-cluster#249](https://github.com/ipfs-cluster/ipfs-cluster/issues/249)
-  * Add Peer names | [ipfs-cluster/ipfs-cluster#250](https://github.com/ipfs-cluster/ipfs-cluster/issues/250)
-  * New REST API client module `github.com/ipfs-cluster/ipfs-cluster/api/rest/client` allows integrating against cluster | [ipfs-cluster/ipfs-cluster#260](https://github.com/ipfs-cluster/ipfs-cluster/issues/260) | [ipfs-cluster/ipfs-cluster#263](https://github.com/ipfs-cluster/ipfs-cluster/issues/263) | [ipfs-cluster/ipfs-cluster#266](https://github.com/ipfs-cluster/ipfs-cluster/issues/266)
-  * A few rounds addressing code quality issues | [ipfs-cluster/ipfs-cluster#264](https://github.com/ipfs-cluster/ipfs-cluster/issues/264)
-
-This release should stay backwards compatible with the previous one. Nevertheless, some REST API endpoints now take the `local` flag, and matching new public Go functions have been added (`RecoverAllLocal`, `SyncAllLocal`...).
-
----
-
-### v0.3.0 - 2017-11-15
-
-This release introduces Raft 1.0.0 and incorporates deep changes to the management of the cluster peerset.
-
-* Features
-  * Upgrade Raft to 1.0.0 | [ipfs-cluster/ipfs-cluster#194](https://github.com/ipfs-cluster/ipfs-cluster/issues/194) | [ipfs-cluster/ipfs-cluster#196](https://github.com/ipfs-cluster/ipfs-cluster/issues/196)
-  * Support Snaps | [ipfs-cluster/ipfs-cluster#234](https://github.com/ipfs-cluster/ipfs-cluster/issues/234) | [ipfs-cluster/ipfs-cluster#228](https://github.com/ipfs-cluster/ipfs-cluster/issues/228) | [ipfs-cluster/ipfs-cluster#232](https://github.com/ipfs-cluster/ipfs-cluster/issues/232)
-  * Rotating backups for ipfs-cluster-data | [ipfs-cluster/ipfs-cluster#233](https://github.com/ipfs-cluster/ipfs-cluster/issues/233)
-  * Bring documentation up to date with the code | [ipfs-cluster/ipfs-cluster#223](https://github.com/ipfs-cluster/ipfs-cluster/issues/223)
-
-* Bugfixes
-  * Fix docker startup | [ipfs-cluster/ipfs-cluster#216](https://github.com/ipfs-cluster/ipfs-cluster/issues/216) | [ipfs-cluster/ipfs-cluster#217](https://github.com/ipfs-cluster/ipfs-cluster/issues/217)
-  * Fix configuration save | [ipfs-cluster/ipfs-cluster#213](https://github.com/ipfs-cluster/ipfs-cluster/issues/213) | [ipfs-cluster/ipfs-cluster#214](https://github.com/ipfs-cluster/ipfs-cluster/issues/214)
-  * Forward progress updates with IPFS-Proxy | [ipfs-cluster/ipfs-cluster#224](https://github.com/ipfs-cluster/ipfs-cluster/issues/224) | [ipfs-cluster/ipfs-cluster#231](https://github.com/ipfs-cluster/ipfs-cluster/issues/231)
-  * Delay ipfs connect swarms on boot and safeguard against a panic condition | [ipfs-cluster/ipfs-cluster#238](https://github.com/ipfs-cluster/ipfs-cluster/issues/238)
-  * Multiple minor fixes | [ipfs-cluster/ipfs-cluster#236](https://github.com/ipfs-cluster/ipfs-cluster/issues/236)
-    * Avoid shutting down consensus in the middle of a commit
-    * Return an ID containing current peers in PeerAdd
-    * Do not shut down the libp2p host in the middle of peer removal
-    * Send cluster addresses to the new peer before adding it
-    * Wait for configuration save on init
-    * Fix error message when not enough allocations exist for a pin
-
-This release introduces some changes affecting the configuration file and some breaking changes affecting `go` and the REST APIs:
-
-* The `consensus.raft` section of the configuration has new options but should be backwards compatible.
-* The `Consensus` component interface has changed: `LogAddPeer` and `LogRmPeer` have been replaced by `AddPeer` and `RmPeer`. It additionally provides `Clean` and `Peers` methods. The `consensus/raft` implementation has been updated accordingly.
-* The `ClusterPeers` key of the `api.ID` object (used in the REST API, among others) is now a list of peer IDs, and not a list of multiaddresses as before. The object includes a new key, `ClusterPeersAddresses`, which includes the multiaddresses.
-* Note that the `--bootstrap` and `--leave` flags, when calling `ipfs-cluster-service`, will be stored permanently in the configuration (see [ipfs-cluster/ipfs-cluster#235](https://github.com/ipfs-cluster/ipfs-cluster/issues/235)).
-
----
-
-### v0.2.1 - 2017-10-26
-
-This is a maintenance release with some important bugfixes.
-
-* Fixes:
-  * Dockerfile runs `ipfs-cluster-service` instead of `ctl` | [ipfs-cluster/ipfs-cluster#194](https://github.com/ipfs-cluster/ipfs-cluster/issues/194) | [ipfs-cluster/ipfs-cluster#196](https://github.com/ipfs-cluster/ipfs-cluster/issues/196)
-  * Peers and bootstrap entries in the configuration are ignored | [ipfs-cluster/ipfs-cluster#203](https://github.com/ipfs-cluster/ipfs-cluster/issues/203) | [ipfs-cluster/ipfs-cluster#204](https://github.com/ipfs-cluster/ipfs-cluster/issues/204)
-  * Informers do not work on 32-bit architectures | [ipfs-cluster/ipfs-cluster#202](https://github.com/ipfs-cluster/ipfs-cluster/issues/202) | [ipfs-cluster/ipfs-cluster#205](https://github.com/ipfs-cluster/ipfs-cluster/issues/205)
-  * Replication factor entry in the configuration is ignored | [ipfs-cluster/ipfs-cluster#208](https://github.com/ipfs-cluster/ipfs-cluster/issues/208) | [ipfs-cluster/ipfs-cluster#209](https://github.com/ipfs-cluster/ipfs-cluster/issues/209)
-
-The fix for 32-bit architectures has required a change in the `IPFSConnector` interface (`FreeSpace()` and `Reposize()` return `uint64` now). The current implementation in the `ipfshttp` module has been updated accordingly.
-
-
----
-
-### v0.2.0 - 2017-10-23
-
-* Features:
-  * Basic authentication support added to API component | [ipfs-cluster/ipfs-cluster#121](https://github.com/ipfs-cluster/ipfs-cluster/issues/121) | [ipfs-cluster/ipfs-cluster#147](https://github.com/ipfs-cluster/ipfs-cluster/issues/147) | [ipfs-cluster/ipfs-cluster#179](https://github.com/ipfs-cluster/ipfs-cluster/issues/179)
-  * Copy peers to bootstrap when leaving a cluster | [ipfs-cluster/ipfs-cluster#170](https://github.com/ipfs-cluster/ipfs-cluster/issues/170) | [ipfs-cluster/ipfs-cluster#112](https://github.com/ipfs-cluster/ipfs-cluster/issues/112)
-  * New configuration format | [ipfs-cluster/ipfs-cluster#162](https://github.com/ipfs-cluster/ipfs-cluster/issues/162) | [ipfs-cluster/ipfs-cluster#177](https://github.com/ipfs-cluster/ipfs-cluster/issues/177)
-  * Freespace disk metric implementation. It's now the default. | [ipfs-cluster/ipfs-cluster#142](https://github.com/ipfs-cluster/ipfs-cluster/issues/142) | [ipfs-cluster/ipfs-cluster#99](https://github.com/ipfs-cluster/ipfs-cluster/issues/99)
-
-* Fixes:
-  * IPFS Connector should use only POST | [ipfs-cluster/ipfs-cluster#176](https://github.com/ipfs-cluster/ipfs-cluster/issues/176) | [ipfs-cluster/ipfs-cluster#161](https://github.com/ipfs-cluster/ipfs-cluster/issues/161)
-  * `ipfs-cluster-ctl` exit status with error responses | [ipfs-cluster/ipfs-cluster#174](https://github.com/ipfs-cluster/ipfs-cluster/issues/174)
-  * Sharness tests and update testing container | [ipfs-cluster/ipfs-cluster#171](https://github.com/ipfs-cluster/ipfs-cluster/issues/171)
-  * Update Dockerfiles | [ipfs-cluster/ipfs-cluster#154](https://github.com/ipfs-cluster/ipfs-cluster/issues/154) | [ipfs-cluster/ipfs-cluster#185](https://github.com/ipfs-cluster/ipfs-cluster/issues/185)
-  * `ipfs-cluster-service`: Do not run service with unknown subcommands | [ipfs-cluster/ipfs-cluster#186](https://github.com/ipfs-cluster/ipfs-cluster/issues/186)
-
-This release introduces some breaking changes affecting configuration files and `go` integrations:
-
-* Config: The old configuration format is no longer valid and the cluster will fail to start from it. The configuration file needs to be re-initialized with `ipfs-cluster-service init`.
-* Go: The `restapi` component has been renamed to `rest` and some of its public methods have been renamed.
-* Go: Initializers (`New(...)`) for most components have changed to accept a `Config` object. Some initializers have been removed. - ---- - -Note, when adding changelog entries, write links to issues as `@` and then replace them with links with the following command: - -``` -sed -i -r 's/@([0-9]+)/[ipfs\/ipfs-cluster#\1](https:\/\/github.com\/ipfs\/ipfs-cluster\/issues\/\1)/g' CHANGELOG.md -``` diff --git a/packages/networking/ipfs-cluster/CONTRIBUTING.md b/packages/networking/ipfs-cluster/CONTRIBUTING.md deleted file mode 100644 index 8529f31..0000000 --- a/packages/networking/ipfs-cluster/CONTRIBUTING.md +++ /dev/null @@ -1,3 +0,0 @@ -# Guidelines for contributing - -Please see https://ipfscluster.io/developer/contribute . diff --git a/packages/networking/ipfs-cluster/COPYRIGHT b/packages/networking/ipfs-cluster/COPYRIGHT deleted file mode 100644 index 771e6f7..0000000 --- a/packages/networking/ipfs-cluster/COPYRIGHT +++ /dev/null @@ -1,3 +0,0 @@ -Copyright 2019. Protocol Labs, Inc. - -This library is dual-licensed under Apache 2.0 and MIT terms. diff --git a/packages/networking/ipfs-cluster/LICENSE b/packages/networking/ipfs-cluster/LICENSE deleted file mode 100644 index 0020f2a..0000000 --- a/packages/networking/ipfs-cluster/LICENSE +++ /dev/null @@ -1,5 +0,0 @@ -Dual-licensed under MIT and ASLv2, by way of the [Permissive License -Stack](https://protocol.ai/blog/announcing-the-permissive-license-stack/). - -Apache-2.0: https://www.apache.org/licenses/license-2.0 -MIT: https://www.opensource.org/licenses/mit diff --git a/packages/networking/ipfs-cluster/LICENSE-APACHE b/packages/networking/ipfs-cluster/LICENSE-APACHE deleted file mode 100644 index 22608cf..0000000 --- a/packages/networking/ipfs-cluster/LICENSE-APACHE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2020. Protocol Labs, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/packages/networking/ipfs-cluster/LICENSE-MIT b/packages/networking/ipfs-cluster/LICENSE-MIT deleted file mode 100644 index c6134ad..0000000 --- a/packages/networking/ipfs-cluster/LICENSE-MIT +++ /dev/null @@ -1,19 +0,0 @@ -Copyright 2020. Protocol Labs, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/packages/networking/ipfs-cluster/Makefile b/packages/networking/ipfs-cluster/Makefile deleted file mode 100644 index c988da5..0000000 --- a/packages/networking/ipfs-cluster/Makefile +++ /dev/null @@ -1,82 +0,0 @@ -sharness = sharness/lib/sharness - -export GO111MODULE := on - -all: build -clean: rwundo clean_sharness - $(MAKE) -C cmd/ipfs-cluster-service clean - $(MAKE) -C cmd/ipfs-cluster-ctl clean - $(MAKE) -C cmd/ipfs-cluster-follow clean - @rm -rf ./test/testingData - @rm -rf ./compose - -install: - $(MAKE) -C cmd/ipfs-cluster-service install - $(MAKE) -C cmd/ipfs-cluster-ctl install - $(MAKE) -C cmd/ipfs-cluster-follow install - -build: - $(MAKE) -C cmd/ipfs-cluster-service build - $(MAKE) -C cmd/ipfs-cluster-ctl build - $(MAKE) -C cmd/ipfs-cluster-follow build - -service: - $(MAKE) -C cmd/ipfs-cluster-service ipfs-cluster-service -ctl: - $(MAKE) -C cmd/ipfs-cluster-ctl ipfs-cluster-ctl -follow: - $(MAKE) -C cmd/ipfs-cluster-follow ipfs-cluster-follow - -check: - go vet ./... - staticcheck --checks all ./... - misspell -error -locale US . - -test: - go test -v ./... - -test_sharness: $(sharness) - @sh sharness/run-sharness-tests.sh - -test_problem: - go test -timeout 20m -loglevel "DEBUG" -v -run $(problematic_test) - -$(sharness): - @echo "Downloading sharness" - @curl -L -s -o sharness/lib/sharness.tar.gz http://github.com/chriscool/sharness/archive/28c7490f5cdf1e95a8ebebf8b06ed5588db13875.tar.gz - @cd sharness/lib; tar -zxf sharness.tar.gz; cd ../.. - @mv sharness/lib/sharness-28c7490f5cdf1e95a8ebebf8b06ed5588db13875 sharness/lib/sharness - @rm sharness/lib/sharness.tar.gz - -clean_sharness: - @rm -rf ./sharness/test-results - @rm -rf ./sharness/lib/sharness - @rm -rf sharness/trash\ directory* - -docker: - docker build -t cluster-image -f Dockerfile . - docker run --name tmp-make-cluster -d --rm cluster-image && sleep 4 - docker exec tmp-make-cluster sh -c "ipfs-cluster-ctl version" - docker exec tmp-make-cluster sh -c "ipfs-cluster-service -v" - docker kill tmp-make-cluster - - docker build -t cluster-image-test -f Dockerfile-test . 
-	docker run --name tmp-make-cluster-test -d --rm cluster-image-test && sleep 4
-	docker exec tmp-make-cluster-test sh -c "ipfs-cluster-ctl version"
-	docker exec tmp-make-cluster-test sh -c "ipfs-cluster-service -v"
-	docker kill tmp-make-cluster-test
-
-docker-compose:
-	mkdir -p compose/ipfs0 compose/ipfs1 compose/cluster0 compose/cluster1
-	chmod -R 0777 compose
-	CLUSTER_SECRET=$(shell od -vN 32 -An -tx1 /dev/urandom | tr -d ' \n') docker-compose up -d
-	sleep 35
-	docker exec cluster0 ipfs-cluster-ctl peers ls
-	docker exec cluster1 ipfs-cluster-ctl peers ls
-	docker exec cluster0 ipfs-cluster-ctl peers ls | grep -o "Sees 2 other peers" | uniq -c | grep 3
-	docker exec cluster1 ipfs-cluster-ctl peers ls | grep -o "Sees 2 other peers" | uniq -c | grep 3
-	docker-compose down
-
-prcheck: check service ctl follow test
-
-.PHONY: all test test_sharness clean_sharness rw rwundo publish service ctl install clean docker
diff --git a/packages/networking/ipfs-cluster/README.md b/packages/networking/ipfs-cluster/README.md
deleted file mode 100644
index 202c6d6..0000000
--- a/packages/networking/ipfs-cluster/README.md
+++ /dev/null
@@ -1,73 +0,0 @@
-# IPFS Cluster
-
-[![Made by](https://img.shields.io/badge/By-Protocol%20Labs-000000.svg?style=flat-square)](https://protocol.ai)
-[![Main project](https://img.shields.io/badge/project-ipfs--cluster-ef5c43.svg?style=flat-square)](http://github.com/ipfs-cluster)
-[![Discord](https://img.shields.io/badge/forum-discuss.ipfs.io-f9a035.svg?style=flat-square)](https://discuss.ipfs.io/c/help/help-ipfs-cluster/24)
-[![Matrix channel](https://img.shields.io/badge/matrix-%23ipfs--cluster-3c8da0.svg?style=flat-square)](https://app.element.io/#/room/#ipfs-cluster:ipfs.io)
-[![pkg.go.dev](https://pkg.go.dev/badge/github.com/ipfs-cluster/ipfs-cluster)](https://pkg.go.dev/github.com/ipfs-cluster/ipfs-cluster)
-[![Go Report Card](https://goreportcard.com/badge/github.com/ipfs-cluster/ipfs-cluster)](https://goreportcard.com/report/github.com/ipfs-cluster/ipfs-cluster)
-[![codecov](https://codecov.io/gh/ipfs-cluster/ipfs-cluster/branch/master/graph/badge.svg)](https://codecov.io/gh/ipfs-cluster/ipfs-cluster)
-
-> Pinset orchestration for IPFS
-

-(IPFS Cluster logo)

-
-[IPFS Cluster](https://ipfscluster.io) provides data orchestration across a swarm of IPFS daemons by allocating, replicating and tracking a global pinset distributed among multiple peers.
-
-There are 3 different applications:
-
-* A cluster peer application: `ipfs-cluster-service`, to be run along with `go-ipfs` as a sidecar.
-* A client CLI application: `ipfs-cluster-ctl`, which allows easily interacting with the peer's HTTP API.
-* An additional "follower" peer application: `ipfs-cluster-follow`, focused on simplifying the process of configuring and running follower peers.
-
----
-
-### Are you using IPFS Cluster?
-
-Please participate in the [IPFS Cluster user registry](https://docs.google.com/forms/d/e/1FAIpQLSdWF5aXNXrAK_sCyu1eVv2obTaKVO3Ac5dfgl2r5_IWcizGRg/viewform).
-
----
-
-## Table of Contents
-
-- [Documentation](#documentation)
-- [News & Roadmap](#news--roadmap)
-- [Install](#install)
-- [Usage](#usage)
-- [Contribute](#contribute)
-- [License](#license)
-
-
-## Documentation
-
-Please visit https://ipfscluster.io/documentation/ to access user documentation, guides and any other resources, including detailed **download** and **usage** instructions.
-
-## News & Roadmap
-
-We regularly post project updates to https://ipfscluster.io/news/ .
-
-The most up-to-date *Roadmap* is available at https://ipfscluster.io/roadmap/ .
-
-## Install
-
-Instructions for different installation methods (including from source) are available at https://ipfscluster.io/download .
-
-## Usage
-
-Extensive usage information is provided at https://ipfscluster.io/documentation/ , including:
-
-* [Docs for `ipfs-cluster-service`](https://ipfscluster.io/documentation/reference/service/)
-* [Docs for `ipfs-cluster-ctl`](https://ipfscluster.io/documentation/reference/ctl/)
-* [Docs for `ipfs-cluster-follow`](https://ipfscluster.io/documentation/reference/follow/)
-
-## Contribute
-
-PRs accepted. As part of the IPFS project, we have some [contribution guidelines](https://ipfscluster.io/support/#contribution-guidelines).
-
-## License
-
-This library is dual-licensed under Apache 2.0 and MIT terms.
-
-© 2022. Protocol Labs, Inc.
diff --git a/packages/networking/ipfs-cluster/add_test.go b/packages/networking/ipfs-cluster/add_test.go
deleted file mode 100644
index 6ae57b4..0000000
--- a/packages/networking/ipfs-cluster/add_test.go
+++ /dev/null
@@ -1,298 +0,0 @@
-package ipfscluster
-
-// This file has tests for Add* using multiple cluster peers.
- -import ( - "context" - "mime/multipart" - "sync" - "testing" - "time" - - files "github.com/ipfs/go-ipfs-files" - "github.com/ipfs-cluster/ipfs-cluster/adder" - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/test" - peer "github.com/libp2p/go-libp2p/core/peer" -) - -func TestAdd(t *testing.T) { - ctx := context.Background() - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - sth := test.NewShardingTestHelper() - defer sth.Clean(t) - - waitForLeaderAndMetrics(t, clusters) - - t.Run("default", func(t *testing.T) { - params := api.DefaultAddParams() - params.Shard = false - params.Name = "testlocal" - mfr, closer := sth.GetTreeMultiReader(t) - defer closer.Close() - r := multipart.NewReader(mfr, mfr.Boundary()) - ci, err := clusters[0].AddFile(context.Background(), r, params) - if err != nil { - t.Fatal(err) - } - if ci.String() != test.ShardingDirBalancedRootCID { - t.Fatal("unexpected root CID for local add") - } - - // We need to sleep a lot because it takes time to - // catch up on a first/single pin on crdts - time.Sleep(10 * time.Second) - - f := func(t *testing.T, c *Cluster) { - pin := c.StatusLocal(ctx, ci) - if pin.Error != "" { - t.Error(pin.Error) - } - if pin.Status != api.TrackerStatusPinned { - t.Error("item should be pinned and is", pin.Status) - } - } - - runF(t, clusters, f) - }) - - t.Run("local_one_allocation", func(t *testing.T) { - params := api.DefaultAddParams() - params.Shard = false - params.Name = "testlocal" - params.ReplicationFactorMin = 1 - params.ReplicationFactorMax = 1 - params.Local = true - mfr, closer := sth.GetTreeMultiReader(t) - defer closer.Close() - r := multipart.NewReader(mfr, mfr.Boundary()) - ci, err := clusters[2].AddFile(context.Background(), r, params) - if err != nil { - t.Fatal(err) - } - if ci.String() != test.ShardingDirBalancedRootCID { - t.Fatal("unexpected root CID for local add") - } - - // We need to sleep a lot because it takes time to - // catch up on a first/single pin on crdts - time.Sleep(10 * time.Second) - - f := func(t *testing.T, c *Cluster) { - pin := c.StatusLocal(ctx, ci) - if pin.Error != "" { - t.Error(pin.Error) - } - switch c.id { - case clusters[2].id: - if pin.Status != api.TrackerStatusPinned { - t.Error("item should be pinned and is", pin.Status) - } - default: - if pin.Status != api.TrackerStatusRemote { - t.Errorf("item should only be allocated to cluster2") - } - } - } - - runF(t, clusters, f) - }) -} - -func TestAddWithUserAllocations(t *testing.T) { - ctx := context.Background() - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - sth := test.NewShardingTestHelper() - defer sth.Clean(t) - - waitForLeaderAndMetrics(t, clusters) - - t.Run("local", func(t *testing.T) { - params := api.DefaultAddParams() - params.ReplicationFactorMin = 2 - params.ReplicationFactorMax = 2 - params.UserAllocations = []peer.ID{clusters[0].id, clusters[1].id} - params.Shard = false - params.Name = "testlocal" - mfr, closer := sth.GetTreeMultiReader(t) - defer closer.Close() - r := multipart.NewReader(mfr, mfr.Boundary()) - ci, err := clusters[0].AddFile(context.Background(), r, params) - if err != nil { - t.Fatal(err) - } - - pinDelay() - - f := func(t *testing.T, c *Cluster) { - if c == clusters[0] || c == clusters[1] { - pin := c.StatusLocal(ctx, ci) - if pin.Error != "" { - t.Error(pin.Error) - } - if pin.Status != api.TrackerStatusPinned { - t.Error("item should be pinned and is", pin.Status) - } - } else { - pin := 
c.StatusLocal(ctx, ci) - if pin.Status != api.TrackerStatusRemote { - t.Error("expected tracker status remote") - } - } - } - - runF(t, clusters, f) - }) -} - -func TestAddPeerDown(t *testing.T) { - ctx := context.Background() - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - sth := test.NewShardingTestHelper() - defer sth.Clean(t) - err := clusters[0].Shutdown(ctx) - if err != nil { - t.Fatal(err) - } - - waitForLeaderAndMetrics(t, clusters) - - t.Run("local", func(t *testing.T) { - params := api.DefaultAddParams() - params.Shard = false - params.Name = "testlocal" - mfr, closer := sth.GetTreeMultiReader(t) - defer closer.Close() - r := multipart.NewReader(mfr, mfr.Boundary()) - ci, err := clusters[1].AddFile(context.Background(), r, params) - if err != nil { - t.Fatal(err) - } - if ci.String() != test.ShardingDirBalancedRootCID { - t.Fatal("unexpected root CID for local add") - } - - // We need to sleep a lot because it takes time to - // catch up on a first/single pin on crdts - time.Sleep(10 * time.Second) - - f := func(t *testing.T, c *Cluster) { - if c.id == clusters[0].id { - return - } - pin := c.StatusLocal(ctx, ci) - if pin.Error != "" { - t.Error(pin.Error) - } - if pin.Status != api.TrackerStatusPinned { - t.Error("item should be pinned") - } - } - - runF(t, clusters, f) - }) -} - -func TestAddOnePeerFails(t *testing.T) { - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - sth := test.NewShardingTestHelper() - defer sth.Clean(t) - - waitForLeaderAndMetrics(t, clusters) - - t.Run("local", func(t *testing.T) { - params := api.DefaultAddParams() - params.Shard = false - params.Name = "testlocal" - lg, closer := sth.GetRandFileReader(t, 100000) // 100 MB - defer closer.Close() - - mr := files.NewMultiFileReader(lg, true) - r := multipart.NewReader(mr, mr.Boundary()) - - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - _, err := clusters[0].AddFile(context.Background(), r, params) - if err != nil { - t.Error(err) - } - }() - - // Disconnect 1 cluster (the last). Things should keep working. - // Important that we close the hosts, otherwise the RPC - // Servers keep working along with BlockPuts. - time.Sleep(100 * time.Millisecond) - c := clusters[nClusters-1] - c.Shutdown(context.Background()) - c.dht.Close() - c.host.Close() - wg.Wait() - }) -} - -func TestAddAllPeersFail(t *testing.T) { - ctx := context.Background() - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - sth := test.NewShardingTestHelper() - defer sth.Clean(t) - - waitForLeaderAndMetrics(t, clusters) - - t.Run("local", func(t *testing.T) { - // Prevent added content to be allocated to cluster 0 - // as it is already going to have something. 
- _, err := clusters[0].Pin(ctx, test.Cid1, api.PinOptions{ - ReplicationFactorMin: 1, - ReplicationFactorMax: 1, - UserAllocations: []peer.ID{clusters[0].host.ID()}, - }) - if err != nil { - t.Fatal(err) - } - - ttlDelay() - - params := api.DefaultAddParams() - params.Shard = false - params.Name = "testlocal" - // Allocate to every peer except 0 (which already has a pin) - params.PinOptions.ReplicationFactorMax = nClusters - 1 - params.PinOptions.ReplicationFactorMin = nClusters - 1 - - lg, closer := sth.GetRandFileReader(t, 100000) // 100 MB - defer closer.Close() - mr := files.NewMultiFileReader(lg, true) - r := multipart.NewReader(mr, mr.Boundary()) - - // var cid cid.Cid - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - _, err := clusters[0].AddFile(context.Background(), r, params) - if err != adder.ErrBlockAdder { - t.Error("expected ErrBlockAdder. Got: ", err) - } - }() - - time.Sleep(100 * time.Millisecond) - - // Shutdown all clusters except 0 to see the right error. - // Important that we shut down the hosts, otherwise - // the RPC Servers keep working along with BlockPuts. - // Note that this kills raft. - runF(t, clusters[1:], func(t *testing.T, c *Cluster) { - c.Shutdown(ctx) - c.dht.Close() - c.host.Close() - }) - wg.Wait() - }) -} diff --git a/packages/networking/ipfs-cluster/adder/adder.go b/packages/networking/ipfs-cluster/adder/adder.go deleted file mode 100644 index ea20cbf..0000000 --- a/packages/networking/ipfs-cluster/adder/adder.go +++ /dev/null @@ -1,331 +0,0 @@ -// Package adder implements functionality to add content to IPFS daemons -// managed by the Cluster. -package adder - -import ( - "context" - "errors" - "fmt" - "io" - "mime/multipart" - "strings" - - "github.com/ipfs-cluster/ipfs-cluster/adder/ipfsadd" - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs/go-unixfs" - "github.com/ipld/go-car" - peer "github.com/libp2p/go-libp2p/core/peer" - - cid "github.com/ipfs/go-cid" - files "github.com/ipfs/go-ipfs-files" - cbor "github.com/ipfs/go-ipld-cbor" - ipld "github.com/ipfs/go-ipld-format" - logging "github.com/ipfs/go-log/v2" - merkledag "github.com/ipfs/go-merkledag" - multihash "github.com/multiformats/go-multihash" -) - -var logger = logging.Logger("adder") - -// go-merkledag does this, but it may be moved. -// We include for explicitness. -func init() { - ipld.Register(cid.DagProtobuf, merkledag.DecodeProtobufBlock) - ipld.Register(cid.Raw, merkledag.DecodeRawBlock) - ipld.Register(cid.DagCBOR, cbor.DecodeBlock) -} - -// ClusterDAGService is an implementation of ipld.DAGService plus a Finalize -// method. ClusterDAGServices can be used to provide Adders with a different -// add implementation. -type ClusterDAGService interface { - ipld.DAGService - // Finalize receives the IPFS content root CID as - // returned by the ipfs adder. - Finalize(ctx context.Context, ipfsRoot api.Cid) (api.Cid, error) - // Allocations returns the allocations made by the cluster DAG service - // for the added content. - Allocations() []peer.ID -} - -// A dagFormatter can create dags from files.Node. It can keep state -// to add several files to the same dag. -type dagFormatter interface { - Add(name string, f files.Node) (api.Cid, error) -} - -// Adder is used to add content to IPFS Cluster using an implementation of -// ClusterDAGService. -type Adder struct { - ctx context.Context - cancel context.CancelFunc - - dgs ClusterDAGService - - params api.AddParams - - // AddedOutput updates are placed on this channel - // whenever a block is processed. 
They contain information - // about the block, the CID, the Name etc. and are mostly - // meant to be streamed back to the user. - output chan api.AddedOutput -} - -// New returns a new Adder with the given ClusterDAGService, add options and a -// channel to send updates during the adding process. -// -// An Adder may only be used once. -func New(ds ClusterDAGService, p api.AddParams, out chan api.AddedOutput) *Adder { - // Discard all progress update output as the caller has not provided - // a channel for them to listen on. - if out == nil { - out = make(chan api.AddedOutput, 100) - go func() { - for range out { - } - }() - } - - return &Adder{ - dgs: ds, - params: p, - output: out, - } -} - -func (a *Adder) setContext(ctx context.Context) { - if a.ctx == nil { // only allows first context - ctxc, cancel := context.WithCancel(ctx) - a.ctx = ctxc - a.cancel = cancel - } -} - -// FromMultipart adds content from a multipart.Reader. The adder will -// no longer be usable after calling this method. -func (a *Adder) FromMultipart(ctx context.Context, r *multipart.Reader) (api.Cid, error) { - logger.Debugf("adding from multipart with params: %+v", a.params) - - f, err := files.NewFileFromPartReader(r, "multipart/form-data") - if err != nil { - return api.CidUndef, err - } - defer f.Close() - return a.FromFiles(ctx, f) -} - -// FromFiles adds content from a files.Directory. The adder will no longer -// be usable after calling this method. -func (a *Adder) FromFiles(ctx context.Context, f files.Directory) (api.Cid, error) { - logger.Debug("adding from files") - a.setContext(ctx) - - if a.ctx.Err() != nil { // don't allow running twice - return api.CidUndef, a.ctx.Err() - } - - defer a.cancel() - defer close(a.output) - - var dagFmtr dagFormatter - var err error - switch a.params.Format { - case "", "unixfs": - dagFmtr, err = newIpfsAdder(ctx, a.dgs, a.params, a.output) - - case "car": - dagFmtr, err = newCarAdder(ctx, a.dgs, a.params, a.output) - default: - err = errors.New("bad dag formatter option") - } - if err != nil { - return api.CidUndef, err - } - - // setup wrapping - if a.params.Wrap { - f = files.NewSliceDirectory( - []files.DirEntry{files.FileEntry("", f)}, - ) - } - - it := f.Entries() - var adderRoot api.Cid - for it.Next() { - select { - case <-a.ctx.Done(): - return api.CidUndef, a.ctx.Err() - default: - logger.Debugf("ipfsAdder AddFile(%s)", it.Name()) - - adderRoot, err = dagFmtr.Add(it.Name(), it.Node()) - if err != nil { - logger.Error("error adding to cluster: ", err) - return api.CidUndef, err - } - } - // TODO (hector): We can only add a single CAR file for the - // moment. - if a.params.Format == "car" { - break - } - } - if it.Err() != nil { - return api.CidUndef, it.Err() - } - - clusterRoot, err := a.dgs.Finalize(a.ctx, adderRoot) - if err != nil { - logger.Error("error finalizing adder:", err) - return api.CidUndef, err - } - logger.Infof("%s successfully added to cluster", clusterRoot) - return clusterRoot, nil -} - -// A wrapper around the ipfsadd.Adder to satisfy the dagFormatter interface. 
-type ipfsAdder struct {
-	*ipfsadd.Adder
-}
-
-func newIpfsAdder(ctx context.Context, dgs ClusterDAGService, params api.AddParams, out chan api.AddedOutput) (*ipfsAdder, error) {
-	iadder, err := ipfsadd.NewAdder(ctx, dgs, dgs.Allocations)
-	if err != nil {
-		logger.Error(err)
-		return nil, err
-	}
-
-	iadder.Trickle = params.Layout == "trickle"
-	iadder.RawLeaves = params.RawLeaves
-	iadder.Chunker = params.Chunker
-	iadder.Out = out
-	iadder.Progress = params.Progress
-	iadder.NoCopy = params.NoCopy
-
-	// Set up the CID prefix (version and hash function).
-	prefix, err := merkledag.PrefixForCidVersion(params.CidVersion)
-	if err != nil {
-		return nil, fmt.Errorf("bad CID Version: %s", err)
-	}
-
-	hashFunCode, ok := multihash.Names[strings.ToLower(params.HashFun)]
-	if !ok {
-		return nil, errors.New("hash function name not known")
-	}
-	prefix.MhType = hashFunCode
-	prefix.MhLength = -1
-	iadder.CidBuilder = &prefix
-	return &ipfsAdder{
-		Adder: iadder,
-	}, nil
-}
-
-func (ia *ipfsAdder) Add(name string, f files.Node) (api.Cid, error) {
-	// In order to set the AddedOutput names right, we use
-	// OutputPrefix:
-	//
-	// When adding a folder, this is the root folder name which is
-	// prepended to the added paths. When adding a single file,
-	// this is the name of the file which overrides the empty
-	// AddedOutput name.
-	//
-	// After coreunix/add.go was refactored in go-ipfs and we
-	// followed suit, it no longer receives the name of the
-	// file/folder being added and does not emit AddedOutput
-	// events with the right names. We addressed this by adding
-	// OutputPrefix to our version (go-ipfs modifies emitted
-	// events before sending them to the user).
-	ia.OutputPrefix = name
-
-	nd, err := ia.AddAllAndPin(f)
-	if err != nil {
-		return api.CidUndef, err
-	}
-	return api.NewCid(nd.Cid()), nil
-}
-
-// An adder to add CAR files. It is at the moment very basic, and can
-// add a single CAR file with a single root. Ideally, it should be able to
-// add more complex CARs, or several of them, by wrapping them with a single
-// root. But for that we would need to keep state and track an MFS root
-// similarly to what the ipfsadder does.
-type carAdder struct {
-	ctx    context.Context
-	dgs    ClusterDAGService
-	params api.AddParams
-	output chan api.AddedOutput
-}
-
-func newCarAdder(ctx context.Context, dgs ClusterDAGService, params api.AddParams, out chan api.AddedOutput) (*carAdder, error) {
-	return &carAdder{
-		ctx:    ctx,
-		dgs:    dgs,
-		params: params,
-		output: out,
-	}, nil
-}
-
-// Add takes a node which should be a CAR file and nothing else and
-// adds its blocks using the ClusterDAGService.
-func (ca *carAdder) Add(name string, fn files.Node) (api.Cid, error) { - if ca.params.Wrap { - return api.CidUndef, errors.New("cannot wrap a CAR file upload") - } - - f, ok := fn.(files.File) - if !ok { - return api.CidUndef, errors.New("expected CAR file is not of type file") - } - carReader, err := car.NewCarReader(f) - if err != nil { - return api.CidUndef, err - } - - if len(carReader.Header.Roots) != 1 { - return api.CidUndef, errors.New("only CAR files with a single root are supported") - } - - root := carReader.Header.Roots[0] - bytes := uint64(0) - size := uint64(0) - - for { - block, err := carReader.Next() - if err != nil && err != io.EOF { - return api.CidUndef, err - } else if block == nil { - break - } - - bytes += uint64(len(block.RawData())) - - nd, err := ipld.Decode(block) - if err != nil { - return api.CidUndef, err - } - - // If the root is in the CAR and the root is a UnixFS - // node, then set the size in the output object. - if nd.Cid().Equals(root) { - ufs, err := unixfs.ExtractFSNode(nd) - if err == nil { - size = ufs.FileSize() - } - } - - err = ca.dgs.Add(ca.ctx, nd) - if err != nil { - return api.CidUndef, err - } - } - - ca.output <- api.AddedOutput{ - Name: name, - Cid: api.NewCid(root), - Bytes: bytes, - Size: size, - Allocations: ca.dgs.Allocations(), - } - - return api.NewCid(root), nil -} diff --git a/packages/networking/ipfs-cluster/adder/adder_test.go b/packages/networking/ipfs-cluster/adder/adder_test.go deleted file mode 100644 index 323536f..0000000 --- a/packages/networking/ipfs-cluster/adder/adder_test.go +++ /dev/null @@ -1,227 +0,0 @@ -package adder - -import ( - "bytes" - "context" - "fmt" - "mime/multipart" - "sync" - "testing" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/test" - "github.com/ipld/go-car" - peer "github.com/libp2p/go-libp2p/core/peer" - - cid "github.com/ipfs/go-cid" - files "github.com/ipfs/go-ipfs-files" -) - -type mockCDAGServ struct { - *test.MockDAGService -} - -func newMockCDAGServ() *mockCDAGServ { - return &mockCDAGServ{ - // write-only DAGs. 
-		MockDAGService: test.NewMockDAGService(true),
-	}
-}
-
-func newReadableMockCDAGServ() *mockCDAGServ {
-	return &mockCDAGServ{
-		MockDAGService: test.NewMockDAGService(false),
-	}
-}
-
-// noop
-func (dag *mockCDAGServ) Finalize(ctx context.Context, root api.Cid) (api.Cid, error) {
-	return root, nil
-}
-
-func (dag *mockCDAGServ) Allocations() []peer.ID {
-	return nil
-}
-
-func TestAdder(t *testing.T) {
-	sth := test.NewShardingTestHelper()
-	defer sth.Clean(t)
-
-	mr, closer := sth.GetTreeMultiReader(t)
-	defer closer.Close()
-	r := multipart.NewReader(mr, mr.Boundary())
-	p := api.DefaultAddParams()
-	expectedCids := test.ShardingDirCids[:]
-
-	dags := newMockCDAGServ()
-
-	adder := New(dags, p, nil)
-
-	root, err := adder.FromMultipart(context.Background(), r)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if root.String() != test.ShardingDirBalancedRootCID {
-		t.Error("expected the right content root")
-	}
-
-	if len(expectedCids) != len(dags.Nodes) {
-		t.Fatal("unexpected number of blocks imported")
-	}
-
-	for _, c := range expectedCids {
-		ci, _ := cid.Decode(c)
-		_, ok := dags.Nodes[ci]
-		if !ok {
-			t.Fatal("unexpected block emitted:", c)
-		}
-	}
-}
-
-func TestAdder_DoubleStart(t *testing.T) {
-	sth := test.NewShardingTestHelper()
-	defer sth.Clean(t)
-
-	f := sth.GetTreeSerialFile(t)
-	p := api.DefaultAddParams()
-
-	dags := newMockCDAGServ()
-
-	adder := New(dags, p, nil)
-	_, err := adder.FromFiles(context.Background(), f)
-	f.Close()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	f = sth.GetTreeSerialFile(t)
-	_, err = adder.FromFiles(context.Background(), f)
-	f.Close()
-	if err == nil {
-		t.Fatal("expected an error: cannot run importer twice")
-	}
-}
-
-func TestAdder_ContextCancelled(t *testing.T) {
-	sth := test.NewShardingTestHelper()
-	defer sth.Clean(t)
-
-	lg, closer := sth.GetRandFileReader(t, 100000) // 100 MB
-	st := sth.GetTreeSerialFile(t)
-	defer closer.Close()
-	defer st.Close()
-
-	slf := files.NewMapDirectory(map[string]files.Node{
-		"a": lg,
-		"b": st,
-	})
-	mr := files.NewMultiFileReader(slf, true)
-
-	r := multipart.NewReader(mr, mr.Boundary())
-
-	p := api.DefaultAddParams()
-
-	dags := newMockCDAGServ()
-
-	ctx, cancel := context.WithCancel(context.Background())
-	adder := New(dags, p, nil)
-	var wg sync.WaitGroup
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		_, err := adder.FromMultipart(ctx, r)
-		if err == nil {
-			t.Error("expected a context canceled error")
-		}
-		t.Log(err)
-	}()
-	// adder.FromMultipart would finish if we slept for longer.
-	time.Sleep(50 * time.Millisecond)
-	cancel()
-	wg.Wait()
-}
-
-func TestAdder_CAR(t *testing.T) {
-	// prepare a CAR file
-	ctx := context.Background()
-	sth := test.NewShardingTestHelper()
-	defer sth.Clean(t)
-
-	mr, closer := sth.GetTreeMultiReader(t)
-	defer closer.Close()
-	r := multipart.NewReader(mr, mr.Boundary())
-	p := api.DefaultAddParams()
-	dags := newReadableMockCDAGServ()
-	adder := New(dags, p, nil)
-	root, err := adder.FromMultipart(ctx, r)
-	if err != nil {
-		t.Fatal(err)
-	}
-	var carBuf bytes.Buffer
-	// Make a CAR out of the files we added.
-	err = car.WriteCar(ctx, dags, []cid.Cid{root.Cid}, &carBuf)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// Make the CAR look like a multipart.
-	carFile := files.NewReaderFile(&carBuf)
-	carDir := files.NewMapDirectory(
-		map[string]files.Node{"": carFile},
-	)
-	carMf := files.NewMultiFileReader(carDir, true)
-	carMr := multipart.NewReader(carMf, carMf.Boundary())
-
-	// Add the car, discarding old dags.
-	dags = newMockCDAGServ()
-	p.Format = "car"
-	adder = New(dags, p, nil)
-	root2, err := adder.FromMultipart(ctx, carMr)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if !root.Equals(root2) {
-		t.Error("Imported CAR file does not have expected root")
-	}
-
-	expectedCids := test.ShardingDirCids[:]
-	for _, c := range expectedCids {
-		ci, _ := cid.Decode(c)
-		_, ok := dags.Nodes[ci]
-		if !ok {
-			t.Fatal("unexpected block extracted from CAR:", c)
-		}
-	}
-
-}
-
-func TestAdder_LargeFolder(t *testing.T) {
-	items := 10000 // add 10000 items
-
-	sth := test.NewShardingTestHelper()
-	defer sth.Clean(t)
-
-	filesMap := make(map[string]files.Node)
-	for i := 0; i < items; i++ {
-		fstr := fmt.Sprintf("file%d", i)
-		f := files.NewBytesFile([]byte(fstr))
-		filesMap[fstr] = f
-	}
-
-	slf := files.NewMapDirectory(filesMap)
-
-	p := api.DefaultAddParams()
-	p.Wrap = true
-
-	dags := newMockCDAGServ()
-
-	adder := New(dags, p, nil)
-	_, err := adder.FromFiles(context.Background(), slf)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-}
diff --git a/packages/networking/ipfs-cluster/adder/adderutils/adderutils.go b/packages/networking/ipfs-cluster/adder/adderutils/adderutils.go
deleted file mode 100644
index a35f5dcc8..0000000
--- a/packages/networking/ipfs-cluster/adder/adderutils/adderutils.go
+++ /dev/null
@@ -1,135 +0,0 @@
-// Package adderutils provides some utilities for adding content to cluster.
-package adderutils
-
-import (
-	"context"
-	"encoding/json"
-	"mime/multipart"
-	"net/http"
-	"sync"
-
-	"github.com/ipfs-cluster/ipfs-cluster/adder"
-	"github.com/ipfs-cluster/ipfs-cluster/adder/sharding"
-	"github.com/ipfs-cluster/ipfs-cluster/adder/single"
-	"github.com/ipfs-cluster/ipfs-cluster/api"
-
-	logging "github.com/ipfs/go-log/v2"
-	rpc "github.com/libp2p/go-libp2p-gorpc"
-)
-
-var logger = logging.Logger("adder")
-
-// AddMultipartHTTPHandler is a helper function to add content
-// uploaded using a multipart request. The outputTransform parameter
-// allows customizing the HTTP response output format to something
-// other than api.AddedOutput objects.
-func AddMultipartHTTPHandler(
-	ctx context.Context,
-	rpc *rpc.Client,
-	params api.AddParams,
-	reader *multipart.Reader,
-	w http.ResponseWriter,
-	outputTransform func(api.AddedOutput) interface{},
-) (api.Cid, error) {
-	var dags adder.ClusterDAGService
-	output := make(chan api.AddedOutput, 200)
-
-	if params.Shard {
-		dags = sharding.New(ctx, rpc, params, output)
-	} else {
-		dags = single.New(ctx, rpc, params, params.Local)
-	}
-
-	if outputTransform == nil {
-		outputTransform = func(in api.AddedOutput) interface{} { return in }
-	}
-
-	// This must be application/json otherwise the go-ipfs client
-	// will break.
-	w.Header().Set("Content-Type", "application/json")
-	// Browsers should not cache these responses.
-	w.Header().Set("Cache-Control", "no-cache")
-	// We need to ask the clients to close the connection
-	// (no keep-alive) or things break badly when adding.
-	// https://github.com/ipfs/go-ipfs-cmds/pull/116
-	w.Header().Set("Connection", "close")
-
-	var wg sync.WaitGroup
-	if !params.StreamChannels {
-		// in this case we buffer responses in memory and
-		// return them as a valid JSON array.
- wg.Add(1) - var bufOutput []interface{} // a slice of transformed AddedOutput - go func() { - defer wg.Done() - bufOutput = buildOutput(output, outputTransform) - }() - - enc := json.NewEncoder(w) - add := adder.New(dags, params, output) - root, err := add.FromMultipart(ctx, reader) - if err != nil { // Send an error - logger.Error(err) - w.WriteHeader(http.StatusInternalServerError) - errorResp := api.Error{ - Code: http.StatusInternalServerError, - Message: err.Error(), - } - - if err := enc.Encode(errorResp); err != nil { - logger.Error(err) - } - wg.Wait() - return root, err - } - wg.Wait() - w.WriteHeader(http.StatusOK) - enc.Encode(bufOutput) - return root, err - } - - // handle stream-adding. This should be the default. - - // https://github.com/ipfs-shipyard/ipfs-companion/issues/600 - w.Header().Set("X-Chunked-Output", "1") - // Used by go-ipfs to signal errors half-way through the stream. - w.Header().Set("Trailer", "X-Stream-Error") - w.WriteHeader(http.StatusOK) - wg.Add(1) - go func() { - defer wg.Done() - streamOutput(w, output, outputTransform) - }() - add := adder.New(dags, params, output) - root, err := add.FromMultipart(ctx, reader) - if err != nil { - logger.Error(err) - // Set trailer with error - w.Header().Set("X-Stream-Error", err.Error()) - } - wg.Wait() - return root, err -} - -func streamOutput(w http.ResponseWriter, output chan api.AddedOutput, transform func(api.AddedOutput) interface{}) { - flusher, flush := w.(http.Flusher) - enc := json.NewEncoder(w) - for v := range output { - err := enc.Encode(transform(v)) - if err != nil { - logger.Error(err) - break - } - if flush { - flusher.Flush() - } - } -} - -func buildOutput(output chan api.AddedOutput, transform func(api.AddedOutput) interface{}) []interface{} { - var finalOutput []interface{} - for v := range output { - finalOutput = append(finalOutput, transform(v)) - } - return finalOutput -} diff --git a/packages/networking/ipfs-cluster/adder/ipfsadd/add.go b/packages/networking/ipfs-cluster/adder/ipfsadd/add.go deleted file mode 100644 index ed81da4..0000000 --- a/packages/networking/ipfs-cluster/adder/ipfsadd/add.go +++ /dev/null @@ -1,488 +0,0 @@ -// Package ipfsadd is a simplified copy of go-ipfs/core/coreunix/add.go -package ipfsadd - -import ( - "context" - "errors" - "fmt" - "io" - gopath "path" - "path/filepath" - - "github.com/ipfs-cluster/ipfs-cluster/api" - - cid "github.com/ipfs/go-cid" - chunker "github.com/ipfs/go-ipfs-chunker" - files "github.com/ipfs/go-ipfs-files" - posinfo "github.com/ipfs/go-ipfs-posinfo" - ipld "github.com/ipfs/go-ipld-format" - logging "github.com/ipfs/go-log/v2" - dag "github.com/ipfs/go-merkledag" - mfs "github.com/ipfs/go-mfs" - unixfs "github.com/ipfs/go-unixfs" - balanced "github.com/ipfs/go-unixfs/importer/balanced" - ihelper "github.com/ipfs/go-unixfs/importer/helpers" - trickle "github.com/ipfs/go-unixfs/importer/trickle" - peer "github.com/libp2p/go-libp2p/core/peer" -) - -var log = logging.Logger("coreunix") - -// how many bytes of progress to wait before sending a progress update message -const progressReaderIncrement = 1024 * 256 - -var liveCacheSize = uint64(256 << 10) - -// NewAdder Returns a new Adder used for a file add operation. -func NewAdder(ctx context.Context, ds ipld.DAGService, allocs func() []peer.ID) (*Adder, error) { - // Cluster: we don't use pinner nor GCLocker. 
- return &Adder{ - ctx: ctx, - dagService: ds, - allocsFun: allocs, - Progress: false, - Trickle: false, - Chunker: "", - }, nil -} - -// Adder holds the switches passed to the `add` command. -type Adder struct { - ctx context.Context - dagService ipld.DAGService - allocsFun func() []peer.ID - Out chan api.AddedOutput - Progress bool - Trickle bool - RawLeaves bool - Silent bool - NoCopy bool - Chunker string - mroot *mfs.Root - tempRoot cid.Cid - CidBuilder cid.Builder - liveNodes uint64 - lastFile mfs.FSNode - // Cluster: ipfs does a hack in commands/add.go to set the filenames - // in emitted events correctly. We carry a root folder name (or a - // filename in the case of single files here and emit those events - // correctly from the beginning). - OutputPrefix string -} - -func (adder *Adder) mfsRoot() (*mfs.Root, error) { - if adder.mroot != nil { - return adder.mroot, nil - } - rnode := unixfs.EmptyDirNode() - rnode.SetCidBuilder(adder.CidBuilder) - mr, err := mfs.NewRoot(adder.ctx, adder.dagService, rnode, nil) - if err != nil { - return nil, err - } - adder.mroot = mr - return adder.mroot, nil -} - -// SetMfsRoot sets `r` as the root for Adder. -func (adder *Adder) SetMfsRoot(r *mfs.Root) { - adder.mroot = r -} - -// Constructs a node from reader's data, and adds it. Doesn't pin. -func (adder *Adder) add(reader io.Reader) (ipld.Node, error) { - chnk, err := chunker.FromString(reader, adder.Chunker) - if err != nil { - return nil, err - } - - // Cluster: we don't do batching/use BufferedDS. - - params := ihelper.DagBuilderParams{ - Dagserv: adder.dagService, - RawLeaves: adder.RawLeaves, - Maxlinks: ihelper.DefaultLinksPerBlock, - NoCopy: adder.NoCopy, - CidBuilder: adder.CidBuilder, - } - - db, err := params.New(chnk) - if err != nil { - return nil, err - } - - var nd ipld.Node - if adder.Trickle { - nd, err = trickle.Layout(db) - } else { - nd, err = balanced.Layout(db) - } - if err != nil { - return nil, err - } - - return nd, nil -} - -// Cluster: commented as it is unused -// // RootNode returns the mfs root node -// func (adder *Adder) curRootNode() (ipld.Node, error) { -// mr, err := adder.mfsRoot() -// if err != nil { -// return nil, err -// } -// root, err := mr.GetDirectory().GetNode() -// if err != nil { -// return nil, err -// } - -// // if one root file, use that hash as root. -// if len(root.Links()) == 1 { -// nd, err := root.Links()[0].GetNode(adder.ctx, adder.dagService) -// if err != nil { -// return nil, err -// } - -// root = nd -// } - -// return root, err -// } - -// PinRoot recursively pins the root node of Adder and -// writes the pin state to the backing datastore. -// Cluster: we don't pin. Former Finalize(). -func (adder *Adder) PinRoot(root ipld.Node) error { - rnk := root.Cid() - - err := adder.dagService.Add(adder.ctx, root) - if err != nil { - return err - } - - if adder.tempRoot.Defined() { - adder.tempRoot = rnk - } - - return nil -} - -func (adder *Adder) outputDirs(path string, fsn mfs.FSNode) error { - switch fsn := fsn.(type) { - case *mfs.File: - return nil - case *mfs.Directory: - names, err := fsn.ListNames(adder.ctx) - if err != nil { - return err - } - - for _, name := range names { - child, err := fsn.Child(name) - if err != nil { - // This fails when Child is of type *mfs.File - // because it tries to get them from the DAG - // service (does not implement this and returns - // a "not found" error) - // *mfs.Files are ignored in the recursive call - // anyway. - // For Cluster, we just ignore errors here. 
-				continue
-			}
-
-			childpath := gopath.Join(path, name)
-			err = adder.outputDirs(childpath, child)
-			if err != nil {
-				return err
-			}
-
-			fsn.Uncache(name)
-		}
-		nd, err := fsn.GetNode()
-		if err != nil {
-			return err
-		}
-
-		return adder.outputDagnode(adder.Out, path, nd)
-	default:
-		return fmt.Errorf("unrecognized fsn type: %#v", fsn)
-	}
-}
-
-func (adder *Adder) addNode(node ipld.Node, path string) error {
-	// patch it into the root
-	outputName := path
-	if path == "" {
-		path = node.Cid().String()
-		outputName = ""
-	}
-
-	if pi, ok := node.(*posinfo.FilestoreNode); ok {
-		node = pi.Node
-	}
-
-	mr, err := adder.mfsRoot()
-	if err != nil {
-		return err
-	}
-	dir := gopath.Dir(path)
-	if dir != "." {
-		opts := mfs.MkdirOpts{
-			Mkparents:  true,
-			Flush:      false,
-			CidBuilder: adder.CidBuilder,
-		}
-		if err := mfs.Mkdir(mr, dir, opts); err != nil {
-			return err
-		}
-	}
-
-	if err := mfs.PutNode(mr, path, node); err != nil {
-		return err
-	}
-
-	// Cluster: cache the last file added.
-	// This avoids using the DAGService to get the first children
-	// of the MFS root when not wrapping.
-	lastFile, err := mfs.NewFile(path, node, nil, adder.dagService)
-	if err != nil {
-		return err
-	}
-	adder.lastFile = lastFile
-
-	if !adder.Silent {
-		return adder.outputDagnode(adder.Out, outputName, node)
-	}
-	return nil
-}
-
-// AddAllAndPin adds the given request's files and pins them.
-// Cluster: we don't pin. Former AddFiles.
-func (adder *Adder) AddAllAndPin(file files.Node) (ipld.Node, error) {
-	if err := adder.addFileNode("", file, true); err != nil {
-		return nil, err
-	}
-
-	// get root
-	mr, err := adder.mfsRoot()
-	if err != nil {
-		return nil, err
-	}
-	var root mfs.FSNode
-	rootdir := mr.GetDirectory()
-	root = rootdir
-
-	err = root.Flush()
-	if err != nil {
-		return nil, err
-	}
-
-	// if adding a file without wrapping, swap the root to it (when adding a
-	// directory, mfs root is the directory)
-	_, dir := file.(files.Directory)
-	var name string
-	if !dir {
-		children, err := rootdir.ListNames(adder.ctx)
-		if err != nil {
-			return nil, err
-		}
-
-		if len(children) == 0 {
-			return nil, fmt.Errorf("expected at least one child dir, got none")
-		}
-
-		// Replace root with the first child
-		name = children[0]
-		root, err = rootdir.Child(name)
-		if err != nil {
-			// Cluster: use the last file we added
-			// if we have one.
-			if adder.lastFile == nil {
-				return nil, err
-			}
-			root = adder.lastFile
-		}
-	}
-
-	err = mr.Close()
-	if err != nil {
-		return nil, err
-	}
-
-	nd, err := root.GetNode()
-	if err != nil {
-		return nil, err
-	}
-
-	// output directory events
-	err = adder.outputDirs(name, root)
-	if err != nil {
-		return nil, err
-	}
-
-	// Cluster: call PinRoot which adds the root cid to the DAGService.
-	// Unsure if this is a bug in IPFS when not pinning. Or it would get added
-	// twice.
- return nd, adder.PinRoot(nd) -} - -// Cluster: we don't Pause for GC -func (adder *Adder) addFileNode(path string, file files.Node, toplevel bool) error { - defer file.Close() - - if adder.liveNodes >= liveCacheSize { - // TODO: A smarter cache that uses some sort of lru cache with an eviction handler - mr, err := adder.mfsRoot() - if err != nil { - return err - } - if err := mr.FlushMemFree(adder.ctx); err != nil { - return err - } - - adder.liveNodes = 0 - } - adder.liveNodes++ - - switch f := file.(type) { - case files.Directory: - return adder.addDir(path, f, toplevel) - case *files.Symlink: - return adder.addSymlink(path, f) - case files.File: - return adder.addFile(path, f) - default: - return errors.New("unknown file type") - } -} - -func (adder *Adder) addSymlink(path string, l *files.Symlink) error { - sdata, err := unixfs.SymlinkData(l.Target) - if err != nil { - return err - } - - dagnode := dag.NodeWithData(sdata) - dagnode.SetCidBuilder(adder.CidBuilder) - err = adder.dagService.Add(adder.ctx, dagnode) - if err != nil { - return err - } - - return adder.addNode(dagnode, path) -} - -func (adder *Adder) addFile(path string, file files.File) error { - // if the progress flag was specified, wrap the file so that we can send - // progress updates to the client (over the output channel) - var reader io.Reader = file - if adder.Progress { - rdr := &progressReader{file: reader, path: path, out: adder.Out} - if fi, ok := file.(files.FileInfo); ok { - reader = &progressReader2{rdr, fi} - } else { - reader = rdr - } - } - - dagnode, err := adder.add(reader) - if err != nil { - return err - } - - // patch it into the root - return adder.addNode(dagnode, path) -} - -func (adder *Adder) addDir(path string, dir files.Directory, toplevel bool) error { - log.Infof("adding directory: %s", path) - - if !(toplevel && path == "") { - mr, err := adder.mfsRoot() - if err != nil { - return err - } - err = mfs.Mkdir(mr, path, mfs.MkdirOpts{ - Mkparents: true, - Flush: false, - CidBuilder: adder.CidBuilder, - }) - if err != nil { - return err - } - } - - it := dir.Entries() - for it.Next() { - fpath := gopath.Join(path, it.Name()) - err := adder.addFileNode(fpath, it.Node(), false) - if err != nil { - return err - } - } - - return it.Err() -} - -// outputDagnode sends dagnode info over the output channel. -// Cluster: we use api.AddedOutput instead of coreiface events -// and make this an adder method to be be able to prefix. -func (adder *Adder) outputDagnode(out chan api.AddedOutput, name string, dn ipld.Node) error { - if out == nil { - return nil - } - - s, err := dn.Size() - if err != nil { - return err - } - - // When adding things in a folder: "OutputPrefix/name" - // When adding a single file: "OutputPrefix" (name is unset) - // When adding a single thing with no name: "" - // Note: ipfs sets the name of files received on stdin to the CID, - // but cluster does not support stdin-adding so we do not - // account for this here. 
- name = filepath.Join(adder.OutputPrefix, name) - - out <- api.AddedOutput{ - Cid: api.NewCid(dn.Cid()), - Name: name, - Size: s, - Allocations: adder.allocsFun(), - } - - return nil -} - -type progressReader struct { - file io.Reader - path string - out chan api.AddedOutput - bytes int64 - lastProgress int64 -} - -func (i *progressReader) Read(p []byte) (int, error) { - n, err := i.file.Read(p) - - i.bytes += int64(n) - if i.bytes-i.lastProgress >= progressReaderIncrement || err == io.EOF { - i.lastProgress = i.bytes - i.out <- api.AddedOutput{ - Name: i.path, - Bytes: uint64(i.bytes), - } - } - - return n, err -} - -type progressReader2 struct { - *progressReader - files.FileInfo -} - -func (i *progressReader2) Read(p []byte) (int, error) { - return i.progressReader.Read(p) -} diff --git a/packages/networking/ipfs-cluster/adder/sharding/dag.go b/packages/networking/ipfs-cluster/adder/sharding/dag.go deleted file mode 100644 index 401efaf..0000000 --- a/packages/networking/ipfs-cluster/adder/sharding/dag.go +++ /dev/null @@ -1,186 +0,0 @@ -package sharding - -// dag.go defines functions for constructing and parsing ipld-cbor nodes -// of the clusterDAG used to track sharded DAGs in ipfs-cluster - -// Most logic goes into handling the edge cases in which clusterDAG -// metadata for a single shard cannot fit within a single shard node. We -// make the following simplifying assumption: a single shard will not track -// more than 35,808,256 links (~2^25). This is the limit at which the current -// shard node format would need 2 levels of indirect nodes to reference -// all of the links. Note that this limit is only reached at shard sizes 7 -// times the size of the current default and then only when files are all -// 1 byte in size. In the future we may generalize the shard dag to multiple -// indirect nodes to accommodate much bigger shard sizes. Also note that the -// move to using the identity hash function in cids of very small data -// will improve link density in shard nodes and further reduce the need for -// multiple levels of indirection. - -import ( - "context" - "fmt" - - blocks "github.com/ipfs/go-block-format" - cid "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - ipld "github.com/ipfs/go-ipld-format" - dag "github.com/ipfs/go-merkledag" - mh "github.com/multiformats/go-multihash" -) - -// go-merkledag does this, but it may be moved. -// We include for explicitness. 
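
// ---------------------------------------------------------------------------
// Editor's note (illustrative, not part of the original file): the
// 35,808,256-link ceiling quoted in the file header above is exactly
// MaxLinks squared. With one level of indirection, a root shard node holds
// up to MaxLinks links to leaf shard nodes, each of which holds up to
// MaxLinks links to data nodes. Using MaxLinks = 5984 as defined below:
//
//	const maxLinks = 5984
//	fmt.Println(maxLinks * maxLinks) // 35808256, roughly 2^25
//
// A second level of indirection would raise the ceiling to MaxLinks cubed.
// ---------------------------------------------------------------------------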
-func init() { - ipld.Register(cid.DagProtobuf, dag.DecodeProtobufBlock) - ipld.Register(cid.Raw, dag.DecodeRawBlock) - ipld.Register(cid.DagCBOR, cbor.DecodeBlock) -} - -// MaxLinks is the max number of links that, when serialized fit into a block -const MaxLinks = 5984 -const hashFn = mh.SHA2_256 - -// CborDataToNode parses cbor data into a clusterDAG node while making a few -// checks -func CborDataToNode(raw []byte, format string) (ipld.Node, error) { - if format != "cbor" { - return nil, fmt.Errorf("unexpected shard node format %s", format) - } - shardCid, err := cid.NewPrefixV1(cid.DagCBOR, hashFn).Sum(raw) - if err != nil { - return nil, err - } - shardBlk, err := blocks.NewBlockWithCid(raw, shardCid) - if err != nil { - return nil, err - } - shardNode, err := ipld.Decode(shardBlk) - if err != nil { - return nil, err - } - return shardNode, nil -} - -func makeDAGSimple(ctx context.Context, dagObj map[string]cid.Cid) (ipld.Node, error) { - node, err := cbor.WrapObject( - dagObj, - hashFn, mh.DefaultLengths[hashFn], - ) - if err != nil { - return nil, err - } - return node, err -} - -// makeDAG parses a dagObj which stores all of the node-links a shardDAG -// is responsible for tracking. In general a single node of links may exceed -// the capacity of an ipfs block. In this case an indirect node in the -// shardDAG is constructed that references "leaf shardNodes" that themselves -// carry links to the data nodes being tracked. The head of the output slice -// is always the root of the shardDAG, i.e. the ipld node that should be -// recursively pinned to track the shard -func makeDAG(ctx context.Context, dagObj map[string]cid.Cid) ([]ipld.Node, error) { - // FIXME: We have a 4MB limit on the block size enforced by bitswap: - // https://github.com/libp2p/go-libp2p/core/blob/master/network/network.go#L23 - - // No indirect node - if len(dagObj) <= MaxLinks { - n, err := makeDAGSimple(ctx, dagObj) - return []ipld.Node{n}, err - } - // Indirect node required - leafNodes := make([]ipld.Node, 0) // shardNodes with links to data - indirectObj := make(map[string]cid.Cid) // shardNode with links to shardNodes - numFullLeaves := len(dagObj) / MaxLinks - for i := 0; i <= numFullLeaves; i++ { - leafObj := make(map[string]cid.Cid) - for j := 0; j < MaxLinks; j++ { - c, ok := dagObj[fmt.Sprintf("%d", i*MaxLinks+j)] - if !ok { // finished with this leaf before filling all the way - if i != numFullLeaves { - panic("bad state, should never be here") - } - break - } - leafObj[fmt.Sprintf("%d", j)] = c - } - leafNode, err := makeDAGSimple(ctx, leafObj) - if err != nil { - return nil, err - } - indirectObj[fmt.Sprintf("%d", i)] = leafNode.Cid() - leafNodes = append(leafNodes, leafNode) - } - indirectNode, err := makeDAGSimple(ctx, indirectObj) - if err != nil { - return nil, err - } - nodes := append([]ipld.Node{indirectNode}, leafNodes...) - return nodes, nil -} - -// TODO: decide whether this is worth including. Is precision important for -// most usecases? Is being a little over the shard size a serious problem? 
-// Is precision worth the cost to maintain complex accounting for metadata -// size (cid sizes will vary in general, cluster dag cbor format may -// grow to vary unpredictably in size) -// byteCount returns the number of bytes the dagObj will occupy when -//serialized into an ipld DAG -/*func byteCount(obj dagObj) uint64 { - // 1 byte map overhead - // for each entry: - // 1 byte indicating text - // 1 byte*(number digits) for key - // 2 bytes for link tag - // 35 bytes for each cid - count := 1 - for key := range obj { - count += fixedPerLink - count += len(key) - } - return uint64(count) + indirectCount(len(obj)) -} - -// indirectCount returns the number of bytes needed to serialize the indirect -// node structure of the shardDAG based on the number of links being tracked. -func indirectCount(linkNum int) uint64 { - q := linkNum / MaxLinks - if q == 0 { // no indirect node needed - return 0 - } - dummyIndirect := make(map[string]cid.Cid) - for key := 0; key <= q; key++ { - dummyIndirect[fmt.Sprintf("%d", key)] = nil - } - // Count bytes of entries of single indirect node and add the map - // overhead for all leaf nodes other than the original - return byteCount(dummyIndirect) + uint64(q) -} - -// Return the number of bytes added to the total shard node metadata DAG when -// adding a new link to the given dagObj. -func deltaByteCount(obj dagObj) uint64 { - linkNum := len(obj) - q1 := linkNum / MaxLinks - q2 := (linkNum + 1) / MaxLinks - count := uint64(fixedPerLink) - count += uint64(len(fmt.Sprintf("%d", len(obj)))) - - // new shard nodes created by adding link - if q1 != q2 { - // first new leaf node created, i.e. indirect created too - if q2 == 1 { - count++ // map overhead of indirect node - count += 1 + fixedPerLink // fixedPerLink + len("0") - } - - // added to indirect node - count += fixedPerLink - count += uint64(len(fmt.Sprintf("%d", q2))) - - // overhead of new leaf node - count++ - } - return count -} -*/ diff --git a/packages/networking/ipfs-cluster/adder/sharding/dag_service.go b/packages/networking/ipfs-cluster/adder/sharding/dag_service.go deleted file mode 100644 index b26beb2..0000000 --- a/packages/networking/ipfs-cluster/adder/sharding/dag_service.go +++ /dev/null @@ -1,315 +0,0 @@ -// Package sharding implements a sharding ClusterDAGService places -// content in different shards while it's being added, creating -// a final Cluster DAG and pinning it. -package sharding - -import ( - "context" - "errors" - "fmt" - - "time" - - "github.com/ipfs-cluster/ipfs-cluster/adder" - "github.com/ipfs-cluster/ipfs-cluster/api" - - humanize "github.com/dustin/go-humanize" - cid "github.com/ipfs/go-cid" - ipld "github.com/ipfs/go-ipld-format" - logging "github.com/ipfs/go-log/v2" - peer "github.com/libp2p/go-libp2p/core/peer" - rpc "github.com/libp2p/go-libp2p-gorpc" -) - -var logger = logging.Logger("shardingdags") - -// DAGService is an implementation of a ClusterDAGService which -// shards content while adding among several IPFS Cluster peers, -// creating a Cluster DAG to track and pin that content selectively -// in the IPFS daemons allocated to it. 
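
// ---------------------------------------------------------------------------
// Editor's sketch (hypothetical wiring, modeled on makeAdder in
// dag_service_test.go; ctx, rpcClient and multipartReader are assumed to
// exist): a sharding DAGService is handed to the generic adder, which feeds
// it ipld nodes as content is chunked; shards are flushed and pinned as
// they fill up.
//
//	out := make(chan api.AddedOutput, 200)
//	params := api.DefaultAddParams()
//	params.Shard = true
//	params.ShardSize = 1024 * 1024 // 1 MiB shards
//	dags := sharding.New(ctx, rpcClient, params, out)
//	add := adder.New(dags, params, out)
//	root, err := add.FromMultipart(ctx, multipartReader)
// ---------------------------------------------------------------------------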
-type DAGService struct { - adder.BaseDAGService - - ctx context.Context - rpcClient *rpc.Client - - addParams api.AddParams - output chan<- api.AddedOutput - - addedSet *cid.Set - - // Current shard being built - currentShard *shard - // Last flushed shard CID - previousShard cid.Cid - - // shard tracking - shards map[string]cid.Cid - - startTime time.Time - totalSize uint64 -} - -// New returns a new ClusterDAGService, which uses the given rpc client to perform -// Allocate, IPFSStream and Pin requests to other cluster components. -func New(ctx context.Context, rpc *rpc.Client, opts api.AddParams, out chan<- api.AddedOutput) *DAGService { - // use a default value for this regardless of what is provided. - opts.Mode = api.PinModeRecursive - return &DAGService{ - ctx: ctx, - rpcClient: rpc, - addParams: opts, - output: out, - addedSet: cid.NewSet(), - shards: make(map[string]cid.Cid), - startTime: time.Now(), - } -} - -// Add puts the given node in its corresponding shard and sends it to the -// destination peers. -func (dgs *DAGService) Add(ctx context.Context, node ipld.Node) error { - // FIXME: This will grow in memory - if !dgs.addedSet.Visit(node.Cid()) { - return nil - } - - return dgs.ingestBlock(ctx, node) -} - -// Finalize finishes sharding, creates the cluster DAG and pins it along -// with the meta pin for the root node of the content. -func (dgs *DAGService) Finalize(ctx context.Context, dataRoot api.Cid) (api.Cid, error) { - lastCid, err := dgs.flushCurrentShard(ctx) - if err != nil { - return api.NewCid(lastCid), err - } - - if !lastCid.Equals(dataRoot.Cid) { - logger.Warnf("the last added CID (%s) is not the IPFS data root (%s). This is only normal when adding a single file without wrapping in directory.", lastCid, dataRoot) - } - - clusterDAGNodes, err := makeDAG(ctx, dgs.shards) - if err != nil { - return dataRoot, err - } - - // PutDAG to ourselves - blocks := make(chan api.NodeWithMeta, 256) - go func() { - defer close(blocks) - for _, n := range clusterDAGNodes { - select { - case <-ctx.Done(): - logger.Error(ctx.Err()) - return //abort - case blocks <- adder.IpldNodeToNodeWithMeta(n): - } - } - }() - - // Stream these blocks and wait until we are done. - bs := adder.NewBlockStreamer(ctx, dgs.rpcClient, []peer.ID{""}, blocks) - select { - case <-ctx.Done(): - return dataRoot, ctx.Err() - case <-bs.Done(): - } - - if err := bs.Err(); err != nil { - return dataRoot, err - } - - clusterDAG := clusterDAGNodes[0].Cid() - - dgs.sendOutput(api.AddedOutput{ - Name: fmt.Sprintf("%s-clusterDAG", dgs.addParams.Name), - Cid: api.NewCid(clusterDAG), - Size: dgs.totalSize, - Allocations: nil, - }) - - // Pin the ClusterDAG - clusterDAGPin := api.PinWithOpts(api.NewCid(clusterDAG), dgs.addParams.PinOptions) - clusterDAGPin.ReplicationFactorMin = -1 - clusterDAGPin.ReplicationFactorMax = -1 - clusterDAGPin.MaxDepth = 0 // pin direct - clusterDAGPin.Name = fmt.Sprintf("%s-clusterDAG", dgs.addParams.Name) - clusterDAGPin.Type = api.ClusterDAGType - clusterDAGPin.Reference = &dataRoot - // Update object with response. - err = adder.Pin(ctx, dgs.rpcClient, clusterDAGPin) - if err != nil { - return dataRoot, err - } - - // Pin the META pin - metaPin := api.PinWithOpts(dataRoot, dgs.addParams.PinOptions) - metaPin.Type = api.MetaType - ref := api.NewCid(clusterDAG) - metaPin.Reference = &ref - metaPin.MaxDepth = 0 // irrelevant. 
Meta-pins are not pinned - err = adder.Pin(ctx, dgs.rpcClient, metaPin) - if err != nil { - return dataRoot, err - } - - // Log some stats - dgs.logStats(metaPin.Cid, clusterDAGPin.Cid) - - // Consider doing this? Seems like overkill - // - // // Amend ShardPins to reference clusterDAG root hash as a Parent - // shardParents := cid.NewSet() - // shardParents.Add(clusterDAG) - // for shardN, shard := range dgs.shardNodes { - // pin := api.PinWithOpts(shard, dgs.addParams) - // pin.Name := fmt.Sprintf("%s-shard-%s", pin.Name, shardN) - // pin.Type = api.ShardType - // pin.Parents = shardParents - // // FIXME: We don't know anymore the shard pin maxDepth - // // so we'd need to get the pin first. - // err := dgs.pin(pin) - // if err != nil { - // return err - // } - // } - - return dataRoot, nil -} - -// Allocations returns the current allocations for the current shard. -func (dgs *DAGService) Allocations() []peer.ID { - // FIXME: this is probably not safe in concurrency? However, there is - // no concurrent execution of any code in the DAGService I think. - if dgs.currentShard != nil { - return dgs.currentShard.Allocations() - } - return nil -} - -// ingests a block to the current shard. If it get's full, it -// Flushes the shard and retries with a new one. -func (dgs *DAGService) ingestBlock(ctx context.Context, n ipld.Node) error { - shard := dgs.currentShard - - // if we have no currentShard, create one - if shard == nil { - logger.Infof("new shard for '%s': #%d", dgs.addParams.Name, len(dgs.shards)) - var err error - // important: shards use the DAGService context. - shard, err = newShard(dgs.ctx, ctx, dgs.rpcClient, dgs.addParams.PinOptions) - if err != nil { - return err - } - dgs.currentShard = shard - } - - logger.Debugf("ingesting block %s in shard %d (%s)", n.Cid(), len(dgs.shards), dgs.addParams.Name) - - // this is not same as n.Size() - size := uint64(len(n.RawData())) - - // add the block to it if it fits and return - if shard.Size()+size < shard.Limit() { - shard.AddLink(ctx, n.Cid(), size) - return dgs.currentShard.sendBlock(ctx, n) - } - - logger.Debugf("shard %d full: block: %d. shard: %d. 
limit: %d", - len(dgs.shards), - size, - shard.Size(), - shard.Limit(), - ) - - // ------- - // Below: block DOES NOT fit in shard - // Flush and retry - - // if shard is empty, error - if shard.Size() == 0 { - return errors.New("block doesn't fit in empty shard: shard size too small?") - } - - _, err := dgs.flushCurrentShard(ctx) - if err != nil { - return err - } - return dgs.ingestBlock(ctx, n) // <-- retry ingest -} - -func (dgs *DAGService) logStats(metaPin, clusterDAGPin api.Cid) { - duration := time.Since(dgs.startTime) - seconds := uint64(duration) / uint64(time.Second) - var rate string - if seconds == 0 { - rate = "∞ B" - } else { - rate = humanize.Bytes(dgs.totalSize / seconds) - } - - statsFmt := `sharding session successful: -CID: %s -ClusterDAG: %s -Total shards: %d -Total size: %s -Total time: %s -Ingest Rate: %s/s -` - - logger.Infof( - statsFmt, - metaPin, - clusterDAGPin, - len(dgs.shards), - humanize.Bytes(dgs.totalSize), - duration, - rate, - ) - -} - -func (dgs *DAGService) sendOutput(ao api.AddedOutput) { - if dgs.output != nil { - dgs.output <- ao - } -} - -// flushes the dgs.currentShard and returns the LastLink() -func (dgs *DAGService) flushCurrentShard(ctx context.Context) (cid.Cid, error) { - shard := dgs.currentShard - if shard == nil { - return cid.Undef, errors.New("cannot flush a nil shard") - } - - lens := len(dgs.shards) - - shardCid, err := shard.Flush(ctx, lens, dgs.previousShard) - if err != nil { - return shardCid, err - } - dgs.totalSize += shard.Size() - dgs.shards[fmt.Sprintf("%d", lens)] = shardCid - dgs.previousShard = shardCid - dgs.currentShard = nil - dgs.sendOutput(api.AddedOutput{ - Name: fmt.Sprintf("shard-%d", lens), - Cid: api.NewCid(shardCid), - Size: shard.Size(), - Allocations: shard.Allocations(), - }) - - return shard.LastLink(), nil -} - -// AddMany calls Add for every given node. 
-func (dgs *DAGService) AddMany(ctx context.Context, nodes []ipld.Node) error { - for _, node := range nodes { - err := dgs.Add(ctx, node) - if err != nil { - return err - } - } - return nil -} diff --git a/packages/networking/ipfs-cluster/adder/sharding/dag_service_test.go b/packages/networking/ipfs-cluster/adder/sharding/dag_service_test.go deleted file mode 100644 index ddb3d48..0000000 --- a/packages/networking/ipfs-cluster/adder/sharding/dag_service_test.go +++ /dev/null @@ -1,271 +0,0 @@ -package sharding - -import ( - "context" - "errors" - "mime/multipart" - "sync" - "testing" - - adder "github.com/ipfs-cluster/ipfs-cluster/adder" - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/test" - - logging "github.com/ipfs/go-log/v2" - peer "github.com/libp2p/go-libp2p/core/peer" - rpc "github.com/libp2p/go-libp2p-gorpc" -) - -func init() { - logging.SetLogLevel("shardingdags", "INFO") - logging.SetLogLevel("adder", "INFO") -} - -type testRPC struct { - blocks sync.Map - pins sync.Map -} - -func (rpcs *testRPC) BlockStream(ctx context.Context, in <-chan api.NodeWithMeta, out chan<- struct{}) error { - defer close(out) - for n := range in { - rpcs.blocks.Store(n.Cid.String(), n.Data) - } - return nil -} - -func (rpcs *testRPC) Pin(ctx context.Context, in api.Pin, out *api.Pin) error { - rpcs.pins.Store(in.Cid.String(), in) - *out = in - return nil -} - -func (rpcs *testRPC) BlockAllocate(ctx context.Context, in api.Pin, out *[]peer.ID) error { - if in.ReplicationFactorMin > 1 { - return errors.New("we can only replicate to 1 peer") - } - // it does not matter since we use host == nil for RPC, so it uses the - // local one in all cases - *out = []peer.ID{test.PeerID1} - return nil -} - -func (rpcs *testRPC) PinGet(ctx context.Context, c api.Cid) (api.Pin, error) { - pI, ok := rpcs.pins.Load(c.String()) - if !ok { - return api.Pin{}, errors.New("not found") - } - return pI.(api.Pin), nil -} - -func (rpcs *testRPC) BlockGet(ctx context.Context, c api.Cid) ([]byte, error) { - bI, ok := rpcs.blocks.Load(c.String()) - if !ok { - return nil, errors.New("not found") - } - return bI.([]byte), nil -} - -func makeAdder(t *testing.T, params api.AddParams) (*adder.Adder, *testRPC) { - rpcObj := &testRPC{} - server := rpc.NewServer(nil, "mock") - err := server.RegisterName("Cluster", rpcObj) - if err != nil { - t.Fatal(err) - } - err = server.RegisterName("IPFSConnector", rpcObj) - if err != nil { - t.Fatal(err) - } - client := rpc.NewClientWithServer(nil, "mock", server) - - out := make(chan api.AddedOutput, 1) - - dags := New(context.Background(), client, params, out) - add := adder.New(dags, params, out) - - go func() { - for v := range out { - t.Logf("Output: Name: %s. Cid: %s. 
Size: %d", v.Name, v.Cid, v.Size) - } - }() - - return add, rpcObj -} - -func TestFromMultipart(t *testing.T) { - sth := test.NewShardingTestHelper() - defer sth.Clean(t) - - t.Run("Test tree", func(t *testing.T) { - p := api.DefaultAddParams() - // Total data is about - p.ShardSize = 1024 * 300 // 300kB - p.Name = "testingFile" - p.Shard = true - p.ReplicationFactorMin = 1 - p.ReplicationFactorMax = 2 - - add, rpcObj := makeAdder(t, p) - _ = rpcObj - - mr, closer := sth.GetTreeMultiReader(t) - defer closer.Close() - r := multipart.NewReader(mr, mr.Boundary()) - - rootCid, err := add.FromMultipart(context.Background(), r) - if err != nil { - t.Fatal(err) - } - - // Print all pins - // rpcObj.pins.Range(func(k, v interface{}) bool { - // p := v.(*api.Pin) - // j, _ := config.DefaultJSONMarshal(p) - // fmt.Printf("%s", j) - // return true - // }) - - if rootCid.String() != test.ShardingDirBalancedRootCID { - t.Fatal("bad root CID") - } - - // 14 has been obtained by carefully observing the logs - // making sure that splitting happens in the right place. - shardBlocks, err := VerifyShards(t, rootCid, rpcObj, rpcObj, 14) - if err != nil { - t.Fatal(err) - } - for _, ci := range test.ShardingDirCids { - _, ok := shardBlocks[ci] - if !ok { - t.Fatal("shards are missing a block:", ci) - } - } - - if len(test.ShardingDirCids) != len(shardBlocks) { - t.Fatal("shards have some extra blocks") - } - for _, ci := range test.ShardingDirCids { - _, ok := shardBlocks[ci] - if !ok { - t.Fatal("shards are missing a block:", ci) - } - } - - if len(test.ShardingDirCids) != len(shardBlocks) { - t.Fatal("shards have some extra blocks") - } - - }) - - t.Run("Test file", func(t *testing.T) { - p := api.DefaultAddParams() - // Total data is about - p.ShardSize = 1024 * 1024 * 2 // 2MB - p.Name = "testingFile" - p.Shard = true - p.ReplicationFactorMin = 1 - p.ReplicationFactorMax = 2 - - add, rpcObj := makeAdder(t, p) - _ = rpcObj - - mr, closer := sth.GetRandFileMultiReader(t, 1024*50) // 50 MB - defer closer.Close() - r := multipart.NewReader(mr, mr.Boundary()) - - rootCid, err := add.FromMultipart(context.Background(), r) - if err != nil { - t.Fatal(err) - } - - shardBlocks, err := VerifyShards(t, rootCid, rpcObj, rpcObj, 29) - if err != nil { - t.Fatal(err) - } - _ = shardBlocks - }) - -} - -func TestFromMultipart_Errors(t *testing.T) { - type testcase struct { - name string - params api.AddParams - } - - tcs := []*testcase{ - { - name: "bad chunker", - params: api.AddParams{ - Format: "", - IPFSAddParams: api.IPFSAddParams{ - Chunker: "aweee", - RawLeaves: false, - }, - Hidden: false, - Shard: true, - PinOptions: api.PinOptions{ - ReplicationFactorMin: -1, - ReplicationFactorMax: -1, - Name: "test", - ShardSize: 1024 * 1024, - }, - }, - }, - { - name: "shard size too small", - params: api.AddParams{ - Format: "", - IPFSAddParams: api.IPFSAddParams{ - Chunker: "", - RawLeaves: false, - }, - Hidden: false, - Shard: true, - PinOptions: api.PinOptions{ - ReplicationFactorMin: -1, - ReplicationFactorMax: -1, - Name: "test", - ShardSize: 200, - }, - }, - }, - { - name: "replication too high", - params: api.AddParams{ - Format: "", - IPFSAddParams: api.IPFSAddParams{ - Chunker: "", - RawLeaves: false, - }, - Hidden: false, - Shard: true, - PinOptions: api.PinOptions{ - ReplicationFactorMin: 2, - ReplicationFactorMax: 3, - Name: "test", - ShardSize: 1024 * 1024, - }, - }, - }, - } - - sth := test.NewShardingTestHelper() - defer sth.Clean(t) - for _, tc := range tcs { - add, rpcObj := makeAdder(t, tc.params) - _ = 
rpcObj - - f := sth.GetTreeSerialFile(t) - - _, err := add.FromFiles(context.Background(), f) - if err == nil { - t.Error(tc.name, ": expected an error") - } else { - t.Log(tc.name, ":", err) - } - f.Close() - } -} diff --git a/packages/networking/ipfs-cluster/adder/sharding/shard.go b/packages/networking/ipfs-cluster/adder/sharding/shard.go deleted file mode 100644 index 0bb01e6..0000000 --- a/packages/networking/ipfs-cluster/adder/sharding/shard.go +++ /dev/null @@ -1,166 +0,0 @@ -package sharding - -import ( - "context" - "fmt" - - ipld "github.com/ipfs/go-ipld-format" - "github.com/ipfs-cluster/ipfs-cluster/adder" - "github.com/ipfs-cluster/ipfs-cluster/api" - - cid "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p/core/peer" - rpc "github.com/libp2p/go-libp2p-gorpc" - - humanize "github.com/dustin/go-humanize" -) - -// a shard represents a set of blocks (or bucket) which have been assigned -// a peer to be block-put and will be part of the same shard in the -// cluster DAG. -type shard struct { - ctx context.Context - rpc *rpc.Client - allocations []peer.ID - pinOptions api.PinOptions - bs *adder.BlockStreamer - blocks chan api.NodeWithMeta - // dagNode represents a node with links and will be converted - // to Cbor. - dagNode map[string]cid.Cid - currentSize uint64 - sizeLimit uint64 -} - -func newShard(globalCtx context.Context, ctx context.Context, rpc *rpc.Client, opts api.PinOptions) (*shard, error) { - allocs, err := adder.BlockAllocate(ctx, rpc, opts) - if err != nil { - return nil, err - } - - if opts.ReplicationFactorMin > 0 && len(allocs) == 0 { - // This would mean that the empty cid is part of the shared state somehow. - panic("allocations for new shard cannot be empty without error") - } - - if opts.ReplicationFactorMin < 0 { - logger.Warn("Shard is set to replicate everywhere ,which doesn't make sense for sharding") - } - - // TODO (hector): get latest metrics for allocations, adjust sizeLimit - // to minimum. This can be done later. - - blocks := make(chan api.NodeWithMeta, 256) - - return &shard{ - ctx: globalCtx, - rpc: rpc, - allocations: allocs, - pinOptions: opts, - bs: adder.NewBlockStreamer(globalCtx, rpc, allocs, blocks), - blocks: blocks, - dagNode: make(map[string]cid.Cid), - currentSize: 0, - sizeLimit: opts.ShardSize, - }, nil -} - -// AddLink tries to add a new block to this shard if it's not full. -// Returns true if the block was added -func (sh *shard) AddLink(ctx context.Context, c cid.Cid, s uint64) { - linkN := len(sh.dagNode) - linkName := fmt.Sprintf("%d", linkN) - logger.Debugf("shard: add link: %s", linkName) - - sh.dagNode[linkName] = c - sh.currentSize += s -} - -// Allocations returns the peer IDs on which blocks are put for this shard. -func (sh *shard) Allocations() []peer.ID { - if len(sh.allocations) == 1 && sh.allocations[0] == "" { - return nil - } - return sh.allocations -} - -func (sh *shard) sendBlock(ctx context.Context, n ipld.Node) error { - select { - case <-ctx.Done(): - return ctx.Err() - case sh.blocks <- adder.IpldNodeToNodeWithMeta(n): - return nil - } -} - -// Flush completes the allocation of this shard by building a CBOR node -// and adding it to IPFS, then pinning it in cluster. It returns the Cid of the -// shard. 
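
// ---------------------------------------------------------------------------
// Editor's sketch (assumed usage, distilled from ingestBlock in
// dag_service.go above): a shard accumulates links until the next block
// would push it past its size limit, at which point it is flushed and a
// fresh shard is started.
//
//	size := uint64(len(n.RawData()))
//	if sh.Size()+size < sh.Limit() {
//		sh.AddLink(ctx, n.Cid(), size) // fits: track the link
//		err = sh.sendBlock(ctx, n)     // and stream the block
//	} else {
//		shardCid, err := sh.Flush(ctx, shardN, prevShardCid)
//		// then create a new shard and retry this block
//	}
// ---------------------------------------------------------------------------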
-func (sh *shard) Flush(ctx context.Context, shardN int, prev cid.Cid) (cid.Cid, error) {
-	logger.Debugf("shard %d: flush", shardN)
-	nodes, err := makeDAG(ctx, sh.dagNode)
-	if err != nil {
-		return cid.Undef, err
-	}
-
-	for _, n := range nodes {
-		err = sh.sendBlock(ctx, n)
-		if err != nil {
-			close(sh.blocks)
-			return cid.Undef, err
-		}
-	}
-	close(sh.blocks)
-	select {
-	case <-ctx.Done():
-		return cid.Undef, ctx.Err()
-	case <-sh.bs.Done():
-	}
-
-	if err := sh.bs.Err(); err != nil {
-		return cid.Undef, err
-	}
-
-	rootCid := nodes[0].Cid()
-	pin := api.PinWithOpts(api.NewCid(rootCid), sh.pinOptions)
-	pin.Name = fmt.Sprintf("%s-shard-%d", sh.pinOptions.Name, shardN)
-	// this sets allocations as priority allocation
-	pin.Allocations = sh.allocations
-	pin.Type = api.ShardType
-	ref := api.NewCid(prev)
-	pin.Reference = &ref
-	pin.MaxDepth = 1
-	pin.ShardSize = sh.Size() // use current size, not the limit
-	if len(nodes) > len(sh.dagNode)+1 { // using an indirect graph
-		pin.MaxDepth = 2
-	}
-
-	logger.Infof("shard #%d (%s) completed. Total size: %s. Links: %d",
-		shardN,
-		rootCid,
-		humanize.Bytes(sh.Size()),
-		len(sh.dagNode),
-	)
-
-	return rootCid, adder.Pin(ctx, sh.rpc, pin)
-}
-
-// Size returns this shard's current size.
-func (sh *shard) Size() uint64 {
-	return sh.currentSize
-}
-
-// Limit returns this shard's size limit.
-func (sh *shard) Limit() uint64 {
-	return sh.sizeLimit
-}
-
-// LastLink returns the last added link. When finishing sharding,
-// the last link of the last shard is the data root for the
-// full sharded DAG (the CID that would have resulted from
-// adding the content to a single IPFS daemon).
-func (sh *shard) LastLink() cid.Cid {
-	l := len(sh.dagNode)
-	lastLink := fmt.Sprintf("%d", l-1)
-	return sh.dagNode[lastLink]
-}
diff --git a/packages/networking/ipfs-cluster/adder/sharding/verify.go b/packages/networking/ipfs-cluster/adder/sharding/verify.go
deleted file mode 100644
index 4af15c5..0000000
--- a/packages/networking/ipfs-cluster/adder/sharding/verify.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package sharding
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"testing"
-
-	"github.com/ipfs-cluster/ipfs-cluster/api"
-)
-
-// MockPinStore is used in VerifyShards
-type MockPinStore interface {
-	// Gets a pin
-	PinGet(context.Context, api.Cid) (api.Pin, error)
-}
-
-// MockBlockStore is used in VerifyShards
-type MockBlockStore interface {
-	// Gets a block
-	BlockGet(context.Context, api.Cid) ([]byte, error)
-}
-
-// VerifyShards checks that a sharded CID has been correctly formed and stored.
-// This is a helper function for testing. It returns a map with all the blocks
-// from all shards.
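
// ---------------------------------------------------------------------------
// Editor's sketch (assumed test usage, as in the sharding tests): after a
// sharded add, VerifyShards walks meta pin -> cluster DAG pin -> shard pins
// and returns every block referenced by the shards.
//
//	shardBlocks, err := VerifyShards(t, rootCid, pinStore, blockStore, 14)
//	if err != nil {
//		t.Fatal(err)
//	}
//	// shardBlocks maps every data-block CID tracked by the shards
// ---------------------------------------------------------------------------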
-func VerifyShards(t *testing.T, rootCid api.Cid, pins MockPinStore, ipfs MockBlockStore, expectedShards int) (map[string]struct{}, error) { - ctx := context.Background() - metaPin, err := pins.PinGet(ctx, rootCid) - if err != nil { - return nil, fmt.Errorf("meta pin was not pinned: %s", err) - } - - if api.PinType(metaPin.Type) != api.MetaType { - return nil, fmt.Errorf("bad MetaPin type") - } - - if metaPin.Reference == nil { - return nil, errors.New("metaPin.Reference is unset") - } - - clusterPin, err := pins.PinGet(ctx, *metaPin.Reference) - if err != nil { - return nil, fmt.Errorf("cluster pin was not pinned: %s", err) - } - if api.PinType(clusterPin.Type) != api.ClusterDAGType { - return nil, fmt.Errorf("bad ClusterDAGPin type") - } - - if !clusterPin.Reference.Equals(metaPin.Cid) { - return nil, fmt.Errorf("clusterDAG should reference the MetaPin") - } - - clusterDAGBlock, err := ipfs.BlockGet(ctx, clusterPin.Cid) - if err != nil { - return nil, fmt.Errorf("cluster pin was not stored: %s", err) - } - - clusterDAGNode, err := CborDataToNode(clusterDAGBlock, "cbor") - if err != nil { - return nil, err - } - - shards := clusterDAGNode.Links() - if len(shards) != expectedShards { - return nil, fmt.Errorf("bad number of shards") - } - - shardBlocks := make(map[string]struct{}) - var ref api.Cid - // traverse shards in order - for i := 0; i < len(shards); i++ { - sh, _, err := clusterDAGNode.ResolveLink([]string{fmt.Sprintf("%d", i)}) - if err != nil { - return nil, err - } - - shardPin, err := pins.PinGet(ctx, api.NewCid(sh.Cid)) - if err != nil { - return nil, fmt.Errorf("shard was not pinned: %s %s", sh.Cid, err) - } - - if ref != api.CidUndef && !shardPin.Reference.Equals(ref) { - t.Errorf("Ref (%s) should point to previous shard (%s)", ref, shardPin.Reference) - } - ref = shardPin.Cid - - shardBlock, err := ipfs.BlockGet(ctx, shardPin.Cid) - if err != nil { - return nil, fmt.Errorf("shard block was not stored: %s", err) - } - shardNode, err := CborDataToNode(shardBlock, "cbor") - if err != nil { - return nil, err - } - for _, l := range shardNode.Links() { - ci := l.Cid.String() - _, ok := shardBlocks[ci] - if ok { - return nil, fmt.Errorf("block belongs to two shards: %s", ci) - } - shardBlocks[ci] = struct{}{} - } - } - return shardBlocks, nil -} diff --git a/packages/networking/ipfs-cluster/adder/single/dag_service.go b/packages/networking/ipfs-cluster/adder/single/dag_service.go deleted file mode 100644 index a9c57ac..0000000 --- a/packages/networking/ipfs-cluster/adder/single/dag_service.go +++ /dev/null @@ -1,178 +0,0 @@ -// Package single implements a ClusterDAGService that chunks and adds content -// to cluster without sharding, before pinning it. -package single - -import ( - "context" - - adder "github.com/ipfs-cluster/ipfs-cluster/adder" - "github.com/ipfs-cluster/ipfs-cluster/api" - - cid "github.com/ipfs/go-cid" - ipld "github.com/ipfs/go-ipld-format" - logging "github.com/ipfs/go-log/v2" - peer "github.com/libp2p/go-libp2p/core/peer" - rpc "github.com/libp2p/go-libp2p-gorpc" -) - -var logger = logging.Logger("singledags") -var _ = logger // otherwise unused - -// DAGService is an implementation of an adder.ClusterDAGService which -// puts the added blocks directly in the peers allocated to them (without -// sharding). 
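
// ---------------------------------------------------------------------------
// Editor's sketch (hypothetical wiring, mirroring dag_service_test.go in
// this package): the non-sharding DAGService streams every block to the
// allocated peers and cluster-pins the root when Finalize is called.
//
//	dags := single.New(ctx, rpcClient, params, false) // local == false
//	add := adder.New(dags, params, nil)
//	root, err := add.FromMultipart(ctx, multipartReader)
// ---------------------------------------------------------------------------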
-type DAGService struct {
-	adder.BaseDAGService
-
-	ctx       context.Context
-	rpcClient *rpc.Client
-
-	dests     []peer.ID
-	addParams api.AddParams
-	local     bool
-
-	bs           *adder.BlockStreamer
-	blocks       chan api.NodeWithMeta
-	recentBlocks *recentBlocks
-}
-
-// New returns a new DAGService with the given rpc Client. The client is used
-// to perform calls to IPFS.BlockStream and Pin content on Cluster.
-func New(ctx context.Context, rpc *rpc.Client, opts api.AddParams, local bool) *DAGService {
-	// ensure we don't Add something and pin it in direct mode.
-	opts.Mode = api.PinModeRecursive
-	return &DAGService{
-		ctx:          ctx,
-		rpcClient:    rpc,
-		dests:        nil,
-		addParams:    opts,
-		local:        local,
-		blocks:       make(chan api.NodeWithMeta, 256),
-		recentBlocks: &recentBlocks{},
-	}
-}
-
-// Add puts the given node in the destination peers.
-func (dgs *DAGService) Add(ctx context.Context, node ipld.Node) error {
-	// Avoid adding the same node multiple times in a row.
-	// This is done by the ipfsadd-er, because some nodes are added
-	// via dagbuilder, then via MFS, and root nodes once more.
-	if dgs.recentBlocks.Has(node) {
-		return nil
-	}
-
-	// FIXME: can't this happen on initialization? Perhaps the point here
-	// is the adder only allocates and starts streaming when the first
-	// block arrives and not on creation.
-	if dgs.dests == nil {
-		dests, err := adder.BlockAllocate(ctx, dgs.rpcClient, dgs.addParams.PinOptions)
-		if err != nil {
-			return err
-		}
-
-		hasLocal := false
-		localPid := dgs.rpcClient.ID()
-		for i, d := range dests {
-			if d == localPid || d == "" {
-				hasLocal = true
-				// ensure our allocs do not carry an empty peer
-				// mostly an issue with testing mocks
-				dests[i] = localPid
-			}
-		}
-
-		dgs.dests = dests
-
-		if dgs.local {
-			// If this is a local pin, make sure that the local
-			// peer is among the allocations, UNLESS
-			// user-allocations are defined!
-			if !hasLocal && localPid != "" && len(dgs.addParams.UserAllocations) == 0 {
-				// replace last allocation with local peer
-				dgs.dests[len(dgs.dests)-1] = localPid
-			}
-
-			dgs.bs = adder.NewBlockStreamer(dgs.ctx, dgs.rpcClient, []peer.ID{localPid}, dgs.blocks)
-		} else {
-			dgs.bs = adder.NewBlockStreamer(dgs.ctx, dgs.rpcClient, dgs.dests, dgs.blocks)
-		}
-	}
-
-	select {
-	case <-ctx.Done():
-		return ctx.Err()
-	case <-dgs.ctx.Done():
-		return ctx.Err()
-	case dgs.blocks <- adder.IpldNodeToNodeWithMeta(node):
-		dgs.recentBlocks.Add(node)
-		return nil
-	}
-}
-
-// Finalize pins the last Cid added to this DAGService.
-func (dgs *DAGService) Finalize(ctx context.Context, root api.Cid) (api.Cid, error) {
-	close(dgs.blocks)
-
-	select {
-	case <-dgs.ctx.Done():
-		return root, ctx.Err()
-	case <-ctx.Done():
-		return root, ctx.Err()
-	case <-dgs.bs.Done():
-	}
-
-	// If the streamer failed to put blocks.
-	if err := dgs.bs.Err(); err != nil {
-		return root, err
-	}
-
-	// Do not pin, just block put.
-	// Why? Because some people are uploading CAR files with partial DAGs
-	// and ideally they should be pinning only when the last partial CAR
-	// is uploaded. This gives them that option.
-	if dgs.addParams.NoPin {
-		return root, nil
-	}
-
-	// Cluster pin the result
-	rootPin := api.PinWithOpts(root, dgs.addParams.PinOptions)
-	rootPin.Allocations = dgs.dests
-
-	return root, adder.Pin(ctx, dgs.rpcClient, rootPin)
-}
-
-// Allocations returns the add destinations decided by the DAGService.
-func (dgs *DAGService) Allocations() []peer.ID { - // using rpc clients without a host results in an empty peer - // which cannot be parsed to peer.ID on deserialization. - if len(dgs.dests) == 1 && dgs.dests[0] == "" { - return nil - } - return dgs.dests -} - -// AddMany calls Add for every given node. -func (dgs *DAGService) AddMany(ctx context.Context, nodes []ipld.Node) error { - for _, node := range nodes { - err := dgs.Add(ctx, node) - if err != nil { - return err - } - } - return nil -} - -type recentBlocks struct { - blocks [2]cid.Cid - cur int -} - -func (rc *recentBlocks) Add(n ipld.Node) { - rc.blocks[rc.cur] = n.Cid() - rc.cur = (rc.cur + 1) % 2 -} - -func (rc *recentBlocks) Has(n ipld.Node) bool { - c := n.Cid() - return rc.blocks[0].Equals(c) || rc.blocks[1].Equals(c) -} diff --git a/packages/networking/ipfs-cluster/adder/single/dag_service_test.go b/packages/networking/ipfs-cluster/adder/single/dag_service_test.go deleted file mode 100644 index 0f30d97..0000000 --- a/packages/networking/ipfs-cluster/adder/single/dag_service_test.go +++ /dev/null @@ -1,138 +0,0 @@ -package single - -import ( - "context" - "errors" - "mime/multipart" - "sync" - "testing" - - adder "github.com/ipfs-cluster/ipfs-cluster/adder" - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/test" - - peer "github.com/libp2p/go-libp2p/core/peer" - rpc "github.com/libp2p/go-libp2p-gorpc" -) - -type testIPFSRPC struct { - blocks sync.Map -} - -type testClusterRPC struct { - pins sync.Map -} - -func (rpcs *testIPFSRPC) BlockStream(ctx context.Context, in <-chan api.NodeWithMeta, out chan<- struct{}) error { - defer close(out) - for n := range in { - rpcs.blocks.Store(n.Cid.String(), n) - } - return nil -} - -func (rpcs *testClusterRPC) Pin(ctx context.Context, in api.Pin, out *api.Pin) error { - rpcs.pins.Store(in.Cid.String(), in) - *out = in - return nil -} - -func (rpcs *testClusterRPC) BlockAllocate(ctx context.Context, in api.Pin, out *[]peer.ID) error { - if in.ReplicationFactorMin > 1 { - return errors.New("we can only replicate to 1 peer") - } - // it does not matter since we use host == nil for RPC, so it uses the - // local one in all cases. 
- *out = []peer.ID{test.PeerID1} - return nil -} - -func TestAdd(t *testing.T) { - t.Run("balanced", func(t *testing.T) { - clusterRPC := &testClusterRPC{} - ipfsRPC := &testIPFSRPC{} - server := rpc.NewServer(nil, "mock") - err := server.RegisterName("Cluster", clusterRPC) - if err != nil { - t.Fatal(err) - } - err = server.RegisterName("IPFSConnector", ipfsRPC) - if err != nil { - t.Fatal(err) - } - client := rpc.NewClientWithServer(nil, "mock", server) - params := api.DefaultAddParams() - params.Wrap = true - - dags := New(context.Background(), client, params, false) - add := adder.New(dags, params, nil) - - sth := test.NewShardingTestHelper() - defer sth.Clean(t) - mr, closer := sth.GetTreeMultiReader(t) - defer closer.Close() - r := multipart.NewReader(mr, mr.Boundary()) - - rootCid, err := add.FromMultipart(context.Background(), r) - if err != nil { - t.Fatal(err) - } - - if rootCid.String() != test.ShardingDirBalancedRootCIDWrapped { - t.Fatal("bad root cid: ", rootCid) - } - - expected := test.ShardingDirCids[:] - for _, c := range expected { - _, ok := ipfsRPC.blocks.Load(c) - if !ok { - t.Error("block was not added to IPFS", c) - } - } - - _, ok := clusterRPC.pins.Load(test.ShardingDirBalancedRootCIDWrapped) - if !ok { - t.Error("the tree wasn't pinned") - } - }) - - t.Run("trickle", func(t *testing.T) { - clusterRPC := &testClusterRPC{} - ipfsRPC := &testIPFSRPC{} - server := rpc.NewServer(nil, "mock") - err := server.RegisterName("Cluster", clusterRPC) - if err != nil { - t.Fatal(err) - } - err = server.RegisterName("IPFSConnector", ipfsRPC) - if err != nil { - t.Fatal(err) - } - client := rpc.NewClientWithServer(nil, "mock", server) - params := api.DefaultAddParams() - params.Layout = "trickle" - - dags := New(context.Background(), client, params, false) - add := adder.New(dags, params, nil) - - sth := test.NewShardingTestHelper() - defer sth.Clean(t) - mr, closer := sth.GetTreeMultiReader(t) - defer closer.Close() - r := multipart.NewReader(mr, mr.Boundary()) - - rootCid, err := add.FromMultipart(context.Background(), r) - if err != nil { - t.Fatal(err) - } - - if rootCid.String() != test.ShardingDirTrickleRootCID { - t.Fatal("bad root cid") - } - - _, ok := clusterRPC.pins.Load(test.ShardingDirTrickleRootCID) - if !ok { - t.Error("the tree wasn't pinned") - } - }) -} diff --git a/packages/networking/ipfs-cluster/adder/util.go b/packages/networking/ipfs-cluster/adder/util.go deleted file mode 100644 index 4c25d56..0000000 --- a/packages/networking/ipfs-cluster/adder/util.go +++ /dev/null @@ -1,180 +0,0 @@ -package adder - -import ( - "context" - "errors" - "sync" - - "github.com/ipfs-cluster/ipfs-cluster/api" - "go.uber.org/multierr" - - cid "github.com/ipfs/go-cid" - ipld "github.com/ipfs/go-ipld-format" - peer "github.com/libp2p/go-libp2p/core/peer" - rpc "github.com/libp2p/go-libp2p-gorpc" -) - -// ErrBlockAdder is returned when adding a to multiple destinations -// block fails on all of them. -var ErrBlockAdder = errors.New("failed to put block on all destinations") - -// BlockStreamer helps streaming nodes to multiple destinations, as long as -// one of them is still working. -type BlockStreamer struct { - dests []peer.ID - rpcClient *rpc.Client - blocks <-chan api.NodeWithMeta - - ctx context.Context - cancel context.CancelFunc - errMu sync.Mutex - err error -} - -// NewBlockStreamer creates a BlockStreamer given an rpc client, allocated -// peers and a channel on which the blocks to stream are received. 
-func NewBlockStreamer(ctx context.Context, rpcClient *rpc.Client, dests []peer.ID, blocks <-chan api.NodeWithMeta) *BlockStreamer { - bsCtx, cancel := context.WithCancel(ctx) - - bs := BlockStreamer{ - ctx: bsCtx, - cancel: cancel, - dests: dests, - rpcClient: rpcClient, - blocks: blocks, - err: nil, - } - - go bs.streamBlocks() - return &bs -} - -// Done returns a channel which gets closed when the BlockStreamer has -// finished. -func (bs *BlockStreamer) Done() <-chan struct{} { - return bs.ctx.Done() -} - -func (bs *BlockStreamer) setErr(err error) { - bs.errMu.Lock() - bs.err = err - bs.errMu.Unlock() -} - -// Err returns any errors that happened after the operation of the -// BlockStreamer, for example when blocks could not be put to all nodes. -func (bs *BlockStreamer) Err() error { - bs.errMu.Lock() - defer bs.errMu.Unlock() - return bs.err -} - -func (bs *BlockStreamer) streamBlocks() { - defer bs.cancel() - - // Nothing should be sent on out. - // We drain though - out := make(chan struct{}) - go func() { - for range out { - } - }() - - errs := bs.rpcClient.MultiStream( - bs.ctx, - bs.dests, - "IPFSConnector", - "BlockStream", - bs.blocks, - out, - ) - - combinedErrors := multierr.Combine(errs...) - - // FIXME: replicate everywhere. - if len(multierr.Errors(combinedErrors)) == len(bs.dests) { - logger.Error(combinedErrors) - bs.setErr(ErrBlockAdder) - } else if combinedErrors != nil { - logger.Warning("there were errors streaming blocks, but at least one destination succeeded") - logger.Warning(combinedErrors) - } -} - -// IpldNodeToNodeWithMeta converts an ipld.Node to api.NodeWithMeta. -func IpldNodeToNodeWithMeta(n ipld.Node) api.NodeWithMeta { - size, err := n.Size() - if err != nil { - logger.Warn(err) - } - - return api.NodeWithMeta{ - Cid: api.NewCid(n.Cid()), - Data: n.RawData(), - CumSize: size, - } -} - -// BlockAllocate helps allocating blocks to peers. -func BlockAllocate(ctx context.Context, rpc *rpc.Client, pinOpts api.PinOptions) ([]peer.ID, error) { - // Find where to allocate this file - var allocsStr []peer.ID - err := rpc.CallContext( - ctx, - "", - "Cluster", - "BlockAllocate", - api.PinWithOpts(api.CidUndef, pinOpts), - &allocsStr, - ) - return allocsStr, err -} - -// Pin helps sending local RPC pin requests. -func Pin(ctx context.Context, rpc *rpc.Client, pin api.Pin) error { - if pin.ReplicationFactorMin < 0 { - pin.Allocations = []peer.ID{} - } - logger.Debugf("adder pinning %+v", pin) - var pinResp api.Pin - return rpc.CallContext( - ctx, - "", // use ourself to pin - "Cluster", - "Pin", - pin, - &pinResp, - ) -} - -// ErrDAGNotFound is returned whenever we try to get a block from the DAGService. -var ErrDAGNotFound = errors.New("dagservice: a Get operation was attempted while cluster-adding (this is likely a bug)") - -// BaseDAGService partially implements an ipld.DAGService. -// It provides the methods which are not needed by ClusterDAGServices -// (Get*, Remove*) so that they can save adding this code. 
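
// ---------------------------------------------------------------------------
// Editor's sketch (assumed usage of the BlockStreamer above, following the
// pattern in the shard and single-DAGService code): send blocks on the
// channel, close it, wait on Done(), then check Err().
//
//	blocks := make(chan api.NodeWithMeta, 256)
//	bs := NewBlockStreamer(ctx, rpcClient, dests, blocks)
//	for _, n := range nodes {
//		blocks <- IpldNodeToNodeWithMeta(n)
//	}
//	close(blocks) // no more input: lets streamBlocks finish
//	<-bs.Done()   // closed once streaming has ended
//	if err := bs.Err(); err != nil {
//		// every destination failed: ErrBlockAdder
//	}
// ---------------------------------------------------------------------------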
-type BaseDAGService struct { -} - -// Get always returns errNotFound -func (dag BaseDAGService) Get(ctx context.Context, key cid.Cid) (ipld.Node, error) { - return nil, ErrDAGNotFound -} - -// GetMany returns an output channel that always emits an error -func (dag BaseDAGService) GetMany(ctx context.Context, keys []cid.Cid) <-chan *ipld.NodeOption { - out := make(chan *ipld.NodeOption, 1) - out <- &ipld.NodeOption{Err: ErrDAGNotFound} - close(out) - return out -} - -// Remove is a nop -func (dag BaseDAGService) Remove(ctx context.Context, key cid.Cid) error { - return nil -} - -// RemoveMany is a nop -func (dag BaseDAGService) RemoveMany(ctx context.Context, keys []cid.Cid) error { - return nil -} diff --git a/packages/networking/ipfs-cluster/allocate.go b/packages/networking/ipfs-cluster/allocate.go deleted file mode 100644 index 521dd57..0000000 --- a/packages/networking/ipfs-cluster/allocate.go +++ /dev/null @@ -1,270 +0,0 @@ -package ipfscluster - -import ( - "context" - "errors" - "fmt" - - peer "github.com/libp2p/go-libp2p/core/peer" - - "go.opencensus.io/trace" - - "github.com/ipfs-cluster/ipfs-cluster/api" -) - -// This file gathers allocation logic used when pinning or re-pinning -// to find which peers should be allocated to a Cid. Allocation is constrained -// by ReplicationFactorMin and ReplicationFactorMax parameters obtained -// from the Pin object. - -// The allocation process has several steps: -// -// * Find which peers are pinning a CID -// * Obtain the last values for the configured informer metrics from the -// monitor component -// * Divide the metrics between "current" (peers already pinning the CID) -// and "candidates" (peers that could pin the CID), as long as their metrics -// are valid. -// * Given the candidates: -// * Check if we are overpinning an item -// * Check if there are not enough candidates for the "needed" replication -// factor. -// * If there are enough candidates: -// * Call the configured allocator, which sorts the candidates (and -// may veto some depending on the allocation strategy. -// * The allocator returns a list of final candidate peers sorted by -// order of preference. -// * Take as many final candidates from the list as we can, until -// ReplicationFactorMax is reached. Error if there are less than -// ReplicationFactorMin. - -// A wrapper to carry peer metrics that have been classified. -type classifiedMetrics struct { - current api.MetricsSet - currentPeers []peer.ID - candidate api.MetricsSet - candidatePeers []peer.ID - priority api.MetricsSet - priorityPeers []peer.ID -} - -// allocate finds peers to allocate a hash using the informer and the monitor -// it should only be used with valid replicationFactors (if rplMin and rplMax -// are > 0, then rplMin <= rplMax). -// It always returns allocations, but if no new allocations are needed, -// it will return the current ones. Note that allocate() does not take -// into account if the given CID was previously in a "pin everywhere" mode, -// and will consider such Pins as currently unallocated ones, providing -// new allocations as available. 
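
// ---------------------------------------------------------------------------
// Editor's note (worked example of the arithmetic used by obtainAllocations
// below; the numbers are hypothetical): with rplMin=2, rplMax=4 and one
// currently valid allocation, needed = 2-1 = 1 and wanted = 4-1 = 3, so at
// least one new peer must be found and at most three will be added.
//
//	needed := rplMin - nCurrentValid // below this: error out
//	wanted := rplMax - nCurrentValid // never allocate more than this
//	switch {
//	case wanted < 0:  // over-pinned: drop some current allocations
//	case needed <= 0: // already at or above rplMin: keep current ones
//	case nAvailableValid < needed: // not enough candidates: error
//	}
// ---------------------------------------------------------------------------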
-func (c *Cluster) allocate(ctx context.Context, hash api.Cid, currentPin api.Pin, rplMin, rplMax int, blacklist []peer.ID, priorityList []peer.ID) ([]peer.ID, error) { - ctx, span := trace.StartSpan(ctx, "cluster/allocate") - defer span.End() - - if (rplMin + rplMax) == 0 { - return nil, fmt.Errorf("bad replication factors: %d/%d", rplMin, rplMax) - } - - if rplMin < 0 && rplMax < 0 { // allocate everywhere - return []peer.ID{}, nil - } - - // Figure out who is holding the CID - var currentAllocs []peer.ID - if currentPin.Defined() { - currentAllocs = currentPin.Allocations - } - - // Get Metrics that the allocator is interested on - mSet := make(api.MetricsSet) - metrics := c.allocator.Metrics() - for _, metricName := range metrics { - mSet[metricName] = c.monitor.LatestMetrics(ctx, metricName) - } - - // Filter and divide metrics. The resulting sets only have peers that - // have all the metrics needed and are not blacklisted. - classified := c.filterMetrics( - ctx, - mSet, - len(metrics), - currentAllocs, - priorityList, - blacklist, - ) - - newAllocs, err := c.obtainAllocations( - ctx, - hash, - rplMin, - rplMax, - classified, - ) - if err != nil { - return newAllocs, err - } - - // if current allocations are above the minimal threshold, - // obtainAllocations returns nil and we just leave things as they are. - // This is what makes repinning do nothing if items are still above - // rmin. - if newAllocs == nil { - newAllocs = currentAllocs - } - return newAllocs, nil -} - -// Given metrics from all informers, split them into 3 MetricsSet: -// - Those corresponding to currently allocated peers -// - Those corresponding to priority allocations -// - Those corresponding to "candidate" allocations -// And return also an slice of the peers in those groups. -// -// Peers from untrusted peers are left out if configured. -// -// For a metric/peer to be included in a group, it is necessary that it has -// metrics for all informers. -func (c *Cluster) filterMetrics(ctx context.Context, mSet api.MetricsSet, numMetrics int, currentAllocs, priorityList, blacklist []peer.ID) classifiedMetrics { - curPeersMap := make(map[peer.ID][]api.Metric) - candPeersMap := make(map[peer.ID][]api.Metric) - prioPeersMap := make(map[peer.ID][]api.Metric) - - // Divide the metric by current/candidate/prio and by peer - for _, metrics := range mSet { - for _, m := range metrics { - switch { - case containsPeer(blacklist, m.Peer): - // discard blacklisted peers - continue - case c.config.PinOnlyOnTrustedPeers && !c.consensus.IsTrustedPeer(ctx, m.Peer): - // discard peer that are not trusted when - // configured. - continue - case containsPeer(currentAllocs, m.Peer): - curPeersMap[m.Peer] = append(curPeersMap[m.Peer], m) - case containsPeer(priorityList, m.Peer): - prioPeersMap[m.Peer] = append(prioPeersMap[m.Peer], m) - default: - candPeersMap[m.Peer] = append(candPeersMap[m.Peer], m) - } - } - } - - fillMetricsSet := func(peersMap map[peer.ID][]api.Metric) (api.MetricsSet, []peer.ID) { - mSet := make(api.MetricsSet) - peers := make([]peer.ID, 0, len(peersMap)) - - // Put the metrics in their sets if peers have metrics for all - // informers Record peers. This relies on LatestMetrics - // returning exactly one metric per peer. Thus, a peer with - // all the needed metrics should have exactly numMetrics. - // Otherwise, they are ignored. 
- for p, metrics := range peersMap { - if len(metrics) == numMetrics { - for _, m := range metrics { - mSet[m.Name] = append(mSet[m.Name], m) - } - peers = append(peers, p) - } // otherwise this peer will be ignored. - } - return mSet, peers - } - - curSet, curPeers := fillMetricsSet(curPeersMap) - candSet, candPeers := fillMetricsSet(candPeersMap) - prioSet, prioPeers := fillMetricsSet(prioPeersMap) - - return classifiedMetrics{ - current: curSet, - currentPeers: curPeers, - candidate: candSet, - candidatePeers: candPeers, - priority: prioSet, - priorityPeers: prioPeers, - } -} - -// allocationError logs an allocation error -func allocationError(hash api.Cid, needed, wanted int, candidatesValid []peer.ID) error { - logger.Errorf("Not enough candidates to allocate %s:", hash) - logger.Errorf(" Needed: %d", needed) - logger.Errorf(" Wanted: %d", wanted) - logger.Errorf(" Available candidates: %d:", len(candidatesValid)) - for _, c := range candidatesValid { - logger.Errorf(" - %s", c.Pretty()) - } - errorMsg := "not enough peers to allocate CID. " - errorMsg += fmt.Sprintf("Needed at least: %d. ", needed) - errorMsg += fmt.Sprintf("Wanted at most: %d. ", wanted) - errorMsg += fmt.Sprintf("Available candidates: %d. ", len(candidatesValid)) - errorMsg += "See logs for more info." - return errors.New(errorMsg) -} - -func (c *Cluster) obtainAllocations( - ctx context.Context, - hash api.Cid, - rplMin, rplMax int, - metrics classifiedMetrics, -) ([]peer.ID, error) { - ctx, span := trace.StartSpan(ctx, "cluster/obtainAllocations") - defer span.End() - - nCurrentValid := len(metrics.currentPeers) - nAvailableValid := len(metrics.candidatePeers) + len(metrics.priorityPeers) - needed := rplMin - nCurrentValid // The minimum we need - wanted := rplMax - nCurrentValid // The maximum we want - - logger.Debugf("obtainAllocations: current: %d", nCurrentValid) - logger.Debugf("obtainAllocations: available: %d", nAvailableValid) - logger.Debugf("obtainAllocations: candidates: %d", len(metrics.candidatePeers)) - logger.Debugf("obtainAllocations: priority: %d", len(metrics.priorityPeers)) - logger.Debugf("obtainAllocations: Needed: %d", needed) - logger.Debugf("obtainAllocations: Wanted: %d", wanted) - - // Reminder: rplMin <= rplMax AND >0 - - if wanted < 0 { // allocations above maximum threshold: drop some - // This could be done more intelligently by dropping them - // according to the allocator order (i.e. free-ing peers - // with most used space first). - return metrics.currentPeers[0 : len(metrics.currentPeers)+wanted], nil - } - - if needed <= 0 { // allocations are above minimal threshold - // We don't provide any new allocations - return nil, nil - } - - if nAvailableValid < needed { // not enough candidates - return nil, allocationError(hash, needed, wanted, append(metrics.priorityPeers, metrics.candidatePeers...)) - } - - // We can allocate from this point. Use the allocator to decide - // on the priority of candidates grab as many as "wanted" - - // the allocator returns a list of peers ordered by priority - finalAllocs, err := c.allocator.Allocate( - ctx, - hash, - metrics.current, - metrics.candidate, - metrics.priority, - ) - if err != nil { - return nil, logError(err.Error()) - } - - logger.Debugf("obtainAllocations: allocate(): %s", finalAllocs) - - // check that we have enough as the allocator may have returned - // less candidates than provided. 
- if got := len(finalAllocs); got < needed { - return nil, allocationError(hash, needed, wanted, finalAllocs) - } - - allocationsToUse := minInt(wanted, len(finalAllocs)) - - // the final result is the currently valid allocations - // along with the ones provided by the allocator - return append(metrics.currentPeers, finalAllocs[0:allocationsToUse]...), nil -} diff --git a/packages/networking/ipfs-cluster/allocator/balanced/balanced.go b/packages/networking/ipfs-cluster/allocator/balanced/balanced.go deleted file mode 100644 index 4872615..0000000 --- a/packages/networking/ipfs-cluster/allocator/balanced/balanced.go +++ /dev/null @@ -1,327 +0,0 @@ -// Package balanced implements an allocator that can sort allocations -// based on multiple metrics, where metrics may be an arbitrary way to -// partition a set of peers. -// -// For example, allocating by ["tag:region", "disk"] the resulting peer -// candidate order will balanced between regions and ordered by the value of -// the weight of the disk metric. -package balanced - -import ( - "context" - "fmt" - "sort" - - api "github.com/ipfs-cluster/ipfs-cluster/api" - logging "github.com/ipfs/go-log/v2" - peer "github.com/libp2p/go-libp2p/core/peer" - rpc "github.com/libp2p/go-libp2p-gorpc" -) - -var logger = logging.Logger("allocator") - -// Allocator is an allocator that partitions metrics and orders -// the final list of allocation by selecting for each partition. -type Allocator struct { - config *Config - rpcClient *rpc.Client -} - -// New returns an initialized Allocator. -func New(cfg *Config) (*Allocator, error) { - err := cfg.Validate() - if err != nil { - return nil, err - } - - return &Allocator{ - config: cfg, - }, nil -} - -// SetClient provides us with an rpc.Client which allows -// contacting other components in the cluster. -func (a *Allocator) SetClient(c *rpc.Client) { - a.rpcClient = c -} - -// Shutdown is called on cluster shutdown. We just invalidate -// any metrics from this point. -func (a *Allocator) Shutdown(ctx context.Context) error { - a.rpcClient = nil - return nil -} - -type partitionedMetric struct { - metricName string - curChoosingIndex int - noMore bool - partitions []*partition // they are in order of their values -} - -type partition struct { - value string - weight int64 - aggregatedWeight int64 - peers map[peer.ID]bool // the bool tracks whether the peer has been picked already out of the partition when doing the final sort. - sub *partitionedMetric // all peers in sub-partitions will have the same value for this metric -} - -// Returns a partitionedMetric which has partitions and subpartitions based -// on the metrics and values given by the "by" slice. The partitions -// are ordered based on the cumulative weight. -func partitionMetrics(set api.MetricsSet, by []string) *partitionedMetric { - rootMetric := by[0] - pnedMetric := &partitionedMetric{ - metricName: rootMetric, - partitions: partitionValues(set[rootMetric]), - } - - // For sorting based on weight (more to less) - lessF := func(i, j int) bool { - wi := pnedMetric.partitions[i].weight - wj := pnedMetric.partitions[j].weight - - // if weight is equal, sort by aggregated weight of - // all sub-partitions. - if wi == wj { - awi := pnedMetric.partitions[i].aggregatedWeight - awj := pnedMetric.partitions[j].aggregatedWeight - // If subpartitions weight the same, do strict order - // based on value string - if awi == awj { - return pnedMetric.partitions[i].value < pnedMetric.partitions[j].value - } - return awj < awi - - } - // Descending! 
- return wj < wi - } - - if len(by) == 1 { // we are done - sort.Slice(pnedMetric.partitions, lessF) - return pnedMetric - } - - // process sub-partitions - for _, partition := range pnedMetric.partitions { - filteredSet := make(api.MetricsSet) - for k, v := range set { - if k == rootMetric { // not needed anymore - continue - } - for _, m := range v { - // only leave metrics for peers in current partition - if _, ok := partition.peers[m.Peer]; ok { - filteredSet[k] = append(filteredSet[k], m) - } - } - } - - partition.sub = partitionMetrics(filteredSet, by[1:]) - - // Add the aggregated weight of the subpartitions - for _, subp := range partition.sub.partitions { - partition.aggregatedWeight += subp.aggregatedWeight - } - } - sort.Slice(pnedMetric.partitions, lessF) - return pnedMetric -} - -func partitionValues(metrics []api.Metric) []*partition { - partitions := []*partition{} - - if len(metrics) <= 0 { - return partitions - } - - // We group peers with the same value in the same partition. - partitionsByValue := make(map[string]*partition) - - for _, m := range metrics { - // Sometimes two metrics have the same value / weight, but we - // still want to put them in different partitions. Otherwise - // their weights get added and they form a bucket and - // therefore not they are not selected in order: 3 peers with - // freespace=100 and one peer with freespace=200 would result - // in one of the peers with freespace 100 being chosen first - // because the partition's weight is 300. - // - // We are going to call these metrics (like free-space), - // non-partitionable metrics. This is going to be the default - // (for backwards compat reasons). - // - // The informers must set the Partitionable field accordingly - // when two metrics with the same value must be grouped in the - // same partition. - // - // Note: aggregatedWeight is the same as weight here (sum of - // weight of all metrics in partitions), and gets updated - // later in partitionMetrics with the aggregated weight of - // sub-partitions. - if !m.Partitionable { - partitions = append(partitions, &partition{ - value: m.Value, - weight: m.GetWeight(), - aggregatedWeight: m.GetWeight(), - peers: map[peer.ID]bool{ - m.Peer: false, - }, - }) - continue - } - - // Any other case, we partition by value. - if p, ok := partitionsByValue[m.Value]; ok { - p.peers[m.Peer] = false - p.weight += m.GetWeight() - p.aggregatedWeight += m.GetWeight() - } else { - partitionsByValue[m.Value] = &partition{ - value: m.Value, - weight: m.GetWeight(), - aggregatedWeight: m.GetWeight(), - peers: map[peer.ID]bool{ - m.Peer: false, - }, - } - } - - } - for _, p := range partitionsByValue { - partitions = append(partitions, p) - } - return partitions -} - -// Returns a list of peers sorted by never choosing twice from the same -// partition if there is some other partition to choose from. -func (pnedm *partitionedMetric) sortedPeers() []peer.ID { - peers := []peer.ID{} - for { - peer := pnedm.chooseNext() - if peer == "" { // This means we are done. - break - } - peers = append(peers, peer) - } - return peers -} - -func (pnedm *partitionedMetric) chooseNext() peer.ID { - lenp := len(pnedm.partitions) - if lenp == 0 { - return "" - } - - if pnedm.noMore { - return "" - } - - var peer peer.ID - - curPartition := pnedm.partitions[pnedm.curChoosingIndex] - done := 0 - for { - if curPartition.sub != nil { - // Choose something from the sub-partitionedMetric - peer = curPartition.sub.chooseNext() - } else { - // We are a bottom-partition. 
Choose one of our peers
-			for pid, used := range curPartition.peers {
-				if !used {
-					peer = pid
-					curPartition.peers[pid] = true // mark as used
-					break
-				}
-			}
-		}
-		// look in next partition next time
-		pnedm.curChoosingIndex = (pnedm.curChoosingIndex + 1) % lenp
-		curPartition = pnedm.partitions[pnedm.curChoosingIndex]
-		done++
-
-		if peer != "" {
-			break
-		}
-
-		// no peer and we have looked in as many partitions as we have
-		if done == lenp {
-			pnedm.noMore = true
-			break
-		}
-	}
-
-	return peer
-}
-
-// Allocate produces a sorted list of cluster peer IDs based on different
-// metrics provided for those peer IDs.
-// It works as follows:
-//
-// - First, it buckets each peer's metrics based on the AllocateBy list. The
-// metric name must match the bucket name, otherwise they are put at the end.
-// - Second, based on the AllocateBy order, it orders the first bucket and
-// groups peers by ordered value.
-// - Third, it selects metrics on the second bucket for the highest-priority
-// peers of the first bucket and orders their metrics. Then for the peers in
-// second position, and so on.
-// - It repeats the process until there are no more buckets to sort.
-// - Finally, it returns the peers in the resulting order, never taking two
-// from the same partition while another partition still has candidates
-// (see sortedPeers above).
-func (a *Allocator) Allocate(
-	ctx context.Context,
-	c api.Cid,
-	current, candidates, priority api.MetricsSet,
-) ([]peer.ID, error) {
-
-	// For the allocation to work well, there have to be metrics of all
-	// the types for all the peers. There cannot be a metric of one type
-	// for a peer that does not appear in the other types.
-	//
-	// Removing such occurrences is done in allocate.go, before the
-	// allocator is called.
-	//
-	// Otherwise, the sort order may be skewed.
-
-	candidatePartition := partitionMetrics(candidates, a.config.AllocateBy)
-	priorityPartition := partitionMetrics(priority, a.config.AllocateBy)
-
-	logger.Debugf("Balanced allocator partitions:\n%s\n", printPartition(candidatePartition, 0))
-
-	first := priorityPartition.sortedPeers()
-	last := candidatePartition.sortedPeers()
-
-	return append(first, last...), nil
-}
-
-// Metrics returns the names of the metrics that have been registered
-// with this allocator.
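(Added commentary.) The traversal that sortedPeers and chooseNext implement
above is easier to see in isolation. This standalone toy version, with
invented partition contents, interleaves picks so that no partition is
chosen twice while another still has unpicked peers:

package main

import "fmt"

func roundRobin(parts [][]string) []string {
	out := []string{}
	remaining := 0
	for _, p := range parts {
		remaining += len(p)
	}
	next := make([]int, len(parts)) // per-partition cursor
	for i := 0; remaining > 0; i = (i + 1) % len(parts) {
		if next[i] < len(parts[i]) {
			out = append(out, parts[i][next[i]])
			next[i]++
			remaining--
		}
	}
	return out
}

func main() {
	// Two "regions" with two peers each: picks alternate between regions.
	fmt.Println(roundRobin([][]string{{"eu1", "eu2"}, {"us1", "us2"}}))
	// Output: [eu1 us1 eu2 us2]
}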
-func (a *Allocator) Metrics() []string { - return a.config.AllocateBy -} - -func printPartition(m *partitionedMetric, ind int) string { - str := "" - indent := func() { - for i := 0; i < ind+2; i++ { - str += " " - } - } - - for _, p := range m.partitions { - indent() - str += fmt.Sprintf(" | %s:%s - %d - [", m.metricName, p.value, p.weight) - for p, u := range p.peers { - str += fmt.Sprintf("%s|%t, ", p, u) - } - str += "]\n" - if p.sub != nil { - str += printPartition(p.sub, ind+2) - } - } - return str -} diff --git a/packages/networking/ipfs-cluster/allocator/balanced/balanced_test.go b/packages/networking/ipfs-cluster/allocator/balanced/balanced_test.go deleted file mode 100644 index 9099987..0000000 --- a/packages/networking/ipfs-cluster/allocator/balanced/balanced_test.go +++ /dev/null @@ -1,155 +0,0 @@ -package balanced - -import ( - "context" - "testing" - "time" - - api "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/test" - peer "github.com/libp2p/go-libp2p/core/peer" -) - -func makeMetric(name, value string, weight int64, peer peer.ID, partitionable bool) api.Metric { - return api.Metric{ - Name: name, - Value: value, - Weight: weight, - Peer: peer, - Valid: true, - Partitionable: partitionable, - Expire: time.Now().Add(time.Minute).UnixNano(), - } -} - -func TestAllocate(t *testing.T) { - alloc, err := New(&Config{ - AllocateBy: []string{ - "region", - "az", - "pinqueue", - "freespace", - }, - }) - if err != nil { - t.Fatal(err) - } - - candidates := api.MetricsSet{ - "abc": []api.Metric{ // don't want anything in results - makeMetric("abc", "a", 0, test.PeerID1, true), - makeMetric("abc", "b", 0, test.PeerID2, true), - }, - "region": []api.Metric{ - makeMetric("region", "a-us", 0, test.PeerID1, true), - makeMetric("region", "a-us", 0, test.PeerID2, true), - - makeMetric("region", "b-eu", 0, test.PeerID3, true), - makeMetric("region", "b-eu", 0, test.PeerID4, true), - makeMetric("region", "b-eu", 0, test.PeerID5, true), - - makeMetric("region", "c-au", 0, test.PeerID6, true), - makeMetric("region", "c-au", 0, test.PeerID7, true), - makeMetric("region", "c-au", 0, test.PeerID8, true), // I don't want to see this in results - }, - "az": []api.Metric{ - makeMetric("az", "us1", 0, test.PeerID1, true), - makeMetric("az", "us2", 0, test.PeerID2, true), - - makeMetric("az", "eu1", 0, test.PeerID3, true), - makeMetric("az", "eu1", 0, test.PeerID4, true), - makeMetric("az", "eu2", 0, test.PeerID5, true), - - makeMetric("az", "au1", 0, test.PeerID6, true), - makeMetric("az", "au1", 0, test.PeerID7, true), - }, - "pinqueue": []api.Metric{ - makeMetric("pinqueue", "100", 0, test.PeerID1, true), - makeMetric("pinqueue", "200", 0, test.PeerID2, true), - - makeMetric("pinqueue", "100", 0, test.PeerID3, true), - makeMetric("pinqueue", "200", 0, test.PeerID4, true), - makeMetric("pinqueue", "300", 0, test.PeerID5, true), - - makeMetric("pinqueue", "100", 0, test.PeerID6, true), - makeMetric("pinqueue", "1000", -1, test.PeerID7, true), - }, - "freespace": []api.Metric{ - makeMetric("freespace", "100", 100, test.PeerID1, false), - makeMetric("freespace", "500", 500, test.PeerID2, false), - - makeMetric("freespace", "200", 200, test.PeerID3, false), - makeMetric("freespace", "400", 400, test.PeerID4, false), - makeMetric("freespace", "10", 10, test.PeerID5, false), - - makeMetric("freespace", "50", 50, test.PeerID6, false), - makeMetric("freespace", "600", 600, test.PeerID7, false), - - makeMetric("freespace", "10000", 10000, test.PeerID8, false), - }, - } - - 
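	// (Added note.) The region weights cited below are aggregated
	// sub-partition weights, e.g. c-au = freespace(50 + 600) plus the
	// -1 pinqueue weight of PeerID7 = 649. PeerID8 reports no az or
	// pinqueue metrics, so it never appears in the sub-partitions and
	// can never be picked.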
// Regions weights: a-us (pids 1,2): 600. b-eu (pids 3,4,5): 610. c-au (pids 6,7): 649 - // Az weights: us1: 100. us2: 500. eu1: 600. eu2: 10. au1: 649 - // Based on the algorithm it should choose: - // - // - c-au (most-weight)->au1->pinqueue(0)->pid6 - // - b-eu->eu1->pid4 - // - a-us->us2->pid2 - // - - // - c-au->au1 (nowhere else to choose)->pid7 (region exausted) - // - b-eu->eu2 (already had in eu1)->pid5 - // - a-us->us1 (already had in us2)->pid1 - // - - // - b-eu->eu1->pid3 (only peer left) - - peers, err := alloc.Allocate(context.Background(), - test.Cid1, - nil, - candidates, - nil, - ) - if err != nil { - t.Fatal(err) - } - - if len(peers) < 7 { - t.Fatalf("not enough peers: %s", peers) - } - - for i, p := range peers { - t.Logf("%d - %s", i, p) - switch i { - case 0: - if p != test.PeerID6 { - t.Errorf("wrong id in pos %d: %s", i, p) - } - case 1: - if p != test.PeerID4 { - t.Errorf("wrong id in pos %d: %s", i, p) - } - case 2: - if p != test.PeerID2 { - t.Errorf("wrong id in pos %d: %s", i, p) - } - case 3: - if p != test.PeerID7 { - t.Errorf("wrong id in pos %d: %s", i, p) - } - case 4: - if p != test.PeerID5 { - t.Errorf("wrong id in pos %d: %s", i, p) - } - case 5: - if p != test.PeerID1 { - t.Errorf("wrong id in pos %d: %s", i, p) - } - case 6: - if p != test.PeerID3 { - t.Errorf("wrong id in pos %d: %s", i, p) - } - default: - t.Error("too many peers") - } - } -} diff --git a/packages/networking/ipfs-cluster/allocator/balanced/config.go b/packages/networking/ipfs-cluster/allocator/balanced/config.go deleted file mode 100644 index 44c015c..0000000 --- a/packages/networking/ipfs-cluster/allocator/balanced/config.go +++ /dev/null @@ -1,103 +0,0 @@ -package balanced - -import ( - "encoding/json" - "errors" - - "github.com/ipfs-cluster/ipfs-cluster/config" - "github.com/kelseyhightower/envconfig" -) - -const configKey = "balanced" -const envConfigKey = "cluster_balanced" - -// These are the default values for a Config. -var ( - DefaultAllocateBy = []string{"tag:group", "freespace"} -) - -// Config allows to initialize the Allocator. -type Config struct { - config.Saver - - AllocateBy []string -} - -type jsonConfig struct { - AllocateBy []string `json:"allocate_by"` -} - -// ConfigKey returns a human-friendly identifier for this -// Config's type. -func (cfg *Config) ConfigKey() string { - return configKey -} - -// Default initializes this Config with sensible values. -func (cfg *Config) Default() error { - cfg.AllocateBy = DefaultAllocateBy - return nil -} - -// ApplyEnvVars fills in any Config fields found -// as environment variables. -func (cfg *Config) ApplyEnvVars() error { - jcfg := cfg.toJSONConfig() - - err := envconfig.Process(envConfigKey, jcfg) - if err != nil { - return err - } - - return cfg.applyJSONConfig(jcfg) -} - -// Validate checks that the fields of this configuration have -// sensible values. -func (cfg *Config) Validate() error { - if len(cfg.AllocateBy) <= 0 { - return errors.New("metricalloc.allocate_by is invalid") - } - - return nil -} - -// LoadJSON parses a raw JSON byte-slice as generated by ToJSON(). 
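(Added sketch, assuming the allocator/balanced package as it existed before
this removal and the project's import path.) Loading and validating an
allocate_by configuration from JSON looks like this:

package main

import (
	"fmt"

	balanced "github.com/ipfs-cluster/ipfs-cluster/allocator/balanced"
)

func main() {
	cfg := &balanced.Config{}
	// Balance across a region tag first, then order peers by free space.
	err := cfg.LoadJSON([]byte(`{"allocate_by": ["tag:region", "freespace"]}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.AllocateBy) // [tag:region freespace]
}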
-func (cfg *Config) LoadJSON(raw []byte) error { - jcfg := &jsonConfig{} - err := json.Unmarshal(raw, jcfg) - if err != nil { - return err - } - - cfg.Default() - - return cfg.applyJSONConfig(jcfg) -} - -func (cfg *Config) applyJSONConfig(jcfg *jsonConfig) error { - // When unset, leave default - if len(jcfg.AllocateBy) > 0 { - cfg.AllocateBy = jcfg.AllocateBy - } - - return cfg.Validate() -} - -// ToJSON generates a human-friendly JSON representation of this Config. -func (cfg *Config) ToJSON() ([]byte, error) { - jcfg := cfg.toJSONConfig() - - return config.DefaultJSONMarshal(jcfg) -} - -func (cfg *Config) toJSONConfig() *jsonConfig { - return &jsonConfig{ - AllocateBy: cfg.AllocateBy, - } -} - -// ToDisplayJSON returns JSON config as a string. -func (cfg *Config) ToDisplayJSON() ([]byte, error) { - return config.DisplayJSON(cfg.toJSONConfig()) -} diff --git a/packages/networking/ipfs-cluster/allocator/balanced/config_test.go b/packages/networking/ipfs-cluster/allocator/balanced/config_test.go deleted file mode 100644 index d8c092d..0000000 --- a/packages/networking/ipfs-cluster/allocator/balanced/config_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package balanced - -import ( - "os" - "testing" -) - -var cfgJSON = []byte(` -{ - "allocate_by": ["tag", "disk"] -} -`) - -func TestLoadJSON(t *testing.T) { - cfg := &Config{} - err := cfg.LoadJSON(cfgJSON) - if err != nil { - t.Fatal(err) - } -} - -func TestToJSON(t *testing.T) { - cfg := &Config{} - cfg.LoadJSON(cfgJSON) - newjson, err := cfg.ToJSON() - if err != nil { - t.Fatal(err) - } - cfg = &Config{} - err = cfg.LoadJSON(newjson) - if err != nil { - t.Fatal(err) - } - if len(cfg.AllocateBy) != 2 { - t.Error("configuration was lost in serialization/deserialization") - } -} - -func TestDefault(t *testing.T) { - cfg := &Config{} - cfg.Default() - if cfg.Validate() != nil { - t.Fatal("error validating") - } - - cfg.AllocateBy = nil - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } - -} - -func TestApplyEnvVars(t *testing.T) { - os.Setenv("CLUSTER_BALANCED_ALLOCATEBY", "a,b,c") - cfg := &Config{} - cfg.ApplyEnvVars() - - if len(cfg.AllocateBy) != 3 { - t.Fatal("failed to override allocate_by with env var") - } -} diff --git a/packages/networking/ipfs-cluster/api/add.go b/packages/networking/ipfs-cluster/api/add.go deleted file mode 100644 index 0786d23..0000000 --- a/packages/networking/ipfs-cluster/api/add.go +++ /dev/null @@ -1,261 +0,0 @@ -package api - -import ( - "errors" - "fmt" - "net/url" - "strconv" - - cid "github.com/ipfs/go-cid" - peer "github.com/libp2p/go-libp2p/core/peer" -) - -// DefaultShardSize is the shard size for params objects created with DefaultParams(). -var DefaultShardSize = uint64(100 * 1024 * 1024) // 100 MB - -// AddedOutput carries information for displaying the standard ipfs output -// indicating a node of a file has been added. -type AddedOutput struct { - Name string `json:"name" codec:"n,omitempty"` - Cid Cid `json:"cid" codec:"c"` - Bytes uint64 `json:"bytes,omitempty" codec:"b,omitempty"` - Size uint64 `json:"size,omitempty" codec:"s,omitempty"` - Allocations []peer.ID `json:"allocations,omitempty" codec:"a,omitempty"` -} - -// IPFSAddParams groups options specific to the ipfs-adder, which builds -// UnixFS dags with the input files. This struct is embedded in AddParams. 
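(Added sketch, assuming the api package as it existed before this removal.)
DefaultAddParams and ToQueryString, both defined further below in this
file, were typically combined like this to build the query string for an
add request:

package main

import (
	"fmt"

	api "github.com/ipfs-cluster/ipfs-cluster/api"
)

func main() {
	p := api.DefaultAddParams()
	p.Chunker = "size-1048576" // 1 MiB chunks instead of the 256 KiB default
	p.RawLeaves = true
	q, err := p.ToQueryString()
	if err != nil {
		panic(err)
	}
	fmt.Println(q) // chunker=size-1048576&...&raw-leaves=true&...
}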
-type IPFSAddParams struct { - Layout string - Chunker string - RawLeaves bool - Progress bool - CidVersion int - HashFun string - NoCopy bool -} - -// AddParams contains all of the configurable parameters needed to specify the -// importing process of a file being added to an ipfs-cluster -type AddParams struct { - PinOptions - - Local bool - Recursive bool - Hidden bool - Wrap bool - Shard bool - StreamChannels bool - Format string // selects with adder - NoPin bool - - IPFSAddParams -} - -// DefaultAddParams returns a AddParams object with standard defaults -func DefaultAddParams() AddParams { - return AddParams{ - Local: false, - Recursive: false, - - Hidden: false, - Wrap: false, - Shard: false, - - StreamChannels: true, - - Format: "unixfs", - NoPin: false, - PinOptions: PinOptions{ - ReplicationFactorMin: 0, - ReplicationFactorMax: 0, - Name: "", - Mode: PinModeRecursive, - ShardSize: DefaultShardSize, - Metadata: make(map[string]string), - Origins: nil, - }, - IPFSAddParams: IPFSAddParams{ - Layout: "", // corresponds to balanced layout - Chunker: "size-262144", - RawLeaves: false, - Progress: false, - CidVersion: 0, - HashFun: "sha2-256", - NoCopy: false, - }, - } -} - -func parseBoolParam(q url.Values, name string, dest *bool) error { - if v := q.Get(name); v != "" { - b, err := strconv.ParseBool(v) - if err != nil { - return fmt.Errorf("parameter %s invalid", name) - } - *dest = b - } - return nil -} - -func parseIntParam(q url.Values, name string, dest *int) error { - if v := q.Get(name); v != "" { - i, err := strconv.Atoi(v) - if err != nil { - return fmt.Errorf("parameter %s invalid", name) - } - *dest = i - } - return nil -} - -// AddParamsFromQuery parses the AddParams object from -// a URL.Query(). -func AddParamsFromQuery(query url.Values) (AddParams, error) { - params := DefaultAddParams() - - opts := &PinOptions{} - err := opts.FromQuery(query) - if err != nil { - return params, err - } - params.PinOptions = *opts - params.PinUpdate.Cid = cid.Undef // hardcode as does not make sense for adding - - layout := query.Get("layout") - switch layout { - case "trickle", "balanced", "": - // nothing - default: - return params, errors.New("layout parameter is invalid") - } - params.Layout = layout - - chunker := query.Get("chunker") - if chunker != "" { - params.Chunker = chunker - } - - hashF := query.Get("hash") - if hashF != "" { - params.HashFun = hashF - } - - format := query.Get("format") - switch format { - case "car", "unixfs", "": - default: - return params, errors.New("format parameter is invalid") - } - params.Format = format - - err = parseBoolParam(query, "local", ¶ms.Local) - if err != nil { - return params, err - } - - err = parseBoolParam(query, "recursive", ¶ms.Recursive) - if err != nil { - return params, err - } - - err = parseBoolParam(query, "hidden", ¶ms.Hidden) - if err != nil { - return params, err - } - err = parseBoolParam(query, "wrap-with-directory", ¶ms.Wrap) - if err != nil { - return params, err - } - err = parseBoolParam(query, "shard", ¶ms.Shard) - if err != nil { - return params, err - } - - err = parseBoolParam(query, "progress", ¶ms.Progress) - if err != nil { - return params, err - } - - err = parseIntParam(query, "cid-version", ¶ms.CidVersion) - if err != nil { - return params, err - } - - // This mimics go-ipfs behavior. - if params.CidVersion > 0 { - params.RawLeaves = true - } - - // If the raw-leaves param is empty, the default RawLeaves value will - // take place (which may be true or false depending on - // CidVersion). 
Otherwise, it will be explicitly set. - err = parseBoolParam(query, "raw-leaves", ¶ms.RawLeaves) - if err != nil { - return params, err - } - - err = parseBoolParam(query, "stream-channels", ¶ms.StreamChannels) - if err != nil { - return params, err - } - - err = parseBoolParam(query, "nocopy", ¶ms.NoCopy) - if err != nil { - return params, err - } - - err = parseBoolParam(query, "no-pin", ¶ms.NoPin) - if err != nil { - return params, err - } - - return params, nil -} - -// ToQueryString returns a url query string (key=value&key2=value2&...) -func (p AddParams) ToQueryString() (string, error) { - pinOptsQuery, err := p.PinOptions.ToQuery() - if err != nil { - return "", err - } - query, err := url.ParseQuery(pinOptsQuery) - if err != nil { - return "", err - } - query.Set("shard", fmt.Sprintf("%t", p.Shard)) - query.Set("local", fmt.Sprintf("%t", p.Local)) - query.Set("recursive", fmt.Sprintf("%t", p.Recursive)) - query.Set("layout", p.Layout) - query.Set("chunker", p.Chunker) - query.Set("raw-leaves", fmt.Sprintf("%t", p.RawLeaves)) - query.Set("hidden", fmt.Sprintf("%t", p.Hidden)) - query.Set("wrap-with-directory", fmt.Sprintf("%t", p.Wrap)) - query.Set("progress", fmt.Sprintf("%t", p.Progress)) - query.Set("cid-version", fmt.Sprintf("%d", p.CidVersion)) - query.Set("hash", p.HashFun) - query.Set("stream-channels", fmt.Sprintf("%t", p.StreamChannels)) - query.Set("nocopy", fmt.Sprintf("%t", p.NoCopy)) - query.Set("format", p.Format) - query.Set("no-pin", fmt.Sprintf("%t", p.NoPin)) - return query.Encode(), nil -} - -// Equals checks if p equals p2. -func (p AddParams) Equals(p2 AddParams) bool { - return p.PinOptions.Equals(p2.PinOptions) && - p.Local == p2.Local && - p.Recursive == p2.Recursive && - p.Shard == p2.Shard && - p.Layout == p2.Layout && - p.Chunker == p2.Chunker && - p.RawLeaves == p2.RawLeaves && - p.Hidden == p2.Hidden && - p.Wrap == p2.Wrap && - p.CidVersion == p2.CidVersion && - p.HashFun == p2.HashFun && - p.StreamChannels == p2.StreamChannels && - p.NoCopy == p2.NoCopy && - p.Format == p2.Format && - p.NoPin == p2.NoPin -} diff --git a/packages/networking/ipfs-cluster/api/add_test.go b/packages/networking/ipfs-cluster/api/add_test.go deleted file mode 100644 index cf54dff..0000000 --- a/packages/networking/ipfs-cluster/api/add_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package api - -import ( - "net/url" - "testing" -) - -func TestAddParams_FromQuery(t *testing.T) { - qStr := "layout=balanced&chunker=size-262144&name=test&raw-leaves=true&hidden=true&shard=true&replication-min=2&replication-max=4&shard-size=1" - - q, err := url.ParseQuery(qStr) - if err != nil { - t.Fatal(err) - } - - p, err := AddParamsFromQuery(q) - if err != nil { - t.Fatal(err) - } - if p.Layout != "balanced" || - p.Chunker != "size-262144" || - p.Name != "test" || - !p.RawLeaves || !p.Hidden || !p.Shard || - p.ReplicationFactorMin != 2 || - p.ReplicationFactorMax != 4 || - p.ShardSize != 1 { - t.Fatal("did not parse the query correctly") - } -} - -func TestAddParams_FromQueryRawLeaves(t *testing.T) { - qStr := "cid-version=1" - - q, err := url.ParseQuery(qStr) - if err != nil { - t.Fatal(err) - } - - p, err := AddParamsFromQuery(q) - if err != nil { - t.Fatal(err) - } - if !p.RawLeaves { - t.Error("RawLeaves should be true with cid-version=1") - } - - qStr = "cid-version=1&raw-leaves=false" - - q, err = url.ParseQuery(qStr) - if err != nil { - t.Fatal(err) - } - - p, err = AddParamsFromQuery(q) - if err != nil { - t.Fatal(err) - } - if p.RawLeaves { - t.Error("RawLeaves should be false when 
explicitally set") - } - - qStr = "cid-version=0&raw-leaves=true" - - q, err = url.ParseQuery(qStr) - if err != nil { - t.Fatal(err) - } - - p, err = AddParamsFromQuery(q) - if err != nil { - t.Fatal(err) - } - if !p.RawLeaves { - t.Error("RawLeaves should be true when explicitly set") - } -} - -func TestAddParams_ToQueryString(t *testing.T) { - p := DefaultAddParams() - p.ReplicationFactorMin = 3 - p.ReplicationFactorMax = 6 - p.Name = "something" - p.RawLeaves = true - p.ShardSize = 1020 - qstr, err := p.ToQueryString() - if err != nil { - t.Fatal(err) - } - q, err := url.ParseQuery(qstr) - if err != nil { - t.Fatal(err) - } - - p2, err := AddParamsFromQuery(q) - if err != nil { - t.Fatal(err) - } - - if !p.Equals(p2) { - t.Error("generated and parsed params should be equal") - } -} diff --git a/packages/networking/ipfs-cluster/api/common/api.go b/packages/networking/ipfs-cluster/api/common/api.go deleted file mode 100644 index 1267ab5..0000000 --- a/packages/networking/ipfs-cluster/api/common/api.go +++ /dev/null @@ -1,835 +0,0 @@ -// Package common implements all the things that an IPFS Cluster API component -// must do, except the actual routes that it handles. -// -// This is meant for re-use when implementing actual REST APIs by saving most -// of the efforts and automatically getting a lot of the setup and things like -// authentication handled. -// -// The API exposes the routes in two ways: the first is through a regular -// HTTP(s) listener. The second is by tunneling HTTP through a libp2p stream -// (thus getting an encrypted channel without the need to setup TLS). Both -// ways can be used at the same time, or disabled. -// -// This is used by rest and pinsvc packages. -package common - -import ( - "context" - "crypto/tls" - "encoding/json" - "errors" - "fmt" - "math/rand" - "net" - "net/http" - "net/url" - "strings" - "sync" - "time" - - jwt "github.com/golang-jwt/jwt/v4" - types "github.com/ipfs-cluster/ipfs-cluster/api" - state "github.com/ipfs-cluster/ipfs-cluster/state" - logging "github.com/ipfs/go-log/v2" - gopath "github.com/ipfs/go-path" - libp2p "github.com/libp2p/go-libp2p" - host "github.com/libp2p/go-libp2p/core/host" - peer "github.com/libp2p/go-libp2p/core/peer" - rpc "github.com/libp2p/go-libp2p-gorpc" - gostream "github.com/libp2p/go-libp2p-gostream" - p2phttp "github.com/libp2p/go-libp2p-http" - noise "github.com/libp2p/go-libp2p/p2p/security/noise" - libp2ptls "github.com/libp2p/go-libp2p/p2p/security/tls" - manet "github.com/multiformats/go-multiaddr/net" - - handlers "github.com/gorilla/handlers" - mux "github.com/gorilla/mux" - "github.com/rs/cors" - "go.opencensus.io/plugin/ochttp" - "go.opencensus.io/plugin/ochttp/propagation/tracecontext" - "go.opencensus.io/trace" -) - -func init() { - rand.Seed(time.Now().UnixNano()) -} - -// StreamChannelSize is used to define buffer sizes for channels. -const StreamChannelSize = 1024 - -// Common errors -var ( - // ErrNoEndpointEnabled is returned when the API is created but - // no HTTPListenAddr, nor libp2p configuration fields, nor a libp2p - // Host are provided. - ErrNoEndpointsEnabled = errors.New("neither the libp2p nor the HTTP endpoints are enabled") - - // ErrHTTPEndpointNotEnabled is returned when trying to perform - // operations that rely on the HTTPEndpoint but it is disabled. - ErrHTTPEndpointNotEnabled = errors.New("the HTTP endpoint is not enabled") -) - -// SetStatusAutomatically can be passed to SendResponse(), so that it will -// figure out which http status to set by itself. 
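// (Added illustration, not original code.) Combined with SendResponse,
// defined further below, the automatic status selection behaves as:
//
//	api.SendResponse(w, SetStatusAutomatically, nil, resp) // 200 + JSON body
//	api.SendResponse(w, SetStatusAutomatically, nil, nil)  // 204 No Content
//	api.SendResponse(w, SetStatusAutomatically, err, nil)  // 404 for state.ErrNotFound, else 500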
-const SetStatusAutomatically = -1 - -// API implements an API and aims to provides -// a RESTful HTTP API for Cluster. -type API struct { - ctx context.Context - cancel func() - - config *Config - - rpcClient *rpc.Client - rpcReady chan struct{} - router *mux.Router - routes func(*rpc.Client) []Route - - server *http.Server - host host.Host - - httpListeners []net.Listener - libp2pListener net.Listener - - shutdownLock sync.Mutex - shutdown bool - wg sync.WaitGroup -} - -// Route defines a REST endpoint supported by this API. -type Route struct { - Name string - Method string - Pattern string - HandlerFunc http.HandlerFunc -} - -type jwtToken struct { - Token string `json:"token"` -} - -type logWriter struct { - logger *logging.ZapEventLogger -} - -func (lw logWriter) Write(b []byte) (int, error) { - lw.logger.Info(string(b)) - return len(b), nil -} - -// NewAPI creates a new common API component with the given configuration. -func NewAPI(ctx context.Context, cfg *Config, routes func(*rpc.Client) []Route) (*API, error) { - return NewAPIWithHost(ctx, cfg, nil, routes) -} - -// NewAPIWithHost creates a new common API component and enables -// the libp2p-http endpoint using the given Host, if not nil. -func NewAPIWithHost(ctx context.Context, cfg *Config, h host.Host, routes func(*rpc.Client) []Route) (*API, error) { - err := cfg.Validate() - if err != nil { - return nil, err - } - - ctx, cancel := context.WithCancel(ctx) - - api := &API{ - ctx: ctx, - cancel: cancel, - config: cfg, - host: h, - routes: routes, - rpcReady: make(chan struct{}, 2), - } - - // Our handler is a gorilla router wrapped with: - // - a custom strictSlashHandler that uses 307 redirects (#1415) - // - the cors handler, - // - the basic auth handler. - // - // Requests will need to have valid credentials first, except - // cors-preflight requests (OPTIONS). Then requests are handled by - // CORS and potentially need to comply with it. Then they may be - // redirected if the path ends with a "/". Finally they hit one of our - // routes and handlers. - router := mux.NewRouter() - handler := api.authHandler( - cors.New(*cfg.CorsOptions()). - Handler( - strictSlashHandler(router), - ), - cfg.Logger, - ) - if cfg.Tracing { - handler = &ochttp.Handler{ - IsPublicEndpoint: true, - Propagation: &tracecontext.HTTPFormat{}, - Handler: handler, - StartOptions: trace.StartOptions{SpanKind: trace.SpanKindServer}, - FormatSpanName: func(req *http.Request) string { return req.Host + ":" + req.URL.Path + ":" + req.Method }, - } - } - - writer, err := cfg.LogWriter() - if err != nil { - cancel() - return nil, err - } - - s := &http.Server{ - ReadTimeout: cfg.ReadTimeout, - ReadHeaderTimeout: cfg.ReadHeaderTimeout, - WriteTimeout: cfg.WriteTimeout, - IdleTimeout: cfg.IdleTimeout, - Handler: handlers.LoggingHandler(writer, handler), - MaxHeaderBytes: cfg.MaxHeaderBytes, - } - - // See: https://github.com/ipfs/go-ipfs/issues/5168 - // See: https://github.com/ipfs-cluster/ipfs-cluster/issues/548 - // on why this is re-enabled. 
- s.SetKeepAlivesEnabled(true) - s.MaxHeaderBytes = cfg.MaxHeaderBytes - - api.server = s - api.router = router - - // Set up api.httpListeners if enabled - err = api.setupHTTP() - if err != nil { - return nil, err - } - - // Set up api.libp2pListeners if enabled - err = api.setupLibp2p() - if err != nil { - return nil, err - } - - if len(api.httpListeners) == 0 && api.libp2pListener == nil { - return nil, ErrNoEndpointsEnabled - } - - api.run(ctx) - return api, nil -} - -func (api *API) setupHTTP() error { - if len(api.config.HTTPListenAddr) == 0 { - return nil - } - - for _, listenMAddr := range api.config.HTTPListenAddr { - n, addr, err := manet.DialArgs(listenMAddr) - if err != nil { - return err - } - - var l net.Listener - if api.config.TLS != nil { - l, err = tls.Listen(n, addr, api.config.TLS) - } else { - l, err = net.Listen(n, addr) - } - if err != nil { - return err - } - api.httpListeners = append(api.httpListeners, l) - } - return nil -} - -func (api *API) setupLibp2p() error { - // Make new host. Override any provided existing one - // if we have config for a custom one. - if len(api.config.Libp2pListenAddr) > 0 { - // We use a new host context. We will call - // Close() on shutdown(). Avoids things like: - // https://github.com/ipfs-cluster/ipfs-cluster/issues/853 - h, err := libp2p.New( - libp2p.Identity(api.config.PrivateKey), - libp2p.ListenAddrs(api.config.Libp2pListenAddr...), - libp2p.Security(noise.ID, noise.New), - libp2p.Security(libp2ptls.ID, libp2ptls.New), - ) - if err != nil { - return err - } - api.host = h - } - - if api.host == nil { - return nil - } - - l, err := gostream.Listen(api.host, p2phttp.DefaultP2PProtocol) - if err != nil { - return err - } - api.libp2pListener = l - return nil -} - -func (api *API) addRoutes() { - for _, route := range api.routes(api.rpcClient) { - api.router. - Methods(route.Method). - Path(route.Pattern). - Name(route.Name). - Handler( - ochttp.WithRouteTag( - http.HandlerFunc(route.HandlerFunc), - "/"+route.Name, - ), - ) - } - api.router.NotFoundHandler = ochttp.WithRouteTag( - http.HandlerFunc(api.notFoundHandler), - "/notfound", - ) -} - -// authHandler takes care of authentication either using basicAuth or JWT bearer tokens. -func (api *API) authHandler(h http.Handler, lggr *logging.ZapEventLogger) http.Handler { - - credentials := api.config.BasicAuthCredentials - - // If no credentials are set, we do nothing. - if credentials == nil { - return h - } - - wrap := func(w http.ResponseWriter, r *http.Request) { - // We let CORS preflight requests pass through the next - // handler. 
- if r.Method == http.MethodOptions { - h.ServeHTTP(w, r) - return - } - - username, password, okBasic := r.BasicAuth() - tokenString, okToken := parseBearerToken(r.Header.Get("Authorization")) - - switch { - case okBasic: - ok := verifyBasicAuth(credentials, username, password) - if !ok { - w.Header().Set("WWW-Authenticate", wwwAuthenticate("Basic", "Restricted IPFS Cluster API", "", "")) - api.SendResponse(w, http.StatusUnauthorized, errors.New("unauthorized: access denied"), nil) - return - } - case okToken: - _, err := verifyToken(credentials, tokenString) - if err != nil { - lggr.Debug(err) - - w.Header().Set("WWW-Authenticate", wwwAuthenticate("Bearer", "Restricted IPFS Cluster API", "invalid_token", "")) - api.SendResponse(w, http.StatusUnauthorized, errors.New("unauthorized: invalid token"), nil) - return - } - default: - // No authentication provided, but needed - w.Header().Add("WWW-Authenticate", wwwAuthenticate("Bearer", "Restricted IPFS Cluster API", "", "")) - w.Header().Add("WWW-Authenticate", wwwAuthenticate("Basic", "Restricted IPFS Cluster API", "", "")) - api.SendResponse(w, http.StatusUnauthorized, errors.New("unauthorized: no auth provided"), nil) - return - } - - // If we are here, authentication worked. - h.ServeHTTP(w, r) - } - return http.HandlerFunc(wrap) -} - -func parseBearerToken(authHeader string) (string, bool) { - const prefix = "Bearer " - if len(authHeader) < len(prefix) || !strings.EqualFold(authHeader[:len(prefix)], prefix) { - return "", false - } - - return authHeader[len(prefix):], true -} - -func wwwAuthenticate(auth, realm, error, description string) string { - str := auth + ` realm="` + realm + `"` - if len(error) > 0 { - str += `, error="` + error + `"` - } - if len(description) > 0 { - str += `, error_description="` + description + `"` - } - return str -} - -func verifyBasicAuth(credentials map[string]string, username, password string) bool { - if username == "" || password == "" { - return false - } - for u, p := range credentials { - if u == username && p == password { - return true - } - } - return false -} - -// verify that a Bearer JWT token is valid. -func verifyToken(credentials map[string]string, tokenString string) (*jwt.Token, error) { - // The token should be signed with the basic auth credential password - // of the issuer, and should have valid standard claims otherwise. - token, err := jwt.ParseWithClaims(tokenString, &jwt.RegisteredClaims{}, func(token *jwt.Token) (interface{}, error) { - if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { - return nil, errors.New("unexpected token signing method (not HMAC)") - } - - if claims, ok := token.Claims.(*jwt.RegisteredClaims); ok { - key, ok := credentials[claims.Issuer] - if !ok { - return nil, errors.New("issuer not found") - } - return []byte(key), nil - } - return nil, errors.New("no issuer set") - }) - - if err != nil { - return nil, err - } - - if !token.Valid { - return nil, errors.New("invalid token") - } - return token, nil -} - -// The Gorilla muxer StrictSlash option uses a 301 permanent redirect, which -// results in POST requests becoming GET requests in most clients. Thus we -// use our own middleware that performs a 307 redirect. See issue #1415 for -// more details. 
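(Added sketch.) To make the bearer-token scheme in verifyToken above
concrete: a client mints a token signed with its own basic-auth password,
carrying its username as the issuer claim, exactly as
generateSignedTokenString does later in this file. The credentials here are
invented:

package main

import (
	"fmt"

	jwt "github.com/golang-jwt/jwt/v4"
)

func main() {
	claims := jwt.RegisteredClaims{Issuer: "someuser"}
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
	ss, err := token.SignedString([]byte("someuserpassword"))
	if err != nil {
		panic(err)
	}
	fmt.Println("Authorization: Bearer " + ss)
}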
-func strictSlashHandler(h http.Handler) http.Handler { - wrap := func(w http.ResponseWriter, r *http.Request) { - path := r.URL.Path - if strings.HasSuffix(path, "/") { - u, _ := url.Parse(r.URL.String()) - u.Path = u.Path[:len(u.Path)-1] - http.Redirect(w, r, u.String(), http.StatusTemporaryRedirect) - return - } - h.ServeHTTP(w, r) - } - - return http.HandlerFunc(wrap) -} - -func (api *API) run(ctx context.Context) { - api.wg.Add(len(api.httpListeners)) - for _, l := range api.httpListeners { - go func(l net.Listener) { - defer api.wg.Done() - api.runHTTPServer(ctx, l) - }(l) - } - - if api.libp2pListener != nil { - api.wg.Add(1) - go func() { - defer api.wg.Done() - api.runLibp2pServer(ctx) - }() - } -} - -// runs in goroutine from run() -func (api *API) runHTTPServer(ctx context.Context, l net.Listener) { - select { - case <-api.rpcReady: - case <-api.ctx.Done(): - return - } - - maddr, err := manet.FromNetAddr(l.Addr()) - if err != nil { - api.config.Logger.Error(err) - } - - var authInfo string - if api.config.BasicAuthCredentials != nil { - authInfo = " - authenticated" - } - - api.config.Logger.Infof(strings.ToUpper(api.config.ConfigKey)+" (HTTP"+authInfo+"): %s", maddr) - err = api.server.Serve(l) - if err != nil && !strings.Contains(err.Error(), "closed network connection") { - api.config.Logger.Error(err) - } -} - -// runs in goroutine from run() -func (api *API) runLibp2pServer(ctx context.Context) { - select { - case <-api.rpcReady: - case <-api.ctx.Done(): - return - } - - listenMsg := "" - for _, a := range api.host.Addrs() { - listenMsg += fmt.Sprintf(" %s/p2p/%s\n", a, api.host.ID().Pretty()) - } - - api.config.Logger.Infof(strings.ToUpper(api.config.ConfigKey)+" (libp2p-http): ENABLED. Listening on:\n%s\n", listenMsg) - - err := api.server.Serve(api.libp2pListener) - if err != nil && !strings.Contains(err.Error(), "context canceled") { - api.config.Logger.Error(err) - } -} - -// Shutdown stops any API listeners. -func (api *API) Shutdown(ctx context.Context) error { - _, span := trace.StartSpan(ctx, "api/Shutdown") - defer span.End() - - api.shutdownLock.Lock() - defer api.shutdownLock.Unlock() - - if api.shutdown { - api.config.Logger.Debug("already shutdown") - return nil - } - - api.config.Logger.Info("stopping Cluster API") - - api.cancel() - close(api.rpcReady) - - // Cancel any outstanding ops - api.server.SetKeepAlivesEnabled(false) - - for _, l := range api.httpListeners { - l.Close() - } - - if api.libp2pListener != nil { - api.libp2pListener.Close() - } - - api.wg.Wait() - - // This means we created the host - if api.config.Libp2pListenAddr != nil { - api.host.Close() - } - api.shutdown = true - return nil -} - -// SetClient makes the component ready to perform RPC -// requests. -func (api *API) SetClient(c *rpc.Client) { - api.rpcClient = c - api.addRoutes() - - // One notification for http server and one for libp2p server. - api.rpcReady <- struct{}{} - api.rpcReady <- struct{}{} -} - -func (api *API) notFoundHandler(w http.ResponseWriter, r *http.Request) { - api.SendResponse(w, http.StatusNotFound, errors.New("not found"), nil) -} - -// Context returns the API context -func (api *API) Context() context.Context { - return api.ctx -} - -// ParsePinPathOrFail parses a pin path and returns it or makes the request -// fail. 
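(Added demonstration, with invented paths.) The 307 choice that
strictSlashHandler above relies on matters because clients keep the verb
and body across a 307, whereas a 301 typically degrades POST to GET:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/pins", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "method=%s", r.Method)
	})
	// Minimal stand-in for the middleware: strip a trailing slash via 307.
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if len(r.URL.Path) > 1 && strings.HasSuffix(r.URL.Path, "/") {
			http.Redirect(w, r, strings.TrimSuffix(r.URL.Path, "/"), http.StatusTemporaryRedirect)
			return
		}
		mux.ServeHTTP(w, r)
	})

	srv := httptest.NewServer(h)
	defer srv.Close()

	res, err := http.Post(srv.URL+"/pins/", "text/plain", strings.NewReader("x"))
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	body, _ := io.ReadAll(res.Body)
	fmt.Println(string(body)) // method=POST: the verb survived the redirect
}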
-func (api *API) ParsePinPathOrFail(w http.ResponseWriter, r *http.Request) types.PinPath { - vars := mux.Vars(r) - urlpath := "/" + vars["keyType"] + "/" + strings.TrimSuffix(vars["path"], "/") - - path, err := gopath.ParsePath(urlpath) - if err != nil { - api.SendResponse(w, http.StatusBadRequest, errors.New("error parsing path: "+err.Error()), nil) - return types.PinPath{} - } - - pinPath := types.PinPath{Path: path.String()} - err = pinPath.PinOptions.FromQuery(r.URL.Query()) - if err != nil { - api.SendResponse(w, http.StatusBadRequest, err, nil) - } - return pinPath -} - -// ParseCidOrFail parses a Cid and returns it or makes the request fail. -func (api *API) ParseCidOrFail(w http.ResponseWriter, r *http.Request) types.Pin { - vars := mux.Vars(r) - hash := vars["hash"] - - c, err := types.DecodeCid(hash) - if err != nil { - api.SendResponse(w, http.StatusBadRequest, errors.New("error decoding Cid: "+err.Error()), nil) - return types.Pin{} - } - - opts := types.PinOptions{} - err = opts.FromQuery(r.URL.Query()) - if err != nil { - api.SendResponse(w, http.StatusBadRequest, err, nil) - } - pin := types.PinWithOpts(c, opts) - pin.MaxDepth = -1 // For now, all pins are recursive - return pin -} - -// ParsePidOrFail parses a PID and returns it or makes the request fail. -func (api *API) ParsePidOrFail(w http.ResponseWriter, r *http.Request) peer.ID { - vars := mux.Vars(r) - idStr := vars["peer"] - pid, err := peer.Decode(idStr) - if err != nil { - api.SendResponse(w, http.StatusBadRequest, errors.New("error decoding Peer ID: "+err.Error()), nil) - return "" - } - return pid -} - -// GenerateTokenHandler is a handle to obtain a new JWT token -func (api *API) GenerateTokenHandler(w http.ResponseWriter, r *http.Request) { - if api.config.BasicAuthCredentials == nil { - api.SendResponse(w, http.StatusUnauthorized, errors.New("unauthorized"), nil) - return - } - - var issuer string - - // We do not verify as we assume it is already done! 
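	// (Added note.) The authHandler middleware wrapping every route has
	// already rejected requests carrying bad credentials, so the
	// basic-auth user or bearer token read below is a validated input.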
- user, _, okBasic := r.BasicAuth() - tokenString, okToken := parseBearerToken(r.Header.Get("Authorization")) - - if okBasic { - issuer = user - } else if okToken { - token, err := verifyToken(api.config.BasicAuthCredentials, tokenString) - if err != nil { // I really hope not because it should be verified - api.config.Logger.Error("verify token failed in GetTokenHandler!") - api.SendResponse(w, http.StatusUnauthorized, errors.New("unauthorized"), nil) - return - } - if claims, ok := token.Claims.(*jwt.RegisteredClaims); ok { - issuer = claims.Issuer - } else { - api.SendResponse(w, http.StatusUnauthorized, errors.New("unauthorized"), nil) - return - } - } else { // no issuer - api.SendResponse(w, http.StatusUnauthorized, errors.New("unauthorized"), nil) - return - } - - pass, okPass := api.config.BasicAuthCredentials[issuer] - if !okPass { // another place that should never be reached - api.SendResponse(w, http.StatusUnauthorized, errors.New("unauthorized"), nil) - return - } - - ss, err := generateSignedTokenString(issuer, pass) - if err != nil { - api.SendResponse(w, SetStatusAutomatically, err, nil) - return - } - tokenObj := jwtToken{Token: ss} - - api.SendResponse(w, SetStatusAutomatically, nil, tokenObj) -} - -func generateSignedTokenString(issuer, pass string) (string, error) { - key := []byte(pass) - claims := jwt.RegisteredClaims{ - Issuer: issuer, - } - - token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) - return token.SignedString(key) -} - -// SendResponse wraps all the logic for writing the response to a request: -// * Write configured headers -// * Write application/json content type -// * Write status: determined automatically if given "SetStatusAutomatically" -// * Write an error if there is or write the response if there is -func (api *API) SendResponse( - w http.ResponseWriter, - status int, - err error, - resp interface{}, -) { - - api.SetHeaders(w) - enc := json.NewEncoder(w) - - // Send an error - if err != nil { - if status == SetStatusAutomatically || status < 400 { - if err.Error() == state.ErrNotFound.Error() { - status = http.StatusNotFound - } else { - status = http.StatusInternalServerError - } - } - w.WriteHeader(status) - - errorResp := api.config.APIErrorFunc(err, status) - api.config.Logger.Errorf("sending error response: %d: %s", status, err.Error()) - - if err := enc.Encode(errorResp); err != nil { - api.config.Logger.Error(err) - } - return - } - - // Send a body - if resp != nil { - if status == SetStatusAutomatically { - status = http.StatusOK - } - - w.WriteHeader(status) - - if err = enc.Encode(resp); err != nil { - api.config.Logger.Error(err) - } - return - } - - // Empty response - if status == SetStatusAutomatically { - status = http.StatusNoContent - } - - w.WriteHeader(status) -} - -// StreamIterator is a function that returns the next item. It is used in -// StreamResponse. -type StreamIterator func() (interface{}, bool, error) - -// StreamResponse reads from an iterator and sends the response. 
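(Added sketch, not original code.) A minimal StreamIterator over a fixed
slice, of the shape StreamResponse below consumes; ok=false signals a clean
end of the stream:

func sliceIterator(items []string) StreamIterator {
	i := 0
	return func() (interface{}, bool, error) {
		if i == len(items) {
			return nil, false, nil
		}
		item := items[i]
		i++
		return item, true, nil
	}
}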
-func (api *API) StreamResponse(w http.ResponseWriter, next StreamIterator, errCh chan error) { - api.SetHeaders(w) - enc := json.NewEncoder(w) - flusher, flush := w.(http.Flusher) - w.Header().Set("Trailer", "X-Stream-Error") - - total := 0 - var err error - var ok bool - var item interface{} - for { - item, ok, err = next() - if total == 0 { - if err != nil { - st := http.StatusInternalServerError - w.WriteHeader(st) - errorResp := api.config.APIErrorFunc(err, st) - api.config.Logger.Errorf("sending error response: %d: %s", st, err.Error()) - - if err := enc.Encode(errorResp); err != nil { - api.config.Logger.Error(err) - } - return - } - if !ok { // but no error. - w.WriteHeader(http.StatusNoContent) - return - } - w.WriteHeader(http.StatusOK) - } - if err != nil { - break - } - - // finish just fine - if !ok { - break - } - - // we have an item - total++ - err = enc.Encode(item) - if err != nil { - api.config.Logger.Error(err) - break - } - if flush { - flusher.Flush() - } - } - - if err != nil { - w.Header().Set("X-Stream-Error", err.Error()) - } else { - // Due to some Javascript-browser-land stuff, we set the header - // even when there is no error. - w.Header().Set("X-Stream-Error", "") - } - // check for function errors - for funcErr := range errCh { - if funcErr != nil { - w.Header().Add("X-Stream-Error", funcErr.Error()) - } - } -} - -// SetHeaders sets all the headers that are common to all responses -// from this API. Called automatically from SendResponse(). -func (api *API) SetHeaders(w http.ResponseWriter) { - for header, values := range api.config.Headers { - for _, val := range values { - w.Header().Add(header, val) - } - } - - w.Header().Add("Content-Type", "application/json") -} - -// These functions below are mostly used in tests. - -// HTTPAddresses returns the HTTP(s) listening address -// in host:port format. Useful when configured to start -// on a random port (0). Returns error when the HTTP endpoint -// is not enabled. -func (api *API) HTTPAddresses() ([]string, error) { - if len(api.httpListeners) == 0 { - return nil, ErrHTTPEndpointNotEnabled - } - var addrs []string - for _, l := range api.httpListeners { - addrs = append(addrs, l.Addr().String()) - } - - return addrs, nil -} - -// Host returns the libp2p Host used by the API, if any. -// The result is either the host provided during initialization, -// a default Host created with options from the configuration object, -// or nil. -func (api *API) Host() host.Host { - return api.host -} - -// Headers returns the configured Headers. -// Useful for testing. -func (api *API) Headers() map[string][]string { - return api.config.Headers -} - -// SetKeepAlivesEnabled controls the HTTP server Keep Alive settings. Useful -// for testing. 
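(Added client-side sketch; the endpoint and port are invented.) Consumers
of StreamResponse above read one JSON document per line and must drain the
body before checking the X-Stream-Error trailer:

package main

import (
	"bufio"
	"fmt"
	"net/http"
)

func main() {
	res, err := http.Get("http://127.0.0.1:9094/pins")
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()

	sc := bufio.NewScanner(res.Body)
	for sc.Scan() {
		fmt.Println(sc.Text()) // one JSON-encoded item per line
	}
	// Trailers are only populated once the body has been fully read.
	if e := res.Trailer.Get("X-Stream-Error"); e != "" {
		fmt.Println("stream error:", e)
	}
}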
-func (api *API) SetKeepAlivesEnabled(b bool) { - api.server.SetKeepAlivesEnabled(b) -} diff --git a/packages/networking/ipfs-cluster/api/common/api_test.go b/packages/networking/ipfs-cluster/api/common/api_test.go deleted file mode 100644 index 02c0474..0000000 --- a/packages/networking/ipfs-cluster/api/common/api_test.go +++ /dev/null @@ -1,644 +0,0 @@ -package common - -import ( - "context" - "fmt" - "io" - "math/rand" - "net/http" - "net/http/httputil" - "os" - "path/filepath" - "testing" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/api/common/test" - rpctest "github.com/ipfs-cluster/ipfs-cluster/test" - - libp2p "github.com/libp2p/go-libp2p" - rpc "github.com/libp2p/go-libp2p-gorpc" - ma "github.com/multiformats/go-multiaddr" -) - -const ( - SSLCertFile = "test/server.crt" - SSLKeyFile = "test/server.key" - validUserName = "validUserName" - validUserPassword = "validUserPassword" - adminUserName = "adminUserName" - adminUserPassword = "adminUserPassword" - invalidUserName = "invalidUserName" - invalidUserPassword = "invalidUserPassword" -) - -var ( - validToken, _ = generateSignedTokenString(validUserName, validUserPassword) - invalidToken, _ = generateSignedTokenString(invalidUserName, invalidUserPassword) -) - -func routes(c *rpc.Client) []Route { - return []Route{ - { - "Test", - "GET", - "/test", - func(w http.ResponseWriter, r *http.Request) { - w.Header().Add("Content-Type", "application/json") - w.Write([]byte(`{ "thisis": "atest" }`)) - }, - }, - } - -} - -func testAPIwithConfig(t *testing.T, cfg *Config, name string) *API { - ctx := context.Background() - apiMAddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0") - h, err := libp2p.New(libp2p.ListenAddrs(apiMAddr)) - if err != nil { - t.Fatal(err) - } - - cfg.HTTPListenAddr = []ma.Multiaddr{apiMAddr} - - rest, err := NewAPIWithHost(ctx, cfg, h, routes) - if err != nil { - t.Fatalf("should be able to create a new %s API: %s", name, err) - } - - // No keep alive for tests - rest.server.SetKeepAlivesEnabled(false) - rest.SetClient(rpctest.NewMockRPCClient(t)) - - return rest -} - -func testAPI(t *testing.T) *API { - cfg := newDefaultTestConfig(t) - cfg.CORSAllowedOrigins = []string{test.ClientOrigin} - cfg.CORSAllowedMethods = []string{"GET", "POST", "DELETE"} - //cfg.CORSAllowedHeaders = []string{"Content-Type"} - cfg.CORSMaxAge = 10 * time.Minute - - return testAPIwithConfig(t, cfg, "basic") -} - -func testHTTPSAPI(t *testing.T) *API { - cfg := newDefaultTestConfig(t) - cfg.PathSSLCertFile = SSLCertFile - cfg.PathSSLKeyFile = SSLKeyFile - var err error - cfg.TLS, err = newTLSConfig(cfg.PathSSLCertFile, cfg.PathSSLKeyFile) - if err != nil { - t.Fatal(err) - } - - return testAPIwithConfig(t, cfg, "https") -} - -func testAPIwithBasicAuth(t *testing.T) *API { - cfg := newDefaultTestConfig(t) - cfg.BasicAuthCredentials = map[string]string{ - validUserName: validUserPassword, - adminUserName: adminUserPassword, - } - - return testAPIwithConfig(t, cfg, "Basic Authentication") -} - -func TestAPIShutdown(t *testing.T) { - ctx := context.Background() - rest := testAPI(t) - err := rest.Shutdown(ctx) - if err != nil { - t.Error("should shutdown cleanly: ", err) - } - // test shutting down twice - rest.Shutdown(ctx) - -} - -func TestHTTPSTestEndpoint(t *testing.T) { - ctx := context.Background() - rest := testAPI(t) - httpsrest := testHTTPSAPI(t) - defer rest.Shutdown(ctx) - defer httpsrest.Shutdown(ctx) - - tf := func(t *testing.T, url test.URLFunc) { - r := make(map[string]string) - 
test.MakeGet(t, rest, url(rest)+"/test", &r) - if r["thisis"] != "atest" { - t.Error("expected correct body") - } - } - - httpstf := func(t *testing.T, url test.URLFunc) { - r := make(map[string]string) - test.MakeGet(t, httpsrest, url(httpsrest)+"/test", &r) - if r["thisis"] != "atest" { - t.Error("expected correct body") - } - } - - test.BothEndpoints(t, tf) - test.HTTPSEndPoint(t, httpstf) -} - -func TestAPILogging(t *testing.T) { - ctx := context.Background() - cfg := newDefaultTestConfig(t) - - logFile, err := filepath.Abs("http.log") - if err != nil { - t.Fatal(err) - } - cfg.HTTPLogFile = logFile - - rest := testAPIwithConfig(t, cfg, "log_enabled") - defer os.Remove(cfg.HTTPLogFile) - - info, err := os.Stat(cfg.HTTPLogFile) - if err != nil { - t.Fatal(err) - } - if info.Size() > 0 { - t.Errorf("expected empty log file") - } - - id := api.ID{} - test.MakeGet(t, rest, test.HTTPURL(rest)+"/test", &id) - - info, err = os.Stat(cfg.HTTPLogFile) - if err != nil { - t.Fatal(err) - } - size1 := info.Size() - if size1 == 0 { - t.Error("did not expect an empty log file") - } - - // Restart API and make sure that logs are being appended - rest.Shutdown(ctx) - - rest = testAPIwithConfig(t, cfg, "log_enabled") - defer rest.Shutdown(ctx) - - test.MakeGet(t, rest, test.HTTPURL(rest)+"/id", &id) - - info, err = os.Stat(cfg.HTTPLogFile) - if err != nil { - t.Fatal(err) - } - size2 := info.Size() - if size2 == 0 { - t.Error("did not expect an empty log file") - } - - if !(size2 > size1) { - t.Error("logs were not appended") - } - -} - -func TestNotFoundHandler(t *testing.T) { - ctx := context.Background() - rest := testAPI(t) - defer rest.Shutdown(ctx) - - tf := func(t *testing.T, url test.URLFunc) { - bytes := make([]byte, 10) - for i := 0; i < 10; i++ { - bytes[i] = byte(65 + rand.Intn(25)) //A=65 and Z = 65+25 - } - - var errResp api.Error - test.MakePost(t, rest, url(rest)+"/"+string(bytes), []byte{}, &errResp) - if errResp.Code != 404 { - t.Errorf("expected error not found: %+v", errResp) - } - - var errResp1 api.Error - test.MakeGet(t, rest, url(rest)+"/"+string(bytes), &errResp1) - if errResp1.Code != 404 { - t.Errorf("expected error not found: %+v", errResp) - } - } - - test.BothEndpoints(t, tf) -} - -func TestCORS(t *testing.T) { - ctx := context.Background() - rest := testAPI(t) - defer rest.Shutdown(ctx) - - type testcase struct { - method string - path string - } - - tf := func(t *testing.T, url test.URLFunc) { - reqHeaders := make(http.Header) - reqHeaders.Set("Origin", "myorigin") - reqHeaders.Set("Access-Control-Request-Headers", "Content-Type") - - for _, tc := range []testcase{ - {"GET", "/test"}, - // testcase{}, - } { - reqHeaders.Set("Access-Control-Request-Method", tc.method) - headers := test.MakeOptions(t, rest, url(rest)+tc.path, reqHeaders) - aorigin := headers.Get("Access-Control-Allow-Origin") - amethods := headers.Get("Access-Control-Allow-Methods") - aheaders := headers.Get("Access-Control-Allow-Headers") - acreds := headers.Get("Access-Control-Allow-Credentials") - maxage := headers.Get("Access-Control-Max-Age") - - if aorigin != "myorigin" { - t.Error("Bad ACA-Origin:", aorigin) - } - - if amethods != tc.method { - t.Error("Bad ACA-Methods:", amethods) - } - - if aheaders != "Content-Type" { - t.Error("Bad ACA-Headers:", aheaders) - } - - if acreds != "true" { - t.Error("Bad ACA-Credentials:", acreds) - } - - if maxage != "600" { - t.Error("Bad AC-Max-Age:", maxage) - } - } - - } - - test.BothEndpoints(t, tf) -} - -type responseChecker func(*http.Response) error -type 
requestShaper func(*http.Request) error - -type httpTestcase struct { - method string - path string - header http.Header - body io.ReadCloser - shaper requestShaper - checker responseChecker -} - -func httpStatusCodeChecker(resp *http.Response, expectedStatus int) error { - if resp.StatusCode == expectedStatus { - return nil - } - return fmt.Errorf("unexpected HTTP status code: %d", resp.StatusCode) -} - -func assertHTTPStatusIsUnauthoriazed(resp *http.Response) error { - return httpStatusCodeChecker(resp, http.StatusUnauthorized) -} - -func assertHTTPStatusIsTooLarge(resp *http.Response) error { - return httpStatusCodeChecker(resp, http.StatusRequestHeaderFieldsTooLarge) -} - -func makeHTTPStatusNegatedAssert(checker responseChecker) responseChecker { - return func(resp *http.Response) error { - if checker(resp) == nil { - return fmt.Errorf("unexpected HTTP status code: %d", resp.StatusCode) - } - return nil - } -} - -func (tc *httpTestcase) getTestFunction(api *API) test.Func { - return func(t *testing.T, prefixMaker test.URLFunc) { - h := test.MakeHost(t, api) - defer h.Close() - url := prefixMaker(api) + tc.path - c := test.HTTPClient(t, h, test.IsHTTPS(url)) - req, err := http.NewRequest(tc.method, url, tc.body) - if err != nil { - t.Fatal("Failed to assemble a HTTP request: ", err) - } - if tc.header != nil { - req.Header = tc.header - } - if tc.shaper != nil { - err := tc.shaper(req) - if err != nil { - t.Fatal("Failed to shape a HTTP request: ", err) - } - } - resp, err := c.Do(req) - if err != nil { - t.Fatal("Failed to make a HTTP request: ", err) - } - if tc.checker != nil { - if err := tc.checker(resp); err != nil { - r, e := httputil.DumpRequest(req, true) - if e != nil { - t.Errorf("Assertion failed with: %q", err) - } else { - t.Errorf("Assertion failed with: %q on request: \n%.100s", err, r) - } - } - } - } -} - -func makeBasicAuthRequestShaper(username, password string) requestShaper { - return func(req *http.Request) error { - req.SetBasicAuth(username, password) - return nil - } -} - -func makeTokenAuthRequestShaper(token string) requestShaper { - return func(req *http.Request) error { - req.Header.Set("Authorization", "Bearer "+token) - return nil - } -} - -func makeLongHeaderShaper(size int) requestShaper { - return func(req *http.Request) error { - for sz := size; sz > 0; sz -= 8 { - req.Header.Add("Foo", "bar") - } - return nil - } -} - -func TestBasicAuth(t *testing.T) { - ctx := context.Background() - rest := testAPIwithBasicAuth(t) - defer rest.Shutdown(ctx) - - for _, tc := range []httpTestcase{ - {}, - { - method: "", - path: "", - checker: assertHTTPStatusIsUnauthoriazed, - }, - { - method: "GET", - path: "", - checker: assertHTTPStatusIsUnauthoriazed, - }, - { - method: "GET", - path: "/", - checker: assertHTTPStatusIsUnauthoriazed, - }, - { - method: "GET", - path: "/foo", - checker: assertHTTPStatusIsUnauthoriazed, - }, - { - method: "POST", - path: "/foo", - checker: assertHTTPStatusIsUnauthoriazed, - }, - { - method: "DELETE", - path: "/foo", - checker: assertHTTPStatusIsUnauthoriazed, - }, - { - method: "HEAD", - path: "/foo", - checker: assertHTTPStatusIsUnauthoriazed, - }, - { - method: "OPTIONS", // Always allowed for CORS - path: "/foo", - checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsUnauthoriazed), - }, - { - method: "PUT", - path: "/foo", - checker: assertHTTPStatusIsUnauthoriazed, - }, - { - method: "TRACE", - path: "/foo", - checker: assertHTTPStatusIsUnauthoriazed, - }, - { - method: "CONNECT", - path: "/foo", - checker: 
assertHTTPStatusIsUnauthoriazed, - }, - { - method: "BAR", - path: "/foo", - checker: assertHTTPStatusIsUnauthoriazed, - }, - { - method: "GET", - path: "/foo", - shaper: makeBasicAuthRequestShaper(invalidUserName, invalidUserPassword), - checker: assertHTTPStatusIsUnauthoriazed, - }, - { - method: "GET", - path: "/foo", - shaper: makeBasicAuthRequestShaper(validUserName, invalidUserPassword), - checker: assertHTTPStatusIsUnauthoriazed, - }, - { - method: "GET", - path: "/foo", - shaper: makeBasicAuthRequestShaper(invalidUserName, validUserPassword), - checker: assertHTTPStatusIsUnauthoriazed, - }, - { - method: "GET", - path: "/foo", - shaper: makeBasicAuthRequestShaper(adminUserName, validUserPassword), - checker: assertHTTPStatusIsUnauthoriazed, - }, - { - method: "GET", - path: "/foo", - shaper: makeBasicAuthRequestShaper(validUserName, validUserPassword), - checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsUnauthoriazed), - }, - { - method: "POST", - path: "/foo", - shaper: makeBasicAuthRequestShaper(validUserName, validUserPassword), - checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsUnauthoriazed), - }, - { - method: "DELETE", - path: "/foo", - shaper: makeBasicAuthRequestShaper(validUserName, validUserPassword), - checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsUnauthoriazed), - }, - { - method: "BAR", - path: "/foo", - shaper: makeBasicAuthRequestShaper(validUserName, validUserPassword), - checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsUnauthoriazed), - }, - { - method: "GET", - path: "/test", - shaper: makeBasicAuthRequestShaper(validUserName, validUserPassword), - checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsUnauthoriazed), - }, - } { - test.BothEndpoints(t, tc.getTestFunction(rest)) - } -} - -func TestTokenAuth(t *testing.T) { - ctx := context.Background() - rest := testAPIwithBasicAuth(t) - defer rest.Shutdown(ctx) - - for _, tc := range []httpTestcase{ - {}, - { - method: "", - path: "", - checker: assertHTTPStatusIsUnauthoriazed, - }, - { - method: "GET", - path: "", - checker: assertHTTPStatusIsUnauthoriazed, - }, - { - method: "GET", - path: "/", - checker: assertHTTPStatusIsUnauthoriazed, - }, - { - method: "GET", - path: "/foo", - checker: assertHTTPStatusIsUnauthoriazed, - }, - { - method: "POST", - path: "/foo", - checker: assertHTTPStatusIsUnauthoriazed, - }, - { - method: "DELETE", - path: "/foo", - checker: assertHTTPStatusIsUnauthoriazed, - }, - { - method: "HEAD", - path: "/foo", - checker: assertHTTPStatusIsUnauthoriazed, - }, - { - method: "OPTIONS", // Always allowed for CORS - path: "/foo", - checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsUnauthoriazed), - }, - { - method: "PUT", - path: "/foo", - checker: assertHTTPStatusIsUnauthoriazed, - }, - { - method: "TRACE", - path: "/foo", - checker: assertHTTPStatusIsUnauthoriazed, - }, - { - method: "CONNECT", - path: "/foo", - checker: assertHTTPStatusIsUnauthoriazed, - }, - { - method: "BAR", - path: "/foo", - checker: assertHTTPStatusIsUnauthoriazed, - }, - { - method: "GET", - path: "/foo", - shaper: makeTokenAuthRequestShaper(invalidToken), - checker: assertHTTPStatusIsUnauthoriazed, - }, - { - method: "GET", - path: "/foo", - shaper: makeTokenAuthRequestShaper(invalidToken), - checker: assertHTTPStatusIsUnauthoriazed, - }, - { - method: "GET", - path: "/foo", - shaper: makeTokenAuthRequestShaper(validToken), - checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsUnauthoriazed), - }, - { - method: "POST", - path: "/foo", - shaper: 
makeTokenAuthRequestShaper(validToken), - checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsUnauthoriazed), - }, - { - method: "DELETE", - path: "/foo", - shaper: makeTokenAuthRequestShaper(validToken), - checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsUnauthoriazed), - }, - { - method: "BAR", - path: "/foo", - shaper: makeTokenAuthRequestShaper(validToken), - checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsUnauthoriazed), - }, - { - method: "GET", - path: "/test", - shaper: makeTokenAuthRequestShaper(validToken), - checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsUnauthoriazed), - }, - } { - test.BothEndpoints(t, tc.getTestFunction(rest)) - } -} - -func TestLimitMaxHeaderSize(t *testing.T) { - maxHeaderBytes := 4 * DefaultMaxHeaderBytes - cfg := newTestConfig() - cfg.MaxHeaderBytes = maxHeaderBytes - ctx := context.Background() - rest := testAPIwithConfig(t, cfg, "http with maxHeaderBytes") - defer rest.Shutdown(ctx) - - for _, tc := range []httpTestcase{ - { - method: "GET", - path: "/foo", - shaper: makeLongHeaderShaper(maxHeaderBytes * 2), - checker: assertHTTPStatusIsTooLarge, - }, - { - method: "GET", - path: "/foo", - shaper: makeLongHeaderShaper(maxHeaderBytes / 2), - checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsTooLarge), - }, - } { - test.BothEndpoints(t, tc.getTestFunction(rest)) - } -} diff --git a/packages/networking/ipfs-cluster/api/common/config.go b/packages/networking/ipfs-cluster/api/common/config.go deleted file mode 100644 index 46d204c..0000000 --- a/packages/networking/ipfs-cluster/api/common/config.go +++ /dev/null @@ -1,480 +0,0 @@ -package common - -import ( - "crypto/tls" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "os" - "path/filepath" - "time" - - logging "github.com/ipfs/go-log/v2" - crypto "github.com/libp2p/go-libp2p/core/crypto" - peer "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" - - "github.com/kelseyhightower/envconfig" - "github.com/rs/cors" - - "github.com/ipfs-cluster/ipfs-cluster/config" -) - -const minMaxHeaderBytes = 4096 - -const defaultMaxHeaderBytes = minMaxHeaderBytes - -// Config provides common API configuration values and allows to customize its -// behavior. It implements most of the config.ComponentConfig interface -// (except the Default() and ConfigKey() methods). Config should be embedded -// in a Config object that implements the missing methods and sets the -// meta options. -type Config struct { - config.Saver - - // These are meta-options and should be set by actual Config - // implementations as early as possible. - DefaultFunc func(*Config) error - ConfigKey string - EnvConfigKey string - Logger *logging.ZapEventLogger - RequestLogger *logging.ZapEventLogger - APIErrorFunc func(err error, status int) error - - // Listen address for the HTTP REST API endpoint. - HTTPListenAddr []ma.Multiaddr - - // TLS configuration for the HTTP listener - TLS *tls.Config - - // pathSSLCertFile is a path to a certificate file used to secure the - // HTTP API endpoint. We track it so we can write it in the JSON. - PathSSLCertFile string - - // pathSSLKeyFile is a path to the private key corresponding to the - // SSLKeyFile. We track it so we can write it in the JSON. 
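// (Both of these paths feed newTLSConfig further down in this file, which
// is what actually populates the TLS field above.)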
- PathSSLKeyFile string - - // Maximum duration before timing out reading a full request - ReadTimeout time.Duration - - // Maximum duration before timing out reading the headers of a request - ReadHeaderTimeout time.Duration - - // Maximum duration before timing out write of the response - WriteTimeout time.Duration - - // Server-side amount of time a Keep-Alive connection will be - // kept idle before being reused - IdleTimeout time.Duration - - // Maximum cumulative size of HTTP request headers in bytes - // accepted by the server - MaxHeaderBytes int - - // Listen address for the Libp2p REST API endpoint. - Libp2pListenAddr []ma.Multiaddr - - // ID and PrivateKey are used to create a libp2p host if we - // want the API component to do it (not by default). - ID peer.ID - PrivateKey crypto.PrivKey - - // BasicAuthCredentials is a map of username-password pairs - // which are authorized to use Basic Authentication - BasicAuthCredentials map[string]string - - // HTTPLogFile is path of the file that would save HTTP API logs. If this - // path is empty, HTTP logs would be sent to standard output. This path - // should either be absolute or relative to cluster base directory. Its - // default value is empty. - HTTPLogFile string - - // Headers provides customization for the headers returned - // by the API on existing routes. - Headers map[string][]string - - // CORS header management - CORSAllowedOrigins []string - CORSAllowedMethods []string - CORSAllowedHeaders []string - CORSExposedHeaders []string - CORSAllowCredentials bool - CORSMaxAge time.Duration - - // Tracing flag used to skip tracing specific paths when not enabled. - Tracing bool -} - -type jsonConfig struct { - HTTPListenMultiaddress config.Strings `json:"http_listen_multiaddress"` - SSLCertFile string `json:"ssl_cert_file,omitempty"` - SSLKeyFile string `json:"ssl_key_file,omitempty"` - ReadTimeout string `json:"read_timeout"` - ReadHeaderTimeout string `json:"read_header_timeout"` - WriteTimeout string `json:"write_timeout"` - IdleTimeout string `json:"idle_timeout"` - MaxHeaderBytes int `json:"max_header_bytes"` - - Libp2pListenMultiaddress config.Strings `json:"libp2p_listen_multiaddress,omitempty"` - ID string `json:"id,omitempty"` - PrivateKey string `json:"private_key,omitempty" hidden:"true"` - - BasicAuthCredentials map[string]string `json:"basic_auth_credentials" hidden:"true"` - HTTPLogFile string `json:"http_log_file"` - Headers map[string][]string `json:"headers"` - - CORSAllowedOrigins []string `json:"cors_allowed_origins"` - CORSAllowedMethods []string `json:"cors_allowed_methods"` - CORSAllowedHeaders []string `json:"cors_allowed_headers"` - CORSExposedHeaders []string `json:"cors_exposed_headers"` - CORSAllowCredentials bool `json:"cors_allow_credentials"` - CORSMaxAge string `json:"cors_max_age"` -} - -// GetHTTPLogPath gets full path of the file where http logs should be -// saved. -func (cfg *Config) GetHTTPLogPath() string { - if filepath.IsAbs(cfg.HTTPLogFile) { - return cfg.HTTPLogFile - } - - if cfg.BaseDir == "" { - return "" - } - - return filepath.Join(cfg.BaseDir, cfg.HTTPLogFile) -} - -// ApplyEnvVars fills in any Config fields found as environment variables. -func (cfg *Config) ApplyEnvVars() error { - jcfg, err := cfg.toJSONConfig() - if err != nil { - return err - } - - err = envconfig.Process(cfg.EnvConfigKey, jcfg) - if err != nil { - return err - } - return cfg.applyJSONConfig(jcfg) -} - -// Validate makes sure that all fields in this Config have -// working values, at least in appearance. 
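// For illustration, a hypothetical check exercising these rules (values are
// made up; minMaxHeaderBytes is the 4096 constant defined above):
//
//	cfg := newDefaultTestConfig(t) // helper from config_test.go
//	cfg.MaxHeaderBytes = 1024      // below minMaxHeaderBytes
//	if err := cfg.Validate(); err == nil {
//		t.Fatal("expected max_header_bytes to be rejected")
//	}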
-func (cfg *Config) Validate() error {
-	if cfg.Logger == nil || cfg.RequestLogger == nil {
-		return errors.New("config loggers not set")
-	}
-
-	switch {
-	case cfg.ReadTimeout < 0:
-		return errors.New(cfg.ConfigKey + ".read_timeout is invalid")
-	case cfg.ReadHeaderTimeout < 0:
-		return errors.New(cfg.ConfigKey + ".read_header_timeout is invalid")
-	case cfg.WriteTimeout < 0:
-		return errors.New(cfg.ConfigKey + ".write_timeout is invalid")
-	case cfg.IdleTimeout < 0:
-		return errors.New(cfg.ConfigKey + ".idle_timeout is invalid")
-	case cfg.MaxHeaderBytes < minMaxHeaderBytes:
-		return fmt.Errorf(cfg.ConfigKey+".max_header_bytes must not be less than %d", minMaxHeaderBytes)
-	case cfg.BasicAuthCredentials != nil && len(cfg.BasicAuthCredentials) == 0:
-		return errors.New(cfg.ConfigKey + ".basic_auth_creds should be null or have at least one entry")
-	case (cfg.PathSSLCertFile != "" || cfg.PathSSLKeyFile != "") && cfg.TLS == nil:
-		return errors.New(cfg.ConfigKey + ": missing TLS configuration")
-	case (cfg.CORSMaxAge < 0):
-		return errors.New(cfg.ConfigKey + ".cors_max_age is invalid")
-	}
-
-	return cfg.validateLibp2p()
-}
-
-func (cfg *Config) validateLibp2p() error {
-	if cfg.ID != "" || cfg.PrivateKey != nil || len(cfg.Libp2pListenAddr) > 0 {
-		// if one is set, all should be
-		if cfg.ID == "" || cfg.PrivateKey == nil || len(cfg.Libp2pListenAddr) == 0 {
-			return errors.New("ID, private_key and libp2p_listen_multiaddress must all be set")
-		}
-		if !cfg.ID.MatchesPrivateKey(cfg.PrivateKey) {
-			return errors.New(cfg.ConfigKey + ".ID does not match private_key")
-		}
-	}
-
-	return nil
-}
-
-// LoadJSON parses a raw JSON byte slice created by ToJSON() and sets the
-// configuration fields accordingly.
-func (cfg *Config) LoadJSON(raw []byte) error {
-	jcfg := &jsonConfig{}
-	err := json.Unmarshal(raw, jcfg)
-	if err != nil {
-		cfg.Logger.Error(cfg.ConfigKey + ": error unmarshaling config")
-		return err
-	}
-
-	if cfg.DefaultFunc == nil {
-		return errors.New("default config generation not set.
This is a bug") - } - cfg.DefaultFunc(cfg) - - return cfg.applyJSONConfig(jcfg) -} - -func (cfg *Config) applyJSONConfig(jcfg *jsonConfig) error { - err := cfg.loadHTTPOptions(jcfg) - if err != nil { - return err - } - - err = cfg.loadLibp2pOptions(jcfg) - if err != nil { - return err - } - - // Other options - cfg.BasicAuthCredentials = jcfg.BasicAuthCredentials - cfg.HTTPLogFile = jcfg.HTTPLogFile - cfg.Headers = jcfg.Headers - - return cfg.Validate() -} - -func (cfg *Config) loadHTTPOptions(jcfg *jsonConfig) error { - if addresses := jcfg.HTTPListenMultiaddress; len(addresses) > 0 { - cfg.HTTPListenAddr = make([]ma.Multiaddr, 0, len(addresses)) - for _, addr := range addresses { - httpAddr, err := ma.NewMultiaddr(addr) - if err != nil { - err = fmt.Errorf("error parsing %s.http_listen_multiaddress: %s", cfg.ConfigKey, err) - return err - } - cfg.HTTPListenAddr = append(cfg.HTTPListenAddr, httpAddr) - } - } - - err := cfg.tlsOptions(jcfg) - if err != nil { - return err - } - - if jcfg.MaxHeaderBytes == 0 { - cfg.MaxHeaderBytes = defaultMaxHeaderBytes - } else { - cfg.MaxHeaderBytes = jcfg.MaxHeaderBytes - } - - // CORS - cfg.CORSAllowedOrigins = jcfg.CORSAllowedOrigins - cfg.CORSAllowedMethods = jcfg.CORSAllowedMethods - cfg.CORSAllowedHeaders = jcfg.CORSAllowedHeaders - cfg.CORSExposedHeaders = jcfg.CORSExposedHeaders - cfg.CORSAllowCredentials = jcfg.CORSAllowCredentials - if jcfg.CORSMaxAge == "" { // compatibility - jcfg.CORSMaxAge = "0s" - } - - return config.ParseDurations( - cfg.ConfigKey, - &config.DurationOpt{Duration: jcfg.ReadTimeout, Dst: &cfg.ReadTimeout, Name: "read_timeout"}, - &config.DurationOpt{Duration: jcfg.ReadHeaderTimeout, Dst: &cfg.ReadHeaderTimeout, Name: "read_header_timeout"}, - &config.DurationOpt{Duration: jcfg.WriteTimeout, Dst: &cfg.WriteTimeout, Name: "write_timeout"}, - &config.DurationOpt{Duration: jcfg.IdleTimeout, Dst: &cfg.IdleTimeout, Name: "idle_timeout"}, - &config.DurationOpt{Duration: jcfg.CORSMaxAge, Dst: &cfg.CORSMaxAge, Name: "cors_max_age"}, - ) -} - -func (cfg *Config) tlsOptions(jcfg *jsonConfig) error { - cert := jcfg.SSLCertFile - key := jcfg.SSLKeyFile - - if cert+key == "" { - return nil - } - - cfg.PathSSLCertFile = cert - cfg.PathSSLKeyFile = key - - if !filepath.IsAbs(cert) { - cert = filepath.Join(cfg.BaseDir, cert) - } - - if !filepath.IsAbs(key) { - key = filepath.Join(cfg.BaseDir, key) - } - - cfg.Logger.Debug("baseDir: ", cfg.BaseDir) - cfg.Logger.Debug("cert path: ", cert) - cfg.Logger.Debug("key path: ", key) - - tlsCfg, err := newTLSConfig(cert, key) - if err != nil { - return err - } - cfg.TLS = tlsCfg - return nil -} - -func (cfg *Config) loadLibp2pOptions(jcfg *jsonConfig) error { - if addresses := jcfg.Libp2pListenMultiaddress; len(addresses) > 0 { - cfg.Libp2pListenAddr = make([]ma.Multiaddr, 0, len(addresses)) - for _, addr := range addresses { - libp2pAddr, err := ma.NewMultiaddr(addr) - if err != nil { - err = fmt.Errorf("error parsing %s.libp2p_listen_multiaddress: %s", cfg.ConfigKey, err) - return err - } - cfg.Libp2pListenAddr = append(cfg.Libp2pListenAddr, libp2pAddr) - } - } - - if jcfg.PrivateKey != "" { - pkb, err := base64.StdEncoding.DecodeString(jcfg.PrivateKey) - if err != nil { - return fmt.Errorf("error decoding %s.private_key: %s", cfg.ConfigKey, err) - } - pKey, err := crypto.UnmarshalPrivateKey(pkb) - if err != nil { - return fmt.Errorf("error parsing %s.private_key ID: %s", cfg.ConfigKey, err) - } - cfg.PrivateKey = pKey - } - - if jcfg.ID != "" { - id, err := peer.Decode(jcfg.ID) - if err != nil { - 
return fmt.Errorf("error parsing %s.ID: %s", cfg.ConfigKey, err) - } - cfg.ID = id - } - return nil -} - -// ToJSON produce a human-friendly JSON representation of the Config -// object. -func (cfg *Config) ToJSON() (raw []byte, err error) { - jcfg, err := cfg.toJSONConfig() - if err != nil { - return - } - - raw, err = config.DefaultJSONMarshal(jcfg) - return -} - -func (cfg *Config) toJSONConfig() (jcfg *jsonConfig, err error) { - // Multiaddress String() may panic - defer func() { - if r := recover(); r != nil { - err = fmt.Errorf("%s", r) - } - }() - - httpAddresses := make([]string, 0, len(cfg.HTTPListenAddr)) - for _, addr := range cfg.HTTPListenAddr { - httpAddresses = append(httpAddresses, addr.String()) - } - - libp2pAddresses := make([]string, 0, len(cfg.Libp2pListenAddr)) - for _, addr := range cfg.Libp2pListenAddr { - libp2pAddresses = append(libp2pAddresses, addr.String()) - } - - jcfg = &jsonConfig{ - HTTPListenMultiaddress: httpAddresses, - SSLCertFile: cfg.PathSSLCertFile, - SSLKeyFile: cfg.PathSSLKeyFile, - ReadTimeout: cfg.ReadTimeout.String(), - ReadHeaderTimeout: cfg.ReadHeaderTimeout.String(), - WriteTimeout: cfg.WriteTimeout.String(), - IdleTimeout: cfg.IdleTimeout.String(), - MaxHeaderBytes: cfg.MaxHeaderBytes, - BasicAuthCredentials: cfg.BasicAuthCredentials, - HTTPLogFile: cfg.HTTPLogFile, - Headers: cfg.Headers, - CORSAllowedOrigins: cfg.CORSAllowedOrigins, - CORSAllowedMethods: cfg.CORSAllowedMethods, - CORSAllowedHeaders: cfg.CORSAllowedHeaders, - CORSExposedHeaders: cfg.CORSExposedHeaders, - CORSAllowCredentials: cfg.CORSAllowCredentials, - CORSMaxAge: cfg.CORSMaxAge.String(), - } - - if cfg.ID != "" { - jcfg.ID = cfg.ID.String() - } - if cfg.PrivateKey != nil { - pkeyBytes, err := crypto.MarshalPrivateKey(cfg.PrivateKey) - if err == nil { - pKey := base64.StdEncoding.EncodeToString(pkeyBytes) - jcfg.PrivateKey = pKey - } - } - if len(libp2pAddresses) > 0 { - jcfg.Libp2pListenMultiaddress = libp2pAddresses - } - - return -} - -// CorsOptions returns cors.Options setup from the configured values. -func (cfg *Config) CorsOptions() *cors.Options { - maxAgeSeconds := int(cfg.CORSMaxAge / time.Second) - - return &cors.Options{ - AllowedOrigins: cfg.CORSAllowedOrigins, - AllowedMethods: cfg.CORSAllowedMethods, - AllowedHeaders: cfg.CORSAllowedHeaders, - ExposedHeaders: cfg.CORSExposedHeaders, - AllowCredentials: cfg.CORSAllowCredentials, - MaxAge: maxAgeSeconds, - Debug: false, - } -} - -// ToDisplayJSON returns JSON config as a string. -func (cfg *Config) ToDisplayJSON() ([]byte, error) { - jcfg, err := cfg.toJSONConfig() - if err != nil { - return nil, err - } - - return config.DisplayJSON(jcfg) -} - -// LogWriter returns a writer to write logs to. If a log path is configured, -// it creates a file. Otherwise, uses the given logger. 
-func (cfg *Config) LogWriter() (io.Writer, error) { - if cfg.HTTPLogFile != "" { - f, err := os.OpenFile(cfg.GetHTTPLogPath(), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - return nil, err - } - return f, nil - } - return logWriter{ - logger: cfg.RequestLogger, - }, nil -} - -func newTLSConfig(certFile, keyFile string) (*tls.Config, error) { - cert, err := tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return nil, errors.New("Error loading TLS certficate/key: " + err.Error()) - } - // based on https://github.com/denji/golang-tls - return &tls.Config{ - MinVersion: tls.VersionTLS12, - CurvePreferences: []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256}, - PreferServerCipherSuites: true, - CipherSuites: []uint16{ - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - }, - Certificates: []tls.Certificate{cert}, - }, nil -} diff --git a/packages/networking/ipfs-cluster/api/common/config_test.go b/packages/networking/ipfs-cluster/api/common/config_test.go deleted file mode 100644 index b5071c2..0000000 --- a/packages/networking/ipfs-cluster/api/common/config_test.go +++ /dev/null @@ -1,335 +0,0 @@ -package common - -import ( - "context" - "encoding/json" - "os" - "testing" - "time" - - logging "github.com/ipfs/go-log/v2" - types "github.com/ipfs-cluster/ipfs-cluster/api" - crypto "github.com/libp2p/go-libp2p/core/crypto" - peer "github.com/libp2p/go-libp2p/core/peer" - rpc "github.com/libp2p/go-libp2p-gorpc" - ma "github.com/multiformats/go-multiaddr" -) - -// Default testing values -var ( - DefaultReadTimeout = 0 * time.Second - DefaultReadHeaderTimeout = 5 * time.Second - DefaultWriteTimeout = 0 * time.Second - DefaultIdleTimeout = 120 * time.Second - DefaultMaxHeaderBytes = minMaxHeaderBytes - DefaultHTTPListenAddrs = []string{"/ip4/127.0.0.1/tcp/9094"} - DefaultHeaders = map[string][]string{} - DefaultCORSAllowedOrigins = []string{"*"} - DefaultCORSAllowedMethods = []string{} - DefaultCORSAllowedHeaders = []string{} - DefaultCORSExposedHeaders = []string{ - "Content-Type", - "X-Stream-Output", - "X-Chunked-Output", - "X-Content-Length", - } - DefaultCORSAllowCredentials = true - DefaultCORSMaxAge time.Duration // 0. Means always. 
-) - -func defaultFunc(cfg *Config) error { - // http - addrs := make([]ma.Multiaddr, 0, len(DefaultHTTPListenAddrs)) - for _, def := range DefaultHTTPListenAddrs { - httpListen, err := ma.NewMultiaddr(def) - if err != nil { - return err - } - addrs = append(addrs, httpListen) - } - cfg.HTTPListenAddr = addrs - cfg.PathSSLCertFile = "" - cfg.PathSSLKeyFile = "" - cfg.ReadTimeout = DefaultReadTimeout - cfg.ReadHeaderTimeout = DefaultReadHeaderTimeout - cfg.WriteTimeout = DefaultWriteTimeout - cfg.IdleTimeout = DefaultIdleTimeout - cfg.MaxHeaderBytes = DefaultMaxHeaderBytes - - // libp2p - cfg.ID = "" - cfg.PrivateKey = nil - cfg.Libp2pListenAddr = nil - - // Auth - cfg.BasicAuthCredentials = nil - - // Logs - cfg.HTTPLogFile = "" - - // Headers - cfg.Headers = DefaultHeaders - - cfg.CORSAllowedOrigins = DefaultCORSAllowedOrigins - cfg.CORSAllowedMethods = DefaultCORSAllowedMethods - cfg.CORSAllowedHeaders = DefaultCORSAllowedHeaders - cfg.CORSExposedHeaders = DefaultCORSExposedHeaders - cfg.CORSAllowCredentials = DefaultCORSAllowCredentials - cfg.CORSMaxAge = DefaultCORSMaxAge - - return nil -} - -var cfgJSON = []byte(` -{ - "listen_multiaddress": "/ip4/127.0.0.1/tcp/12122", - "ssl_cert_file": "test/server.crt", - "ssl_key_file": "test/server.key", - "read_timeout": "30s", - "read_header_timeout": "5s", - "write_timeout": "1m0s", - "idle_timeout": "2m0s", - "max_header_bytes": 16384, - "basic_auth_credentials": null, - "http_log_file": "", - "cors_allowed_origins": ["myorigin"], - "cors_allowed_methods": ["GET"], - "cors_allowed_headers": ["X-Custom"], - "cors_exposed_headers": ["X-Chunked-Output"], - "cors_allow_credentials": false, - "cors_max_age": "1s" -} -`) - -func newTestConfig() *Config { - cfg := &Config{} - cfg.ConfigKey = "testapi" - cfg.EnvConfigKey = "cluster_testapi" - cfg.Logger = logging.Logger("testapi") - cfg.RequestLogger = logging.Logger("testapilog") - cfg.DefaultFunc = defaultFunc - cfg.APIErrorFunc = func(err error, status int) error { - return types.Error{Code: status, Message: err.Error()} - } - return cfg -} - -func newDefaultTestConfig(t *testing.T) *Config { - t.Helper() - cfg := newTestConfig() - if err := defaultFunc(cfg); err != nil { - t.Fatal(err) - } - return cfg -} - -func TestLoadEmptyJSON(t *testing.T) { - cfg := newTestConfig() - err := cfg.LoadJSON([]byte(`{}`)) - if err != nil { - t.Fatal(err) - } -} - -func TestLoadJSON(t *testing.T) { - cfg := newTestConfig() - err := cfg.LoadJSON(cfgJSON) - if err != nil { - t.Fatal(err) - } - - if cfg.ReadTimeout != 30*time.Second || - cfg.WriteTimeout != time.Minute || - cfg.ReadHeaderTimeout != 5*time.Second || - cfg.IdleTimeout != 2*time.Minute { - t.Error("error parsing timeouts") - } - - j := &jsonConfig{} - - json.Unmarshal(cfgJSON, j) - j.HTTPListenMultiaddress = []string{"abc"} - tst, _ := json.Marshal(j) - err = cfg.LoadJSON(tst) - if err == nil { - t.Error("expected error decoding listen multiaddress") - } - - j = &jsonConfig{} - json.Unmarshal(cfgJSON, j) - j.ReadTimeout = "-1" - tst, _ = json.Marshal(j) - err = cfg.LoadJSON(tst) - if err == nil { - t.Error("expected error in read_timeout") - } - - j = &jsonConfig{} - json.Unmarshal(cfgJSON, j) - j.BasicAuthCredentials = make(map[string]string) - tst, _ = json.Marshal(j) - err = cfg.LoadJSON(tst) - if err == nil { - t.Error("expected error with empty basic auth map") - } - - j = &jsonConfig{} - json.Unmarshal(cfgJSON, j) - j.SSLCertFile = "abc" - tst, _ = json.Marshal(j) - err = cfg.LoadJSON(tst) - if err == nil { - t.Error("expected error with TLS 
configuration") - } - - j = &jsonConfig{} - json.Unmarshal(cfgJSON, j) - j.ID = "abc" - tst, _ = json.Marshal(j) - err = cfg.LoadJSON(tst) - if err == nil { - t.Error("expected error with ID") - } - - j = &jsonConfig{} - json.Unmarshal(cfgJSON, j) - j.Libp2pListenMultiaddress = []string{"abc"} - tst, _ = json.Marshal(j) - err = cfg.LoadJSON(tst) - if err == nil { - t.Error("expected error with libp2p address") - } - - j = &jsonConfig{} - json.Unmarshal(cfgJSON, j) - j.PrivateKey = "abc" - tst, _ = json.Marshal(j) - err = cfg.LoadJSON(tst) - if err == nil { - t.Error("expected error with private key") - } - - j = &jsonConfig{} - json.Unmarshal(cfgJSON, j) - j.MaxHeaderBytes = minMaxHeaderBytes - 1 - tst, _ = json.Marshal(j) - err = cfg.LoadJSON(tst) - if err == nil { - t.Error("expected error with MaxHeaderBytes") - } -} - -func TestApplyEnvVars(t *testing.T) { - username := "admin" - password := "thisaintmypassword" - user1 := "user1" - user1pass := "user1passwd" - os.Setenv("CLUSTER_TESTAPI_BASICAUTHCREDENTIALS", username+":"+password+","+user1+":"+user1pass) - cfg := newDefaultTestConfig(t) - err := cfg.ApplyEnvVars() - if err != nil { - t.Fatal(err) - } - - if _, ok := cfg.BasicAuthCredentials[username]; !ok { - t.Fatalf("username '%s' not set in BasicAuthCreds map: %v", username, cfg.BasicAuthCredentials) - } - - if _, ok := cfg.BasicAuthCredentials[user1]; !ok { - t.Fatalf("username '%s' not set in BasicAuthCreds map: %v", user1, cfg.BasicAuthCredentials) - } - - if gotpasswd := cfg.BasicAuthCredentials[username]; gotpasswd != password { - t.Errorf("password not what was set in env var, got: %s, want: %s", gotpasswd, password) - } - - if gotpasswd := cfg.BasicAuthCredentials[user1]; gotpasswd != user1pass { - t.Errorf("password not what was set in env var, got: %s, want: %s", gotpasswd, user1pass) - } -} - -func TestLibp2pConfig(t *testing.T) { - ctx := context.Background() - cfg := newDefaultTestConfig(t) - - priv, pub, err := crypto.GenerateKeyPair(crypto.RSA, 2048) - if err != nil { - t.Fatal(err) - } - pid, err := peer.IDFromPublicKey(pub) - if err != nil { - t.Fatal(err) - } - cfg.ID = pid - cfg.PrivateKey = priv - addr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0") - cfg.HTTPListenAddr = []ma.Multiaddr{addr} - cfg.Libp2pListenAddr = []ma.Multiaddr{addr} - - err = cfg.Validate() - if err != nil { - t.Error(err) - } - - cfgJSON, err := cfg.ToJSON() - if err != nil { - t.Fatal(err) - } - - err = cfg.LoadJSON(cfgJSON) - if err != nil { - t.Fatal(err) - } - - // Test creating a new API with a libp2p config - rest, err := NewAPI(ctx, cfg, - func(c *rpc.Client) []Route { return nil }) - if err != nil { - t.Fatal(err) - } - defer rest.Shutdown(ctx) - - badPid, _ := peer.Decode("QmTQ6oKHDwFjzr4ihirVCLJe8CxanxD3ZjGRYzubFuNDjE") - cfg.ID = badPid - err = cfg.Validate() - if err == nil { - t.Error("expected id-privkey mismatch") - } - cfg.ID = pid - - cfg.PrivateKey = nil - err = cfg.Validate() - if err == nil { - t.Error("expected missing private key error") - } -} - -func TestToJSON(t *testing.T) { - cfg := newTestConfig() - cfg.LoadJSON(cfgJSON) - newjson, err := cfg.ToJSON() - if err != nil { - t.Fatal(err) - } - cfg = newTestConfig() - err = cfg.LoadJSON(newjson) - if err != nil { - t.Fatal(err) - } -} - -func TestDefault(t *testing.T) { - cfg := newDefaultTestConfig(t) - if cfg.Validate() != nil { - t.Fatal("error validating") - } - - err := defaultFunc(cfg) - if err != nil { - t.Fatal(err) - } - cfg.IdleTimeout = -1 - if cfg.Validate() == nil { - t.Fatal("expected error validating") - 
} -} diff --git a/packages/networking/ipfs-cluster/api/common/test/helpers.go b/packages/networking/ipfs-cluster/api/common/test/helpers.go deleted file mode 100644 index 633e328..0000000 --- a/packages/networking/ipfs-cluster/api/common/test/helpers.go +++ /dev/null @@ -1,296 +0,0 @@ -// Package test provides utility methods to test APIs based on the common -// API. -package test - -import ( - "bytes" - "crypto/tls" - "crypto/x509" - "encoding/json" - "fmt" - "io" - "net/http" - "os" - "reflect" - "strings" - "testing" - - "github.com/libp2p/go-libp2p" - p2phttp "github.com/libp2p/go-libp2p-http" - "github.com/libp2p/go-libp2p/core/host" - peerstore "github.com/libp2p/go-libp2p/core/peerstore" -) - -var ( - // SSLCertFile is the location of the certificate file. - // Used in HTTPClient to set the right certificate when - // creating an HTTPs client. Might need adjusting depending - // on where the tests are running. - SSLCertFile = "test/server.crt" - - // ClientOrigin sets the Origin header for requests to this. - ClientOrigin = "myorigin" -) - -// ProcessResp puts a response into a given type or fails the test. -func ProcessResp(t *testing.T, httpResp *http.Response, err error, resp interface{}) { - if err != nil { - t.Fatal("error making request: ", err) - } - body, err := io.ReadAll(httpResp.Body) - defer httpResp.Body.Close() - if err != nil { - t.Fatal("error reading body: ", err) - } - if len(body) != 0 { - err = json.Unmarshal(body, resp) - if err != nil { - t.Error(string(body)) - t.Fatal("error parsing json: ", err) - } - } -} - -// ProcessStreamingResp decodes a streaming response into the given type -// and fails the test on error. -func ProcessStreamingResp(t *testing.T, httpResp *http.Response, err error, resp interface{}, trailerError bool) { - if err != nil { - t.Fatal("error making streaming request: ", err) - } - - if httpResp.StatusCode > 399 { - // normal response with error - ProcessResp(t, httpResp, err, resp) - return - } - - defer httpResp.Body.Close() - dec := json.NewDecoder(httpResp.Body) - - // If we passed a slice we fill it in, otherwise we just decode - // on top of the passed value. - tResp := reflect.TypeOf(resp) - if tResp.Elem().Kind() == reflect.Slice { - vSlice := reflect.MakeSlice(reflect.TypeOf(resp).Elem(), 0, 1000) - vType := tResp.Elem().Elem() - for { - v := reflect.New(vType) - err := dec.Decode(v.Interface()) - if err == io.EOF { - break - } - if err != nil { - t.Fatal(err) - } - vSlice = reflect.Append(vSlice, v.Elem()) - } - reflect.ValueOf(resp).Elem().Set(vSlice) - } else { - for { - err := dec.Decode(resp) - if err == io.EOF { - break - } - if err != nil { - t.Fatal(err) - } - } - } - trailerValues := httpResp.Trailer.Values("X-Stream-Error") - if trailerError && len(trailerValues) <= 1 && trailerValues[0] == "" { - t.Error("expected trailer error") - } - if !trailerError && len(trailerValues) >= 2 { - t.Error("got trailer error: ", trailerValues) - } -} - -// CheckHeaders checks that all the headers are set to what is expected. 
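// A hypothetical call, for a server configured with a custom header
// ("X-Custom: yes" is illustrative):
//
//	resp, err := http.Get(url)
//	if err != nil {
//		t.Fatal(err)
//	}
//	CheckHeaders(t, map[string][]string{"X-Custom": {"yes"}}, url, resp.Header)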
-func CheckHeaders(t *testing.T, expected map[string][]string, url string, headers http.Header) {
-	for k, v := range expected {
-		if strings.Join(v, ",") != strings.Join(headers[k], ",") {
-			t.Errorf("%s does not show configured headers: %s", url, k)
-		}
-	}
-	if headers.Get("Content-Type") != "application/json" {
-		t.Errorf("%s is not application/json", url)
-	}
-
-	if eh := headers.Get("Access-Control-Expose-Headers"); eh == "" {
-		t.Error("AC-Expose-Headers not set")
-	}
-}
-
-// API represents what an API is to us.
-type API interface {
-	HTTPAddresses() ([]string, error)
-	Host() host.Host
-	Headers() map[string][]string
-}
-
-// URLFunc is a function that, given an API, returns a URL string.
-type URLFunc func(a API) string
-
-// HTTPURL returns the HTTP endpoint of the API.
-func HTTPURL(a API) string {
-	u, _ := a.HTTPAddresses()
-	return fmt.Sprintf("http://%s", u[0])
-}
-
-// P2pURL returns the libp2p endpoint of the API.
-func P2pURL(a API) string {
-	return fmt.Sprintf("libp2p://%s", a.Host().ID().String())
-}
-
-// httpsURL returns the HTTPS endpoint of the API.
-func httpsURL(a API) string {
-	u, _ := a.HTTPAddresses()
-	return fmt.Sprintf("https://%s", u[0])
-}
-
-// IsHTTPS returns true if a URL string uses HTTPS.
-func IsHTTPS(url string) bool {
-	return strings.HasPrefix(url, "https")
-}
-
-// HTTPClient returns a client that supports both http/https and
-// libp2p-tunneled-http.
-func HTTPClient(t *testing.T, h host.Host, isHTTPS bool) *http.Client {
-	tr := &http.Transport{}
-	if isHTTPS {
-		certpool := x509.NewCertPool()
-		cert, err := os.ReadFile(SSLCertFile)
-		if err != nil {
-			t.Fatal("error reading cert for https client: ", err)
-		}
-		certpool.AppendCertsFromPEM(cert)
-		tr = &http.Transport{
-			TLSClientConfig: &tls.Config{
-				RootCAs: certpool,
-			}}
-	}
-	if h != nil {
-		tr.RegisterProtocol("libp2p", p2phttp.NewTransport(h))
-	}
-	return &http.Client{Transport: tr}
-}
-
-// MakeHost makes a libp2p host that knows how to talk to the given API.
-func MakeHost(t *testing.T, api API) host.Host {
-	h, err := libp2p.New()
-	if err != nil {
-		t.Fatal(err)
-	}
-	h.Peerstore().AddAddrs(
-		api.Host().ID(),
-		api.Host().Addrs(),
-		peerstore.PermanentAddrTTL,
-	)
-	return h
-}
-
-// MakeGet performs a GET request against the API.
-func MakeGet(t *testing.T, api API, url string, resp interface{}) {
-	h := MakeHost(t, api)
-	defer h.Close()
-	c := HTTPClient(t, h, IsHTTPS(url))
-	req, _ := http.NewRequest(http.MethodGet, url, nil)
-	req.Header.Set("Origin", ClientOrigin)
-	httpResp, err := c.Do(req)
-	ProcessResp(t, httpResp, err, resp)
-	CheckHeaders(t, api.Headers(), url, httpResp.Header)
-}
-
-// MakePost performs a POST request against the API with the given body.
-func MakePost(t *testing.T, api API, url string, body []byte, resp interface{}) {
-	MakePostWithContentType(t, api, url, body, "application/json", resp)
-}
-
-// MakePostWithContentType performs a POST with the given body and content-type.
-func MakePostWithContentType(t *testing.T, api API, url string, body []byte, contentType string, resp interface{}) {
-	h := MakeHost(t, api)
-	defer h.Close()
-	c := HTTPClient(t, h, IsHTTPS(url))
-	req, _ := http.NewRequest(http.MethodPost, url, bytes.NewReader(body))
-	req.Header.Set("Content-Type", contentType)
-	req.Header.Set("Origin", ClientOrigin)
-	httpResp, err := c.Do(req)
-	ProcessResp(t, httpResp, err, resp)
-	CheckHeaders(t, api.Headers(), url, httpResp.Header)
-}
-
-// MakeDelete performs a DELETE request against the given API.
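// With the helpers above, an endpoint test reduces to a couple of lines.
// A sketch ("rest" stands for an API under test; /test is the route
// registered in api_test.go):
//
//	var out map[string]string
//	MakeGet(t, rest, HTTPURL(rest)+"/test", &out)
//	MakeDelete(t, rest, HTTPURL(rest)+"/test", &out)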
-func MakeDelete(t *testing.T, api API, url string, resp interface{}) {
-	h := MakeHost(t, api)
-	defer h.Close()
-	c := HTTPClient(t, h, IsHTTPS(url))
-	req, _ := http.NewRequest(http.MethodDelete, url, bytes.NewReader([]byte{}))
-	req.Header.Set("Origin", ClientOrigin)
-	httpResp, err := c.Do(req)
-	ProcessResp(t, httpResp, err, resp)
-	CheckHeaders(t, api.Headers(), url, httpResp.Header)
-}
-
-// MakeOptions performs an OPTIONS request against the given API.
-func MakeOptions(t *testing.T, api API, url string, reqHeaders http.Header) http.Header {
-	h := MakeHost(t, api)
-	defer h.Close()
-	c := HTTPClient(t, h, IsHTTPS(url))
-	req, _ := http.NewRequest(http.MethodOptions, url, nil)
-	req.Header = reqHeaders
-	httpResp, err := c.Do(req)
-	ProcessResp(t, httpResp, err, nil)
-	return httpResp.Header
-}
-
-// MakeStreamingPost performs a POST request and uses ProcessStreamingResp.
-func MakeStreamingPost(t *testing.T, api API, url string, body io.Reader, contentType string, resp interface{}) {
-	h := MakeHost(t, api)
-	defer h.Close()
-	c := HTTPClient(t, h, IsHTTPS(url))
-	req, _ := http.NewRequest(http.MethodPost, url, body)
-	req.Header.Set("Content-Type", contentType)
-	req.Header.Set("Origin", ClientOrigin)
-	httpResp, err := c.Do(req)
-	ProcessStreamingResp(t, httpResp, err, resp, false)
-	CheckHeaders(t, api.Headers(), url, httpResp.Header)
-}
-
-// MakeStreamingGet performs a GET request and uses ProcessStreamingResp.
-func MakeStreamingGet(t *testing.T, api API, url string, resp interface{}, trailerError bool) {
-	h := MakeHost(t, api)
-	defer h.Close()
-	c := HTTPClient(t, h, IsHTTPS(url))
-	req, _ := http.NewRequest(http.MethodGet, url, nil)
-	req.Header.Set("Origin", ClientOrigin)
-	httpResp, err := c.Do(req)
-	ProcessStreamingResp(t, httpResp, err, resp, trailerError)
-	CheckHeaders(t, api.Headers(), url, httpResp.Header)
-}
-
-// Func is a function that runs a test with a given URL.
-type Func func(t *testing.T, url URLFunc)
-
-// BothEndpoints runs a test.Func against the HTTP and libp2p endpoints.
-func BothEndpoints(t *testing.T, test Func) {
-	t.Run("in-parallel", func(t *testing.T) {
-		t.Run("http", func(t *testing.T) {
-			t.Parallel()
-			test(t, HTTPURL)
-		})
-		t.Run("libp2p", func(t *testing.T) {
-			t.Parallel()
-			test(t, P2pURL)
-		})
-	})
-}
-
-// HTTPSEndPoint runs the given test.Func against an HTTPS endpoint.
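// Condensed from api_test.go, the intended pattern looks like this (sketch):
//
//	tf := func(t *testing.T, url URLFunc) {
//		r := make(map[string]string)
//		MakeGet(t, rest, url(rest)+"/test", &r)
//	}
//	BothEndpoints(t, tf) // same body, exercised over HTTP and libp2p
//
// HTTPSEndPoint, defined next, runs the same kind of test.Func over HTTPS only.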
-func HTTPSEndPoint(t *testing.T, test Func) { - t.Run("in-parallel", func(t *testing.T) { - t.Run("https", func(t *testing.T) { - t.Parallel() - test(t, httpsURL) - }) - }) -} diff --git a/packages/networking/ipfs-cluster/api/common/test/server.crt b/packages/networking/ipfs-cluster/api/common/test/server.crt deleted file mode 100644 index b4f82ce..0000000 --- a/packages/networking/ipfs-cluster/api/common/test/server.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID7TCCAtWgAwIBAgIJAMqpHdKRMzMLMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD -VQQGEwJVUzERMA8GA1UECAwIQ29sb3JhZG8xDzANBgNVBAcMBmdvbGRlbjEMMAoG -A1UECgwDQ1NNMREwDwYDVQQLDAhTZWN0b3IgNzEMMAoGA1UEAwwDQm9iMSAwHgYJ -KoZIhvcNAQkBFhFtaW5pc3RlckBtb3N3Lm9yZzAeFw0xNzA3MjExNjA5NTlaFw0y -NzA3MTkxNjA5NTlaMIGCMQswCQYDVQQGEwJVUzERMA8GA1UECAwIQ29sb3JhZG8x -DzANBgNVBAcMBmdvbGRlbjEMMAoGA1UECgwDQ1NNMREwDwYDVQQLDAhTZWN0b3Ig -NzEMMAoGA1UEAwwDQm9iMSAwHgYJKoZIhvcNAQkBFhFtaW5pc3RlckBtb3N3Lm9y -ZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALuoP8PehGItmKPi3+8S -IV1qz8C3FiK85X/INxYLjyuzvpmDROtlkOvdmPCJrveKDZF7ECQpwIGApFbnKCCW -3zdOPQmAVzm4N8bvnzFtM9mTm8qKb9SwRi6ZLZ/qXo98t8C7CV6FaNKUkIw0lUes -ZiXEcmknrlPy3svaDQVoSOH8L38d0g4geqiNrMmZDaGe8FAYdpCoeYDIm/u0Ag9y -G3+XAbETxWhkfTyH3XcQ/Izg0wG9zFY8y/fyYwC+C7+xF75x4gbIzHAY2iFS2ua7 -GTKa2GZhOXtMuzJ6cf+TZW460Z+O+PkA1aH01WrGL7iCW/6Cn9gPRKL+IP6iyDnh -9HMCAwEAAaNkMGIwDwYDVR0RBAgwBocEfwAAATAdBgNVHQ4EFgQU9mXv8mv/LlAa -jwr8X9hzk52cBagwHwYDVR0jBBgwFoAU9mXv8mv/LlAajwr8X9hzk52cBagwDwYD -VR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAIxqpKYzF6A9RlLso0lkF -nYfcyeVAvi03IBdiTNnpOe6ROa4gNwKH/JUJMCRDPzm/x78+srCmrcCCAJJTcqgi -b84vq3DegGPg2NXbn9qVUA1SdiXFelqMFwLitDn2KKizihEN4L5PEArHuDaNvLI+ -kMr+yZSALWTdtfydj211c7hTBvFqO8l5MYDXCmfoS9sqniorlNHIaBim/SNfDsi6 -8hAhvfRvk3e6dPjAPrIZYdQR5ROGewtD4F/anXgKY2BmBtWwd6gbGeMnnVi1SGRP -0UHc4O9aq9HrAOFL/72WVk/kyyPyJ/GtSaPYL1OFS12R/l0hNi+pER7xDtLOVHO2 -iw== ------END CERTIFICATE----- diff --git a/packages/networking/ipfs-cluster/api/common/test/server.key b/packages/networking/ipfs-cluster/api/common/test/server.key deleted file mode 100644 index 28da7be..0000000 --- a/packages/networking/ipfs-cluster/api/common/test/server.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpQIBAAKCAQEAu6g/w96EYi2Yo+Lf7xIhXWrPwLcWIrzlf8g3FguPK7O+mYNE -62WQ692Y8Imu94oNkXsQJCnAgYCkVucoIJbfN049CYBXObg3xu+fMW0z2ZObyopv -1LBGLpktn+pej3y3wLsJXoVo0pSQjDSVR6xmJcRyaSeuU/Ley9oNBWhI4fwvfx3S -DiB6qI2syZkNoZ7wUBh2kKh5gMib+7QCD3Ibf5cBsRPFaGR9PIfddxD8jODTAb3M -VjzL9/JjAL4Lv7EXvnHiBsjMcBjaIVLa5rsZMprYZmE5e0y7Mnpx/5NlbjrRn474 -+QDVofTVasYvuIJb/oKf2A9Eov4g/qLIOeH0cwIDAQABAoIBAAOYreArG45mIU7C -wlfqmQkZSvH+kEYKKLvSMnwRrKTBxR1cDq4UPDrI/G1ftiK4Wpo3KZAH3NCejoe7 -1mEJgy2kKjdMZl+M0ETXws1Hsn6w/YNcM9h3qGCsPtuZukY1ta/T5dIR7HhcsIh/ -WX0OKMcAhNDPGeAx/2MYwrcf0IXELx0+eP1fuBllkajH14J8+ZkVrBMDhqppn8Iq -f9poVNQliJtN7VkL6lJ60HwoVNGEhFaOYphn3CR/sCc6xl+/CzV4h6c5X/RIUfDs -kjgl9mlPFuWq9S19Z+XVfLSE+sYd6LDrh0IZEx9s0OfOjucH2bUAuKNDnCq0wW70 -FzH6KoECgYEA4ZOcAMgujk8goL8nleNjuEq7d8pThAsuAy5vq9oyol8oe+p1pXHR -SHP6wHyhXeTS5g1Ej+QV6f0v9gVFS2pFqTXymc9Gxald3trcnheodZXx63YbxHm2 -H7mYWyZvq05A0qRLmmqCoSRJHUOkH2wVqgj9KsVYP1anIhdykbycansCgYEA1Pdp -uAfWt/GLZ7B0q3JPlVvusf97wBIUcoaxLHGKopvfsaFp0EY3NRxLSTaZ0NPOxTHh -W6xaIlBmKllyt6q8W609A8hrXayV1yYnVE44b5UEMhVlfRFeEdf9Sp4YdQJ8r1J0 -QA89jHCjf8VocP5pSJz5tXvWHhmaotXBthFgWGkCgYEAiy7dwenCOBKAqk5n6Wb9 -X3fVBguzzjRrtpDPXHTsax1VyGeZIXUB0bemD2CW3G1U55dmJ3ZvQwnyrtT/tZGj -280qnFa1bz6aaegW2gD082CKfWNJrMgAZMDKTeuAWW2WN6Ih9+wiH7VY25Kh0LWL -BHg5ZUuQsLwRscpP6bY7uMMCgYEAwY23hK2DJZyfEXcbIjL7R4jNMPM82nzUHp5x 
-6i2rTUyTitJj5Anc5SU4+2pnc5b9RtWltva22Jbvs6+mBm1jUYLqgESn5/QSHv8r -IYER47+wl4BAw+GD+H2wVB/JpJbFEWbEBvCTBM/emSKmYIOo1njsrlfFa4fjtfjG -XJ4ATXkCgYEAzeSrCCVrfPMLCmOijIYD1F7TMFthosW2JJie3bcHZMu2QEM8EIif -YzkUvMaDAXJ4VniTHkDf3ubRoUi3DwLbvJIPnoOlx3jmzz6KYiEd+uXx40Yrebb0 -V9GB2S2q1RY7wsFoCqT/mq8usQkjr3ulYMJqeIWnCTWgajXWqAHH/Mw= ------END RSA PRIVATE KEY----- diff --git a/packages/networking/ipfs-cluster/api/ipfsproxy/config.go b/packages/networking/ipfs-cluster/api/ipfsproxy/config.go deleted file mode 100644 index 0f27ed2..0000000 --- a/packages/networking/ipfs-cluster/api/ipfsproxy/config.go +++ /dev/null @@ -1,344 +0,0 @@ -package ipfsproxy - -import ( - "encoding/json" - "errors" - "fmt" - "path/filepath" - "time" - - "github.com/kelseyhightower/envconfig" - ma "github.com/multiformats/go-multiaddr" - - "github.com/ipfs-cluster/ipfs-cluster/config" -) - -const ( - configKey = "ipfsproxy" - envConfigKey = "cluster_ipfsproxy" - minMaxHeaderBytes = 4096 -) - -// DefaultListenAddrs contains the default listeners for the proxy. -var DefaultListenAddrs = []string{ - "/ip4/127.0.0.1/tcp/9095", -} - -// Default values for Config. -const ( - DefaultNodeAddr = "/ip4/127.0.0.1/tcp/5001" - DefaultNodeHTTPS = false - DefaultReadTimeout = 0 - DefaultReadHeaderTimeout = 5 * time.Second - DefaultWriteTimeout = 0 - DefaultIdleTimeout = 60 * time.Second - DefaultExtractHeadersPath = "/api/v0/version" - DefaultExtractHeadersTTL = 5 * time.Minute - DefaultMaxHeaderBytes = minMaxHeaderBytes -) - -// Config allows to customize behavior of IPFSProxy. -// It implements the config.ComponentConfig interface. -type Config struct { - config.Saver - - // Listen parameters for the IPFS Proxy. - ListenAddr []ma.Multiaddr - - // Host/Port for the IPFS daemon. - NodeAddr ma.Multiaddr - - // Should we talk to the IPFS API over HTTPS? (experimental, untested) - NodeHTTPS bool - - // LogFile is path of the file that would save Proxy API logs. If this - // path is empty, logs would be sent to standard output. This path - // should either be absolute or relative to cluster base directory. Its - // default value is empty. - LogFile string - - // Maximum duration before timing out reading a full request - ReadTimeout time.Duration - - // Maximum duration before timing out reading the headers of a request - ReadHeaderTimeout time.Duration - - // Maximum duration before timing out write of the response - WriteTimeout time.Duration - - // Maximum cumulative size of HTTP request headers in bytes - // accepted by the server - MaxHeaderBytes int - - // Server-side amount of time a Keep-Alive connection will be - // kept idle before being reused - IdleTimeout time.Duration - - // A list of custom headers that should be extracted from - // IPFS daemon responses and re-used in responses from hijacked paths. - // This is only useful if the user has configured custom headers - // in the IPFS daemon. CORS-related headers are already - // taken care of by the proxy. - ExtractHeadersExtra []string - - // If the user wants to extract some extra custom headers configured - // on the IPFS daemon so that they are used in hijacked responses, - // this request path will be used. Defaults to /version. This will - // trigger a single request to extract those headers and remember them - // for future requests (until TTL expires). - ExtractHeadersPath string - - // Establishes how long we should remember extracted headers before we - // refresh them with a new request. 0 means always. 
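// (In JSON this is a duration string, e.g. "extract_headers_ttl": "5m", as
// in the config_test.go fixture below.)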
- ExtractHeadersTTL time.Duration - - // Tracing flag used to skip tracing specific paths when not enabled. - Tracing bool -} - -type jsonConfig struct { - ListenMultiaddress config.Strings `json:"listen_multiaddress"` - NodeMultiaddress string `json:"node_multiaddress"` - NodeHTTPS bool `json:"node_https,omitempty"` - - LogFile string `json:"log_file"` - - ReadTimeout string `json:"read_timeout"` - ReadHeaderTimeout string `json:"read_header_timeout"` - WriteTimeout string `json:"write_timeout"` - IdleTimeout string `json:"idle_timeout"` - MaxHeaderBytes int `json:"max_header_bytes"` - - ExtractHeadersExtra []string `json:"extract_headers_extra,omitempty"` - ExtractHeadersPath string `json:"extract_headers_path,omitempty"` - ExtractHeadersTTL string `json:"extract_headers_ttl,omitempty"` -} - -// getLogPath gets full path of the file where proxy logs should be -// saved. -func (cfg *Config) getLogPath() string { - if filepath.IsAbs(cfg.LogFile) { - return cfg.LogFile - } - - if cfg.BaseDir == "" { - return "" - } - - return filepath.Join(cfg.BaseDir, cfg.LogFile) -} - -// ConfigKey provides a human-friendly identifier for this type of Config. -func (cfg *Config) ConfigKey() string { - return configKey -} - -// Default sets the fields of this Config to sensible default values. -func (cfg *Config) Default() error { - proxy := make([]ma.Multiaddr, 0, len(DefaultListenAddrs)) - for _, def := range DefaultListenAddrs { - a, err := ma.NewMultiaddr(def) - if err != nil { - return err - } - proxy = append(proxy, a) - } - node, err := ma.NewMultiaddr(DefaultNodeAddr) - if err != nil { - return err - } - cfg.ListenAddr = proxy - cfg.NodeAddr = node - cfg.LogFile = "" - cfg.ReadTimeout = DefaultReadTimeout - cfg.ReadHeaderTimeout = DefaultReadHeaderTimeout - cfg.WriteTimeout = DefaultWriteTimeout - cfg.IdleTimeout = DefaultIdleTimeout - cfg.ExtractHeadersExtra = nil - cfg.ExtractHeadersPath = DefaultExtractHeadersPath - cfg.ExtractHeadersTTL = DefaultExtractHeadersTTL - cfg.MaxHeaderBytes = DefaultMaxHeaderBytes - - return nil -} - -// ApplyEnvVars fills in any Config fields found -// as environment variables. -func (cfg *Config) ApplyEnvVars() error { - jcfg, err := cfg.toJSONConfig() - if err != nil { - return err - } - - err = envconfig.Process(envConfigKey, jcfg) - if err != nil { - return err - } - - return cfg.applyJSONConfig(jcfg) -} - -// Validate checks that the fields of this Config have sensible values, -// at least in appearance. -func (cfg *Config) Validate() error { - var err error - if len(cfg.ListenAddr) == 0 { - err = errors.New("ipfsproxy.listen_multiaddress not set") - } - if cfg.NodeAddr == nil { - err = errors.New("ipfsproxy.node_multiaddress not set") - } - - if cfg.ReadTimeout < 0 { - err = errors.New("ipfsproxy.read_timeout is invalid") - } - - if cfg.ReadHeaderTimeout < 0 { - err = errors.New("ipfsproxy.read_header_timeout is invalid") - } - - if cfg.WriteTimeout < 0 { - err = errors.New("ipfsproxy.write_timeout is invalid") - } - - if cfg.IdleTimeout < 0 { - err = errors.New("ipfsproxy.idle_timeout invalid") - } - - if cfg.ExtractHeadersPath == "" { - err = errors.New("ipfsproxy.extract_headers_path should not be empty") - } - - if cfg.ExtractHeadersTTL < 0 { - err = errors.New("ipfsproxy.extract_headers_ttl is invalid") - } - - if cfg.MaxHeaderBytes < minMaxHeaderBytes { - err = fmt.Errorf("ipfsproxy.max_header_size must be greater or equal to %d", minMaxHeaderBytes) - } - - return err -} - -// LoadJSON parses a JSON representation of this Config as generated by ToJSON. 
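// A minimal document is enough, since missing fields fall back to Default();
// the values here mirror the fixture in config_test.go:
//
//	{
//	  "listen_multiaddress": "/ip4/127.0.0.1/tcp/9095",
//	  "node_multiaddress": "/ip4/127.0.0.1/tcp/5001",
//	  "extract_headers_ttl": "5m"
//	}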
-func (cfg *Config) LoadJSON(raw []byte) error { - jcfg := &jsonConfig{} - err := json.Unmarshal(raw, jcfg) - if err != nil { - logger.Error("Error unmarshaling ipfsproxy config") - return err - } - - err = cfg.Default() - if err != nil { - return fmt.Errorf("error setting config to default values: %s", err) - } - - return cfg.applyJSONConfig(jcfg) -} - -func (cfg *Config) applyJSONConfig(jcfg *jsonConfig) error { - if addresses := jcfg.ListenMultiaddress; len(addresses) > 0 { - cfg.ListenAddr = make([]ma.Multiaddr, 0, len(addresses)) - for _, a := range addresses { - proxyAddr, err := ma.NewMultiaddr(a) - if err != nil { - return fmt.Errorf("error parsing proxy listen_multiaddress: %s", err) - } - cfg.ListenAddr = append(cfg.ListenAddr, proxyAddr) - } - } - if jcfg.NodeMultiaddress != "" { - nodeAddr, err := ma.NewMultiaddr(jcfg.NodeMultiaddress) - if err != nil { - return fmt.Errorf("error parsing ipfs node_multiaddress: %s", err) - } - cfg.NodeAddr = nodeAddr - } - config.SetIfNotDefault(jcfg.NodeHTTPS, &cfg.NodeHTTPS) - - config.SetIfNotDefault(jcfg.LogFile, &cfg.LogFile) - - err := config.ParseDurations( - "ipfsproxy", - &config.DurationOpt{Duration: jcfg.ReadTimeout, Dst: &cfg.ReadTimeout, Name: "read_timeout"}, - &config.DurationOpt{Duration: jcfg.ReadHeaderTimeout, Dst: &cfg.ReadHeaderTimeout, Name: "read_header_timeout"}, - &config.DurationOpt{Duration: jcfg.WriteTimeout, Dst: &cfg.WriteTimeout, Name: "write_timeout"}, - &config.DurationOpt{Duration: jcfg.IdleTimeout, Dst: &cfg.IdleTimeout, Name: "idle_timeout"}, - &config.DurationOpt{Duration: jcfg.ExtractHeadersTTL, Dst: &cfg.ExtractHeadersTTL, Name: "extract_header_ttl"}, - ) - if err != nil { - return err - } - - if jcfg.MaxHeaderBytes == 0 { - cfg.MaxHeaderBytes = DefaultMaxHeaderBytes - } else { - cfg.MaxHeaderBytes = jcfg.MaxHeaderBytes - } - - if extra := jcfg.ExtractHeadersExtra; len(extra) > 0 { - cfg.ExtractHeadersExtra = extra - } - config.SetIfNotDefault(jcfg.ExtractHeadersPath, &cfg.ExtractHeadersPath) - - return cfg.Validate() -} - -// ToJSON generates a human-friendly JSON representation of this Config. -func (cfg *Config) ToJSON() (raw []byte, err error) { - jcfg, err := cfg.toJSONConfig() - if err != nil { - return - } - - raw, err = config.DefaultJSONMarshal(jcfg) - return -} - -func (cfg *Config) toJSONConfig() (jcfg *jsonConfig, err error) { - // Multiaddress String() may panic - defer func() { - if r := recover(); r != nil { - err = fmt.Errorf("%s", r) - } - }() - - jcfg = &jsonConfig{} - - addresses := make([]string, 0, len(cfg.ListenAddr)) - for _, a := range cfg.ListenAddr { - addresses = append(addresses, a.String()) - } - - // Set all configuration fields - jcfg.ListenMultiaddress = addresses - jcfg.NodeMultiaddress = cfg.NodeAddr.String() - jcfg.ReadTimeout = cfg.ReadTimeout.String() - jcfg.ReadHeaderTimeout = cfg.ReadHeaderTimeout.String() - jcfg.WriteTimeout = cfg.WriteTimeout.String() - jcfg.IdleTimeout = cfg.IdleTimeout.String() - jcfg.MaxHeaderBytes = cfg.MaxHeaderBytes - jcfg.NodeHTTPS = cfg.NodeHTTPS - jcfg.LogFile = cfg.LogFile - - jcfg.ExtractHeadersExtra = cfg.ExtractHeadersExtra - if cfg.ExtractHeadersPath != DefaultExtractHeadersPath { - jcfg.ExtractHeadersPath = cfg.ExtractHeadersPath - } - if ttl := cfg.ExtractHeadersTTL; ttl != DefaultExtractHeadersTTL { - jcfg.ExtractHeadersTTL = ttl.String() - } - - return -} - -// ToDisplayJSON returns JSON config as a string. 
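// Like ToJSON, this goes through toJSONConfig. A round-trip sketch,
// mirroring TestToJSON in config_test.go below:
//
//	cfg := &Config{}
//	cfg.Default()
//	raw, err := cfg.ToJSON()
//	if err == nil {
//		err = cfg.LoadJSON(raw) // a generated config must re-load cleanly
//	}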
-func (cfg *Config) ToDisplayJSON() ([]byte, error) { - jcfg, err := cfg.toJSONConfig() - if err != nil { - return nil, err - } - - return config.DisplayJSON(jcfg) -} diff --git a/packages/networking/ipfs-cluster/api/ipfsproxy/config_test.go b/packages/networking/ipfs-cluster/api/ipfsproxy/config_test.go deleted file mode 100644 index cde925f..0000000 --- a/packages/networking/ipfs-cluster/api/ipfsproxy/config_test.go +++ /dev/null @@ -1,158 +0,0 @@ -package ipfsproxy - -import ( - "encoding/json" - "os" - "testing" - "time" -) - -var cfgJSON = []byte(` -{ - "listen_multiaddress": "/ip4/127.0.0.1/tcp/9095", - "node_multiaddress": "/ip4/127.0.0.1/tcp/5001", - "log_file": "", - "read_timeout": "10m0s", - "read_header_timeout": "5s", - "write_timeout": "10m0s", - "idle_timeout": "1m0s", - "max_header_bytes": 16384, - "extract_headers_extra": [], - "extract_headers_path": "/api/v0/version", - "extract_headers_ttl": "5m" -} -`) - -func TestLoadEmptyJSON(t *testing.T) { - cfg := &Config{} - err := cfg.LoadJSON([]byte(`{}`)) - if err != nil { - t.Fatal(err) - } -} - -func TestLoadJSON(t *testing.T) { - cfg := &Config{} - err := cfg.LoadJSON(cfgJSON) - if err != nil { - t.Fatal(err) - } - - j := &jsonConfig{} - json.Unmarshal(cfgJSON, j) - j.ListenMultiaddress = []string{"abc"} - tst, _ := json.Marshal(j) - err = cfg.LoadJSON(tst) - if err == nil { - t.Error("expected error decoding listen_multiaddress") - } - - j = &jsonConfig{} - json.Unmarshal(cfgJSON, j) - j.NodeMultiaddress = "abc" - tst, _ = json.Marshal(j) - err = cfg.LoadJSON(tst) - if err == nil { - t.Error("expected error in node_multiaddress") - } - - j = &jsonConfig{} - json.Unmarshal(cfgJSON, j) - j.ReadTimeout = "-aber" - tst, _ = json.Marshal(j) - err = cfg.LoadJSON(tst) - if err == nil { - t.Error("expected error in read_timeout") - } - - j = &jsonConfig{} - json.Unmarshal(cfgJSON, j) - j.ExtractHeadersTTL = "-10" - tst, _ = json.Marshal(j) - err = cfg.LoadJSON(tst) - if err == nil { - t.Error("expected error in extract_headers_ttl") - } - j = &jsonConfig{} - json.Unmarshal(cfgJSON, j) - j.MaxHeaderBytes = minMaxHeaderBytes - 1 - tst, _ = json.Marshal(j) - err = cfg.LoadJSON(tst) - if err == nil { - t.Error("expected error in extract_headers_ttl") - } -} - -func TestToJSON(t *testing.T) { - cfg := &Config{} - cfg.LoadJSON(cfgJSON) - newjson, err := cfg.ToJSON() - if err != nil { - t.Fatal(err) - } - cfg = &Config{} - err = cfg.LoadJSON(newjson) - if err != nil { - t.Fatal(err) - } -} - -func TestDefault(t *testing.T) { - cfg := &Config{} - cfg.Default() - if cfg.Validate() != nil { - t.Fatal("error validating") - } - - cfg.NodeAddr = nil - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } - - cfg.Default() - cfg.ListenAddr = nil - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } - - cfg.Default() - cfg.ReadTimeout = -1 - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } - - cfg.Default() - cfg.ReadHeaderTimeout = -2 - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } - - cfg.Default() - cfg.IdleTimeout = -1 - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } - - cfg.Default() - cfg.WriteTimeout = -3 - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } - - cfg.Default() - cfg.ExtractHeadersPath = "" - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } -} - -func TestApplyEnvVars(t *testing.T) { - os.Setenv("CLUSTER_IPFSPROXY_IDLETIMEOUT", "22s") - cfg := &Config{} - cfg.Default() - 
cfg.ApplyEnvVars() - - if cfg.IdleTimeout != 22*time.Second { - t.Error("failed to override idle_timeout with env var") - } -} diff --git a/packages/networking/ipfs-cluster/api/ipfsproxy/headers.go b/packages/networking/ipfs-cluster/api/ipfsproxy/headers.go deleted file mode 100644 index 8b00432..0000000 --- a/packages/networking/ipfs-cluster/api/ipfsproxy/headers.go +++ /dev/null @@ -1,193 +0,0 @@ -package ipfsproxy - -import ( - "fmt" - "net/http" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/version" -) - -// This file has the collection of header-related functions - -// We will extract all these from a pre-flight OPTIONs request to IPFS to -// use in the respose of a hijacked request (usually POST). -var corsHeaders = []string{ - // These two must be returned as IPFS would return them - // for a request with the same origin. - "Access-Control-Allow-Origin", - "Vary", // seems more correctly set in OPTIONS than other requests. - - // This is returned by OPTIONS so we can take it, even if ipfs sets - // it for nothing by default. - "Access-Control-Allow-Credentials", - - // Unfortunately this one should not come with OPTIONS by default, - // but only with the real request itself. - // We use extractHeadersDefault for it, even though I think - // IPFS puts it in OPTIONS responses too. In any case, ipfs - // puts it on all requests as of 0.4.18, so it should be OK. - // "Access-Control-Expose-Headers", - - // Only for preflight responses, we do not need - // these since we will simply proxy OPTIONS requests and not - // handle them. - // - // They are here for reference about other CORS related headers. - // "Access-Control-Max-Age", - // "Access-Control-Allow-Methods", - // "Access-Control-Allow-Headers", -} - -// This can be used to hardcode header extraction from the proxy if we ever -// need to. It is appended to config.ExtractHeaderExtra. -// Maybe "X-Ipfs-Gateway" is a good candidate. -var extractHeadersDefault = []string{ - "Access-Control-Expose-Headers", -} - -const ipfsHeadersTimestampKey = "proxyHeadersTS" - -// ipfsHeaders returns all the headers we want to extract-once from IPFS: a -// concatenation of extractHeadersDefault and config.ExtractHeadersExtra. -func (proxy *Server) ipfsHeaders() []string { - return append(extractHeadersDefault, proxy.config.ExtractHeadersExtra...) -} - -// rememberIPFSHeaders extracts headers and stores them for re-use with -// setIPFSHeaders. -func (proxy *Server) rememberIPFSHeaders(hdrs http.Header) { - for _, h := range proxy.ipfsHeaders() { - proxy.ipfsHeadersStore.Store(h, hdrs[h]) - } - // use the sync map to store the ts - proxy.ipfsHeadersStore.Store(ipfsHeadersTimestampKey, time.Now()) -} - -// returns whether we can consider that whatever headers we are -// storing have a valid TTL still. -func (proxy *Server) headersWithinTTL() bool { - ttl := proxy.config.ExtractHeadersTTL - if ttl == 0 { - return true - } - - tsRaw, ok := proxy.ipfsHeadersStore.Load(ipfsHeadersTimestampKey) - if !ok { - return false - } - - ts, ok := tsRaw.(time.Time) - if !ok { - return false - } - - lifespan := time.Since(ts) - return lifespan < ttl -} - -// setIPFSHeaders adds the known IPFS Headers to the destination -// and returns true if we could set all the headers in the list and -// the TTL has not expired. -// False is used to determine if we need to make a request to try -// to extract these headers. 
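// An illustrative trace of the TTL bookkeeping involved (durations are
// hypothetical):
//
//   ttl := proxy.config.ExtractHeadersTTL        // e.g. 5m; 0 means never expire
//   tsRaw, _ := proxy.ipfsHeadersStore.Load(ipfsHeadersTimestampKey)
//   fresh := time.Since(tsRaw.(time.Time)) < ttl // headersWithinTTL(), in essence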
-func (proxy *Server) setIPFSHeaders(dest http.Header) bool { - r := true - - if !proxy.headersWithinTTL() { - r = false - // still set those headers we can set in the destination. - // We do our best there, since maybe the ipfs daemon - // is down and what we have now is all we can use. - } - - for _, h := range proxy.ipfsHeaders() { - v, ok := proxy.ipfsHeadersStore.Load(h) - if !ok { - r = false - continue - } - dest[h] = v.([]string) - } - return r -} - -// copyHeadersFromIPFSWithRequest makes a request to IPFS as used by the proxy -// and copies the given list of hdrs from the response to the dest http.Header -// object. -func (proxy *Server) copyHeadersFromIPFSWithRequest( - hdrs []string, - dest http.Header, req *http.Request, -) error { - res, err := proxy.ipfsRoundTripper.RoundTrip(req) - if err != nil { - logger.Error("error making request for header extraction to ipfs: ", err) - return err - } - - for _, h := range hdrs { - dest[h] = res.Header[h] - } - return nil -} - -// setHeaders sets some headers for all hijacked endpoints: -// - First, we fix CORs headers by making an OPTIONS request to IPFS with the -// same Origin. Our objective is to get headers for non-preflight requests -// only (the ones we hijack). -// - Second, we add any of the one-time-extracted headers that we deem necessary -// or the user needs from IPFS (in case of custom headers). -// This may trigger a single POST request to ExtractHeaderPath if they -// were not extracted before or TTL has expired. -// - Third, we set our own headers. -func (proxy *Server) setHeaders(dest http.Header, srcRequest *http.Request) { - proxy.setCORSHeaders(dest, srcRequest) - proxy.setAdditionalIpfsHeaders(dest, srcRequest) - proxy.setClusterProxyHeaders(dest, srcRequest) -} - -// see setHeaders -func (proxy *Server) setCORSHeaders(dest http.Header, srcRequest *http.Request) { - // Fix CORS headers by making an OPTIONS request - - // The request URL only has a valid Path(). See http.Request docs. - srcURL := fmt.Sprintf("%s%s", proxy.nodeAddr, srcRequest.URL.Path) - req, err := http.NewRequest(http.MethodOptions, srcURL, nil) - if err != nil { // this should really not happen. - logger.Error(err) - return - } - - req.Header["Origin"] = srcRequest.Header["Origin"] - req.Header.Set("Access-Control-Request-Method", srcRequest.Method) - // error is logged. We proceed if request failed. - proxy.copyHeadersFromIPFSWithRequest(corsHeaders, dest, req) -} - -// see setHeaders -func (proxy *Server) setAdditionalIpfsHeaders(dest http.Header, srcRequest *http.Request) { - // Avoid re-requesting these if we have them - if ok := proxy.setIPFSHeaders(dest); ok { - return - } - - srcURL := fmt.Sprintf("%s%s", proxy.nodeAddr, proxy.config.ExtractHeadersPath) - req, err := http.NewRequest(http.MethodPost, srcURL, nil) - if err != nil { - logger.Error("error extracting additional headers from ipfs", err) - return - } - // error is logged. We proceed if request failed. 
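// Whatever was copied (possibly nothing) is remembered below via
// rememberIPFSHeaders, which refreshes the timestamp either way, so a
// flapping ipfs daemon cannot trigger an extraction request per proxied call.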
- proxy.copyHeadersFromIPFSWithRequest( - proxy.ipfsHeaders(), - dest, - req, - ) - proxy.rememberIPFSHeaders(dest) -} - -// see setHeaders -func (proxy *Server) setClusterProxyHeaders(dest http.Header, srcRequest *http.Request) { - dest.Set("Content-Type", "application/json") - dest.Set("Server", fmt.Sprintf("ipfs-cluster/ipfsproxy/%s", version.Version)) -} diff --git a/packages/networking/ipfs-cluster/api/ipfsproxy/ipfsproxy.go b/packages/networking/ipfs-cluster/api/ipfsproxy/ipfsproxy.go deleted file mode 100644 index 3667358..0000000 --- a/packages/networking/ipfs-cluster/api/ipfsproxy/ipfsproxy.go +++ /dev/null @@ -1,819 +0,0 @@ -// Package ipfsproxy implements the Cluster API interface by providing an -// IPFS HTTP interface as exposed by the go-ipfs daemon. -// -// In this API, select endpoints like pin*, add*, and repo* endpoints are used -// to instead perform cluster operations. Requests for any other endpoints are -// passed to the underlying IPFS daemon. -package ipfsproxy - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net" - "net/http" - "net/http/httputil" - "net/url" - "os" - "strconv" - "strings" - "sync" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/adder/adderutils" - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/rpcutil" - "github.com/tv42/httpunix" - - handlers "github.com/gorilla/handlers" - mux "github.com/gorilla/mux" - cid "github.com/ipfs/go-cid" - cmd "github.com/ipfs/go-ipfs-cmds" - logging "github.com/ipfs/go-log/v2" - path "github.com/ipfs/go-path" - peer "github.com/libp2p/go-libp2p/core/peer" - rpc "github.com/libp2p/go-libp2p-gorpc" - "github.com/multiformats/go-multiaddr" - madns "github.com/multiformats/go-multiaddr-dns" - manet "github.com/multiformats/go-multiaddr/net" - - "go.opencensus.io/plugin/ochttp" - "go.opencensus.io/plugin/ochttp/propagation/tracecontext" - "go.opencensus.io/trace" -) - -// DNSTimeout is used when resolving DNS multiaddresses in this module -var DNSTimeout = 5 * time.Second - -var ( - logger = logging.Logger("ipfsproxy") - proxyLogger = logging.Logger("ipfsproxylog") -) - -// Server offers an IPFS API, hijacking some interesting requests -// and forwarding the rest to the ipfs daemon -// it proxies HTTP requests to the configured IPFS -// daemon. It is able to intercept these requests though, and -// perform extra operations on them. 
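// A minimal lifecycle sketch (error handling elided; rpcClient is assumed
// to be an already-configured *rpc.Client):
//
//   cfg := &Config{}
//   cfg.Default()
//   proxy, err := New(cfg)                      // resolves NodeAddr, opens listeners
//   proxy.SetClient(rpcClient)                  // unblocks run(); serving starts here
//   defer proxy.Shutdown(context.Background())  // closes listeners, waits on goroutines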
-type Server struct { - ctx context.Context - cancel func() - - config *Config - nodeScheme string - nodeAddr string - - rpcClient *rpc.Client - rpcReady chan struct{} - - listeners []net.Listener // proxy listener - server *http.Server // proxy server - ipfsRoundTripper http.RoundTripper // allows to talk to IPFS - - ipfsHeadersStore sync.Map - - shutdownLock sync.Mutex - shutdown bool - wg sync.WaitGroup -} - -type ipfsPinType struct { - Type string -} - -type ipfsPinLsResp struct { - Keys map[string]ipfsPinType -} - -type ipfsPinOpResp struct { - Pins []string -} - -// From https://github.com/ipfs/go-ipfs/blob/master/core/coreunix/add.go#L49 -type ipfsAddResp struct { - Name string - Hash string `json:",omitempty"` - Bytes int64 `json:",omitempty"` - Size string `json:",omitempty"` -} - -type logWriter struct { -} - -func (lw logWriter) Write(b []byte) (int, error) { - proxyLogger.Infof(string(b)) - return len(b), nil -} - -// New returns and ipfs Proxy component -func New(cfg *Config) (*Server, error) { - err := cfg.Validate() - if err != nil { - return nil, err - } - - nodeMAddr := cfg.NodeAddr - // dns multiaddresses need to be resolved first - if madns.Matches(nodeMAddr) { - ctx, cancel := context.WithTimeout(context.Background(), DNSTimeout) - defer cancel() - resolvedAddrs, err := madns.Resolve(ctx, cfg.NodeAddr) - if err != nil { - logger.Error(err) - return nil, err - } - nodeMAddr = resolvedAddrs[0] - } - - _, nodeAddr, err := manet.DialArgs(nodeMAddr) - if err != nil { - return nil, err - } - - nodeScheme := "http" - if cfg.NodeHTTPS { - nodeScheme = "https" - } - - isUnixSocket := false - var unixTransport *httpunix.Transport - if unixSocketPath, err := nodeMAddr.ValueForProtocol(multiaddr.P_UNIX); err == nil { - unixTransport = &httpunix.Transport{} - unixTransport.RegisterLocation("ipfsproxyunix", unixSocketPath) - nodeAddr = "ipfsproxyunix" - - nodeScheme = nodeScheme + "+unix" - isUnixSocket = true - } - - var listeners []net.Listener - for _, addr := range cfg.ListenAddr { - proxyNet, proxyAddr, err := manet.DialArgs(addr) - if err != nil { - return nil, err - } - - l, err := net.Listen(proxyNet, proxyAddr) - if err != nil { - return nil, err - } - listeners = append(listeners, l) - } - - nodeHTTPAddr := fmt.Sprintf("%s://%s", nodeScheme, nodeAddr) - proxyURL, err := url.Parse(nodeHTTPAddr) - if err != nil { - return nil, err - } - - var handler http.Handler - router := mux.NewRouter() - handler = router - - if cfg.Tracing { - handler = &ochttp.Handler{ - IsPublicEndpoint: true, - Propagation: &tracecontext.HTTPFormat{}, - Handler: router, - StartOptions: trace.StartOptions{SpanKind: trace.SpanKindServer}, - FormatSpanName: func(req *http.Request) string { - return "proxy:" + req.Host + ":" + req.URL.Path + ":" + req.Method - }, - } - } - - var writer io.Writer - if cfg.LogFile != "" { - f, err := os.OpenFile(cfg.getLogPath(), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - return nil, err - } - writer = f - } else { - writer = logWriter{} - } - - s := &http.Server{ - ReadTimeout: cfg.ReadTimeout, - WriteTimeout: cfg.WriteTimeout, - ReadHeaderTimeout: cfg.ReadHeaderTimeout, - IdleTimeout: cfg.IdleTimeout, - Handler: handlers.LoggingHandler(writer, handler), - MaxHeaderBytes: cfg.MaxHeaderBytes, - } - - // See: https://github.com/ipfs/go-ipfs/issues/5168 - // See: https://github.com/ipfs-cluster/ipfs-cluster/issues/548 - // on why this is re-enabled. 
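// Note that Shutdown() flips this back to false before closing the
// listeners, so keep-alive connections are not held open across a stop.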
- s.SetKeepAlivesEnabled(true) // A reminder that this can be changed - - reverseProxy := httputil.NewSingleHostReverseProxy(proxyURL) - if isUnixSocket { - t := &http.Transport{} - t.RegisterProtocol(httpunix.Scheme, unixTransport) - reverseProxy.Transport = t - } else { - reverseProxy.Transport = http.DefaultTransport - } - ctx, cancel := context.WithCancel(context.Background()) - proxy := &Server{ - ctx: ctx, - config: cfg, - cancel: cancel, - nodeAddr: nodeHTTPAddr, - nodeScheme: nodeScheme, - rpcReady: make(chan struct{}, 1), - listeners: listeners, - server: s, - ipfsRoundTripper: reverseProxy.Transport, - } - - // Ideally, we should only intercept POST requests, but - // people may be calling the API with GET or worse, PUT - // because IPFS has been allowing this traditionally. - // The main idea here is that we do not intercept - // OPTIONS requests (or HEAD). - hijackSubrouter := router. - Methods(http.MethodPost, http.MethodGet, http.MethodPut). - PathPrefix("/api/v0"). - Subrouter() - - // Add hijacked routes - hijackSubrouter. - Path("/pin/add/{arg}"). - HandlerFunc(slashHandler(proxy.pinHandler)). - Name("PinAddSlash") // supports people using the API wrong. - hijackSubrouter. - Path("/pin/add"). - HandlerFunc(proxy.pinHandler). - Name("PinAdd") - hijackSubrouter. - Path("/pin/rm/{arg}"). - HandlerFunc(slashHandler(proxy.unpinHandler)). - Name("PinRmSlash") // supports people using the API wrong. - hijackSubrouter. - Path("/pin/rm"). - HandlerFunc(proxy.unpinHandler). - Name("PinRm") - hijackSubrouter. - Path("/pin/ls/{arg}"). - HandlerFunc(slashHandler(proxy.pinLsHandler)). - Name("PinLsSlash") // supports people using the API wrong. - hijackSubrouter. - Path("/pin/ls"). - HandlerFunc(proxy.pinLsHandler). - Name("PinLs") - hijackSubrouter. - Path("/pin/update"). - HandlerFunc(proxy.pinUpdateHandler). - Name("PinUpdate") - hijackSubrouter. - Path("/add"). - HandlerFunc(proxy.addHandler). - Name("Add") - hijackSubrouter. - Path("/repo/stat"). - HandlerFunc(proxy.repoStatHandler). - Name("RepoStat") - hijackSubrouter. - Path("/repo/gc"). - HandlerFunc(proxy.repoGCHandler). - Name("RepoGC") - - // Everything else goes to the IPFS daemon. - router.PathPrefix("/").Handler(reverseProxy) - - go proxy.run() - return proxy, nil -} - -// SetClient makes the component ready to perform RPC -// requests. -func (proxy *Server) SetClient(c *rpc.Client) { - proxy.rpcClient = c - proxy.rpcReady <- struct{}{} -} - -// Shutdown stops any listeners and stops the component from taking -// any requests. -func (proxy *Server) Shutdown(ctx context.Context) error { - proxy.shutdownLock.Lock() - defer proxy.shutdownLock.Unlock() - - if proxy.shutdown { - logger.Debug("already shutdown") - return nil - } - - logger.Info("stopping IPFS Proxy") - - proxy.cancel() - close(proxy.rpcReady) - proxy.server.SetKeepAlivesEnabled(false) - for _, l := range proxy.listeners { - l.Close() - } - - proxy.wg.Wait() - proxy.shutdown = true - return nil -} - -// launches proxy when we receive the rpcReady signal. -func (proxy *Server) run() { - <-proxy.rpcReady - - // Do not shutdown while launching threads - // -- prevents race conditions with proxy.wg. 
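// Holding shutdownLock for the whole launch means Shutdown() can never
// observe the WaitGroup between wg.Add and the listener goroutines starting.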
- proxy.shutdownLock.Lock() - defer proxy.shutdownLock.Unlock() - - // This launches the proxy - proxy.wg.Add(len(proxy.listeners)) - for _, l := range proxy.listeners { - go func(l net.Listener) { - defer proxy.wg.Done() - - maddr, err := manet.FromNetAddr(l.Addr()) - if err != nil { - logger.Error(err) - } - - logger.Infof( - "IPFS Proxy: %s -> %s", - maddr, - proxy.config.NodeAddr, - ) - err = proxy.server.Serve(l) // hangs here - if err != nil && !strings.Contains(err.Error(), "closed network connection") { - logger.Error(err) - } - }(l) - } -} - -// ipfsErrorResponder writes an http error response just like IPFS would. -func ipfsErrorResponder(w http.ResponseWriter, errMsg string, code int) { - res := cmd.Errorf(cmd.ErrNormal, errMsg) - - resBytes, _ := json.Marshal(res) - if code > 0 { - w.WriteHeader(code) - } else { - w.WriteHeader(http.StatusInternalServerError) - } - w.Write(resBytes) -} - -func (proxy *Server) pinOpHandler(op string, w http.ResponseWriter, r *http.Request) { - proxy.setHeaders(w.Header(), r) - - q := r.URL.Query() - arg := q.Get("arg") - p, err := path.ParsePath(arg) - if err != nil { - ipfsErrorResponder(w, "Error parsing IPFS Path: "+err.Error(), -1) - return - } - - pinPath := api.PinPath{Path: p.String()} - pinPath.Mode = api.PinModeFromString(q.Get("type")) - - var pin api.Pin - err = proxy.rpcClient.Call( - "", - "Cluster", - op, - pinPath, - &pin, - ) - if err != nil { - ipfsErrorResponder(w, err.Error(), -1) - return - } - - res := ipfsPinOpResp{ - Pins: []string{pin.Cid.String()}, - } - resBytes, _ := json.Marshal(res) - w.WriteHeader(http.StatusOK) - w.Write(resBytes) -} - -func (proxy *Server) pinHandler(w http.ResponseWriter, r *http.Request) { - proxy.pinOpHandler("PinPath", w, r) -} - -func (proxy *Server) unpinHandler(w http.ResponseWriter, r *http.Request) { - proxy.pinOpHandler("UnpinPath", w, r) -} - -func (proxy *Server) pinLsHandler(w http.ResponseWriter, r *http.Request) { - proxy.setHeaders(w.Header(), r) - - arg := r.URL.Query().Get("arg") - - stream := false - streamArg := r.URL.Query().Get("stream") - streamArg2 := r.URL.Query().Get("s") - if streamArg == "true" || streamArg2 == "true" { - stream = true - } - - if arg != "" { - c, err := api.DecodeCid(arg) - if err != nil { - ipfsErrorResponder(w, err.Error(), -1) - return - } - var pin api.Pin - err = proxy.rpcClient.CallContext( - r.Context(), - "", - "Cluster", - "PinGet", - c, - &pin, - ) - if err != nil { - ipfsErrorResponder(w, fmt.Sprintf("Error: path '%s' is not pinned", arg), -1) - return - } - if stream { - ipinfo := api.IPFSPinInfo{ - Cid: api.Cid(pin.Cid), - Type: pin.Mode.ToIPFSPinStatus(), - } - resBytes, _ := json.Marshal(ipinfo) - w.WriteHeader(http.StatusOK) - w.Write(resBytes) - } else { - pinLs := ipfsPinLsResp{} - pinLs.Keys = make(map[string]ipfsPinType) - pinLs.Keys[pin.Cid.String()] = ipfsPinType{ - Type: "recursive", - } - resBytes, _ := json.Marshal(pinLs) - w.WriteHeader(http.StatusOK) - w.Write(resBytes) - } - } else { - in := make(chan struct{}) - close(in) - - pins := make(chan api.Pin) - var err error - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - err = proxy.rpcClient.Stream( - r.Context(), - "", - "Cluster", - "Pins", - in, - pins, - ) - }() - - if stream { - w.Header().Set("Trailer", "X-Stream-Error") - w.WriteHeader(http.StatusOK) - for pin := range pins { - ipinfo := api.IPFSPinInfo{ - Cid: api.Cid(pin.Cid), - Type: pin.Mode.ToIPFSPinStatus(), - } - resBytes, _ := json.Marshal(ipinfo) - w.Write(resBytes) - } - wg.Wait() - if err 
!= nil { - w.Header().Add("X-Stream-Error", err.Error()) - return - } - } else { - pinLs := ipfsPinLsResp{} - pinLs.Keys = make(map[string]ipfsPinType) - - for pin := range pins { - pinLs.Keys[pin.Cid.String()] = ipfsPinType{ - Type: "recursive", - } - } - - wg.Wait() - if err != nil { - ipfsErrorResponder(w, err.Error(), -1) - return - } - resBytes, _ := json.Marshal(pinLs) - w.WriteHeader(http.StatusOK) - w.Write(resBytes) - } - } -} - -func (proxy *Server) pinUpdateHandler(w http.ResponseWriter, r *http.Request) { - ctx, span := trace.StartSpan(r.Context(), "ipfsproxy/pinUpdateHandler") - defer span.End() - - proxy.setHeaders(w.Header(), r) - - // Check that we have enough arguments and mimic ipfs response when not - q := r.URL.Query() - args := q["arg"] - if len(args) == 0 { - ipfsErrorResponder(w, "argument \"from-path\" is required", http.StatusBadRequest) - return - } - if len(args) == 1 { - ipfsErrorResponder(w, "argument \"to-path\" is required", http.StatusBadRequest) - return - } - - unpin := !(q.Get("unpin") == "false") - from := args[0] - to := args[1] - - // Parse paths (we will need to resolve them) - pFrom, err := path.ParsePath(from) - if err != nil { - ipfsErrorResponder(w, "error parsing \"from-path\" argument: "+err.Error(), -1) - return - } - - pTo, err := path.ParsePath(to) - if err != nil { - ipfsErrorResponder(w, "error parsing \"to-path\" argument: "+err.Error(), -1) - return - } - - // Resolve the FROM argument - var fromCid api.Cid - err = proxy.rpcClient.CallContext( - ctx, - "", - "IPFSConnector", - "Resolve", - pFrom.String(), - &fromCid, - ) - if err != nil { - ipfsErrorResponder(w, err.Error(), -1) - return - } - - // Do a PinPath setting PinUpdate - pinPath := api.PinPath{Path: pTo.String()} - pinPath.PinUpdate = fromCid - - var pin api.Pin - err = proxy.rpcClient.Call( - "", - "Cluster", - "PinPath", - pinPath, - &pin, - ) - if err != nil { - ipfsErrorResponder(w, err.Error(), -1) - return - } - - // If unpin != "false", unpin the FROM argument - // (it was already resolved). - var pinObj api.Pin - if unpin { - err = proxy.rpcClient.CallContext( - ctx, - "", - "Cluster", - "Unpin", - api.PinCid(fromCid), - &pinObj, - ) - if err != nil { - ipfsErrorResponder(w, err.Error(), -1) - return - } - } - - res := ipfsPinOpResp{ - Pins: []string{fromCid.String(), pin.Cid.String()}, - } - resBytes, _ := json.Marshal(res) - w.WriteHeader(http.StatusOK) - w.Write(resBytes) -} - -func (proxy *Server) addHandler(w http.ResponseWriter, r *http.Request) { - proxy.setHeaders(w.Header(), r) - - reader, err := r.MultipartReader() - if err != nil { - ipfsErrorResponder(w, "error reading request: "+err.Error(), -1) - return - } - - q := r.URL.Query() - if q.Get("only-hash") == "true" { - ipfsErrorResponder(w, "only-hash is not supported when adding to cluster", -1) - } - - // Luckily, most IPFS add query params are compatible with cluster's - // /add params. We can parse most of them directly from the query. - params, err := api.AddParamsFromQuery(q) - if err != nil { - ipfsErrorResponder(w, "error parsing options:"+err.Error(), -1) - return - } - trickle := q.Get("trickle") - if trickle == "true" { - params.Layout = "trickle" - } - nopin := q.Get("pin") == "false" - if nopin { - params.NoPin = true - } - - logger.Warnf("Proxy/add does not support all IPFS params. 
Current options: %+v", params) - - outputTransform := func(in api.AddedOutput) interface{} { - cidStr := "" - if in.Cid.Defined() { - cidStr = in.Cid.String() - } - r := &ipfsAddResp{ - Name: in.Name, - Hash: cidStr, - Bytes: int64(in.Bytes), - } - if in.Size != 0 { - r.Size = strconv.FormatUint(in.Size, 10) - } - return r - } - - _, err = adderutils.AddMultipartHTTPHandler( - proxy.ctx, - proxy.rpcClient, - params, - reader, - w, - outputTransform, - ) - if err != nil { - logger.Error(err) - } -} - -func (proxy *Server) repoStatHandler(w http.ResponseWriter, r *http.Request) { - proxy.setHeaders(w.Header(), r) - - peers := make([]peer.ID, 0) - err := proxy.rpcClient.Call( - "", - "Consensus", - "Peers", - struct{}{}, - &peers, - ) - if err != nil { - ipfsErrorResponder(w, err.Error(), -1) - return - } - - ctxs, cancels := rpcutil.CtxsWithCancel(proxy.ctx, len(peers)) - defer rpcutil.MultiCancel(cancels) - - repoStats := make([]*api.IPFSRepoStat, len(peers)) - repoStatsIfaces := make([]interface{}, len(repoStats)) - for i := range repoStats { - repoStats[i] = &api.IPFSRepoStat{} - repoStatsIfaces[i] = repoStats[i] - } - - errs := proxy.rpcClient.MultiCall( - ctxs, - peers, - "IPFSConnector", - "RepoStat", - struct{}{}, - repoStatsIfaces, - ) - - totalStats := api.IPFSRepoStat{} - - for i, err := range errs { - if err != nil { - if rpc.IsAuthorizationError(err) { - logger.Debug(err) - continue - } - logger.Errorf("%s repo/stat errored: %s", peers[i], err) - continue - } - totalStats.RepoSize += repoStats[i].RepoSize - totalStats.StorageMax += repoStats[i].StorageMax - } - - resBytes, _ := json.Marshal(totalStats) - w.WriteHeader(http.StatusOK) - w.Write(resBytes) -} - -type ipfsRepoGCResp struct { - Key cid.Cid `json:",omitempty"` - Error string `json:",omitempty"` -} - -func (proxy *Server) repoGCHandler(w http.ResponseWriter, r *http.Request) { - queryValues := r.URL.Query() - streamErrors := queryValues.Get("stream-errors") == "true" - // ignoring `quiet` since it only affects text output - - proxy.setHeaders(w.Header(), r) - - w.Header().Set("Trailer", "X-Stream-Error") - var repoGC api.GlobalRepoGC - err := proxy.rpcClient.CallContext( - r.Context(), - "", - "Cluster", - "RepoGC", - struct{}{}, - &repoGC, - ) - if err != nil { - ipfsErrorResponder(w, err.Error(), -1) - return - } - - w.WriteHeader(http.StatusOK) - enc := json.NewEncoder(w) - var ipfsRepoGC ipfsRepoGCResp - mError := multiError{} - for _, gc := range repoGC.PeerMap { - for _, key := range gc.Keys { - if streamErrors { - ipfsRepoGC = ipfsRepoGCResp{Key: key.Key.Cid, Error: key.Error} - } else { - ipfsRepoGC = ipfsRepoGCResp{Key: key.Key.Cid} - if key.Error != "" { - mError.add(key.Error) - } - } - - // Cluster tags start with small letter, but IPFS tags with capital letter. - if err := enc.Encode(ipfsRepoGC); err != nil { - logger.Error(err) - } - } - } - - mErrStr := mError.Error() - if !streamErrors && mErrStr != "" { - w.Header().Set("X-Stream-Error", mErrStr) - } -} - -// slashHandler returns a handler which converts a /a/b/c/ request -// into an /a/b/c/?arg= one. And uses the given origHandler -// for it. Our handlers expect that arguments are passed in the ?arg query -// value. -func slashHandler(origHandler http.HandlerFunc) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - warnMsg := "You are using an undocumented form of the IPFS API. 
" - warnMsg += "Consider passing your command arguments" - warnMsg += "with the '?arg=' query parameter" - logger.Error(warnMsg) - - vars := mux.Vars(r) - arg := vars["arg"] - - // IF we needed to modify the request path, we could do - // something along these lines. This is not the case - // at the moment. We just need to set the query argument. - // - // route := mux.CurrentRoute(r) - // path, err := route.GetPathTemplate() - // if err != nil { - // // I'd like to panic, but I don' want to kill a full - // // peer just because of a buggy use. - // logger.Critical("BUG: wrong use of slashHandler") - // origHandler(w, r) // proceed as nothing - // return - // } - // fixedPath := strings.TrimSuffix(path, "/{arg}") - // r.URL.Path = url.PathEscape(fixedPath) - // r.URL.RawPath = fixedPath - - q := r.URL.Query() - q.Set("arg", arg) - r.URL.RawQuery = q.Encode() - origHandler(w, r) - } -} diff --git a/packages/networking/ipfs-cluster/api/ipfsproxy/ipfsproxy_test.go b/packages/networking/ipfs-cluster/api/ipfsproxy/ipfsproxy_test.go deleted file mode 100644 index df7e81f..0000000 --- a/packages/networking/ipfs-cluster/api/ipfsproxy/ipfsproxy_test.go +++ /dev/null @@ -1,898 +0,0 @@ -package ipfsproxy - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/test" - - cmd "github.com/ipfs/go-ipfs-cmds" - logging "github.com/ipfs/go-log/v2" - ma "github.com/multiformats/go-multiaddr" -) - -func init() { - _ = logging.Logger -} - -func testIPFSProxyWithConfig(t *testing.T, cfg *Config) (*Server, *test.IpfsMock) { - mock := test.NewIpfsMock(t) - nodeMAddr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", - mock.Addr, mock.Port)) - proxyMAddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0") - - cfg.NodeAddr = nodeMAddr - cfg.ListenAddr = []ma.Multiaddr{proxyMAddr} - cfg.ExtractHeadersExtra = []string{ - test.IpfsCustomHeaderName, - test.IpfsTimeHeaderName, - } - - proxy, err := New(cfg) - if err != nil { - t.Fatal("creating an IPFSProxy should work: ", err) - } - - proxy.server.SetKeepAlivesEnabled(false) - proxy.SetClient(test.NewMockRPCClient(t)) - return proxy, mock -} - -func testIPFSProxy(t *testing.T) (*Server, *test.IpfsMock) { - cfg := &Config{} - cfg.Default() - return testIPFSProxyWithConfig(t, cfg) -} - -func TestIPFSProxyVersion(t *testing.T) { - ctx := context.Background() - proxy, mock := testIPFSProxy(t) - defer mock.Close() - defer proxy.Shutdown(ctx) - - res, err := http.Post(fmt.Sprintf("%s/version", proxyURL(proxy)), "", nil) - if err != nil { - t.Fatal("should forward requests to ipfs host: ", err) - } - defer res.Body.Close() - resBytes, _ := io.ReadAll(res.Body) - if res.StatusCode != http.StatusOK { - t.Error("the request should have succeeded") - t.Fatal(string(resBytes)) - } - - var resp struct { - Version string - } - err = json.Unmarshal(resBytes, &resp) - if err != nil { - t.Fatal(err) - } - - if resp.Version != "m.o.c.k" { - t.Error("wrong version") - } -} - -func TestIPFSProxyPin(t *testing.T) { - ctx := context.Background() - proxy, mock := testIPFSProxy(t) - defer mock.Close() - defer proxy.Shutdown(ctx) - - type args struct { - urlPath string - testCid string - statusCode int - } - tests := []struct { - name string - args args - want api.Cid - wantErr bool - }{ - { - "pin good cid query arg", - args{ - "/pin/add?arg=", - test.Cid1.String(), - http.StatusOK, - }, - test.Cid1, - false, - }, - { - "pin good path query 
arg", - args{ - "/pin/add?arg=", - test.PathIPFS2, - http.StatusOK, - }, - test.CidResolved, - false, - }, - { - "pin good cid url arg", - args{ - "/pin/add/", - test.Cid1.String(), - http.StatusOK, - }, - test.Cid1, - false, - }, - { - "pin bad cid query arg", - args{ - "/pin/add?arg=", - test.ErrorCid.String(), - http.StatusInternalServerError, - }, - api.CidUndef, - true, - }, - { - "pin bad cid url arg", - args{ - "/pin/add/", - test.ErrorCid.String(), - http.StatusInternalServerError, - }, - api.CidUndef, - true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - u := fmt.Sprintf( - "%s%s%s", - proxyURL(proxy), - tt.args.urlPath, - tt.args.testCid, - ) - res, err := http.Post(u, "", nil) - if err != nil { - t.Fatal("should have succeeded: ", err) - } - defer res.Body.Close() - - if res.StatusCode != tt.args.statusCode { - t.Errorf("statusCode: got = %v, want %v", res.StatusCode, tt.args.statusCode) - } - - resBytes, _ := io.ReadAll(res.Body) - - switch tt.wantErr { - case false: - var resp ipfsPinOpResp - err = json.Unmarshal(resBytes, &resp) - if err != nil { - t.Fatal(err) - } - - if len(resp.Pins) != 1 { - t.Fatalf("wrong number of pins: got = %d, want %d", len(resp.Pins), 1) - } - - if resp.Pins[0] != tt.want.String() { - t.Errorf("wrong pin cid: got = %s, want = %s", resp.Pins[0], tt.want) - } - case true: - var respErr cmd.Error - err = json.Unmarshal(resBytes, &respErr) - if err != nil { - t.Fatal(err) - } - - if respErr.Message != test.ErrBadCid.Error() { - t.Errorf("wrong response: got = %s, want = %s", respErr.Message, test.ErrBadCid.Error()) - } - } - }) - } -} - -func TestIPFSProxyUnpin(t *testing.T) { - ctx := context.Background() - proxy, mock := testIPFSProxy(t) - defer mock.Close() - defer proxy.Shutdown(ctx) - - type args struct { - urlPath string - testCid string - statusCode int - } - tests := []struct { - name string - args args - want api.Cid - wantErr bool - }{ - { - "unpin good cid query arg", - args{ - "/pin/rm?arg=", - test.Cid1.String(), - http.StatusOK, - }, - test.Cid1, - false, - }, - { - "unpin good path query arg", - args{ - "/pin/rm?arg=", - test.PathIPFS2, - http.StatusOK, - }, - test.CidResolved, - false, - }, - { - "unpin good cid url arg", - args{ - "/pin/rm/", - test.Cid1.String(), - http.StatusOK, - }, - test.Cid1, - false, - }, - { - "unpin bad cid query arg", - args{ - "/pin/rm?arg=", - test.ErrorCid.String(), - http.StatusInternalServerError, - }, - api.CidUndef, - true, - }, - { - "unpin bad cid url arg", - args{ - "/pin/rm/", - test.ErrorCid.String(), - http.StatusInternalServerError, - }, - api.CidUndef, - true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - u := fmt.Sprintf("%s%s%s", proxyURL(proxy), tt.args.urlPath, tt.args.testCid) - res, err := http.Post(u, "", nil) - if err != nil { - t.Fatal("should have succeeded: ", err) - } - defer res.Body.Close() - - if res.StatusCode != tt.args.statusCode { - t.Errorf("statusCode: got = %v, want %v", res.StatusCode, tt.args.statusCode) - } - - resBytes, _ := io.ReadAll(res.Body) - - switch tt.wantErr { - case false: - var resp ipfsPinOpResp - err = json.Unmarshal(resBytes, &resp) - if err != nil { - t.Fatal(err) - } - - if len(resp.Pins) != 1 { - t.Fatalf("wrong number of pins: got = %d, want %d", len(resp.Pins), 1) - } - - if resp.Pins[0] != tt.want.String() { - t.Errorf("wrong pin cid: got = %s, want = %s", resp.Pins[0], tt.want) - } - case true: - var respErr cmd.Error - err = json.Unmarshal(resBytes, &respErr) - if err != nil { - 
t.Fatal(err) - } - - if respErr.Message != test.ErrBadCid.Error() { - t.Errorf("wrong response: got = %s, want = %s", respErr.Message, test.ErrBadCid.Error()) - } - } - }) - } -} - -func TestIPFSProxyPinUpdate(t *testing.T) { - ctx := context.Background() - proxy, mock := testIPFSProxy(t) - defer mock.Close() - defer proxy.Shutdown(ctx) - - t.Run("pin/update bad args", func(t *testing.T) { - res, err := http.Post(fmt.Sprintf("%s/pin/update", proxyURL(proxy)), "", nil) - if err != nil { - t.Fatal("request should complete: ", err) - } - - defer res.Body.Close() - if res.StatusCode != http.StatusBadRequest { - t.Error("request should not be successful with a no arguments") - } - - res2, err := http.Post(fmt.Sprintf("%s/pin/update?arg=%s", proxyURL(proxy), test.PathIPFS1), "", nil) - if err != nil { - t.Fatal("request should complete: ", err) - } - - defer res2.Body.Close() - if res2.StatusCode != http.StatusBadRequest { - t.Error("request should not be successful with a single argument") - } - }) - - t.Run("pin/update", func(t *testing.T) { - res, err := http.Post(fmt.Sprintf("%s/pin/update?arg=%s&arg=%s", proxyURL(proxy), test.PathIPFS1, test.PathIPFS2), "", nil) - if err != nil { - t.Fatal("request should complete: ", err) - } - - defer res.Body.Close() - - var resp ipfsPinOpResp - resBytes, _ := io.ReadAll(res.Body) - err = json.Unmarshal(resBytes, &resp) - if err != nil { - t.Fatal(err) - } - if len(resp.Pins) != 2 || - resp.Pins[0] != test.Cid2.String() || - resp.Pins[1] != test.CidResolved.String() { // always resolve to the same - t.Errorf("bad response: %s", string(resBytes)) - } - }) - - t.Run("pin/update check unpin happens", func(t *testing.T) { - // passing an errorCid to unpin should return an error - // when unpinning. - - res, err := http.Post(fmt.Sprintf("%s/pin/update?arg=%s&arg=%s", proxyURL(proxy), test.ErrorCid, test.PathIPFS2), "", nil) - if err != nil { - t.Fatal("request should complete: ", err) - } - - defer res.Body.Close() - if res.StatusCode != http.StatusInternalServerError { - t.Fatal("request should error") - } - - resBytes, _ := io.ReadAll(res.Body) - var respErr cmd.Error - err = json.Unmarshal(resBytes, &respErr) - if err != nil { - t.Fatal(err) - } - - if respErr.Message != test.ErrBadCid.Error() { - t.Error("expected a bad cid error:", respErr.Message) - } - }) - - t.Run("pin/update check pin happens", func(t *testing.T) { - // passing an errorCid to pin, with unpin=false should return - // an error when pinning - - res, err := http.Post(fmt.Sprintf("%s/pin/update?arg=%s&arg=%s&unpin=false", proxyURL(proxy), test.Cid1, test.ErrorCid), "", nil) - if err != nil { - t.Fatal("request should complete: ", err) - } - - defer res.Body.Close() - if res.StatusCode != http.StatusInternalServerError { - t.Fatal("request should error") - } - - resBytes, _ := io.ReadAll(res.Body) - var respErr cmd.Error - err = json.Unmarshal(resBytes, &respErr) - if err != nil { - t.Fatal(err) - } - - if respErr.Message != test.ErrBadCid.Error() { - t.Error("expected a bad cid error:", respErr.Message) - } - }) -} - -func TestIPFSProxyPinLs(t *testing.T) { - ctx := context.Background() - proxy, mock := testIPFSProxy(t) - defer mock.Close() - defer proxy.Shutdown(ctx) - - t.Run("pin/ls query arg", func(t *testing.T) { - res, err := http.Post(fmt.Sprintf("%s/pin/ls?arg=%s", proxyURL(proxy), test.Cid1), "", nil) - if err != nil { - t.Fatal("should have succeeded: ", err) - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - t.Error("the request should have succeeded") - } - 
- resBytes, _ := io.ReadAll(res.Body) - var resp ipfsPinLsResp - err = json.Unmarshal(resBytes, &resp) - if err != nil { - t.Fatal(err) - } - - _, ok := resp.Keys[test.Cid1.String()] - if len(resp.Keys) != 1 || !ok { - t.Error("wrong response") - } - }) - - t.Run("pin/ls url arg", func(t *testing.T) { - res, err := http.Post(fmt.Sprintf("%s/pin/ls/%s", proxyURL(proxy), test.Cid1), "", nil) - if err != nil { - t.Fatal("should have succeeded: ", err) - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - t.Error("the request should have succeeded") - } - - resBytes, _ := io.ReadAll(res.Body) - var resp ipfsPinLsResp - err = json.Unmarshal(resBytes, &resp) - if err != nil { - t.Fatal(err) - } - - _, ok := resp.Keys[test.Cid1.String()] - if len(resp.Keys) != 1 || !ok { - t.Error("wrong response") - } - }) - - t.Run("pin/ls all no arg", func(t *testing.T) { - res2, err := http.Post(fmt.Sprintf("%s/pin/ls", proxyURL(proxy)), "", nil) - if err != nil { - t.Fatal("should have succeeded: ", err) - } - defer res2.Body.Close() - if res2.StatusCode != http.StatusOK { - t.Error("the request should have succeeded") - } - - resBytes, _ := io.ReadAll(res2.Body) - var resp ipfsPinLsResp - err = json.Unmarshal(resBytes, &resp) - if err != nil { - t.Fatal(err) - } - - if len(resp.Keys) != 3 { - t.Error("wrong response") - } - }) - - t.Run("pin/ls bad cid query arg", func(t *testing.T) { - res3, err := http.Post(fmt.Sprintf("%s/pin/ls?arg=%s", proxyURL(proxy), test.ErrorCid), "", nil) - if err != nil { - t.Fatal("should have succeeded: ", err) - } - defer res3.Body.Close() - if res3.StatusCode != http.StatusInternalServerError { - t.Error("the request should have failed") - } - }) -} - -func TestProxyRepoStat(t *testing.T) { - ctx := context.Background() - proxy, mock := testIPFSProxy(t) - defer mock.Close() - defer proxy.Shutdown(ctx) - res, err := http.Post(fmt.Sprintf("%s/repo/stat", proxyURL(proxy)), "", nil) - if err != nil { - t.Fatal(err) - } - - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - t.Error("request should have succeeded") - } - - resBytes, _ := io.ReadAll(res.Body) - var stat api.IPFSRepoStat - err = json.Unmarshal(resBytes, &stat) - if err != nil { - t.Fatal(err) - } - - // The mockRPC returns 3 peers. Since no host is set, - // all calls are local. 
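// The totals checked below are therefore the sum of the mock's per-peer
// RepoSize/StorageMax figures across those 3 peers (repoStatHandler
// aggregates the MultiCall results).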
- if stat.RepoSize != 6000 || stat.StorageMax != 300000 { - t.Errorf("expected different stats: %+v", stat) - } - -} - -func TestProxyRepoGC(t *testing.T) { - ctx := context.Background() - proxy, mock := testIPFSProxy(t) - defer mock.Close() - defer proxy.Shutdown(ctx) - - type testcase struct { - name string - streamErrors bool - } - - testcases := []testcase{ - { - name: "With streaming errors", - streamErrors: true, - }, - { - name: "Without streaming errors", - streamErrors: false, - }, - } - - for _, tc := range testcases { - t.Run(tc.name, func(t *testing.T) { - res1, err := http.Post(fmt.Sprintf("%s/repo/gc?stream-errors=%t", proxyURL(proxy), tc.streamErrors), "", nil) - if err != nil { - t.Fatal(err) - } - defer res1.Body.Close() - if res1.StatusCode != http.StatusOK { - t.Error("request should have succeeded") - } - - var repoGC []ipfsRepoGCResp - dec := json.NewDecoder(res1.Body) - for { - resp := ipfsRepoGCResp{} - - if err := dec.Decode(&resp); err != nil { - if err == io.EOF { - break - } - t.Error(err) - } - - repoGC = append(repoGC, resp) - } - - if !repoGC[0].Key.Equals(test.Cid1.Cid) { - t.Errorf("expected a different cid, expected: %s, found: %s", test.Cid1, repoGC[0].Key) - } - - xStreamError, ok := res1.Trailer["X-Stream-Error"] - if !ok { - t.Error("trailer header X-Stream-Error not set") - } - if tc.streamErrors { - if repoGC[4].Error != test.ErrLinkNotFound.Error() { - t.Error("expected a different error") - } - if len(xStreamError) != 0 { - t.Error("expected X-Stream-Error header to be empty") - } - } else { - if repoGC[4].Error != "" { - t.Error("did not expect to stream error") - } - - if len(xStreamError) == 0 || xStreamError[0] != (test.ErrLinkNotFound.Error()+";") { - t.Error("expected X-Stream-Error header with link not found error") - } - } - }) - } -} - -func TestProxyAdd(t *testing.T) { - ctx := context.Background() - proxy, mock := testIPFSProxy(t) - defer mock.Close() - defer proxy.Shutdown(ctx) - - type testcase struct { - query string - expectedCid string - } - - testcases := []testcase{ - { - query: "", - expectedCid: test.ShardingDirBalancedRootCID, - }, - { - query: "progress=true", - expectedCid: test.ShardingDirBalancedRootCID, - }, - { - query: "wrap-with-directory=true", - expectedCid: test.ShardingDirBalancedRootCIDWrapped, - }, - { - query: "trickle=true", - expectedCid: test.ShardingDirTrickleRootCID, - }, - } - - reqs := make([]*http.Request, len(testcases)) - - sth := test.NewShardingTestHelper() - defer sth.Clean(t) - - for i, tc := range testcases { - mr, closer := sth.GetTreeMultiReader(t) - defer closer.Close() - cType := "multipart/form-data; boundary=" + mr.Boundary() - url := fmt.Sprintf("%s/add?"+tc.query, proxyURL(proxy)) - req, _ := http.NewRequest("POST", url, mr) - req.Header.Set("Content-Type", cType) - reqs[i] = req - } - - for i, tc := range testcases { - t.Run(tc.query, func(t *testing.T) { - res, err := http.DefaultClient.Do(reqs[i]) - if err != nil { - t.Fatal("should have succeeded: ", err) - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - t.Fatalf("Bad response status: got = %d, want = %d", res.StatusCode, http.StatusOK) - } - - var resp ipfsAddResp - dec := json.NewDecoder(res.Body) - for dec.More() { - err := dec.Decode(&resp) - if err != nil { - t.Fatal(err) - } - } - - if resp.Hash != tc.expectedCid { - t.Logf("%+v", resp.Hash) - t.Error("expected CID does not match") - } - }) - } -} - -func TestProxyAddError(t *testing.T) { - ctx := context.Background() - proxy, mock := testIPFSProxy(t) - defer 
mock.Close() - defer proxy.Shutdown(ctx) - res, err := http.Post(fmt.Sprintf("%s/add?recursive=true", proxyURL(proxy)), "", nil) - if err != nil { - t.Fatal(err) - } - res.Body.Close() - if res.StatusCode != http.StatusInternalServerError { - t.Errorf("wrong status code: got = %d, want = %d", res.StatusCode, http.StatusInternalServerError) - } -} - -func TestProxyError(t *testing.T) { - ctx := context.Background() - proxy, mock := testIPFSProxy(t) - defer mock.Close() - defer proxy.Shutdown(ctx) - - res, err := http.Post(fmt.Sprintf("%s/bad/command", proxyURL(proxy)), "", nil) - if err != nil { - t.Fatal("should have succeeded: ", err) - } - defer res.Body.Close() - if res.StatusCode != 404 { - t.Error("should have respected the status code") - } -} - -func proxyURL(c *Server) string { - addr := c.listeners[0].Addr() - return fmt.Sprintf("http://%s/api/v0", addr.String()) -} - -func TestIPFSProxy(t *testing.T) { - ctx := context.Background() - proxy, mock := testIPFSProxy(t) - defer mock.Close() - if err := proxy.Shutdown(ctx); err != nil { - t.Error("expected a clean shutdown") - } - if err := proxy.Shutdown(ctx); err != nil { - t.Error("expected a second clean shutdown") - } -} - -func TestHeaderExtraction(t *testing.T) { - ctx := context.Background() - proxy, mock := testIPFSProxy(t) - proxy.config.ExtractHeadersTTL = time.Second - defer mock.Close() - defer proxy.Shutdown(ctx) - - req, err := http.NewRequest("POST", fmt.Sprintf("%s/pin/ls", proxyURL(proxy)), nil) - if err != nil { - t.Fatal(err) - } - req.Header.Set("Origin", test.IpfsACAOrigin) - - res, err := http.DefaultClient.Do(req) - if err != nil { - t.Fatal("should forward requests to ipfs host: ", err) - } - res.Body.Close() - - for k, v := range res.Header { - t.Logf("%s: %s", k, v) - } - - if h := res.Header.Get("Access-Control-Allow-Origin"); h != test.IpfsACAOrigin { - t.Error("We did not find out the AC-Allow-Origin header: ", h) - } - - for _, h := range corsHeaders { - if v := res.Header.Get(h); v == "" { - t.Error("We did not set CORS header: ", h) - } - } - - if res.Header.Get(test.IpfsCustomHeaderName) != test.IpfsCustomHeaderValue { - t.Error("the proxy should have extracted custom headers from ipfs") - } - - if !strings.HasPrefix(res.Header.Get("Server"), "ipfs-cluster") { - t.Error("wrong value for Server header") - } - - // Test ExtractHeaderTTL - t1 := res.Header.Get(test.IpfsTimeHeaderName) - res, err = http.DefaultClient.Do(req) - if err != nil { - t.Fatal("should forward requests to ipfs host: ", err) - } - t2 := res.Header.Get(test.IpfsTimeHeaderName) - if t1 != t2 { - t.Error("should have cached the headers during TTL") - } - time.Sleep(1200 * time.Millisecond) - res, err = http.DefaultClient.Do(req) - if err != nil { - t.Fatal("should forward requests to ipfs host: ", err) - } - res.Body.Close() - t3 := res.Header.Get(test.IpfsTimeHeaderName) - if t3 == t2 { - t.Error("should have refreshed the headers after TTL") - } -} - -func TestAttackHeaderSize(t *testing.T) { - const testHeaderSize = minMaxHeaderBytes * 4 - ctx := context.Background() - cfg := &Config{} - cfg.Default() - cfg.MaxHeaderBytes = testHeaderSize - proxy, mock := testIPFSProxyWithConfig(t, cfg) - defer mock.Close() - defer proxy.Shutdown(ctx) - - type testcase struct { - headerSize int - expectedStatus int - } - testcases := []testcase{ - {testHeaderSize / 2, http.StatusNotFound}, - {testHeaderSize * 2, http.StatusRequestHeaderFieldsTooLarge}, - } - - req, err := http.NewRequest("POST", fmt.Sprintf("%s/foo", proxyURL(proxy)), nil) - if err 
!= nil { - t.Fatal(err) - } - for _, tc := range testcases { - for size := 0; size < tc.headerSize; size += 8 { - req.Header.Add("Foo", "bar") - } - res, err := http.DefaultClient.Do(req) - if err != nil { - t.Fatal("should forward requests to ipfs host: ", err) - } - res.Body.Close() - if res.StatusCode != tc.expectedStatus { - t.Errorf("proxy returned unexpected status %d, expected status code was %d", - res.StatusCode, tc.expectedStatus) - } - } -} - -func TestProxyLogging(t *testing.T) { - ctx := context.Background() - cfg := &Config{} - cfg.Default() - - logFile, err := filepath.Abs("proxy.log") - if err != nil { - t.Fatal(err) - } - cfg.LogFile = logFile - - proxy, mock := testIPFSProxyWithConfig(t, cfg) - defer os.Remove(cfg.LogFile) - - info, err := os.Stat(cfg.LogFile) - if err != nil { - t.Fatal(err) - } - if info.Size() > 0 { - t.Errorf("expected empty log file") - } - - res, err := http.Post(fmt.Sprintf("%s/version", proxyURL(proxy)), "", nil) - if err != nil { - t.Fatal("should forward requests to ipfs host: ", err) - } - res.Body.Close() - - info, err = os.Stat(cfg.LogFile) - if err != nil { - t.Fatal(err) - } - size1 := info.Size() - if size1 == 0 { - t.Error("did not expect an empty log file") - } - - // Restart proxy and make sure that logs are being appended - mock.Close() - proxy.Shutdown(ctx) - - proxy, mock = testIPFSProxyWithConfig(t, cfg) - defer mock.Close() - defer proxy.Shutdown(ctx) - - res1, err := http.Post(fmt.Sprintf("%s/version", proxyURL(proxy)), "", nil) - if err != nil { - t.Fatal("should forward requests to ipfs host: ", err) - } - res1.Body.Close() - - info, err = os.Stat(cfg.LogFile) - if err != nil { - t.Fatal(err) - } - size2 := info.Size() - if size2 == 0 { - t.Error("did not expect an empty log file") - } - - if !(size2 > size1) { - t.Error("logs were not appended") - } - -} diff --git a/packages/networking/ipfs-cluster/api/ipfsproxy/util.go b/packages/networking/ipfs-cluster/api/ipfsproxy/util.go deleted file mode 100644 index fe29a2c..0000000 --- a/packages/networking/ipfs-cluster/api/ipfsproxy/util.go +++ /dev/null @@ -1,19 +0,0 @@ -package ipfsproxy - -import ( - "strings" -) - -// MultiError contains the results of multiple errors. -type multiError struct { - err strings.Builder -} - -func (e *multiError) add(err string) { - e.err.WriteString(err) - e.err.WriteString("; ") -} - -func (e *multiError) Error() string { - return e.err.String() -} diff --git a/packages/networking/ipfs-cluster/api/pb/generate.go b/packages/networking/ipfs-cluster/api/pb/generate.go deleted file mode 100644 index 4b1b2b9..0000000 --- a/packages/networking/ipfs-cluster/api/pb/generate.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package pb provides protobuf definitions for serialized types in Cluster. -//go:generate protoc -I=. --go_out=. types.proto -package pb diff --git a/packages/networking/ipfs-cluster/api/pb/types.pb.go b/packages/networking/ipfs-cluster/api/pb/types.pb.go deleted file mode 100644 index b35a3d9..0000000 --- a/packages/networking/ipfs-cluster/api/pb/types.pb.go +++ /dev/null @@ -1,495 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.27.1 -// protoc v3.19.2 -// source: types.proto - -package pb - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. 
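// Both lines below are compile-time assertions: each argument is an untyped
// constant, so the build fails if the generated code and the linked
// protoimpl runtime fall outside each other's supported version window.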
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type Pin_PinType int32 - -const ( - Pin_BadType Pin_PinType = 0 // 1 << iota - Pin_DataType Pin_PinType = 1 // 2 << iota - Pin_MetaType Pin_PinType = 2 - Pin_ClusterDAGType Pin_PinType = 3 - Pin_ShardType Pin_PinType = 4 -) - -// Enum value maps for Pin_PinType. -var ( - Pin_PinType_name = map[int32]string{ - 0: "BadType", - 1: "DataType", - 2: "MetaType", - 3: "ClusterDAGType", - 4: "ShardType", - } - Pin_PinType_value = map[string]int32{ - "BadType": 0, - "DataType": 1, - "MetaType": 2, - "ClusterDAGType": 3, - "ShardType": 4, - } -) - -func (x Pin_PinType) Enum() *Pin_PinType { - p := new(Pin_PinType) - *p = x - return p -} - -func (x Pin_PinType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Pin_PinType) Descriptor() protoreflect.EnumDescriptor { - return file_types_proto_enumTypes[0].Descriptor() -} - -func (Pin_PinType) Type() protoreflect.EnumType { - return &file_types_proto_enumTypes[0] -} - -func (x Pin_PinType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use Pin_PinType.Descriptor instead. -func (Pin_PinType) EnumDescriptor() ([]byte, []int) { - return file_types_proto_rawDescGZIP(), []int{0, 0} -} - -type Pin struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Cid []byte `protobuf:"bytes,1,opt,name=Cid,proto3" json:"Cid,omitempty"` - Type Pin_PinType `protobuf:"varint,2,opt,name=Type,proto3,enum=api.pb.Pin_PinType" json:"Type,omitempty"` - Allocations [][]byte `protobuf:"bytes,3,rep,name=Allocations,proto3" json:"Allocations,omitempty"` - MaxDepth int32 `protobuf:"zigzag32,4,opt,name=MaxDepth,proto3" json:"MaxDepth,omitempty"` - Reference []byte `protobuf:"bytes,5,opt,name=Reference,proto3" json:"Reference,omitempty"` - Options *PinOptions `protobuf:"bytes,6,opt,name=Options,proto3" json:"Options,omitempty"` - Timestamp uint64 `protobuf:"varint,7,opt,name=Timestamp,proto3" json:"Timestamp,omitempty"` -} - -func (x *Pin) Reset() { - *x = Pin{} - if protoimpl.UnsafeEnabled { - mi := &file_types_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Pin) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Pin) ProtoMessage() {} - -func (x *Pin) ProtoReflect() protoreflect.Message { - mi := &file_types_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Pin.ProtoReflect.Descriptor instead. 
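// As with all protoc-gen-go output, the Get* accessors further below are
// nil-receiver safe, e.g. (illustrative):
//
//   var p *Pin              // nil message
//   _ = p.GetAllocations()  // returns nil rather than panicking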
-func (*Pin) Descriptor() ([]byte, []int) { - return file_types_proto_rawDescGZIP(), []int{0} -} - -func (x *Pin) GetCid() []byte { - if x != nil { - return x.Cid - } - return nil -} - -func (x *Pin) GetType() Pin_PinType { - if x != nil { - return x.Type - } - return Pin_BadType -} - -func (x *Pin) GetAllocations() [][]byte { - if x != nil { - return x.Allocations - } - return nil -} - -func (x *Pin) GetMaxDepth() int32 { - if x != nil { - return x.MaxDepth - } - return 0 -} - -func (x *Pin) GetReference() []byte { - if x != nil { - return x.Reference - } - return nil -} - -func (x *Pin) GetOptions() *PinOptions { - if x != nil { - return x.Options - } - return nil -} - -func (x *Pin) GetTimestamp() uint64 { - if x != nil { - return x.Timestamp - } - return 0 -} - -type PinOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ReplicationFactorMin int32 `protobuf:"zigzag32,1,opt,name=ReplicationFactorMin,proto3" json:"ReplicationFactorMin,omitempty"` - ReplicationFactorMax int32 `protobuf:"zigzag32,2,opt,name=ReplicationFactorMax,proto3" json:"ReplicationFactorMax,omitempty"` - Name string `protobuf:"bytes,3,opt,name=Name,proto3" json:"Name,omitempty"` - ShardSize uint64 `protobuf:"varint,4,opt,name=ShardSize,proto3" json:"ShardSize,omitempty"` - // Deprecated: Do not use. - Metadata map[string]string `protobuf:"bytes,6,rep,name=Metadata,proto3" json:"Metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - PinUpdate []byte `protobuf:"bytes,7,opt,name=PinUpdate,proto3" json:"PinUpdate,omitempty"` - ExpireAt uint64 `protobuf:"varint,8,opt,name=ExpireAt,proto3" json:"ExpireAt,omitempty"` - Origins [][]byte `protobuf:"bytes,9,rep,name=Origins,proto3" json:"Origins,omitempty"` - SortedMetadata []*Metadata `protobuf:"bytes,10,rep,name=SortedMetadata,proto3" json:"SortedMetadata,omitempty"` -} - -func (x *PinOptions) Reset() { - *x = PinOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_types_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PinOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PinOptions) ProtoMessage() {} - -func (x *PinOptions) ProtoReflect() protoreflect.Message { - mi := &file_types_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PinOptions.ProtoReflect.Descriptor instead. -func (*PinOptions) Descriptor() ([]byte, []int) { - return file_types_proto_rawDescGZIP(), []int{1} -} - -func (x *PinOptions) GetReplicationFactorMin() int32 { - if x != nil { - return x.ReplicationFactorMin - } - return 0 -} - -func (x *PinOptions) GetReplicationFactorMax() int32 { - if x != nil { - return x.ReplicationFactorMax - } - return 0 -} - -func (x *PinOptions) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *PinOptions) GetShardSize() uint64 { - if x != nil { - return x.ShardSize - } - return 0 -} - -// Deprecated: Do not use. 
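// The map-based Metadata field (6) appears to be kept only for wire
// compatibility; SortedMetadata (field 10) carries the same key/value pairs
// with a stable order, which Go map iteration cannot guarantee.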
-func (x *PinOptions) GetMetadata() map[string]string { - if x != nil { - return x.Metadata - } - return nil -} - -func (x *PinOptions) GetPinUpdate() []byte { - if x != nil { - return x.PinUpdate - } - return nil -} - -func (x *PinOptions) GetExpireAt() uint64 { - if x != nil { - return x.ExpireAt - } - return 0 -} - -func (x *PinOptions) GetOrigins() [][]byte { - if x != nil { - return x.Origins - } - return nil -} - -func (x *PinOptions) GetSortedMetadata() []*Metadata { - if x != nil { - return x.SortedMetadata - } - return nil -} - -type Metadata struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Key string `protobuf:"bytes,1,opt,name=Key,proto3" json:"Key,omitempty"` - Value string `protobuf:"bytes,2,opt,name=Value,proto3" json:"Value,omitempty"` -} - -func (x *Metadata) Reset() { - *x = Metadata{} - if protoimpl.UnsafeEnabled { - mi := &file_types_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Metadata) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Metadata) ProtoMessage() {} - -func (x *Metadata) ProtoReflect() protoreflect.Message { - mi := &file_types_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Metadata.ProtoReflect.Descriptor instead. -func (*Metadata) Descriptor() ([]byte, []int) { - return file_types_proto_rawDescGZIP(), []int{2} -} - -func (x *Metadata) GetKey() string { - if x != nil { - return x.Key - } - return "" -} - -func (x *Metadata) GetValue() string { - if x != nil { - return x.Value - } - return "" -} - -var File_types_proto protoreflect.FileDescriptor - -var file_types_proto_rawDesc = []byte{ - 0x0a, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x61, - 0x70, 0x69, 0x2e, 0x70, 0x62, 0x22, 0xbf, 0x02, 0x0a, 0x03, 0x50, 0x69, 0x6e, 0x12, 0x10, 0x0a, - 0x03, 0x43, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x43, 0x69, 0x64, 0x12, - 0x27, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x69, 0x6e, 0x2e, 0x50, 0x69, 0x6e, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x41, 0x6c, 0x6c, 0x6f, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0b, 0x41, - 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x4d, 0x61, - 0x78, 0x44, 0x65, 0x70, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x11, 0x52, 0x08, 0x4d, 0x61, - 0x78, 0x44, 0x65, 0x70, 0x74, 0x68, 0x12, 0x1c, 0x0a, 0x09, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, - 0x6e, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x52, 0x65, 0x66, 0x65, 0x72, - 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2c, 0x0a, 0x07, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x50, - 0x69, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x22, 0x55, 0x0a, 0x07, 0x50, 0x69, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 
0x0b, 0x0a, 0x07, 0x42, - 0x61, 0x64, 0x54, 0x79, 0x70, 0x65, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, - 0x54, 0x79, 0x70, 0x65, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x54, 0x79, - 0x70, 0x65, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x44, - 0x41, 0x47, 0x54, 0x79, 0x70, 0x65, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x54, 0x79, 0x70, 0x65, 0x10, 0x04, 0x22, 0xb9, 0x03, 0x0a, 0x0a, 0x50, 0x69, 0x6e, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x69, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x11, 0x52, 0x14, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x69, 0x6e, 0x12, 0x32, 0x0a, 0x14, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x4d, - 0x61, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x11, 0x52, 0x14, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x61, 0x78, 0x12, 0x12, - 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x69, 0x7a, 0x65, - 0x12, 0x40, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x69, 0x6e, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x69, 0x6e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x50, 0x69, 0x6e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x12, 0x1a, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x74, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x08, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x74, 0x12, 0x18, 0x0a, 0x07, - 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x4f, - 0x72, 0x69, 0x67, 0x69, 0x6e, 0x73, 0x12, 0x38, 0x0a, 0x0e, 0x53, 0x6f, 0x72, 0x74, 0x65, 0x64, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x52, 0x0e, 0x53, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x4a, 0x04, 0x08, - 0x05, 0x10, 0x06, 0x22, 0x32, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, - 0x10, 0x0a, 0x03, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x4b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x06, 0x5a, 0x04, 0x2e, 0x3b, 0x70, 0x62, 0x62, - 
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_types_proto_rawDescOnce sync.Once - file_types_proto_rawDescData = file_types_proto_rawDesc -) - -func file_types_proto_rawDescGZIP() []byte { - file_types_proto_rawDescOnce.Do(func() { - file_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_types_proto_rawDescData) - }) - return file_types_proto_rawDescData -} - -var file_types_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_types_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_types_proto_goTypes = []interface{}{ - (Pin_PinType)(0), // 0: api.pb.Pin.PinType - (*Pin)(nil), // 1: api.pb.Pin - (*PinOptions)(nil), // 2: api.pb.PinOptions - (*Metadata)(nil), // 3: api.pb.Metadata - nil, // 4: api.pb.PinOptions.MetadataEntry -} -var file_types_proto_depIdxs = []int32{ - 0, // 0: api.pb.Pin.Type:type_name -> api.pb.Pin.PinType - 2, // 1: api.pb.Pin.Options:type_name -> api.pb.PinOptions - 4, // 2: api.pb.PinOptions.Metadata:type_name -> api.pb.PinOptions.MetadataEntry - 3, // 3: api.pb.PinOptions.SortedMetadata:type_name -> api.pb.Metadata - 4, // [4:4] is the sub-list for method output_type - 4, // [4:4] is the sub-list for method input_type - 4, // [4:4] is the sub-list for extension type_name - 4, // [4:4] is the sub-list for extension extendee - 0, // [0:4] is the sub-list for field type_name -} - -func init() { file_types_proto_init() } -func file_types_proto_init() { - if File_types_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Pin); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PinOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Metadata); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_types_proto_rawDesc, - NumEnums: 1, - NumMessages: 4, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_types_proto_goTypes, - DependencyIndexes: file_types_proto_depIdxs, - EnumInfos: file_types_proto_enumTypes, - MessageInfos: file_types_proto_msgTypes, - }.Build() - File_types_proto = out.File - file_types_proto_rawDesc = nil - file_types_proto_goTypes = nil - file_types_proto_depIdxs = nil -} diff --git a/packages/networking/ipfs-cluster/api/pb/types.proto b/packages/networking/ipfs-cluster/api/pb/types.proto deleted file mode 100644 index ec39438..0000000 --- a/packages/networking/ipfs-cluster/api/pb/types.proto +++ /dev/null @@ -1,40 +0,0 @@ -syntax = "proto3"; -package api.pb; - -option go_package=".;pb"; - -message Pin { - enum PinType { - BadType = 0; // 1 << iota - DataType = 1; // 2 << iota - MetaType = 2; - ClusterDAGType = 3; - ShardType = 4; - } - - bytes Cid = 1; - PinType Type = 2; - repeated bytes Allocations = 3; - sint32 MaxDepth = 4; - bytes Reference = 5; - PinOptions Options = 6; - uint64 Timestamp = 7; -} - -message PinOptions { - sint32 ReplicationFactorMin = 1; - sint32 ReplicationFactorMax = 2; - string Name = 
3;
-  uint64 ShardSize = 4;
-  reserved 5; // reserved for UserAllocations
-  map<string, string> Metadata = 6 [deprecated = true];
-  bytes PinUpdate = 7;
-  uint64 ExpireAt = 8;
-  repeated bytes Origins = 9;
-  repeated Metadata SortedMetadata = 10;
-}
-
-message Metadata {
-  string Key = 1;
-  string Value = 2;
-}
\ No newline at end of file
diff --git a/packages/networking/ipfs-cluster/api/pinsvcapi/config.go b/packages/networking/ipfs-cluster/api/pinsvcapi/config.go
deleted file mode 100644
index 077c4bd..0000000
--- a/packages/networking/ipfs-cluster/api/pinsvcapi/config.go
+++ /dev/null
@@ -1,131 +0,0 @@
-package pinsvcapi
-
-import (
-	"net/http"
-	"time"
-
-	ma "github.com/multiformats/go-multiaddr"
-
-	"github.com/ipfs-cluster/ipfs-cluster/api/common"
-	"github.com/ipfs-cluster/ipfs-cluster/api/pinsvcapi/pinsvc"
-)
-
-const configKey = "pinsvcapi"
-const envConfigKey = "cluster_pinsvcapi"
-
-const minMaxHeaderBytes = 4096
-
-// Default values for Config.
-const (
-	DefaultReadTimeout       = 0
-	DefaultReadHeaderTimeout = 5 * time.Second
-	DefaultWriteTimeout      = 0
-	DefaultIdleTimeout       = 120 * time.Second
-	DefaultMaxHeaderBytes    = minMaxHeaderBytes
-)
-
-// Default values for Config.
-var (
-	// DefaultHTTPListenAddrs contains default listen addresses for the HTTP API.
-	DefaultHTTPListenAddrs = []string{"/ip4/127.0.0.1/tcp/9097"}
-	DefaultHeaders         = map[string][]string{}
-)
-
-// CORS defaults.
-var (
-	DefaultCORSAllowedOrigins = []string{"*"}
-	DefaultCORSAllowedMethods = []string{
-		http.MethodGet,
-	}
-	// rs/cors this will set sensible defaults when empty:
-	// {"Origin", "Accept", "Content-Type", "X-Requested-With"}
-	DefaultCORSAllowedHeaders = []string{}
-	DefaultCORSExposedHeaders = []string{
-		"Content-Type",
-		"X-Stream-Output",
-		"X-Chunked-Output",
-		"X-Content-Length",
-	}
-	DefaultCORSAllowCredentials = true
-	DefaultCORSMaxAge           time.Duration // 0. Means always.
-)
-
-// Config fully implements the config.ComponentConfig interface. Use
-// NewConfig() to instantiate. Config embeds a common.Config object.
-type Config struct {
-	common.Config
-}
-
-// NewConfig creates a Config object setting the necessary meta-fields in the
-// common.Config embedded object.
-func NewConfig() *Config {
-	cfg := Config{}
-	cfg.Config.ConfigKey = configKey
-	cfg.EnvConfigKey = envConfigKey
-	cfg.Logger = logger
-	cfg.RequestLogger = apiLogger
-	cfg.DefaultFunc = defaultFunc
-	cfg.APIErrorFunc = func(err error, status int) error {
-		return pinsvc.APIError{
-			Details: pinsvc.APIErrorDetails{
-				Reason: err.Error(),
-			},
-		}
-	}
-	return &cfg
-}
-
-// ConfigKey returns a human-friendly identifier for this type of
-// Config.
-func (cfg *Config) ConfigKey() string {
-	return configKey
-}
-
-// Default initializes this Config with working values.
-func (cfg *Config) Default() error {
-	return defaultFunc(&cfg.Config)
-}
-
-// Sets all defaults for this config.
-func defaultFunc(cfg *common.Config) error {
-	// http
-	addrs := make([]ma.Multiaddr, 0, len(DefaultHTTPListenAddrs))
-	for _, def := range DefaultHTTPListenAddrs {
-		httpListen, err := ma.NewMultiaddr(def)
-		if err != nil {
-			return err
-		}
-		addrs = append(addrs, httpListen)
-	}
-	cfg.HTTPListenAddr = addrs
-	cfg.PathSSLCertFile = ""
-	cfg.PathSSLKeyFile = ""
-	cfg.ReadTimeout = DefaultReadTimeout
-	cfg.ReadHeaderTimeout = DefaultReadHeaderTimeout
-	cfg.WriteTimeout = DefaultWriteTimeout
-	cfg.IdleTimeout = DefaultIdleTimeout
-	cfg.MaxHeaderBytes = DefaultMaxHeaderBytes
-
-	// libp2p
-	cfg.ID = ""
-	cfg.PrivateKey = nil
-	cfg.Libp2pListenAddr = nil
-
-	// Auth
-	cfg.BasicAuthCredentials = nil
-
-	// Logs
-	cfg.HTTPLogFile = ""
-
-	// Headers
-	cfg.Headers = DefaultHeaders
-
-	cfg.CORSAllowedOrigins = DefaultCORSAllowedOrigins
-	cfg.CORSAllowedMethods = DefaultCORSAllowedMethods
-	cfg.CORSAllowedHeaders = DefaultCORSAllowedHeaders
-	cfg.CORSExposedHeaders = DefaultCORSExposedHeaders
-	cfg.CORSAllowCredentials = DefaultCORSAllowCredentials
-	cfg.CORSMaxAge = DefaultCORSMaxAge
-
-	return nil
-}
diff --git a/packages/networking/ipfs-cluster/api/pinsvcapi/pinsvc/pinsvc.go b/packages/networking/ipfs-cluster/api/pinsvcapi/pinsvc/pinsvc.go
deleted file mode 100644
index c71d503..0000000
--- a/packages/networking/ipfs-cluster/api/pinsvcapi/pinsvc/pinsvc.go
+++ /dev/null
@@ -1,313 +0,0 @@
-// Package pinsvc contains type definitions for the Pinning Services API
-package pinsvc
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"net/url"
-	"strconv"
-	"strings"
-	"time"
-
-	types "github.com/ipfs-cluster/ipfs-cluster/api"
-)
-
-func init() {
-	// initialize stringStatus
-	stringStatus = make(map[string]Status)
-	for k, v := range statusString {
-		stringStatus[v] = k
-	}
-}
-
-// APIError is returned by the API as a body when an error
-// occurs. It implements the error interface.
-type APIError struct {
-	Details APIErrorDetails `json:"error"`
-}
-
-// APIErrorDetails contains details about the APIError.
-type APIErrorDetails struct {
-	Reason  string `json:"reason"`
-	Details string `json:"details,omitempty"`
-}
-
-func (apiErr APIError) Error() string {
-	return apiErr.Details.Reason
-}
-
-// PinName is a string limited to 255 chars when serializing JSON.
-type PinName string
-
-// MarshalJSON converts the string to JSON.
-func (pname PinName) MarshalJSON() ([]byte, error) {
-	return json.Marshal(string(pname))
-}
-
-// UnmarshalJSON reads the JSON string and errors if it is over 255 chars.
-func (pname *PinName) UnmarshalJSON(data []byte) error {
-	if len(data) > 257 { // "a_string": 255 chars + 2 for the quotes
-		return errors.New("pin name is over 255 chars")
-	}
-	var v string
-	err := json.Unmarshal(data, &v)
-	*pname = PinName(v)
-	return err
-}
-
-// Pin contains basic information about a Pin and pinning options.
-type Pin struct {
-	Cid     types.Cid         `json:"cid"`
-	Name    PinName           `json:"name,omitempty"`
-	Origins []types.Multiaddr `json:"origins,omitempty"`
-	Meta    map[string]string `json:"meta,omitempty"`
-}
-
-// Defined returns whether the pin is defined (Cid set).
-func (p Pin) Defined() bool {
-	return p.Cid.Defined()
-}
-
-// MatchesName returns true if a pin status matches a name option with a
-// given match strategy.
-func (p Pin) MatchesName(nameOpt string, strategy MatchingStrategy) bool { - if nameOpt == "" { - return true - } - name := string(p.Name) - - switch strategy { - case MatchingStrategyUndefined: - return true - - case MatchingStrategyExact: - return nameOpt == name - case MatchingStrategyIexact: - return strings.EqualFold(name, nameOpt) - case MatchingStrategyPartial: - return strings.Contains(name, nameOpt) - case MatchingStrategyIpartial: - return strings.Contains(strings.ToLower(name), strings.ToLower(nameOpt)) - default: - return true - } -} - -// MatchesMeta returns true if the pin status metadata matches the given. The -// metadata should have all the keys in the given metaOpts and the values -// should, be the same (metadata map includes metaOpts). -func (p Pin) MatchesMeta(metaOpts map[string]string) bool { - for k, v := range metaOpts { - if p.Meta[k] != v { - return false - } - } - return true -} - -// Status represents a pin status, which defines the current state of the pin -// in the system. -type Status int - -// Values for the Status type. -const ( - StatusUndefined Status = 0 - StatusQueued = 1 << iota - StatusPinned - StatusPinning - StatusFailed -) - -var statusString = map[Status]string{ - StatusUndefined: "undefined", - StatusQueued: "queued", - StatusPinned: "pinned", - StatusPinning: "pinning", - StatusFailed: "failed", -} - -// values autofilled in init() -var stringStatus map[string]Status - -// String converts a Status into a readable string. -// If the given Status is a filter (with several -// bits set), it will return a comma-separated list. -func (st Status) String() string { - var values []string - - // simple and known composite values - if v, ok := statusString[st]; ok { - return v - } - - // other filters - for k, v := range statusString { - if st&k > 0 { - values = append(values, v) - } - } - - return strings.Join(values, ",") -} - -// Match returns true if the tracker status matches the given filter. -func (st Status) Match(filter Status) bool { - return filter == StatusUndefined || - st == StatusUndefined || - st&filter > 0 -} - -// MarshalJSON uses the string representation of Status for JSON -// encoding. -func (st Status) MarshalJSON() ([]byte, error) { - return json.Marshal(st.String()) -} - -// UnmarshalJSON sets a tracker status from its JSON representation. -func (st *Status) UnmarshalJSON(data []byte) error { - var v string - err := json.Unmarshal(data, &v) - if err != nil { - return err - } - *st = StatusFromString(v) - return nil -} - -// StatusFromString parses a string and returns the matching -// Status value. The string can be a comma-separated list -// representing a Status filter. Unknown status names are -// ignored. -func StatusFromString(str string) Status { - values := strings.Split(strings.Replace(str, " ", "", -1), ",") - status := StatusUndefined - for _, v := range values { - st, ok := stringStatus[v] - if ok { - status |= st - } - } - return status -} - -// MatchingStrategy defines a type of match for filtering pin lists. -type MatchingStrategy int - -// Values for MatchingStrategy. -const ( - MatchingStrategyUndefined MatchingStrategy = iota - MatchingStrategyExact - MatchingStrategyIexact - MatchingStrategyPartial - MatchingStrategyIpartial -) - -// MatchingStrategyFromString converts a string to its MatchingStrategy value. 
-func MatchingStrategyFromString(str string) MatchingStrategy { - switch str { - case "exact": - return MatchingStrategyExact - case "iexact": - return MatchingStrategyIexact - case "partial": - return MatchingStrategyPartial - case "ipartial": - return MatchingStrategyIpartial - default: - return MatchingStrategyUndefined - } -} - -// PinStatus provides information about a Pin stored by the Pinning API. -type PinStatus struct { - RequestID string `json:"requestid"` - Status Status `json:"status"` - Created time.Time `json:"created"` - Pin Pin `json:"pin"` - Delegates []types.Multiaddr `json:"delegates"` - Info map[string]string `json:"info,omitempty"` -} - -// PinList is the result of a call to List pins -type PinList struct { - Count uint64 `json:"count"` - Results []PinStatus `json:"results"` -} - -// ListOptions represents possible options given to the List endpoint. -type ListOptions struct { - Cids []types.Cid - Name string - MatchingStrategy MatchingStrategy - Status Status - Before time.Time - After time.Time - Limit uint64 - Meta map[string]string -} - -// FromQuery parses ListOptions from url.Values. -func (lo *ListOptions) FromQuery(q url.Values) error { - cidq := q.Get("cid") - if len(cidq) > 0 { - for _, cstr := range strings.Split(cidq, ",") { - c, err := types.DecodeCid(cstr) - if err != nil { - return fmt.Errorf("error decoding cid %s: %w", cstr, err) - } - lo.Cids = append(lo.Cids, c) - } - } - - n := q.Get("name") - if len(n) > 255 { - return fmt.Errorf("error in 'name' query param: longer than 255 chars") - } - lo.Name = n - - lo.MatchingStrategy = MatchingStrategyFromString(q.Get("match")) - if lo.MatchingStrategy == MatchingStrategyUndefined { - lo.MatchingStrategy = MatchingStrategyExact // default - } - statusStr := q.Get("status") - lo.Status = StatusFromString(statusStr) - // FIXME: This is a bit lazy, as "invalidxx,pinned" would result in a - // valid "pinned" filter. - if statusStr != "" && lo.Status == StatusUndefined { - return fmt.Errorf("error decoding 'status' query param: no valid filter") - } - - if bef := q.Get("before"); bef != "" { - err := lo.Before.UnmarshalText([]byte(bef)) - if err != nil { - return fmt.Errorf("error decoding 'before' query param: %s: %w", bef, err) - } - } - - if after := q.Get("after"); after != "" { - err := lo.After.UnmarshalText([]byte(after)) - if err != nil { - return fmt.Errorf("error decoding 'after' query param: %s: %w", after, err) - } - } - - if v := q.Get("limit"); v != "" { - lim, err := strconv.ParseUint(v, 10, 64) - if err != nil { - return fmt.Errorf("error parsing 'limit' query param: %s: %w", v, err) - } - lo.Limit = lim - } else { - lo.Limit = 10 // implicit default - } - - if meta := q.Get("meta"); meta != "" { - err := json.Unmarshal([]byte(meta), &lo.Meta) - if err != nil { - return fmt.Errorf("error unmarshalling 'meta' query param: %s: %w", meta, err) - } - } - - return nil -} diff --git a/packages/networking/ipfs-cluster/api/pinsvcapi/pinsvcapi.go b/packages/networking/ipfs-cluster/api/pinsvcapi/pinsvcapi.go deleted file mode 100644 index 127d167..0000000 --- a/packages/networking/ipfs-cluster/api/pinsvcapi/pinsvcapi.go +++ /dev/null @@ -1,477 +0,0 @@ -// Package pinsvcapi implements an IPFS Cluster API component which provides -// an IPFS Pinning Services API to the cluster. -// -// The implented API is based on the common.API component (refer to module -// description there). The only thing this module does is to provide route -// handling for the otherwise common API component. 
-package pinsvcapi - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "net/http" - "sync" - - "github.com/gorilla/mux" - types "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/api/common" - "github.com/ipfs-cluster/ipfs-cluster/api/pinsvcapi/pinsvc" - "github.com/ipfs-cluster/ipfs-cluster/state" - "go.uber.org/multierr" - - logging "github.com/ipfs/go-log/v2" - "github.com/libp2p/go-libp2p/core/host" - peer "github.com/libp2p/go-libp2p/core/peer" - rpc "github.com/libp2p/go-libp2p-gorpc" -) - -var ( - logger = logging.Logger("pinsvcapi") - apiLogger = logging.Logger("pinsvcapilog") -) - -var apiInfo map[string]string = map[string]string{ - "source": "IPFS cluster API", - "warning1": "CID used for requestID. Conflicts possible", - "warning2": "experimental", -} - -func trackerStatusToSvcStatus(st types.TrackerStatus) pinsvc.Status { - switch { - case st.Match(types.TrackerStatusError): - return pinsvc.StatusFailed - case st.Match(types.TrackerStatusPinQueued): - return pinsvc.StatusQueued - case st.Match(types.TrackerStatusPinning): - return pinsvc.StatusPinning - case st.Match(types.TrackerStatusPinned): - return pinsvc.StatusPinned - default: - return pinsvc.StatusUndefined - } -} - -func svcStatusToTrackerStatus(st pinsvc.Status) types.TrackerStatus { - var tst types.TrackerStatus - - if st.Match(pinsvc.StatusFailed) { - tst |= types.TrackerStatusError - } - if st.Match(pinsvc.StatusQueued) { - tst |= types.TrackerStatusPinQueued - } - if st.Match(pinsvc.StatusPinned) { - tst |= types.TrackerStatusPinned - } - if st.Match(pinsvc.StatusPinning) { - tst |= types.TrackerStatusPinning - } - return tst -} - -func svcPinToClusterPin(p pinsvc.Pin) (types.Pin, error) { - opts := types.PinOptions{ - Name: string(p.Name), - Origins: p.Origins, - Metadata: p.Meta, - Mode: types.PinModeRecursive, - } - return types.PinWithOpts(p.Cid, opts), nil -} - -func globalPinInfoToSvcPinStatus( - rID string, - gpi types.GlobalPinInfo, -) pinsvc.PinStatus { - - status := pinsvc.PinStatus{ - RequestID: rID, - } - - var statusMask types.TrackerStatus - for _, pinfo := range gpi.PeerMap { - statusMask |= pinfo.Status - } - - status.Status = trackerStatusToSvcStatus(statusMask) - status.Created = gpi.Created - status.Pin = pinsvc.Pin{ - Cid: gpi.Cid, - Name: pinsvc.PinName(gpi.Name), - Origins: gpi.Origins, - Meta: gpi.Metadata, - } - - status.Info = apiInfo - - status.Delegates = []types.Multiaddr{} - for _, pi := range gpi.PeerMap { - status.Delegates = append(status.Delegates, pi.IPFSAddresses...) - } - - return status -} - -// API implements the REST API Component. -// It embeds a common.API. -type API struct { - *common.API - - rpcClient *rpc.Client - config *Config -} - -// NewAPI creates a new REST API component. -func NewAPI(ctx context.Context, cfg *Config) (*API, error) { - return NewAPIWithHost(ctx, cfg, nil) -} - -// NewAPIWithHost creates a new REST API component using the given libp2p Host. -func NewAPIWithHost(ctx context.Context, cfg *Config, h host.Host) (*API, error) { - api := API{ - config: cfg, - } - capi, err := common.NewAPIWithHost(ctx, &cfg.Config, h, api.routes) - api.API = capi - return &api, err -} - -// Routes returns endpoints supported by this API. 
-func (api *API) routes(c *rpc.Client) []common.Route { - api.rpcClient = c - return []common.Route{ - { - Name: "ListPins", - Method: "GET", - Pattern: "/pins", - HandlerFunc: api.listPins, - }, - { - Name: "AddPin", - Method: "POST", - Pattern: "/pins", - HandlerFunc: api.addPin, - }, - { - Name: "GetPin", - Method: "GET", - Pattern: "/pins/{requestID}", - HandlerFunc: api.getPin, - }, - { - Name: "ReplacePin", - Method: "POST", - Pattern: "/pins/{requestID}", - HandlerFunc: api.addPin, - }, - { - Name: "RemovePin", - Method: "DELETE", - Pattern: "/pins/{requestID}", - HandlerFunc: api.removePin, - }, - { - Name: "GetToken", - Method: "POST", - Pattern: "/token", - HandlerFunc: api.GenerateTokenHandler, - }, - } -} - -func (api *API) parseBodyOrFail(w http.ResponseWriter, r *http.Request) pinsvc.Pin { - dec := json.NewDecoder(r.Body) - defer r.Body.Close() - - var pin pinsvc.Pin - err := dec.Decode(&pin) - if err != nil { - api.SendResponse(w, http.StatusBadRequest, fmt.Errorf("error decoding request body: %w", err), nil) - return pinsvc.Pin{} - } - return pin -} - -func (api *API) parseRequestIDOrFail(w http.ResponseWriter, r *http.Request) (types.Cid, bool) { - vars := mux.Vars(r) - cStr, ok := vars["requestID"] - if !ok { - return types.CidUndef, true - } - c, err := types.DecodeCid(cStr) - if err != nil { - api.SendResponse(w, http.StatusBadRequest, errors.New("error decoding requestID: "+err.Error()), nil) - return c, false - } - return c, true -} - -func (api *API) addPin(w http.ResponseWriter, r *http.Request) { - if pin := api.parseBodyOrFail(w, r); pin.Defined() { - api.config.Logger.Debugf("addPin: %s", pin.Cid) - clusterPin, err := svcPinToClusterPin(pin) - if err != nil { - api.SendResponse(w, common.SetStatusAutomatically, err, nil) - return - } - - if updateCid, ok := api.parseRequestIDOrFail(w, r); updateCid.Defined() && ok { - clusterPin.PinUpdate = updateCid - } - - // Pin item - var pinObj types.Pin - err = api.rpcClient.CallContext( - r.Context(), - "", - "Cluster", - "Pin", - clusterPin, - &pinObj, - ) - if err != nil { - api.SendResponse(w, common.SetStatusAutomatically, err, nil) - return - } - - // Unpin old item - if clusterPin.PinUpdate.Defined() { - var oldPin types.Pin - err = api.rpcClient.CallContext( - r.Context(), - "", - "Cluster", - "Unpin", - types.PinCid(clusterPin.PinUpdate), - &oldPin, - ) - if err != nil { - api.SendResponse(w, common.SetStatusAutomatically, err, nil) - return - } - } - - status := api.pinToSvcPinStatus(r.Context(), pin.Cid.String(), pinObj) - api.SendResponse(w, common.SetStatusAutomatically, nil, status) - } -} - -func (api *API) getPinSvcStatus(ctx context.Context, c types.Cid) (pinsvc.PinStatus, error) { - var pinInfo types.GlobalPinInfo - - err := api.rpcClient.CallContext( - ctx, - "", - "Cluster", - "Status", - c, - &pinInfo, - ) - if err != nil { - return pinsvc.PinStatus{}, err - } - return globalPinInfoToSvcPinStatus(c.String(), pinInfo), nil - -} - -func (api *API) getPin(w http.ResponseWriter, r *http.Request) { - c, ok := api.parseRequestIDOrFail(w, r) - if !ok { - return - } - api.config.Logger.Debugf("getPin: %s", c) - status, err := api.getPinSvcStatus(r.Context(), c) - if status.Status == pinsvc.StatusUndefined { - api.SendResponse(w, http.StatusNotFound, errors.New("pin not found"), nil) - return - } - api.SendResponse(w, common.SetStatusAutomatically, err, status) -} - -func (api *API) removePin(w http.ResponseWriter, r *http.Request) { - c, ok := api.parseRequestIDOrFail(w, r) - if !ok { - return - } - 
api.config.Logger.Debugf("removePin: %s", c) - var pinObj types.Pin - err := api.rpcClient.CallContext( - r.Context(), - "", - "Cluster", - "Unpin", - types.PinCid(c), - &pinObj, - ) - if err != nil && err.Error() == state.ErrNotFound.Error() { - api.SendResponse(w, http.StatusNotFound, err, nil) - return - } - api.SendResponse(w, http.StatusAccepted, err, nil) -} - -func (api *API) listPins(w http.ResponseWriter, r *http.Request) { - opts := &pinsvc.ListOptions{} - err := opts.FromQuery(r.URL.Query()) - if err != nil { - api.SendResponse(w, common.SetStatusAutomatically, err, nil) - return - } - tst := svcStatusToTrackerStatus(opts.Status) - - var pinList pinsvc.PinList - pinList.Results = []pinsvc.PinStatus{} - count := uint64(0) - - if len(opts.Cids) > 0 { - // copy approach from restapi - type statusResult struct { - st pinsvc.PinStatus - err error - } - stCh := make(chan statusResult, len(opts.Cids)) - var wg sync.WaitGroup - wg.Add(len(opts.Cids)) - - go func() { - wg.Wait() - close(stCh) - }() - - for _, ci := range opts.Cids { - go func(c types.Cid) { - defer wg.Done() - st, err := api.getPinSvcStatus(r.Context(), c) - stCh <- statusResult{st: st, err: err} - }(ci) - } - - var err error - - for stResult := range stCh { - if stResult.st.Status == pinsvc.StatusUndefined && stResult.err == nil { - // ignore things unpinning - continue - } - - if count < opts.Limit { - pinList.Results = append(pinList.Results, stResult.st) - err = multierr.Append(err, stResult.err) - } - count++ - } - - if err != nil { - api.SendResponse(w, common.SetStatusAutomatically, err, nil) - return - } - } else { - in := make(chan types.TrackerStatus, 1) - in <- tst - close(in) - out := make(chan types.GlobalPinInfo, common.StreamChannelSize) - errCh := make(chan error, 1) - - go func() { - defer close(errCh) - - errCh <- api.rpcClient.Stream( - r.Context(), - "", - "Cluster", - "StatusAll", - in, - out, - ) - }() - - for gpi := range out { - st := globalPinInfoToSvcPinStatus(gpi.Cid.String(), gpi) - if st.Status == pinsvc.StatusUndefined { - // i.e things unpinning - continue - } - if !opts.After.IsZero() && st.Created.Before(opts.After) { - continue - } - - if !opts.Before.IsZero() && st.Created.After(opts.Before) { - continue - } - - if !st.Pin.MatchesName(opts.Name, opts.MatchingStrategy) { - continue - } - if !st.Pin.MatchesMeta(opts.Meta) { - continue - } - if count < opts.Limit { - pinList.Results = append(pinList.Results, st) - } - count++ - } - - err := <-errCh - if err != nil { - api.SendResponse(w, common.SetStatusAutomatically, err, nil) - return - } - } - - pinList.Count = count - api.SendResponse(w, common.SetStatusAutomatically, err, pinList) -} - -func (api *API) pinToSvcPinStatus(ctx context.Context, rID string, pin types.Pin) pinsvc.PinStatus { - status := pinsvc.PinStatus{ - RequestID: rID, - Status: pinsvc.StatusQueued, - Created: pin.Timestamp, - Pin: pinsvc.Pin{ - Cid: pin.Cid, - Name: pinsvc.PinName(pin.Name), - Origins: pin.Origins, - Meta: pin.Metadata, - }, - Info: apiInfo, - } - - var peers []peer.ID - - if pin.IsPinEverywhere() { // all cluster peers - err := api.rpcClient.CallContext( - ctx, - "", - "Consensus", - "Peers", - struct{}{}, - &peers, - ) - if err != nil { - logger.Error(err) - } - } else { // Delegates should come from allocations - peers = pin.Allocations - } - - status.Delegates = []types.Multiaddr{} - for _, peer := range peers { - var ipfsid types.IPFSID - err := api.rpcClient.CallContext( - ctx, - "", // call the local peer - "Cluster", - "IPFSID", - peer, // 
retrieve ipfs info for this peer - &ipfsid, - ) - if err != nil { - logger.Error(err) - } - status.Delegates = append(status.Delegates, ipfsid.Addresses...) - } - - return status -} diff --git a/packages/networking/ipfs-cluster/api/pinsvcapi/pinsvcapi_test.go b/packages/networking/ipfs-cluster/api/pinsvcapi/pinsvcapi_test.go deleted file mode 100644 index 545c4e7..0000000 --- a/packages/networking/ipfs-cluster/api/pinsvcapi/pinsvcapi_test.go +++ /dev/null @@ -1,253 +0,0 @@ -package pinsvcapi - -import ( - "context" - "encoding/json" - "strings" - "testing" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/api/common/test" - "github.com/ipfs-cluster/ipfs-cluster/api/pinsvcapi/pinsvc" - clustertest "github.com/ipfs-cluster/ipfs-cluster/test" - - libp2p "github.com/libp2p/go-libp2p" - ma "github.com/multiformats/go-multiaddr" -) - -func testAPIwithConfig(t *testing.T, cfg *Config, name string) *API { - ctx := context.Background() - apiMAddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0") - h, err := libp2p.New(libp2p.ListenAddrs(apiMAddr)) - if err != nil { - t.Fatal(err) - } - - cfg.HTTPListenAddr = []ma.Multiaddr{apiMAddr} - - svcapi, err := NewAPIWithHost(ctx, cfg, h) - if err != nil { - t.Fatalf("should be able to create a new %s API: %s", name, err) - } - - // No keep alive for tests - svcapi.SetKeepAlivesEnabled(false) - svcapi.SetClient(clustertest.NewMockRPCClient(t)) - - return svcapi -} - -func testAPI(t *testing.T) *API { - cfg := NewConfig() - cfg.Default() - cfg.CORSAllowedOrigins = []string{"myorigin"} - cfg.CORSAllowedMethods = []string{"GET", "POST", "DELETE"} - //cfg.CORSAllowedHeaders = []string{"Content-Type"} - cfg.CORSMaxAge = 10 * time.Minute - - return testAPIwithConfig(t, cfg, "basic") -} - -func TestAPIListEndpoint(t *testing.T) { - ctx := context.Background() - svcapi := testAPI(t) - defer svcapi.Shutdown(ctx) - - tf := func(t *testing.T, url test.URLFunc) { - var resp pinsvc.PinList - test.MakeGet(t, svcapi, url(svcapi)+"/pins", &resp) - - // mockPinTracker returns 3 items for Cluster.StatusAll - if resp.Count != 3 { - t.Fatal("Count should be 3") - } - - if len(resp.Results) != 3 { - t.Fatal("There should be 3 results") - } - - results := resp.Results - if !results[0].Pin.Cid.Equals(clustertest.Cid1) || - results[1].Status != pinsvc.StatusPinning { - t.Errorf("unexpected statusAll resp: %+v", results) - } - - // Test status filters - var resp2 pinsvc.PinList - test.MakeGet(t, svcapi, url(svcapi)+"/pins?status=pinning", &resp2) - // mockPinTracker calls pintracker.StatusAll which returns 2 - // items. 
- if resp2.Count != 1 { - t.Errorf("unexpected statusAll+status=pinning resp:\n %+v", resp2) - } - - var resp3 pinsvc.PinList - test.MakeGet(t, svcapi, url(svcapi)+"/pins?status=queued", &resp3) - if resp3.Count != 0 { - t.Errorf("unexpected statusAll+status=queued resp:\n %+v", resp3) - } - - var resp4 pinsvc.PinList - test.MakeGet(t, svcapi, url(svcapi)+"/pins?status=pinned", &resp4) - if resp4.Count != 1 { - t.Errorf("unexpected statusAll+status=queued resp:\n %+v", resp4) - } - - var resp5 pinsvc.PinList - test.MakeGet(t, svcapi, url(svcapi)+"/pins?status=failed", &resp5) - if resp5.Count != 1 { - t.Errorf("unexpected statusAll+status=queued resp:\n %+v", resp5) - } - - var resp6 pinsvc.PinList - test.MakeGet(t, svcapi, url(svcapi)+"/pins?status=failed,pinned", &resp6) - if resp6.Count != 2 { - t.Errorf("unexpected statusAll+status=failed,pinned resp:\n %+v", resp6) - } - - // Test with cids - var resp7 pinsvc.PinList - test.MakeGet(t, svcapi, url(svcapi)+"/pins?cid=QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmq,QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmb", &resp7) - if resp7.Count != 2 { - t.Errorf("unexpected statusAll+cids resp:\n %+v", resp7) - } - - // Test with cids+limit - var resp8 pinsvc.PinList - test.MakeGet(t, svcapi, url(svcapi)+"/pins?cid=QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmq,QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmb&limit=1", &resp8) - if resp8.Count != 2 || len(resp8.Results) != 1 { - t.Errorf("unexpected statusAll+cids+limit resp:\n %+v", resp8) - } - - // Test with limit - var resp9 pinsvc.PinList - test.MakeGet(t, svcapi, url(svcapi)+"/pins?limit=1", &resp9) - if resp9.Count != 3 || len(resp9.Results) != 1 { - t.Errorf("unexpected statusAll+limit=1 resp:\n %+v", resp9) - } - - // Test with name-match - var resp10 pinsvc.PinList - test.MakeGet(t, svcapi, url(svcapi)+"/pins?name=C&match=ipartial", &resp10) - if resp10.Count != 1 { - t.Errorf("unexpected statusAll+name resp:\n %+v", resp10) - } - - // Test with meta-match - var resp11 pinsvc.PinList - test.MakeGet(t, svcapi, url(svcapi)+`/pins?meta={"ccc":"3c"}`, &resp11) - if resp11.Count != 1 { - t.Errorf("unexpected statusAll+meta resp:\n %+v", resp11) - } - - var errorResp pinsvc.APIError - test.MakeGet(t, svcapi, url(svcapi)+"/pins?status=invalid", &errorResp) - if errorResp.Details.Reason == "" { - t.Errorf("expected an error: %s", errorResp.Details.Reason) - } - } - - test.BothEndpoints(t, tf) -} - -func TestAPIPinEndpoint(t *testing.T) { - ctx := context.Background() - svcapi := testAPI(t) - defer svcapi.Shutdown(ctx) - - ma, _ := api.NewMultiaddr("/ip4/1.2.3.4/ipfs/" + clustertest.PeerID1.String()) - - tf := func(t *testing.T, url test.URLFunc) { - // test normal pin - pin := pinsvc.Pin{ - Cid: clustertest.Cid3, - Name: "testname", - Origins: []api.Multiaddr{ - ma, - }, - Meta: map[string]string{ - "meta": "data", - }, - } - var status pinsvc.PinStatus - pinJSON, err := json.Marshal(pin) - if err != nil { - t.Fatal(err) - } - test.MakePost(t, svcapi, url(svcapi)+"/pins", pinJSON, &status) - - if status.Pin.Cid != pin.Cid { - t.Error("cids should match") - } - if status.Pin.Meta["meta"] != "data" { - t.Errorf("metadata should match: %+v", status.Pin) - } - if len(status.Pin.Origins) != 1 { - t.Errorf("expected origins: %+v", status.Pin) - } - if len(status.Delegates) != 3 { - t.Errorf("expected 3 delegates: %+v", status) - } - - var errName pinsvc.APIError - pin2 := pinsvc.Pin{ - Cid: clustertest.Cid1, - Name: pinsvc.PinName(make([]byte, 256)), - } - pinJSON, err = json.Marshal(pin2) - if err != 
nil { - t.Fatal(err) - } - test.MakePost(t, svcapi, url(svcapi)+"/pins", pinJSON, &errName) - if !strings.Contains(errName.Details.Reason, "255") { - t.Error("expected name error") - } - } - - test.BothEndpoints(t, tf) -} - -func TestAPIGetPinEndpoint(t *testing.T) { - ctx := context.Background() - svcapi := testAPI(t) - defer svcapi.Shutdown(ctx) - - tf := func(t *testing.T, url test.URLFunc) { - // test existing pin - var status pinsvc.PinStatus - test.MakeGet(t, svcapi, url(svcapi)+"/pins/"+clustertest.Cid1.String(), &status) - - if !status.Pin.Cid.Equals(clustertest.Cid1) { - t.Error("Cid should be set") - } - - if status.Pin.Meta["meta"] != "data" { - t.Errorf("metadata should match: %+v", status.Pin) - } - if len(status.Delegates) != 1 { - t.Errorf("expected 1 delegates: %+v", status) - } - - var err pinsvc.APIError - test.MakeGet(t, svcapi, url(svcapi)+"/pins/"+clustertest.ErrorCid.String(), &err) - if err.Details.Reason == "" { - t.Error("expected an error") - } - } - - test.BothEndpoints(t, tf) -} - -func TestAPIRemovePinEndpoint(t *testing.T) { - ctx := context.Background() - svcapi := testAPI(t) - defer svcapi.Shutdown(ctx) - - tf := func(t *testing.T, url test.URLFunc) { - // test existing pin - test.MakeDelete(t, svcapi, url(svcapi)+"/pins/"+clustertest.Cid1.String(), nil) - } - - test.BothEndpoints(t, tf) -} diff --git a/packages/networking/ipfs-cluster/api/rest/client/.travis.yml b/packages/networking/ipfs-cluster/api/rest/client/.travis.yml deleted file mode 100644 index 4d2adc3..0000000 --- a/packages/networking/ipfs-cluster/api/rest/client/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go -go: -- '1.9' -- tip -install: -- go get golang.org/x/tools/cmd/cover -- go get github.com/mattn/goveralls -- make deps -script: -- make test -- "$GOPATH/bin/goveralls -coverprofile=coverage.out -service=travis-ci -repotoken $COVERALLS_TOKEN" -env: - global: - secure: Skjty77A/J/34pKFmHtxnpNejY2QAJw5PAacBnflo1yZfq4D2mEqVjyd0V2o/pSqm54b+eUouYp+9hNsBbVRHXlgi3PocVClBTV7McFMAoOn+OOEBrdt5wF57L0IPbt8yde+RpXcnCQ5rRvuSfCkEcTNhlxUdUjx4r9qhFsGWKvZVodcSO6xZTRwPYu7/MJWnJK/JV5CAWl7dWlWeAZhrASwXwS7662tu3SN9eor5+ZVF0t5BMhLP6juu6WPz9TFijQ/W4cRiXJ1REbg+M2RscAj9gOy7lIdKR5MEF1xj8naX2jtiZXcxIdV5cduLwSeBA8v5hahwV0H/1cN4Ypymix9vXfkZKyMbU7/TpO0pEzZOcoFne9edHRh6oUrCRBrf4veOiPbkObjmAs0HsdE1ZoeakgCQVHGqaMUlYW1ybeu04JJrXNAMC7s+RD9lxacwknrx333fSBmw+kQwJGmkYkdKcELo2toivrX+yXezISLf2+puqVPAZznY/OxHAuWDi047QLEBxW72ZuTCpT9QiOj3nl5chvmNV+edqgdLN3SlUNOB0jTOpyac/J1GicFkI7IgE2+PjeqpzVnrhZvpcAy4j8YLadGfISWVzbg4NaoUrBUIqA82rqwiZ1L+CcQKNW1h+vEXWp6cLnn2kcPSihM8RrsLuSiJMMgdIhMN3o= diff --git a/packages/networking/ipfs-cluster/api/rest/client/README.md b/packages/networking/ipfs-cluster/api/rest/client/README.md deleted file mode 100644 index db806ee..0000000 --- a/packages/networking/ipfs-cluster/api/rest/client/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# ipfs-cluster client - -[![Made by](https://img.shields.io/badge/By-Protocol%20Labs-000000.svg?style=flat-square)](https://protocol.ai) -[![Main project](https://img.shields.io/badge/project-ipfs--cluster-ef5c43.svg?style=flat-square)](http://github.com/ipfs-cluster) -[![Discord](https://img.shields.io/badge/forum-discuss.ipfs.io-f9a035.svg?style=flat-square)](https://discuss.ipfs.io/c/help/help-ipfs-cluster/24) -[![Matrix channel](https://img.shields.io/badge/matrix-%23ipfs--cluster-3c8da0.svg?style=flat-square)](https://app.element.io/#/room/#ipfs-cluster:ipfs.io) 
-[![pkg.go.dev](https://pkg.go.dev/badge/github.com/ipfs-cluster/ipfs-cluster)](https://pkg.go.dev/github.com/ipfs-cluster/ipfs-cluster/api/rest/client) - - -> Go client for the ipfs-cluster HTTP API. - -This is a Go client library to use the ipfs-cluster REST HTTP API. - -## Table of Contents - -- [Install](#install) -- [Usage](#usage) -- [Contribute](#contribute) -- [License](#license) - -## Install - -You can import `github.com/ipfs-cluster/ipfs-cluster/api/rest/client` in your code. - -The code can be downloaded and tested with: - -``` -$ git clone https://github.com/ipfs-cluster/ipfs-cluster.git -$ cd ipfs-cluster/ipfs-cluster/rest/api/client -$ go test -v -``` - -## Usage - -Documentation can be read at [pkg.go.dev](https://pkg.go.dev/github.com/ipfs-cluster/ipfs-cluster/api/rest/client). - -## Contribute - -PRs accepted. - -## License - -MIT © Protocol Labs diff --git a/packages/networking/ipfs-cluster/api/rest/client/client.go b/packages/networking/ipfs-cluster/api/rest/client/client.go deleted file mode 100644 index cb6a641..0000000 --- a/packages/networking/ipfs-cluster/api/rest/client/client.go +++ /dev/null @@ -1,402 +0,0 @@ -// Package client provides a Go Client for the IPFS Cluster API provided -// by the "api/rest" component. It supports both the HTTP(s) endpoint and -// the libp2p-http endpoint. -package client - -import ( - "context" - "fmt" - "net" - "net/http" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/api" - - shell "github.com/ipfs/go-ipfs-api" - files "github.com/ipfs/go-ipfs-files" - logging "github.com/ipfs/go-log/v2" - host "github.com/libp2p/go-libp2p/core/host" - peer "github.com/libp2p/go-libp2p/core/peer" - pnet "github.com/libp2p/go-libp2p/core/pnet" - ma "github.com/multiformats/go-multiaddr" - madns "github.com/multiformats/go-multiaddr-dns" - manet "github.com/multiformats/go-multiaddr/net" - - "go.opencensus.io/plugin/ochttp" - "go.opencensus.io/plugin/ochttp/propagation/tracecontext" - "go.opencensus.io/trace" -) - -// Configuration defaults -var ( - DefaultTimeout = 0 - DefaultAPIAddr = "/ip4/127.0.0.1/tcp/9094" - DefaultLogLevel = "info" - DefaultProxyPort = 9095 - ResolveTimeout = 30 * time.Second - DefaultPort = 9094 -) - -var loggingFacility = "apiclient" -var logger = logging.Logger(loggingFacility) - -// Client interface defines the interface to be used by API clients to -// interact with the ipfs-cluster-service. All methods take a -// context.Context as their first parameter, this allows for -// timing out and canceling of requests as well as recording -// metrics and tracing of requests through the API. -type Client interface { - // ID returns information about the cluster Peer. - ID(context.Context) (api.ID, error) - - // Peers requests ID information for all cluster peers. - Peers(context.Context, chan<- api.ID) error - // PeerAdd adds a new peer to the cluster. - PeerAdd(ctx context.Context, pid peer.ID) (api.ID, error) - // PeerRm removes a current peer from the cluster - PeerRm(ctx context.Context, pid peer.ID) error - - // Add imports files to the cluster from the given paths. - Add(ctx context.Context, paths []string, params api.AddParams, out chan<- api.AddedOutput) error - // AddMultiFile imports new files from a MultiFileReader. - AddMultiFile(ctx context.Context, multiFileR *files.MultiFileReader, params api.AddParams, out chan<- api.AddedOutput) error - - // Pin tracks a Cid with the given replication factor and a name for - // human-friendliness. 
- Pin(ctx context.Context, ci api.Cid, opts api.PinOptions) (api.Pin, error) - // Unpin untracks a Cid from cluster. - Unpin(ctx context.Context, ci api.Cid) (api.Pin, error) - - // PinPath resolves given path into a cid and performs the pin operation. - PinPath(ctx context.Context, path string, opts api.PinOptions) (api.Pin, error) - // UnpinPath resolves given path into a cid and performs the unpin operation. - // It returns api.Pin of the given cid before it is unpinned. - UnpinPath(ctx context.Context, path string) (api.Pin, error) - - // Allocations returns the consensus state listing all tracked items - // and the peers that should be pinning them. - Allocations(ctx context.Context, filter api.PinType, out chan<- api.Pin) error - // Allocation returns the current allocations for a given Cid. - Allocation(ctx context.Context, ci api.Cid) (api.Pin, error) - - // Status returns the current ipfs state for a given Cid. If local is true, - // the information affects only the current peer, otherwise the information - // is fetched from all cluster peers. - Status(ctx context.Context, ci api.Cid, local bool) (api.GlobalPinInfo, error) - // StatusCids status information for the requested CIDs. - StatusCids(ctx context.Context, cids []api.Cid, local bool, out chan<- api.GlobalPinInfo) error - // StatusAll gathers Status() for all tracked items. - StatusAll(ctx context.Context, filter api.TrackerStatus, local bool, out chan<- api.GlobalPinInfo) error - - // Recover retriggers pin or unpin ipfs operations for a Cid in error - // state. If local is true, the operation is limited to the current - // peer, otherwise it happens on every cluster peer. - Recover(ctx context.Context, ci api.Cid, local bool) (api.GlobalPinInfo, error) - // RecoverAll triggers Recover() operations on all tracked items. If - // local is true, the operation is limited to the current peer. - // Otherwise, it happens everywhere. - RecoverAll(ctx context.Context, local bool, out chan<- api.GlobalPinInfo) error - - // Alerts returns information health events in the cluster (expired - // metrics etc.). - Alerts(ctx context.Context) ([]api.Alert, error) - - // Version returns the ipfs-cluster peer's version. - Version(context.Context) (api.Version, error) - - // IPFS returns an instance of go-ipfs-api's Shell, pointing to a - // Cluster's IPFS proxy endpoint. - IPFS(context.Context) *shell.Shell - - // GetConnectGraph returns an ipfs-cluster connection graph. - GetConnectGraph(context.Context) (api.ConnectGraph, error) - - // Metrics returns a map with the latest metrics of matching name - // for the current cluster peers. - Metrics(ctx context.Context, name string) ([]api.Metric, error) - - // MetricNames returns the list of metric types. - MetricNames(ctx context.Context) ([]string, error) - - // RepoGC runs garbage collection on IPFS daemons of cluster peers and - // returns collected CIDs. If local is true, it would garbage collect - // only on contacted peer, otherwise on all peers' IPFS daemons. - RepoGC(ctx context.Context, local bool) (api.GlobalRepoGC, error) -} - -// Config allows to configure the parameters to connect -// to the ipfs-cluster REST API. -type Config struct { - // Enable SSL support. Only valid without APIAddr. - SSL bool - // Skip certificate verification (insecure) - NoVerifyCert bool - - // Username and password for basic authentication - Username string - Password string - - // The ipfs-cluster REST API endpoint in multiaddress form - // (takes precedence over host:port). 
If this address contains
-	// an /ipfs/, /p2p/ or /dnsaddr, the API will be contacted
-	// through a libp2p tunnel, thus getting encryption for
-	// free. When the libp2p tunnel is used, other address
-	// configurations are ignored.
-	APIAddr ma.Multiaddr
-
-	// REST API endpoint host and port. Only valid without
-	// APIAddr.
-	Host string
-	Port string
-
-	// If APIAddr is provided, and the peer uses private networks (pnet),
-	// then we need to provide the key. If the peer is the cluster peer,
-	// this corresponds to the cluster secret.
-	ProtectorKey pnet.PSK
-
-	// ProxyAddr is used to obtain a go-ipfs-api Shell instance pointing
-	// to the ipfs proxy endpoint of ipfs-cluster. If empty, the location
-	// will be guessed from one of APIAddr/Host,
-	// and the port used will be ipfs-cluster's proxy default port (9095)
-	ProxyAddr ma.Multiaddr
-
-	// Define timeout for network operations
-	Timeout time.Duration
-
-	// Specifies if we attempt to re-use connections to the same
-	// hosts.
-	DisableKeepAlives bool
-
-	// LogLevel defines the verbosity of the logging facility
-	LogLevel string
-}
-
-// AsTemplateFor creates client configs from resolved multiaddresses
-func (c *Config) AsTemplateFor(addrs []ma.Multiaddr) []*Config {
-	var cfgs []*Config
-	for _, addr := range addrs {
-		cfg := *c
-		cfg.APIAddr = addr
-		cfgs = append(cfgs, &cfg)
-	}
-	return cfgs
-}
-
-// AsTemplateForResolvedAddress creates client configs from a multiaddress
-func (c *Config) AsTemplateForResolvedAddress(ctx context.Context, addr ma.Multiaddr) ([]*Config, error) {
-	resolvedAddrs, err := resolveAddr(ctx, addr)
-	if err != nil {
-		return nil, err
-	}
-	return c.AsTemplateFor(resolvedAddrs), nil
-}
-
-// DefaultClient provides methods to interact with the ipfs-cluster API. Use
-// NewDefaultClient() to create one.
-type defaultClient struct {
-	ctx       context.Context
-	cancel    context.CancelFunc
-	config    *Config
-	transport *http.Transport
-	net       string
-	hostname  string
-	client    *http.Client
-	p2p       host.Host
-}
-
-// NewDefaultClient initializes a client given a Config.
-func NewDefaultClient(cfg *Config) (Client, error) { - ctx, cancel := context.WithCancel(context.Background()) - client := &defaultClient{ - ctx: ctx, - cancel: cancel, - config: cfg, - } - - if client.config.Port == "" { - client.config.Port = fmt.Sprintf("%d", DefaultPort) - } - - err := client.setupAPIAddr() - if err != nil { - return nil, err - } - - err = client.resolveAPIAddr() - if err != nil { - return nil, err - } - - err = client.setupHTTPClient() - if err != nil { - return nil, err - } - - err = client.setupHostname() - if err != nil { - return nil, err - } - - err = client.setupProxy() - if err != nil { - return nil, err - } - - if lvl := cfg.LogLevel; lvl != "" { - logging.SetLogLevel(loggingFacility, lvl) - } else { - logging.SetLogLevel(loggingFacility, DefaultLogLevel) - } - - return client, nil -} - -func (c *defaultClient) setupAPIAddr() error { - if c.config.APIAddr != nil { - return nil // already setup by user - } - - var addr ma.Multiaddr - var err error - - if c.config.Host == "" { //default - addr, err := ma.NewMultiaddr(DefaultAPIAddr) - c.config.APIAddr = addr - return err - } - - var addrStr string - ip := net.ParseIP(c.config.Host) - switch { - case ip == nil: - addrStr = fmt.Sprintf("/dns4/%s/tcp/%s", c.config.Host, c.config.Port) - case ip.To4() != nil: - addrStr = fmt.Sprintf("/ip4/%s/tcp/%s", c.config.Host, c.config.Port) - default: - addrStr = fmt.Sprintf("/ip6/%s/tcp/%s", c.config.Host, c.config.Port) - } - - addr, err = ma.NewMultiaddr(addrStr) - c.config.APIAddr = addr - return err -} - -func (c *defaultClient) resolveAPIAddr() error { - // Only resolve libp2p addresses. For HTTP addresses, we let - // the default client handle any resolving. We extract the hostname - // in setupHostname() - if !IsPeerAddress(c.config.APIAddr) { - return nil - } - resolved, err := resolveAddr(c.ctx, c.config.APIAddr) - if err != nil { - return err - } - c.config.APIAddr = resolved[0] - return nil -} - -func (c *defaultClient) setupHTTPClient() error { - var err error - - switch { - case IsPeerAddress(c.config.APIAddr): - err = c.enableLibp2p() - case isUnixSocketAddress(c.config.APIAddr): - err = c.enableUnix() - case c.config.SSL: - err = c.enableTLS() - default: - c.defaultTransport() - } - - if err != nil { - return err - } - - c.client = &http.Client{ - Transport: &ochttp.Transport{ - Base: c.transport, - Propagation: &tracecontext.HTTPFormat{}, - StartOptions: trace.StartOptions{SpanKind: trace.SpanKindClient}, - FormatSpanName: func(req *http.Request) string { return req.Host + ":" + req.URL.Path + ":" + req.Method }, - NewClientTrace: ochttp.NewSpanAnnotatingClientTrace, - }, - Timeout: c.config.Timeout, - } - return nil -} - -func (c *defaultClient) setupHostname() error { - // Extract host:port form APIAddr or use Host:Port. 
- // For libp2p, hostname is set in enableLibp2p() - // For unix sockets, hostname set in enableUnix() - if IsPeerAddress(c.config.APIAddr) || isUnixSocketAddress(c.config.APIAddr) { - return nil - } - _, hostname, err := manet.DialArgs(c.config.APIAddr) - if err != nil { - return err - } - - c.hostname = hostname - return nil -} - -func (c *defaultClient) setupProxy() error { - if c.config.ProxyAddr != nil { - return nil - } - - // Guess location from APIAddr - port, err := ma.NewMultiaddr(fmt.Sprintf("/tcp/%d", DefaultProxyPort)) - if err != nil { - return err - } - c.config.ProxyAddr = ma.Split(c.config.APIAddr)[0].Encapsulate(port) - return nil -} - -// IPFS returns an instance of go-ipfs-api's Shell, pointing to the -// configured ProxyAddr (or to the default Cluster's IPFS proxy port). -// It re-uses this Client's HTTP client, thus will be constrained by -// the same configurations affecting it (timeouts...). -func (c *defaultClient) IPFS(ctx context.Context) *shell.Shell { - return shell.NewShellWithClient(c.config.ProxyAddr.String(), c.client) -} - -// IsPeerAddress detects if the given multiaddress identifies a libp2p peer, -// either because it has the /p2p/ protocol or because it uses /dnsaddr/ -func IsPeerAddress(addr ma.Multiaddr) bool { - if addr == nil { - return false - } - pid, err := addr.ValueForProtocol(ma.P_P2P) - dnsaddr, err2 := addr.ValueForProtocol(ma.P_DNSADDR) - return (pid != "" && err == nil) || (dnsaddr != "" && err2 == nil) -} - -// isUnixSocketAddress returns if the given address corresponds to a -// unix socket. -func isUnixSocketAddress(addr ma.Multiaddr) bool { - if addr == nil { - return false - } - value, err := addr.ValueForProtocol(ma.P_UNIX) - return (value != "" && err == nil) -} - -// resolve addr -func resolveAddr(ctx context.Context, addr ma.Multiaddr) ([]ma.Multiaddr, error) { - resolveCtx, cancel := context.WithTimeout(ctx, ResolveTimeout) - defer cancel() - resolved, err := madns.Resolve(resolveCtx, addr) - if err != nil { - return nil, err - } - - if len(resolved) == 0 { - return nil, fmt.Errorf("resolving %s returned 0 results", addr) - } - - return resolved, nil -} diff --git a/packages/networking/ipfs-cluster/api/rest/client/client_test.go b/packages/networking/ipfs-cluster/api/rest/client/client_test.go deleted file mode 100644 index 3a62b94..0000000 --- a/packages/networking/ipfs-cluster/api/rest/client/client_test.go +++ /dev/null @@ -1,306 +0,0 @@ -package client - -import ( - "context" - "fmt" - "strings" - "testing" - - "github.com/ipfs-cluster/ipfs-cluster/api/rest" - "github.com/ipfs-cluster/ipfs-cluster/test" - - libp2p "github.com/libp2p/go-libp2p" - pnet "github.com/libp2p/go-libp2p/core/pnet" - tcp "github.com/libp2p/go-libp2p/p2p/transport/tcp" - ma "github.com/multiformats/go-multiaddr" -) - -func testAPI(t *testing.T) *rest.API { - ctx := context.Background() - //logging.SetDebugLogging() - apiMAddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0") - - cfg := rest.NewConfig() - cfg.Default() - cfg.HTTPListenAddr = []ma.Multiaddr{apiMAddr} - secret := make(pnet.PSK, 32) - - h, err := libp2p.New( - libp2p.ListenAddrs(apiMAddr), - libp2p.PrivateNetwork(secret), - libp2p.NoTransports, - libp2p.Transport(tcp.NewTCPTransport), - ) - if err != nil { - t.Fatal(err) - } - - rest, err := rest.NewAPIWithHost(ctx, cfg, h) - if err != nil { - t.Fatal("should be able to create a new Api: ", err) - } - - rest.SetClient(test.NewMockRPCClient(t)) - return rest -} - -func shutdown(a *rest.API) { - ctx := context.Background() - a.Shutdown(ctx) - 
a.Host().Close() -} - -func apiMAddr(a *rest.API) ma.Multiaddr { - listen, _ := a.HTTPAddresses() - hostPort := strings.Split(listen[0], ":") - - addr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%s", hostPort[1])) - return addr -} - -func peerMAddr(a *rest.API) ma.Multiaddr { - ipfsAddr, _ := ma.NewMultiaddr(fmt.Sprintf("/p2p/%s", a.Host().ID().String())) - for _, a := range a.Host().Addrs() { - if _, err := a.ValueForProtocol(ma.P_IP4); err == nil { - return a.Encapsulate(ipfsAddr) - } - } - return nil -} - -func testClientHTTP(t *testing.T, api *rest.API) *defaultClient { - cfg := &Config{ - APIAddr: apiMAddr(api), - DisableKeepAlives: true, - } - c, err := NewDefaultClient(cfg) - if err != nil { - t.Fatal(err) - } - - return c.(*defaultClient) -} - -func testClientLibp2p(t *testing.T, api *rest.API) *defaultClient { - cfg := &Config{ - APIAddr: peerMAddr(api), - ProtectorKey: make([]byte, 32), - DisableKeepAlives: true, - } - c, err := NewDefaultClient(cfg) - if err != nil { - t.Fatal(err) - } - return c.(*defaultClient) -} - -func TestNewDefaultClient(t *testing.T) { - api := testAPI(t) - defer shutdown(api) - - c := testClientHTTP(t, api) - if c.p2p != nil { - t.Error("should not use a libp2p host") - } - - c = testClientLibp2p(t, api) - if c.p2p == nil { - t.Error("expected a libp2p host") - } -} - -func TestDefaultAddress(t *testing.T) { - cfg := &Config{ - APIAddr: nil, - DisableKeepAlives: true, - } - c, err := NewDefaultClient(cfg) - if err != nil { - t.Fatal(err) - } - dc := c.(*defaultClient) - if dc.hostname != "127.0.0.1:9094" { - t.Error("default should be used") - } - - if dc.config.ProxyAddr == nil || dc.config.ProxyAddr.String() != "/ip4/127.0.0.1/tcp/9095" { - t.Error("proxy address was not guessed correctly") - } -} - -func TestMultiaddressPrecedence(t *testing.T) { - addr, _ := ma.NewMultiaddr("/ip4/1.2.3.4/tcp/1234") - cfg := &Config{ - APIAddr: addr, - Host: "localhost", - Port: "9094", - DisableKeepAlives: true, - } - c, err := NewDefaultClient(cfg) - if err != nil { - t.Fatal(err) - } - dc := c.(*defaultClient) - if dc.hostname != "1.2.3.4:1234" { - t.Error("APIAddr should be used") - } - - if dc.config.ProxyAddr == nil || dc.config.ProxyAddr.String() != "/ip4/1.2.3.4/tcp/9095" { - t.Error("proxy address was not guessed correctly") - } -} - -func TestHostPort(t *testing.T) { - - type testcase struct { - host string - port string - expectedHostname string - expectedProxyAddr string - } - - testcases := []testcase{ - { - host: "3.3.1.1", - port: "9094", - expectedHostname: "3.3.1.1:9094", - expectedProxyAddr: "/ip4/3.3.1.1/tcp/9095", - }, - { - host: "ipfs.io", - port: "9094", - expectedHostname: "ipfs.io:9094", - expectedProxyAddr: "/dns4/ipfs.io/tcp/9095", - }, - { - host: "2001:db8::1", - port: "9094", - expectedHostname: "[2001:db8::1]:9094", - expectedProxyAddr: "/ip6/2001:db8::1/tcp/9095", - }, - } - - for _, tc := range testcases { - cfg := &Config{ - APIAddr: nil, - Host: tc.host, - Port: tc.port, - DisableKeepAlives: true, - } - c, err := NewDefaultClient(cfg) - if err != nil { - t.Fatal(err) - } - dc := c.(*defaultClient) - if dc.hostname != tc.expectedHostname { - t.Error("Host Port should be used") - } - - if paddr := dc.config.ProxyAddr; paddr == nil || paddr.String() != tc.expectedProxyAddr { - t.Error("proxy address was not guessed correctly: ", paddr) - } - } -} - -func TestDNSMultiaddress(t *testing.T) { - addr2, _ := ma.NewMultiaddr("/dns4/localhost/tcp/1234") - cfg := &Config{ - APIAddr: addr2, - Host: "localhost", - Port: "9094", - 
DisableKeepAlives: true, - } - c, err := NewDefaultClient(cfg) - if err != nil { - t.Fatal(err) - } - dc := c.(*defaultClient) - if dc.hostname != "localhost:1234" { - t.Error("address should not be resolved") - } - - if paddr := dc.config.ProxyAddr; paddr == nil || paddr.String() != "/dns4/localhost/tcp/9095" { - t.Error("proxy address was not guessed correctly: ", paddr) - } -} - -func TestPeerAddress(t *testing.T) { - peerAddr, _ := ma.NewMultiaddr("/dns4/localhost/tcp/1234/p2p/QmP7R7gWEnruNePxmCa9GBa4VmUNexLVnb1v47R8Gyo3LP") - cfg := &Config{ - APIAddr: peerAddr, - Host: "localhost", - Port: "9094", - DisableKeepAlives: true, - } - c, err := NewDefaultClient(cfg) - if err != nil { - t.Fatal(err) - } - dc := c.(*defaultClient) - if dc.hostname != "QmP7R7gWEnruNePxmCa9GBa4VmUNexLVnb1v47R8Gyo3LP" || dc.net != "libp2p" { - t.Error("bad resolved address") - } - - if dc.config.ProxyAddr == nil || dc.config.ProxyAddr.String() != "/ip4/127.0.0.1/tcp/9095" { - t.Error("proxy address was not guessed correctly") - } -} - -func TestProxyAddress(t *testing.T) { - addr, _ := ma.NewMultiaddr("/ip4/1.3.4.5/tcp/1234") - cfg := &Config{ - DisableKeepAlives: true, - ProxyAddr: addr, - } - c, err := NewDefaultClient(cfg) - if err != nil { - t.Fatal(err) - } - dc := c.(*defaultClient) - if dc.config.ProxyAddr.String() != addr.String() { - t.Error("proxy address was replaced") - } -} - -func TestIPFS(t *testing.T) { - ctx := context.Background() - ipfsMock := test.NewIpfsMock(t) - defer ipfsMock.Close() - - proxyAddr, err := ma.NewMultiaddr( - fmt.Sprintf("/ip4/%s/tcp/%d", ipfsMock.Addr, ipfsMock.Port), - ) - if err != nil { - t.Fatal(err) - } - - cfg := &Config{ - DisableKeepAlives: true, - ProxyAddr: proxyAddr, - } - - c, err := NewDefaultClient(cfg) - if err != nil { - t.Fatal(err) - } - dc := c.(*defaultClient) - ipfs := dc.IPFS(ctx) - - err = ipfs.Pin(test.Cid1.String()) - if err != nil { - t.Error(err) - } - - pins, err := ipfs.Pins() - if err != nil { - t.Error(err) - } - - pin, ok := pins[test.Cid1.String()] - if !ok { - t.Error("pin should be in pin list") - } - if pin.Type != "recursive" { - t.Error("pin type unexpected") - } -} diff --git a/packages/networking/ipfs-cluster/api/rest/client/lbclient.go b/packages/networking/ipfs-cluster/api/rest/client/lbclient.go deleted file mode 100644 index a579b1c..0000000 --- a/packages/networking/ipfs-cluster/api/rest/client/lbclient.go +++ /dev/null @@ -1,555 +0,0 @@ -package client - -import ( - "context" - "sync/atomic" - - shell "github.com/ipfs/go-ipfs-api" - files "github.com/ipfs/go-ipfs-files" - "github.com/ipfs-cluster/ipfs-cluster/api" - peer "github.com/libp2p/go-libp2p/core/peer" -) - -// loadBalancingClient is a client to interact with IPFS Cluster APIs -// that balances the load by distributing requests among peers. -type loadBalancingClient struct { - strategy LBStrategy - retries int -} - -// LBStrategy is a strategy to load balance requests among clients. -type LBStrategy interface { - Next(count int) Client - SetClients(clients []Client) -} - -// RoundRobin is a load balancing strategy that would use clients in a sequence -// for all methods, throughout the lifetime of the lb client. -type RoundRobin struct { - clients []Client - counter uint32 - length uint32 -} - -// Next return the next client to be used. -func (r *RoundRobin) Next(count int) Client { - i := atomic.AddUint32(&r.counter, 1) % r.length - - return r.clients[i] -} - -// SetClients sets a list of clients for this strategy. 
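-// It is normally invoked by NewLBClient, which builds one Client per
-// supplied Config and hands the resulting slice to the strategy, so
-// callers rarely need to call it directly.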
-func (r *RoundRobin) SetClients(cl []Client) { - r.clients = cl - r.length = uint32(len(cl)) -} - -// Failover is a load balancing strategy that would try the first cluster peer -// first. If the first call fails it would try other clients for that call in a -// round robin fashion. -type Failover struct { - clients []Client -} - -// Next returns the next client to be used. -func (f *Failover) Next(count int) Client { - return f.clients[count%len(f.clients)] -} - -// SetClients sets a list of clients for this strategy. -func (f *Failover) SetClients(cl []Client) { - f.clients = cl -} - -// NewLBClient returns a new client that would load balance requests among -// clients. -func NewLBClient(strategy LBStrategy, cfgs []*Config, retries int) (Client, error) { - var clients []Client - for _, cfg := range cfgs { - defaultClient, err := NewDefaultClient(cfg) - if err != nil { - return nil, err - } - clients = append(clients, defaultClient) - } - strategy.SetClients(clients) - return &loadBalancingClient{strategy: strategy, retries: retries}, nil -} - -// retry tries the request until it is successful or tries `lc.retries` times. -func (lc *loadBalancingClient) retry(count int, call func(Client) error) error { - logger.Debugf("retrying %d times", count+1) - - err := call(lc.strategy.Next(count)) - count++ - - // successful request - if err == nil { - return nil - } - - // It is a safety check. This error should never occur. - // All errors returned by client methods are of type `api.Error`. - apiErr, ok := err.(api.Error) - if !ok { - logger.Error("could not cast error into api.Error") - return err - } - - if apiErr.Code != 0 { - return err - } - - if count == lc.retries { - logger.Errorf("reached maximum number of retries without success, retries: %d", lc.retries) - return err - } - - return lc.retry(count, call) -} - -// ID returns information about the cluster Peer. -func (lc *loadBalancingClient) ID(ctx context.Context) (api.ID, error) { - var id api.ID - call := func(c Client) error { - var err error - id, err = c.ID(ctx) - return err - } - - err := lc.retry(0, call) - return id, err -} - -// Peers requests ID information for all cluster peers. -func (lc *loadBalancingClient) Peers(ctx context.Context, out chan<- api.ID) error { - call := func(c Client) error { - done := make(chan struct{}) - cout := make(chan api.ID, cap(out)) - go func() { - for o := range cout { - out <- o - } - done <- struct{}{} - }() - - // this blocks until done - err := c.Peers(ctx, cout) - // wait for cout to be closed - select { - case <-ctx.Done(): - case <-done: - } - return err - } - - // retries call as needed. - err := lc.retry(0, call) - close(out) - return err -} - -// PeerAdd adds a new peer to the cluster. -func (lc *loadBalancingClient) PeerAdd(ctx context.Context, pid peer.ID) (api.ID, error) { - var id api.ID - call := func(c Client) error { - var err error - id, err = c.PeerAdd(ctx, pid) - return err - } - - err := lc.retry(0, call) - return id, err -} - -// PeerRm removes a current peer from the cluster. -func (lc *loadBalancingClient) PeerRm(ctx context.Context, id peer.ID) error { - call := func(c Client) error { - return c.PeerRm(ctx, id) - } - - return lc.retry(0, call) -} - -// Pin tracks a Cid with the given replication factor and a name for -// human-friendliness. 
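-//
-// An illustrative sketch only, not part of this API (lbc, ctx, ci and
-// the option values are all assumed):
-//
-//	lbc, _ := NewLBClient(&Failover{}, cfgs, 3)
-//	pin, err := lbc.Pin(ctx, ci, api.PinOptions{Name: "example"})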
-func (lc *loadBalancingClient) Pin(ctx context.Context, ci api.Cid, opts api.PinOptions) (api.Pin, error) { - var pin api.Pin - call := func(c Client) error { - var err error - pin, err = c.Pin(ctx, ci, opts) - return err - } - - err := lc.retry(0, call) - return pin, err -} - -// Unpin untracks a Cid from cluster. -func (lc *loadBalancingClient) Unpin(ctx context.Context, ci api.Cid) (api.Pin, error) { - var pin api.Pin - call := func(c Client) error { - var err error - pin, err = c.Unpin(ctx, ci) - return err - } - - err := lc.retry(0, call) - return pin, err -} - -// PinPath allows to pin an element by the given IPFS path. -func (lc *loadBalancingClient) PinPath(ctx context.Context, path string, opts api.PinOptions) (api.Pin, error) { - var pin api.Pin - call := func(c Client) error { - var err error - pin, err = c.PinPath(ctx, path, opts) - return err - } - - err := lc.retry(0, call) - return pin, err -} - -// UnpinPath allows to unpin an item by providing its IPFS path. -// It returns the unpinned api.Pin information of the resolved Cid. -func (lc *loadBalancingClient) UnpinPath(ctx context.Context, p string) (api.Pin, error) { - var pin api.Pin - call := func(c Client) error { - var err error - pin, err = c.UnpinPath(ctx, p) - return err - } - - err := lc.retry(0, call) - return pin, err -} - -// Allocations returns the consensus state listing all tracked items and -// the peers that should be pinning them. -func (lc *loadBalancingClient) Allocations(ctx context.Context, filter api.PinType, out chan<- api.Pin) error { - call := func(c Client) error { - done := make(chan struct{}) - cout := make(chan api.Pin, cap(out)) - go func() { - for o := range cout { - out <- o - } - done <- struct{}{} - }() - - // this blocks until done - err := c.Allocations(ctx, filter, cout) - // wait for cout to be closed - select { - case <-ctx.Done(): - case <-done: - } - return err - } - - err := lc.retry(0, call) - close(out) - return err -} - -// Allocation returns the current allocations for a given Cid. -func (lc *loadBalancingClient) Allocation(ctx context.Context, ci api.Cid) (api.Pin, error) { - var pin api.Pin - call := func(c Client) error { - var err error - pin, err = c.Allocation(ctx, ci) - return err - } - - err := lc.retry(0, call) - return pin, err -} - -// Status returns the current ipfs state for a given Cid. If local is true, -// the information affects only the current peer, otherwise the information -// is fetched from all cluster peers. -func (lc *loadBalancingClient) Status(ctx context.Context, ci api.Cid, local bool) (api.GlobalPinInfo, error) { - var pinInfo api.GlobalPinInfo - call := func(c Client) error { - var err error - pinInfo, err = c.Status(ctx, ci, local) - return err - } - - err := lc.retry(0, call) - return pinInfo, err -} - -// StatusCids returns Status() information for the given Cids. If local is -// true, the information affects only the current peer, otherwise the -// information is fetched from all cluster peers. 
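-//
-// Results arrive on the supplied channel, which is closed when the call
-// finishes; a hypothetical consumer (names assumed) might look like:
-//
-//	out := make(chan api.GlobalPinInfo, 16)
-//	go func() {
-//		for gpi := range out {
-//			fmt.Println(gpi.Cid)
-//		}
-//	}()
-//	err := lbc.StatusCids(ctx, cids, false, out)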
-func (lc *loadBalancingClient) StatusCids(ctx context.Context, cids []api.Cid, local bool, out chan<- api.GlobalPinInfo) error { - call := func(c Client) error { - done := make(chan struct{}) - cout := make(chan api.GlobalPinInfo, cap(out)) - go func() { - for o := range cout { - out <- o - } - done <- struct{}{} - }() - - // this blocks until done - err := c.StatusCids(ctx, cids, local, cout) - // wait for cout to be closed - select { - case <-ctx.Done(): - case <-done: - } - return err - } - - err := lc.retry(0, call) - close(out) - return err -} - -// StatusAll gathers Status() for all tracked items. If a filter is -// provided, only entries matching the given filter statuses -// will be returned. A filter can be built by merging TrackerStatuses with -// a bitwise OR operation (st1 | st2 | ...). A "0" filter value (or -// api.TrackerStatusUndefined), means all. -func (lc *loadBalancingClient) StatusAll(ctx context.Context, filter api.TrackerStatus, local bool, out chan<- api.GlobalPinInfo) error { - call := func(c Client) error { - done := make(chan struct{}) - cout := make(chan api.GlobalPinInfo, cap(out)) - go func() { - for o := range cout { - out <- o - } - done <- struct{}{} - }() - - // this blocks until done - err := c.StatusAll(ctx, filter, local, cout) - // wait for cout to be closed - select { - case <-ctx.Done(): - case <-done: - } - return err - } - - err := lc.retry(0, call) - close(out) - return err -} - -// Recover retriggers pin or unpin ipfs operations for a Cid in error state. -// If local is true, the operation is limited to the current peer, otherwise -// it happens on every cluster peer. -func (lc *loadBalancingClient) Recover(ctx context.Context, ci api.Cid, local bool) (api.GlobalPinInfo, error) { - var pinInfo api.GlobalPinInfo - call := func(c Client) error { - var err error - pinInfo, err = c.Recover(ctx, ci, local) - return err - } - - err := lc.retry(0, call) - return pinInfo, err -} - -// RecoverAll triggers Recover() operations on all tracked items. If local is -// true, the operation is limited to the current peer. Otherwise, it happens -// everywhere. -func (lc *loadBalancingClient) RecoverAll(ctx context.Context, local bool, out chan<- api.GlobalPinInfo) error { - call := func(c Client) error { - done := make(chan struct{}) - cout := make(chan api.GlobalPinInfo, cap(out)) - go func() { - for o := range cout { - out <- o - } - done <- struct{}{} - }() - - // this blocks until done - err := c.RecoverAll(ctx, local, cout) - // wait for cout to be closed - select { - case <-ctx.Done(): - case <-done: - } - return err - } - - err := lc.retry(0, call) - close(out) - return err -} - -// Alerts returns things that are wrong with cluster. -func (lc *loadBalancingClient) Alerts(ctx context.Context) ([]api.Alert, error) { - var alerts []api.Alert - call := func(c Client) error { - var err error - alerts, err = c.Alerts(ctx) - return err - } - - err := lc.retry(0, call) - return alerts, err -} - -// Version returns the ipfs-cluster peer's version. -func (lc *loadBalancingClient) Version(ctx context.Context) (api.Version, error) { - var v api.Version - call := func(c Client) error { - var err error - v, err = c.Version(ctx) - return err - } - err := lc.retry(0, call) - return v, err -} - -// GetConnectGraph returns an ipfs-cluster connection graph. -// The serialized version, strings instead of pids, is returned. 
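-// Like the other methods on loadBalancingClient, the request is routed
-// through retry(): transport-level failures (api.Error with Code 0) are
-// retried on the peers picked by the configured LBStrategy, while errors
-// returned by the API itself are reported immediately.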
-func (lc *loadBalancingClient) GetConnectGraph(ctx context.Context) (api.ConnectGraph, error) { - var graph api.ConnectGraph - call := func(c Client) error { - var err error - graph, err = c.GetConnectGraph(ctx) - return err - } - - err := lc.retry(0, call) - return graph, err -} - -// Metrics returns a map with the latest valid metrics of the given name -// for the current cluster peers. -func (lc *loadBalancingClient) Metrics(ctx context.Context, name string) ([]api.Metric, error) { - var metrics []api.Metric - call := func(c Client) error { - var err error - metrics, err = c.Metrics(ctx, name) - return err - } - - err := lc.retry(0, call) - return metrics, err -} - -// MetricNames returns the list of metric types. -func (lc *loadBalancingClient) MetricNames(ctx context.Context) ([]string, error) { - var metricNames []string - call := func(c Client) error { - var err error - metricNames, err = c.MetricNames(ctx) - return err - } - - err := lc.retry(0, call) - - return metricNames, err -} - -// RepoGC runs garbage collection on IPFS daemons of cluster peers and -// returns collected CIDs. If local is true, it would garbage collect -// only on contacted peer, otherwise on all peers' IPFS daemons. -func (lc *loadBalancingClient) RepoGC(ctx context.Context, local bool) (api.GlobalRepoGC, error) { - var repoGC api.GlobalRepoGC - - call := func(c Client) error { - var err error - repoGC, err = c.RepoGC(ctx, local) - return err - } - - err := lc.retry(0, call) - return repoGC, err -} - -// Add imports files to the cluster from the given paths. A path can -// either be a local filesystem location or an web url (http:// or https://). -// In the latter case, the destination will be downloaded with a GET request. -// The AddParams allow to control different options, like enabling the -// sharding the resulting DAG across the IPFS daemons of multiple cluster -// peers. The output channel will receive regular updates as the adding -// process progresses. -func (lc *loadBalancingClient) Add( - ctx context.Context, - paths []string, - params api.AddParams, - out chan<- api.AddedOutput, -) error { - call := func(c Client) error { - done := make(chan struct{}) - cout := make(chan api.AddedOutput, cap(out)) - go func() { - for o := range cout { - out <- o - } - done <- struct{}{} - }() - - // this blocks until done - err := c.Add(ctx, paths, params, cout) - // wait for cout to be closed - select { - case <-ctx.Done(): - case <-done: - } - return err - } - - err := lc.retry(0, call) - close(out) - return err -} - -// AddMultiFile imports new files from a MultiFileReader. See Add(). -func (lc *loadBalancingClient) AddMultiFile( - ctx context.Context, - multiFileR *files.MultiFileReader, - params api.AddParams, - out chan<- api.AddedOutput, -) error { - call := func(c Client) error { - done := make(chan struct{}) - cout := make(chan api.AddedOutput, cap(out)) - go func() { - for o := range cout { - out <- o - } - done <- struct{}{} - }() - - // this blocks until done - err := c.AddMultiFile(ctx, multiFileR, params, cout) - // wait for cout to be closed - select { - case <-ctx.Done(): - case <-done: - } - return err - } - - err := lc.retry(0, call) - close(out) - return err -} - -// IPFS returns an instance of go-ipfs-api's Shell, pointing to the -// configured ProxyAddr (or to the default Cluster's IPFS proxy port). -// It re-uses this Client's HTTP client, thus will be constrained by -// the same configurations affecting it (timeouts...). 
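-//
-// A minimal sketch (lbc, ctx and the CID string are assumed):
-//
-//	sh := lbc.IPFS(ctx)
-//	if err := sh.Pin("Qm..."); err != nil {
-//		// handle error
-//	}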
-func (lc *loadBalancingClient) IPFS(ctx context.Context) *shell.Shell { - var s *shell.Shell - call := func(c Client) error { - s = c.IPFS(ctx) - return nil - } - - lc.retry(0, call) - - return s -} diff --git a/packages/networking/ipfs-cluster/api/rest/client/lbclient_test.go b/packages/networking/ipfs-cluster/api/rest/client/lbclient_test.go deleted file mode 100644 index e0455c4..0000000 --- a/packages/networking/ipfs-cluster/api/rest/client/lbclient_test.go +++ /dev/null @@ -1,107 +0,0 @@ -package client - -import ( - "context" - "fmt" - "sync" - "testing" - - "github.com/ipfs-cluster/ipfs-cluster/api" - ma "github.com/multiformats/go-multiaddr" -) - -func TestFailoverConcurrently(t *testing.T) { - // Create a load balancing client with 5 empty clients and 5 clients with APIs - // say we want to retry the request for at most 5 times - cfgs := make([]*Config, 10) - - // 5 clients with an invalid api address - for i := 0; i < 5; i++ { - maddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0") - cfgs[i] = &Config{ - APIAddr: maddr, - DisableKeepAlives: true, - } - } - - // 5 clients with APIs - for i := 5; i < 10; i++ { - cfgs[i] = &Config{ - APIAddr: apiMAddr(testAPI(t)), - DisableKeepAlives: true, - } - } - - // Run many requests at the same time - - // With Failover strategy, it would go through first 5 empty clients - // and then 6th working client. Thus, all requests should always succeed. - testRunManyRequestsConcurrently(t, cfgs, &Failover{}, 200, 6, true) - // First 5 clients are empty. Thus, all requests should fail. - testRunManyRequestsConcurrently(t, cfgs, &Failover{}, 200, 5, false) -} - -type dummyClient struct { - defaultClient - i int -} - -// ID returns dummy client's serial number. -func (d *dummyClient) ID(ctx context.Context) (api.ID, error) { - return api.ID{ - Peername: fmt.Sprintf("%d", d.i), - }, nil -} - -func TestRoundRobin(t *testing.T) { - var clients []Client - // number of clients - n := 5 - // create n dummy clients - for i := 0; i < n; i++ { - c := &dummyClient{ - i: i, - } - clients = append(clients, c) - } - - roundRobin := loadBalancingClient{ - strategy: &RoundRobin{ - clients: clients, - length: uint32(len(clients)), - }, - } - - // clients should be used in the sequence 1, 2,.., 4, 0. 
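-	// (Next atomically increments the shared counter before taking the
-	// modulus, so the first call returns client 1 and client 0 is
-	// served last.)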
- for i := 0; i < n; i++ { - id, _ := roundRobin.ID(context.Background()) - if id.Peername != fmt.Sprintf("%d", (i+1)%n) { - t.Errorf("clients are not being tried in sequence, expected client: %d, but found: %s", i, id.Peername) - } - } - -} - -func testRunManyRequestsConcurrently(t *testing.T, cfgs []*Config, strategy LBStrategy, requests int, retries int, pass bool) { - c, err := NewLBClient(strategy, cfgs, retries) - if err != nil { - t.Fatal(err) - } - - var wg sync.WaitGroup - for i := 0; i < requests; i++ { - wg.Add(1) - go func() { - defer wg.Done() - ctx := context.Background() - _, err := c.ID(ctx) - if err != nil && pass { - t.Error(err) - } - if err == nil && !pass { - t.Error("request should fail with connection refusal") - } - }() - } - wg.Wait() -} diff --git a/packages/networking/ipfs-cluster/api/rest/client/methods.go b/packages/networking/ipfs-cluster/api/rest/client/methods.go deleted file mode 100644 index 8745b59..0000000 --- a/packages/networking/ipfs-cluster/api/rest/client/methods.go +++ /dev/null @@ -1,699 +0,0 @@ -package client - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "net/url" - "os" - "path" - "path/filepath" - "strings" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/api" - - files "github.com/ipfs/go-ipfs-files" - gopath "github.com/ipfs/go-path" - peer "github.com/libp2p/go-libp2p/core/peer" - - "go.opencensus.io/trace" -) - -// ID returns information about the cluster Peer. -func (c *defaultClient) ID(ctx context.Context) (api.ID, error) { - ctx, span := trace.StartSpan(ctx, "client/ID") - defer span.End() - - var id api.ID - err := c.do(ctx, "GET", "/id", nil, nil, &id) - return id, err -} - -// Peers requests ID information for all cluster peers. -func (c *defaultClient) Peers(ctx context.Context, out chan<- api.ID) error { - defer close(out) - - ctx, span := trace.StartSpan(ctx, "client/Peers") - defer span.End() - - handler := func(dec *json.Decoder) error { - var obj api.ID - err := dec.Decode(&obj) - if err != nil { - return err - } - out <- obj - return nil - } - - return c.doStream(ctx, "GET", "/peers", nil, nil, handler) - -} - -type peerAddBody struct { - PeerID string `json:"peer_id"` -} - -// PeerAdd adds a new peer to the cluster. -func (c *defaultClient) PeerAdd(ctx context.Context, pid peer.ID) (api.ID, error) { - ctx, span := trace.StartSpan(ctx, "client/PeerAdd") - defer span.End() - - body := peerAddBody{pid.String()} - - var buf bytes.Buffer - enc := json.NewEncoder(&buf) - enc.Encode(body) - - var id api.ID - err := c.do(ctx, "POST", "/peers", nil, &buf, &id) - return id, err -} - -// PeerRm removes a current peer from the cluster -func (c *defaultClient) PeerRm(ctx context.Context, id peer.ID) error { - ctx, span := trace.StartSpan(ctx, "client/PeerRm") - defer span.End() - - return c.do(ctx, "DELETE", fmt.Sprintf("/peers/%s", id.Pretty()), nil, nil, nil) -} - -// Pin tracks a Cid with the given replication factor and a name for -// human-friendliness. -func (c *defaultClient) Pin(ctx context.Context, ci api.Cid, opts api.PinOptions) (api.Pin, error) { - ctx, span := trace.StartSpan(ctx, "client/Pin") - defer span.End() - - query, err := opts.ToQuery() - if err != nil { - return api.Pin{}, err - } - var pin api.Pin - err = c.do( - ctx, - "POST", - fmt.Sprintf( - "/pins/%s?%s", - ci.String(), - query, - ), - nil, - nil, - &pin, - ) - return pin, err -} - -// Unpin untracks a Cid from cluster. 
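-// Under the hood this issues a DELETE request to /pins/<cid> and decodes
-// the removed pin from the response body.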
-func (c *defaultClient) Unpin(ctx context.Context, ci api.Cid) (api.Pin, error) { - ctx, span := trace.StartSpan(ctx, "client/Unpin") - defer span.End() - var pin api.Pin - err := c.do(ctx, "DELETE", fmt.Sprintf("/pins/%s", ci.String()), nil, nil, &pin) - return pin, err -} - -// PinPath allows to pin an element by the given IPFS path. -func (c *defaultClient) PinPath(ctx context.Context, path string, opts api.PinOptions) (api.Pin, error) { - ctx, span := trace.StartSpan(ctx, "client/PinPath") - defer span.End() - - var pin api.Pin - ipfspath, err := gopath.ParsePath(path) - if err != nil { - return api.Pin{}, err - } - query, err := opts.ToQuery() - if err != nil { - return api.Pin{}, err - } - err = c.do( - ctx, - "POST", - fmt.Sprintf( - "/pins%s?%s", - ipfspath.String(), - query, - ), - nil, - nil, - &pin, - ) - - return pin, err -} - -// UnpinPath allows to unpin an item by providing its IPFS path. -// It returns the unpinned api.Pin information of the resolved Cid. -func (c *defaultClient) UnpinPath(ctx context.Context, p string) (api.Pin, error) { - ctx, span := trace.StartSpan(ctx, "client/UnpinPath") - defer span.End() - - var pin api.Pin - ipfspath, err := gopath.ParsePath(p) - if err != nil { - return api.Pin{}, err - } - - err = c.do(ctx, "DELETE", fmt.Sprintf("/pins%s", ipfspath.String()), nil, nil, &pin) - return pin, err -} - -// Allocations returns the consensus state listing all tracked items and -// the peers that should be pinning them. -func (c *defaultClient) Allocations(ctx context.Context, filter api.PinType, out chan<- api.Pin) error { - defer close(out) - - ctx, span := trace.StartSpan(ctx, "client/Allocations") - defer span.End() - - types := []api.PinType{ - api.DataType, - api.MetaType, - api.ClusterDAGType, - api.ShardType, - } - - var strFilter []string - - if filter == api.AllType { - strFilter = []string{"all"} - } else { - for _, t := range types { - if t&filter > 0 { // the filter includes this type - strFilter = append(strFilter, t.String()) - } - } - } - - handler := func(dec *json.Decoder) error { - var obj api.Pin - err := dec.Decode(&obj) - if err != nil { - return err - } - out <- obj - return nil - } - - f := url.QueryEscape(strings.Join(strFilter, ",")) - return c.doStream( - ctx, - "GET", - fmt.Sprintf("/allocations?filter=%s", f), - nil, - nil, - handler) -} - -// Allocation returns the current allocations for a given Cid. -func (c *defaultClient) Allocation(ctx context.Context, ci api.Cid) (api.Pin, error) { - ctx, span := trace.StartSpan(ctx, "client/Allocation") - defer span.End() - - var pin api.Pin - err := c.do(ctx, "GET", fmt.Sprintf("/allocations/%s", ci.String()), nil, nil, &pin) - return pin, err -} - -// Status returns the current ipfs state for a given Cid. If local is true, -// the information affects only the current peer, otherwise the information -// is fetched from all cluster peers. -func (c *defaultClient) Status(ctx context.Context, ci api.Cid, local bool) (api.GlobalPinInfo, error) { - ctx, span := trace.StartSpan(ctx, "client/Status") - defer span.End() - - var gpi api.GlobalPinInfo - err := c.do( - ctx, - "GET", - fmt.Sprintf("/pins/%s?local=%t", ci.String(), local), - nil, - nil, - &gpi, - ) - return gpi, err -} - -// StatusCids returns Status() information for the given Cids. If local is -// true, the information affects only the current peer, otherwise the -// information is fetched from all cluster peers. 
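-// It shares its implementation with StatusAll via statusAllWithCids,
-// which sends the requested CIDs in the "cids" query parameter.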
-func (c *defaultClient) StatusCids(ctx context.Context, cids []api.Cid, local bool, out chan<- api.GlobalPinInfo) error { - return c.statusAllWithCids(ctx, api.TrackerStatusUndefined, cids, local, out) -} - -// StatusAll gathers Status() for all tracked items. If a filter is -// provided, only entries matching the given filter statuses -// will be returned. A filter can be built by merging TrackerStatuses with -// a bitwise OR operation (st1 | st2 | ...). A "0" filter value (or -// api.TrackerStatusUndefined), means all. -func (c *defaultClient) StatusAll(ctx context.Context, filter api.TrackerStatus, local bool, out chan<- api.GlobalPinInfo) error { - return c.statusAllWithCids(ctx, filter, nil, local, out) -} - -func (c *defaultClient) statusAllWithCids(ctx context.Context, filter api.TrackerStatus, cids []api.Cid, local bool, out chan<- api.GlobalPinInfo) error { - defer close(out) - ctx, span := trace.StartSpan(ctx, "client/StatusAll") - defer span.End() - - filterStr := "" - if filter != api.TrackerStatusUndefined { // undefined filter means "all" - filterStr = filter.String() - if filterStr == "" { - return errors.New("invalid filter value") - } - } - - cidsStr := make([]string, len(cids)) - for i, c := range cids { - cidsStr[i] = c.String() - } - - handler := func(dec *json.Decoder) error { - var obj api.GlobalPinInfo - err := dec.Decode(&obj) - if err != nil { - return err - } - out <- obj - return nil - } - - return c.doStream( - ctx, - "GET", - fmt.Sprintf("/pins?local=%t&filter=%s&cids=%s", - local, url.QueryEscape(filterStr), strings.Join(cidsStr, ",")), - nil, - nil, - handler, - ) -} - -// Recover retriggers pin or unpin ipfs operations for a Cid in error state. -// If local is true, the operation is limited to the current peer, otherwise -// it happens on every cluster peer. -func (c *defaultClient) Recover(ctx context.Context, ci api.Cid, local bool) (api.GlobalPinInfo, error) { - ctx, span := trace.StartSpan(ctx, "client/Recover") - defer span.End() - - var gpi api.GlobalPinInfo - err := c.do(ctx, "POST", fmt.Sprintf("/pins/%s/recover?local=%t", ci.String(), local), nil, nil, &gpi) - return gpi, err -} - -// RecoverAll triggers Recover() operations on all tracked items. If local is -// true, the operation is limited to the current peer. Otherwise, it happens -// everywhere. -func (c *defaultClient) RecoverAll(ctx context.Context, local bool, out chan<- api.GlobalPinInfo) error { - defer close(out) - - ctx, span := trace.StartSpan(ctx, "client/RecoverAll") - defer span.End() - - handler := func(dec *json.Decoder) error { - var obj api.GlobalPinInfo - err := dec.Decode(&obj) - if err != nil { - return err - } - out <- obj - return nil - } - - return c.doStream( - ctx, - "POST", - fmt.Sprintf("/pins/recover?local=%t", local), - nil, - nil, - handler) -} - -// Alerts returns information health events in the cluster (expired metrics -// etc.). -func (c *defaultClient) Alerts(ctx context.Context) ([]api.Alert, error) { - ctx, span := trace.StartSpan(ctx, "client/Alert") - defer span.End() - - var alerts []api.Alert - err := c.do(ctx, "GET", "/health/alerts", nil, nil, &alerts) - return alerts, err -} - -// Version returns the ipfs-cluster peer's version. -func (c *defaultClient) Version(ctx context.Context) (api.Version, error) { - ctx, span := trace.StartSpan(ctx, "client/Version") - defer span.End() - - var ver api.Version - err := c.do(ctx, "GET", "/version", nil, nil, &ver) - return ver, err -} - -// GetConnectGraph returns an ipfs-cluster connection graph. 
-// The serialized version, strings instead of pids, is returned -func (c *defaultClient) GetConnectGraph(ctx context.Context) (api.ConnectGraph, error) { - ctx, span := trace.StartSpan(ctx, "client/GetConnectGraph") - defer span.End() - - var graph api.ConnectGraph - err := c.do(ctx, "GET", "/health/graph", nil, nil, &graph) - return graph, err -} - -// Metrics returns a map with the latest valid metrics of the given name -// for the current cluster peers. -func (c *defaultClient) Metrics(ctx context.Context, name string) ([]api.Metric, error) { - ctx, span := trace.StartSpan(ctx, "client/Metrics") - defer span.End() - - if name == "" { - return nil, errors.New("bad metric name") - } - var metrics []api.Metric - err := c.do(ctx, "GET", fmt.Sprintf("/monitor/metrics/%s", name), nil, nil, &metrics) - return metrics, err -} - -// MetricNames lists names of all metrics. -func (c *defaultClient) MetricNames(ctx context.Context) ([]string, error) { - ctx, span := trace.StartSpan(ctx, "client/MetricNames") - defer span.End() - - var metricsNames []string - err := c.do(ctx, "GET", "/monitor/metrics", nil, nil, &metricsNames) - return metricsNames, err -} - -// RepoGC runs garbage collection on IPFS daemons of cluster peers and -// returns collected CIDs. If local is true, it would garbage collect -// only on contacted peer, otherwise on all peers' IPFS daemons. -func (c *defaultClient) RepoGC(ctx context.Context, local bool) (api.GlobalRepoGC, error) { - ctx, span := trace.StartSpan(ctx, "client/RepoGC") - defer span.End() - - var repoGC api.GlobalRepoGC - err := c.do( - ctx, - "POST", - fmt.Sprintf("/ipfs/gc?local=%t", local), - nil, - nil, - &repoGC, - ) - - return repoGC, err -} - -// WaitFor is a utility function that allows for a caller to wait until a CID -// status target is reached (as given in StatusFilterParams). -// It returns the final status for that CID and an error, if there was one. -// -// WaitFor works by calling Status() repeatedly and checking that returned -// peers have transitioned to the target TrackerStatus. It immediately returns -// an error when the an error is among the statuses (and an empty -// GlobalPinInfo). -// -// A special case exists for TrackerStatusPinned targets: in this case, -// TrackerStatusRemote statuses are ignored, so WaitFor will return when -// all Statuses are Pinned or Remote by default. -// -// The Limit parameter allows to specify finer-grained control to, for -// example, only wait until a number of peers reaches a status. -func WaitFor(ctx context.Context, c Client, fp StatusFilterParams) (api.GlobalPinInfo, error) { - ctx, span := trace.StartSpan(ctx, "client/WaitFor") - defer span.End() - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - sf := newStatusFilter() - - go sf.pollStatus(ctx, c, fp) - go sf.filter(ctx, fp) - - var status api.GlobalPinInfo - - for { - select { - case <-ctx.Done(): - return status, ctx.Err() - case err := <-sf.Err: - return status, err - case st, ok := <-sf.Out: - if !ok { // channel closed - return status, nil - } - status = st - } - } -} - -// StatusFilterParams contains the parameters required -// to filter a stream of status results. -type StatusFilterParams struct { - Cid api.Cid - Local bool // query status from the local peer only - Target api.TrackerStatus - Limit int // wait for N peers reaching status. 
0 == all - CheckFreq time.Duration -} - -type statusFilter struct { - In, Out chan api.GlobalPinInfo - Done chan struct{} - Err chan error -} - -func newStatusFilter() *statusFilter { - return &statusFilter{ - In: make(chan api.GlobalPinInfo), - Out: make(chan api.GlobalPinInfo), - Done: make(chan struct{}), - Err: make(chan error), - } -} - -func (sf *statusFilter) filter(ctx context.Context, fp StatusFilterParams) { - defer close(sf.Done) - defer close(sf.Out) - - for { - select { - case <-ctx.Done(): - sf.Err <- ctx.Err() - return - case gblPinInfo, more := <-sf.In: - if !more { - return - } - ok, err := statusReached(fp.Target, gblPinInfo, fp.Limit) - if err != nil { - sf.Err <- err - return - } - - sf.Out <- gblPinInfo - if !ok { - continue - } - return - } - } -} - -func (sf *statusFilter) pollStatus(ctx context.Context, c Client, fp StatusFilterParams) { - ticker := time.NewTicker(fp.CheckFreq) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - sf.Err <- ctx.Err() - return - case <-ticker.C: - gblPinInfo, err := c.Status(ctx, fp.Cid, fp.Local) - if err != nil { - sf.Err <- err - return - } - logger.Debugf("pollStatus: status: %#v", gblPinInfo) - sf.In <- gblPinInfo - case <-sf.Done: - close(sf.In) - return - } - } -} - -func statusReached(target api.TrackerStatus, gblPinInfo api.GlobalPinInfo, limit int) (bool, error) { - // Specific case: return error if there are errors - for _, pinInfo := range gblPinInfo.PeerMap { - switch pinInfo.Status { - case api.TrackerStatusUndefined, - api.TrackerStatusClusterError, - api.TrackerStatusPinError, - api.TrackerStatusUnpinError: - return false, fmt.Errorf("error has occurred while attempting to reach status: %s", target.String()) - } - } - - // Specific case: when limit it set, just count how many targets we - // reached. - if limit > 0 { - total := 0 - for _, pinInfo := range gblPinInfo.PeerMap { - if pinInfo.Status == target { - total++ - } - } - return total >= limit, nil - } - - // General case: all statuses should be the target. - // Specific case: when looking for Pinned, ignore status remote. - for _, pinInfo := range gblPinInfo.PeerMap { - if pinInfo.Status == api.TrackerStatusRemote && target == api.TrackerStatusPinned { - continue - } - if pinInfo.Status == target { - continue - } - return false, nil - } - - // All statuses are the target, as otherwise we would have returned - // false. - return true, nil -} - -// logic drawn from go-ipfs-cmds/cli/parse.go: appendFile -func makeSerialFile(fpath string, params api.AddParams) (string, files.Node, error) { - if fpath == "." { - cwd, err := os.Getwd() - if err != nil { - return "", nil, err - } - cwd, err = filepath.EvalSymlinks(cwd) - if err != nil { - return "", nil, err - } - fpath = cwd - } - - fpath = filepath.ToSlash(filepath.Clean(fpath)) - - stat, err := os.Lstat(fpath) - if err != nil { - return "", nil, err - } - - if stat.IsDir() { - if !params.Recursive { - return "", nil, fmt.Errorf("%s is a directory, but Recursive option is not set", fpath) - } - } - - sf, err := files.NewSerialFile(fpath, params.Hidden, stat) - return path.Base(fpath), sf, err -} - -// Add imports files to the cluster from the given paths. A path can -// either be a local filesystem location or an web url (http:// or https://). -// In the latter case, the destination will be downloaded with a GET request. -// The AddParams allow to control different options, like enabling the -// sharding the resulting DAG across the IPFS daemons of multiple cluster -// peers. 
The output channel will receive regular updates as the adding -// process progresses. -func (c *defaultClient) Add( - ctx context.Context, - paths []string, - params api.AddParams, - out chan<- api.AddedOutput, -) error { - ctx, span := trace.StartSpan(ctx, "client/Add") - defer span.End() - - addFiles := make([]files.DirEntry, len(paths)) - for i, p := range paths { - u, err := url.Parse(p) - if err != nil { - close(out) - return fmt.Errorf("error parsing path: %s", err) - } - var name string - var addFile files.Node - if strings.HasPrefix(u.Scheme, "http") { - addFile = files.NewWebFile(u) - name = path.Base(u.Path) - } else { - if params.NoCopy { - close(out) - return fmt.Errorf("nocopy option is only valid for URLs") - } - name, addFile, err = makeSerialFile(p, params) - if err != nil { - close(out) - return err - } - } - addFiles[i] = files.FileEntry(name, addFile) - } - - sliceFile := files.NewSliceDirectory(addFiles) - // If `form` is set to true, the multipart data will have - // a Content-Type of 'multipart/form-data', if `form` is false, - // the Content-Type will be 'multipart/mixed'. - return c.AddMultiFile(ctx, files.NewMultiFileReader(sliceFile, true), params, out) -} - -// AddMultiFile imports new files from a MultiFileReader. See Add(). -func (c *defaultClient) AddMultiFile( - ctx context.Context, - multiFileR *files.MultiFileReader, - params api.AddParams, - out chan<- api.AddedOutput, -) error { - ctx, span := trace.StartSpan(ctx, "client/AddMultiFile") - defer span.End() - - defer close(out) - - headers := make(map[string]string) - headers["Content-Type"] = "multipart/form-data; boundary=" + multiFileR.Boundary() - - // This method must run with StreamChannels set. - params.StreamChannels = true - queryStr, err := params.ToQueryString() - if err != nil { - return err - } - - // our handler decodes an AddedOutput and puts it - // in the out channel. 
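-	// Note: when out is nil the handler skips decoding entirely and
-	// forwards nothing; otherwise every streamed AddedOutput is relayed
-	// to the caller's channel as it arrives.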
- handler := func(dec *json.Decoder) error { - if out == nil { - return nil - } - var obj api.AddedOutput - err := dec.Decode(&obj) - if err != nil { - return err - } - out <- obj - return nil - } - - err = c.doStream(ctx, - "POST", - "/add?"+queryStr, - headers, - multiFileR, - handler, - ) - return err -} diff --git a/packages/networking/ipfs-cluster/api/rest/client/methods_test.go b/packages/networking/ipfs-cluster/api/rest/client/methods_test.go deleted file mode 100644 index 27badf2..0000000 --- a/packages/networking/ipfs-cluster/api/rest/client/methods_test.go +++ /dev/null @@ -1,905 +0,0 @@ -package client - -import ( - "context" - "errors" - "sync" - "testing" - "time" - - types "github.com/ipfs-cluster/ipfs-cluster/api" - rest "github.com/ipfs-cluster/ipfs-cluster/api/rest" - test "github.com/ipfs-cluster/ipfs-cluster/test" - - rpc "github.com/libp2p/go-libp2p-gorpc" - peer "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" -) - -func testClients(t *testing.T, api *rest.API, f func(*testing.T, Client)) { - t.Run("in-parallel", func(t *testing.T) { - t.Run("libp2p", func(t *testing.T) { - t.Parallel() - f(t, testClientLibp2p(t, api)) - }) - t.Run("http", func(t *testing.T) { - t.Parallel() - f(t, testClientHTTP(t, api)) - }) - }) -} - -func TestVersion(t *testing.T) { - ctx := context.Background() - api := testAPI(t) - defer shutdown(api) - - testF := func(t *testing.T, c Client) { - v, err := c.Version(ctx) - if err != nil || v.Version == "" { - t.Logf("%+v", v) - t.Log(err) - t.Error("expected something in version") - } - } - - testClients(t, api, testF) -} - -func TestID(t *testing.T) { - ctx := context.Background() - api := testAPI(t) - defer shutdown(api) - - testF := func(t *testing.T, c Client) { - id, err := c.ID(ctx) - if err != nil { - t.Fatal(err) - } - if id.ID == "" { - t.Error("bad id") - } - } - - testClients(t, api, testF) -} - -func TestPeers(t *testing.T) { - ctx := context.Background() - api := testAPI(t) - defer shutdown(api) - - testF := func(t *testing.T, c Client) { - out := make(chan types.ID, 10) - err := c.Peers(ctx, out) - if err != nil { - t.Fatal(err) - } - if len(out) == 0 { - t.Error("expected some peers") - } - } - - testClients(t, api, testF) -} - -func TestPeersWithError(t *testing.T) { - ctx := context.Background() - api := testAPI(t) - defer shutdown(api) - - testF := func(t *testing.T, c Client) { - addr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/44444") - var _ = c - c, _ = NewDefaultClient(&Config{APIAddr: addr, DisableKeepAlives: true}) - out := make(chan types.ID, 10) - err := c.Peers(ctx, out) - if err == nil { - t.Fatal("expected error") - } - if len(out) > 0 { - t.Fatal("expected no ids") - } - } - - testClients(t, api, testF) -} - -func TestPeerAdd(t *testing.T) { - ctx := context.Background() - api := testAPI(t) - defer shutdown(api) - - testF := func(t *testing.T, c Client) { - id, err := c.PeerAdd(ctx, test.PeerID1) - if err != nil { - t.Fatal(err) - } - if id.ID != test.PeerID1 { - t.Error("bad peer") - } - } - - testClients(t, api, testF) -} - -func TestPeerRm(t *testing.T) { - ctx := context.Background() - api := testAPI(t) - defer shutdown(api) - - testF := func(t *testing.T, c Client) { - err := c.PeerRm(ctx, test.PeerID1) - if err != nil { - t.Fatal(err) - } - } - - testClients(t, api, testF) -} - -func TestPin(t *testing.T) { - ctx := context.Background() - api := testAPI(t) - defer shutdown(api) - - testF := func(t *testing.T, c Client) { - opts := types.PinOptions{ - ReplicationFactorMin: 6, - 
ReplicationFactorMax: 7, - Name: "hello there", - } - _, err := c.Pin(ctx, test.Cid1, opts) - if err != nil { - t.Fatal(err) - } - } - - testClients(t, api, testF) -} - -func TestUnpin(t *testing.T) { - ctx := context.Background() - api := testAPI(t) - defer shutdown(api) - - testF := func(t *testing.T, c Client) { - _, err := c.Unpin(ctx, test.Cid1) - if err != nil { - t.Fatal(err) - } - } - - testClients(t, api, testF) -} - -type pathCase struct { - path string - wantErr bool - expectedCid string -} - -var pathTestCases = []pathCase{ - { - test.CidResolved.String(), - false, - test.CidResolved.String(), - }, - { - test.PathIPFS1, - false, - "QmaNJ5acV31sx8jq626qTpAWW4DXKw34aGhx53dECLvXbY", - }, - { - test.PathIPFS2, - false, - test.CidResolved.String(), - }, - { - test.PathIPNS1, - false, - test.CidResolved.String(), - }, - { - test.PathIPLD1, - false, - "QmaNJ5acV31sx8jq626qTpAWW4DXKw34aGhx53dECLvXbY", - }, - { - test.InvalidPath1, - true, - "", - }, -} - -func TestPinPath(t *testing.T) { - ctx := context.Background() - api := testAPI(t) - defer shutdown(api) - - opts := types.PinOptions{ - ReplicationFactorMin: 6, - ReplicationFactorMax: 7, - Name: "hello there", - UserAllocations: []peer.ID{test.PeerID1, test.PeerID2}, - } - - testF := func(t *testing.T, c Client) { - for _, testCase := range pathTestCases { - ec, _ := types.DecodeCid(testCase.expectedCid) - resultantPin := types.PinWithOpts(ec, opts) - p := testCase.path - pin, err := c.PinPath(ctx, p, opts) - if err != nil { - if testCase.wantErr { - continue - } - t.Fatalf("unexpected error %s: %s", p, err) - } - - if !pin.Equals(resultantPin) { - t.Errorf("expected different pin: %s", p) - t.Errorf("expected: %+v", resultantPin) - t.Errorf("actual: %+v", pin) - } - - } - } - - testClients(t, api, testF) -} - -func TestUnpinPath(t *testing.T) { - ctx := context.Background() - api := testAPI(t) - defer shutdown(api) - - testF := func(t *testing.T, c Client) { - for _, testCase := range pathTestCases { - p := testCase.path - pin, err := c.UnpinPath(ctx, p) - if err != nil { - if testCase.wantErr { - continue - } - t.Fatalf("unepected error %s: %s", p, err) - } - - if pin.Cid.String() != testCase.expectedCid { - t.Errorf("bad resolved Cid: %s, %s", p, pin.Cid) - } - } - } - - testClients(t, api, testF) -} - -func TestAllocations(t *testing.T) { - ctx := context.Background() - api := testAPI(t) - defer shutdown(api) - - testF := func(t *testing.T, c Client) { - pins := make(chan types.Pin) - n := 0 - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - for range pins { - n++ - } - }() - - err := c.Allocations(ctx, types.DataType|types.MetaType, pins) - if err != nil { - t.Fatal(err) - } - - wg.Wait() - if n == 0 { - t.Error("should be some pins") - } - } - - testClients(t, api, testF) -} - -func TestAllocation(t *testing.T) { - ctx := context.Background() - api := testAPI(t) - defer shutdown(api) - - testF := func(t *testing.T, c Client) { - pin, err := c.Allocation(ctx, test.Cid1) - if err != nil { - t.Fatal(err) - } - if !pin.Cid.Equals(test.Cid1) { - t.Error("should be same pin") - } - } - - testClients(t, api, testF) -} - -func TestStatus(t *testing.T) { - ctx := context.Background() - api := testAPI(t) - defer shutdown(api) - - testF := func(t *testing.T, c Client) { - pin, err := c.Status(ctx, test.Cid1, false) - if err != nil { - t.Fatal(err) - } - if !pin.Cid.Equals(test.Cid1) { - t.Error("should be same pin") - } - } - - testClients(t, api, testF) -} - -func TestStatusCids(t *testing.T) { - ctx := 
context.Background() - api := testAPI(t) - defer shutdown(api) - - testF := func(t *testing.T, c Client) { - out := make(chan types.GlobalPinInfo) - - go func() { - err := c.StatusCids(ctx, []types.Cid{test.Cid1}, false, out) - if err != nil { - t.Error(err) - } - }() - - pins := collectGlobalPinInfos(t, out) - if len(pins) != 1 { - t.Fatal("wrong number of pins returned") - } - if !pins[0].Cid.Equals(test.Cid1) { - t.Error("should be same pin") - } - } - - testClients(t, api, testF) -} - -func collectGlobalPinInfos(t *testing.T, out <-chan types.GlobalPinInfo) []types.GlobalPinInfo { - t.Helper() - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - var gpis []types.GlobalPinInfo - for { - select { - case <-ctx.Done(): - t.Error(ctx.Err()) - return gpis - case gpi, ok := <-out: - if !ok { - return gpis - } - gpis = append(gpis, gpi) - } - } -} - -func TestStatusAll(t *testing.T) { - ctx := context.Background() - api := testAPI(t) - defer shutdown(api) - - testF := func(t *testing.T, c Client) { - out := make(chan types.GlobalPinInfo) - go func() { - err := c.StatusAll(ctx, 0, false, out) - if err != nil { - t.Error(err) - } - }() - pins := collectGlobalPinInfos(t, out) - - if len(pins) == 0 { - t.Error("there should be some pins") - } - - out2 := make(chan types.GlobalPinInfo) - go func() { - err := c.StatusAll(ctx, 0, true, out2) - if err != nil { - t.Error(err) - } - }() - pins = collectGlobalPinInfos(t, out2) - - if len(pins) != 2 { - t.Error("there should be two pins") - } - - out3 := make(chan types.GlobalPinInfo) - go func() { - err := c.StatusAll(ctx, types.TrackerStatusPinning, false, out3) - if err != nil { - t.Error(err) - } - }() - pins = collectGlobalPinInfos(t, out3) - - if len(pins) != 1 { - t.Error("there should be one pin") - } - - out4 := make(chan types.GlobalPinInfo) - go func() { - err := c.StatusAll(ctx, types.TrackerStatusPinned|types.TrackerStatusError, false, out4) - if err != nil { - t.Error(err) - } - }() - pins = collectGlobalPinInfos(t, out4) - - if len(pins) != 2 { - t.Error("there should be two pins") - } - - out5 := make(chan types.GlobalPinInfo, 1) - err := c.StatusAll(ctx, 1<<25, false, out5) - if err == nil { - t.Error("expected an error") - } - } - - testClients(t, api, testF) -} - -func TestRecover(t *testing.T) { - ctx := context.Background() - api := testAPI(t) - defer shutdown(api) - - testF := func(t *testing.T, c Client) { - pin, err := c.Recover(ctx, test.Cid1, false) - if err != nil { - t.Fatal(err) - } - if !pin.Cid.Equals(test.Cid1) { - t.Error("should be same pin") - } - } - - testClients(t, api, testF) -} - -func TestRecoverAll(t *testing.T) { - ctx := context.Background() - api := testAPI(t) - defer shutdown(api) - - testF := func(t *testing.T, c Client) { - out := make(chan types.GlobalPinInfo, 10) - err := c.RecoverAll(ctx, true, out) - if err != nil { - t.Fatal(err) - } - - out2 := make(chan types.GlobalPinInfo, 10) - err = c.RecoverAll(ctx, false, out2) - if err != nil { - t.Fatal(err) - } - } - - testClients(t, api, testF) -} - -func TestAlerts(t *testing.T) { - ctx := context.Background() - api := testAPI(t) - defer shutdown(api) - - testF := func(t *testing.T, c Client) { - alerts, err := c.Alerts(ctx) - if err != nil { - t.Fatal(err) - } - if len(alerts) != 1 { - t.Fatal("expected 1 alert") - } - pID2 := test.PeerID2.String() - if alerts[0].Peer != test.PeerID2 { - t.Errorf("expected an alert from %s", pID2) - } - } - - testClients(t, api, testF) -} - -func TestGetConnectGraph(t *testing.T) 
{ - ctx := context.Background() - api := testAPI(t) - defer shutdown(api) - - testF := func(t *testing.T, c Client) { - cg, err := c.GetConnectGraph(ctx) - if err != nil { - t.Fatal(err) - } - if len(cg.IPFSLinks) != 3 || len(cg.ClusterLinks) != 3 || - len(cg.ClustertoIPFS) != 3 { - t.Fatal("Bad graph") - } - } - - testClients(t, api, testF) -} - -func TestMetrics(t *testing.T) { - ctx := context.Background() - api := testAPI(t) - defer shutdown(api) - - testF := func(t *testing.T, c Client) { - m, err := c.Metrics(ctx, "somemetricstype") - if err != nil { - t.Fatal(err) - } - - if len(m) == 0 { - t.Fatal("No metrics found") - } - } - - testClients(t, api, testF) -} - -func TestMetricNames(t *testing.T) { - ctx := context.Background() - api := testAPI(t) - defer shutdown(api) - - testF := func(t *testing.T, c Client) { - m, err := c.MetricNames(ctx) - if err != nil { - t.Fatal(err) - } - - if len(m) == 0 { - t.Fatal("No metric names found") - } - } - - testClients(t, api, testF) -} - -type waitService struct { - l sync.Mutex - pinStart time.Time -} - -func (wait *waitService) Pin(ctx context.Context, in types.Pin, out *types.Pin) error { - wait.l.Lock() - defer wait.l.Unlock() - wait.pinStart = time.Now() - *out = in - return nil -} - -func (wait *waitService) Status(ctx context.Context, in types.Cid, out *types.GlobalPinInfo) error { - wait.l.Lock() - defer wait.l.Unlock() - if time.Now().After(wait.pinStart.Add(5 * time.Second)) { //pinned - *out = types.GlobalPinInfo{ - Cid: in, - PeerMap: map[string]types.PinInfoShort{ - test.PeerID1.String(): { - Status: types.TrackerStatusPinned, - TS: wait.pinStart, - }, - test.PeerID2.String(): { - Status: types.TrackerStatusPinned, - TS: wait.pinStart, - }, - test.PeerID3.String(): { - Status: types.TrackerStatusPinning, - TS: wait.pinStart, - }, - test.PeerID3.String(): { - Status: types.TrackerStatusRemote, - TS: wait.pinStart, - }, - }, - } - } else { // pinning - *out = types.GlobalPinInfo{ - Cid: in, - PeerMap: map[string]types.PinInfoShort{ - test.PeerID1.String(): { - Status: types.TrackerStatusPinning, - TS: wait.pinStart, - }, - test.PeerID2.String(): { - Status: types.TrackerStatusPinned, - TS: wait.pinStart, - }, - test.PeerID3.String(): { - Status: types.TrackerStatusPinning, - TS: wait.pinStart, - }, - test.PeerID3.String(): { - Status: types.TrackerStatusRemote, - TS: wait.pinStart, - }, - }, - } - } - - return nil -} - -func (wait *waitService) PinGet(ctx context.Context, in types.Cid, out *types.Pin) error { - p := types.PinCid(in) - p.ReplicationFactorMin = 2 - p.ReplicationFactorMax = 3 - *out = p - return nil -} - -type waitServiceUnpin struct { - l sync.Mutex - unpinStart time.Time -} - -func (wait *waitServiceUnpin) Unpin(ctx context.Context, in types.Pin, out *types.Pin) error { - wait.l.Lock() - defer wait.l.Unlock() - wait.unpinStart = time.Now() - return nil -} - -func (wait *waitServiceUnpin) Status(ctx context.Context, in types.Cid, out *types.GlobalPinInfo) error { - wait.l.Lock() - defer wait.l.Unlock() - if time.Now().After(wait.unpinStart.Add(5 * time.Second)) { //unpinned - *out = types.GlobalPinInfo{ - Cid: in, - PeerMap: map[string]types.PinInfoShort{ - test.PeerID1.String(): { - Status: types.TrackerStatusUnpinned, - TS: wait.unpinStart, - }, - test.PeerID2.String(): { - Status: types.TrackerStatusUnpinned, - TS: wait.unpinStart, - }, - }, - } - } else { // pinning - *out = types.GlobalPinInfo{ - Cid: in, - PeerMap: map[string]types.PinInfoShort{ - test.PeerID1.String(): { - Status: types.TrackerStatusUnpinning, 
- TS: wait.unpinStart, - }, - test.PeerID2.String(): { - Status: types.TrackerStatusUnpinning, - TS: wait.unpinStart, - }, - }, - } - } - - return nil -} - -func (wait *waitServiceUnpin) PinGet(ctx context.Context, in types.Cid, out *types.Pin) error { - return errors.New("not found") -} - -func TestWaitForPin(t *testing.T) { - ctx := context.Background() - tapi := testAPI(t) - defer shutdown(tapi) - - rpcS := rpc.NewServer(nil, "wait") - rpcC := rpc.NewClientWithServer(nil, "wait", rpcS) - err := rpcS.RegisterName("Cluster", &waitService{}) - if err != nil { - t.Fatal(err) - } - - tapi.SetClient(rpcC) - - testF := func(t *testing.T, c Client) { - - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - fp := StatusFilterParams{ - Cid: test.Cid1, - Local: false, - Target: types.TrackerStatusPinned, - CheckFreq: time.Second, - } - start := time.Now() - - st, err := WaitFor(ctx, c, fp) - if err != nil { - t.Error(err) - return - } - if time.Since(start) <= 5*time.Second { - t.Error("slow pin should have taken at least 5 seconds") - return - } - - totalPinned := 0 - for _, pi := range st.PeerMap { - if pi.Status == types.TrackerStatusPinned { - totalPinned++ - } - } - if totalPinned < 2 { // repl factor min - t.Error("pin info should show the item is pinnedin two places at least") - } - }() - _, err := c.Pin(ctx, test.Cid1, types.PinOptions{ReplicationFactorMin: 0, ReplicationFactorMax: 0, Name: "test", ShardSize: 0}) - if err != nil { - t.Fatal(err) - } - wg.Wait() - } - - testClients(t, tapi, testF) -} - -func TestWaitForUnpin(t *testing.T) { - ctx := context.Background() - tapi := testAPI(t) - defer shutdown(tapi) - - rpcS := rpc.NewServer(nil, "wait") - rpcC := rpc.NewClientWithServer(nil, "wait", rpcS) - err := rpcS.RegisterName("Cluster", &waitServiceUnpin{}) - if err != nil { - t.Fatal(err) - } - - tapi.SetClient(rpcC) - - testF := func(t *testing.T, c Client) { - - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - fp := StatusFilterParams{ - Cid: test.Cid1, - Local: false, - Target: types.TrackerStatusUnpinned, - CheckFreq: time.Second, - } - start := time.Now() - - st, err := WaitFor(ctx, c, fp) - if err != nil { - t.Error(err) - return - } - if time.Since(start) <= 5*time.Second { - t.Error("slow unpin should have taken at least 5 seconds") - return - } - - for _, pi := range st.PeerMap { - if pi.Status != types.TrackerStatusUnpinned { - t.Error("the item should have been unpinned everywhere") - } - } - }() - _, err := c.Unpin(ctx, test.Cid1) - if err != nil { - t.Fatal(err) - } - wg.Wait() - } - - testClients(t, tapi, testF) -} - -func TestAddMultiFile(t *testing.T) { - ctx := context.Background() - api := testAPI(t) - defer api.Shutdown(ctx) - - sth := test.NewShardingTestHelper() - defer sth.Clean(t) - - testF := func(t *testing.T, c Client) { - mfr, closer := sth.GetTreeMultiReader(t) - defer closer.Close() - - p := types.AddParams{ - PinOptions: types.PinOptions{ - ReplicationFactorMin: -1, - ReplicationFactorMax: -1, - Name: "test something", - ShardSize: 1024, - }, - Shard: false, - Format: "", - IPFSAddParams: types.IPFSAddParams{ - Chunker: "", - RawLeaves: false, - }, - Hidden: false, - StreamChannels: true, - } - - out := make(chan types.AddedOutput, 1) - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - for v := range out { - 
t.Logf("output: Name: %s. Hash: %s", v.Name, v.Cid) - } - }() - - err := c.AddMultiFile(ctx, mfr, p, out) - if err != nil { - t.Fatal(err) - } - - wg.Wait() - } - - testClients(t, api, testF) -} - -func TestRepoGC(t *testing.T) { - ctx := context.Background() - api := testAPI(t) - defer shutdown(api) - - testF := func(t *testing.T, c Client) { - globalGC, err := c.RepoGC(ctx, false) - if err != nil { - t.Fatal(err) - } - - if globalGC.PeerMap == nil { - t.Fatal("expected a non-nil peer map") - } - - for _, gc := range globalGC.PeerMap { - if gc.Peer == "" { - t.Error("bad id") - } - if gc.Error != "" { - t.Error("did not expect any error") - } - if gc.Keys == nil { - t.Error("expected a non-nil array of IPFSRepoGC") - } else { - if !gc.Keys[0].Key.Equals(test.Cid1) { - t.Errorf("expected a different cid, expected: %s, found: %s", test.Cid1, gc.Keys[0].Key) - } - } - } - } - - testClients(t, api, testF) -} diff --git a/packages/networking/ipfs-cluster/api/rest/client/request.go b/packages/networking/ipfs-cluster/api/rest/client/request.go deleted file mode 100644 index a08e610..0000000 --- a/packages/networking/ipfs-cluster/api/rest/client/request.go +++ /dev/null @@ -1,170 +0,0 @@ -package client - -import ( - "context" - "encoding/json" - "errors" - "io" - "net/http" - "strings" - - "github.com/ipfs-cluster/ipfs-cluster/api" - "go.uber.org/multierr" - - "go.opencensus.io/trace" -) - -type responseDecoder func(d *json.Decoder) error - -func (c *defaultClient) do( - ctx context.Context, - method, path string, - headers map[string]string, - body io.Reader, - obj interface{}, -) error { - - resp, err := c.doRequest(ctx, method, path, headers, body) - if err != nil { - return api.Error{Code: 0, Message: err.Error()} - } - return c.handleResponse(resp, obj) -} - -func (c *defaultClient) doStream( - ctx context.Context, - method, path string, - headers map[string]string, - body io.Reader, - outHandler responseDecoder, -) error { - - resp, err := c.doRequest(ctx, method, path, headers, body) - if err != nil { - return api.Error{Code: 0, Message: err.Error()} - } - return c.handleStreamResponse(resp, outHandler) -} - -func (c *defaultClient) doRequest( - ctx context.Context, - method, path string, - headers map[string]string, - body io.Reader, -) (*http.Response, error) { - span := trace.FromContext(ctx) - span.AddAttributes( - trace.StringAttribute("method", method), - trace.StringAttribute("path", path), - ) - defer span.End() - - urlpath := c.net + "://" + c.hostname + "/" + strings.TrimPrefix(path, "/") - logger.Debugf("%s: %s", method, urlpath) - - r, err := http.NewRequestWithContext(ctx, method, urlpath, body) - if err != nil { - return nil, err - } - if c.config.DisableKeepAlives { - r.Close = true - } - - if c.config.Username != "" { - r.SetBasicAuth(c.config.Username, c.config.Password) - } - - for k, v := range headers { - r.Header.Set(k, v) - } - - if body != nil { - r.ContentLength = -1 // this lets go use "chunked". - } - - ctx = trace.NewContext(ctx, span) - r = r.WithContext(ctx) - - return c.client.Do(r) -} -func (c *defaultClient) handleResponse(resp *http.Response, obj interface{}) error { - body, err := io.ReadAll(resp.Body) - resp.Body.Close() - - if err != nil { - return api.Error{Code: resp.StatusCode, Message: err.Error()} - } - logger.Debugf("Response body: %s", body) - - switch { - case resp.StatusCode == http.StatusAccepted: - logger.Debug("Request accepted") - case resp.StatusCode == http.StatusNoContent: - logger.Debug("Request succeeded. 
Response has no content") - default: - if resp.StatusCode > 399 && resp.StatusCode < 600 { - var apiErr api.Error - err = json.Unmarshal(body, &apiErr) - if err != nil { - // not json. 404s etc. - return api.Error{ - Code: resp.StatusCode, - Message: string(body), - } - } - return apiErr - } - err = json.Unmarshal(body, obj) - if err != nil { - return api.Error{ - Code: resp.StatusCode, - Message: err.Error(), - } - } - } - return nil -} - -func (c *defaultClient) handleStreamResponse(resp *http.Response, handler responseDecoder) error { - if resp.StatusCode > 399 && resp.StatusCode < 600 { - return c.handleResponse(resp, nil) - } - - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { - return api.Error{ - Code: resp.StatusCode, - Message: "expected streaming response with code 200/204", - } - } - - dec := json.NewDecoder(resp.Body) - for { - err := handler(dec) - if err == io.EOF { - // we need to check trailers - break - } - if err != nil { - logger.Error(err) - return err - } - } - - trailerErrs := resp.Trailer.Values("X-Stream-Error") - var err error - for _, trailerErr := range trailerErrs { - if trailerErr != "" { - err = multierr.Append(err, errors.New(trailerErr)) - } - } - - if err != nil { - return api.Error{ - Code: 500, - Message: err.Error(), - } - } - return nil -} diff --git a/packages/networking/ipfs-cluster/api/rest/client/transports.go b/packages/networking/ipfs-cluster/api/rest/client/transports.go deleted file mode 100644 index d183b8a..0000000 --- a/packages/networking/ipfs-cluster/api/rest/client/transports.go +++ /dev/null @@ -1,129 +0,0 @@ -package client - -import ( - "context" - "crypto/tls" - "errors" - "net" - "net/http" - "time" - - libp2p "github.com/libp2p/go-libp2p" - p2phttp "github.com/libp2p/go-libp2p-http" - peer "github.com/libp2p/go-libp2p/core/peer" - peerstore "github.com/libp2p/go-libp2p/core/peerstore" - noise "github.com/libp2p/go-libp2p/p2p/security/noise" - libp2ptls "github.com/libp2p/go-libp2p/p2p/security/tls" - tcp "github.com/libp2p/go-libp2p/p2p/transport/tcp" - websocket "github.com/libp2p/go-libp2p/p2p/transport/websocket" - madns "github.com/multiformats/go-multiaddr-dns" - manet "github.com/multiformats/go-multiaddr/net" - "github.com/tv42/httpunix" -) - -// This is essentially a http.DefaultTransport. We should not mess -// with it since it's a global variable, and we don't know who else uses -// it, so we create our own. -// TODO: Allow more configuration options. 
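// Illustrative note (not from the original file): defaultTransport sets
// c.net = "http"; enableTLS, enableLibp2p and enableUnix below override it,
// and doRequest (in request.go above) builds request URLs from it as
// c.net + "://" + c.hostname + "/" + path. A minimal sketch:
//
//     c.defaultTransport() // c.net == "http"
//     _ = c.enableTLS()    // c.net == "https", TLS client config installed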
-func (c *defaultClient) defaultTransport() { - c.transport = &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).DialContext, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - } - c.net = "http" -} - -func (c *defaultClient) enableLibp2p() error { - c.defaultTransport() - - pinfo, err := peer.AddrInfoFromP2pAddr(c.config.APIAddr) - if err != nil { - return err - } - - if len(pinfo.Addrs) == 0 { - return errors.New("APIAddr only includes a Peer ID") - } - - if c.config.ProtectorKey != nil && len(c.config.ProtectorKey) > 0 { - if len(c.config.ProtectorKey) != 32 { - return errors.New("length of ProtectorKey should be 32") - } - } - - transports := libp2p.DefaultTransports - if c.config.ProtectorKey != nil { - transports = libp2p.ChainOptions( - libp2p.NoTransports, - libp2p.Transport(tcp.NewTCPTransport), - libp2p.Transport(websocket.New), - ) - } - - h, err := libp2p.New( - libp2p.PrivateNetwork(c.config.ProtectorKey), - libp2p.Security(noise.ID, noise.New), - libp2p.Security(libp2ptls.ID, libp2ptls.New), - transports, - ) - if err != nil { - return err - } - - ctx, cancel := context.WithTimeout(c.ctx, ResolveTimeout) - defer cancel() - resolvedAddrs, err := madns.Resolve(ctx, pinfo.Addrs[0]) - if err != nil { - return err - } - - h.Peerstore().AddAddrs(pinfo.ID, resolvedAddrs, peerstore.PermanentAddrTTL) - c.transport.RegisterProtocol("libp2p", p2phttp.NewTransport(h)) - c.net = "libp2p" - c.p2p = h - c.hostname = pinfo.ID.String() - return nil -} - -func (c *defaultClient) enableTLS() error { - c.defaultTransport() - // based on https://github.com/denji/golang-tls - c.transport.TLSClientConfig = &tls.Config{ - MinVersion: tls.VersionTLS12, - CurvePreferences: []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256}, - PreferServerCipherSuites: true, - CipherSuites: []uint16{ - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - }, - InsecureSkipVerify: c.config.NoVerifyCert, - } - c.net = "https" - return nil -} - -func (c *defaultClient) enableUnix() error { - c.defaultTransport() - unixTransport := &httpunix.Transport{ - DialTimeout: time.Second, - } - _, addr, err := manet.DialArgs(c.config.APIAddr) - if err != nil { - return err - } - unixTransport.RegisterLocation("restapi", addr) - c.transport.RegisterProtocol(httpunix.Scheme, unixTransport) - c.net = httpunix.Scheme - c.hostname = "restapi" - return nil -} diff --git a/packages/networking/ipfs-cluster/api/rest/config.go b/packages/networking/ipfs-cluster/api/rest/config.go deleted file mode 100644 index d5e0393..0000000 --- a/packages/networking/ipfs-cluster/api/rest/config.go +++ /dev/null @@ -1,130 +0,0 @@ -package rest - -import ( - "net/http" - "time" - - ma "github.com/multiformats/go-multiaddr" - - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/api/common" -) - -const configKey = "restapi" -const envConfigKey = "cluster_restapi" - -const minMaxHeaderBytes = 4096 - -// Default values for Config. -const ( - DefaultReadTimeout = 0 - DefaultReadHeaderTimeout = 5 * time.Second - DefaultWriteTimeout = 0 - DefaultIdleTimeout = 120 * time.Second - DefaultMaxHeaderBytes = minMaxHeaderBytes -) - -// Default values for Config. 
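// Illustrative usage of these defaults (not from the original file; relies
// only on NewConfig, Default and defaultFunc as defined below):
//
//     cfg := NewConfig()
//     _ = cfg.Default() // HTTPListenAddr = /ip4/127.0.0.1/tcp/9094,
//                       // IdleTimeout = 120s, MaxHeaderBytes = 4096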
-var ( - // DefaultHTTPListenAddrs contains default listen addresses for the HTTP API. - DefaultHTTPListenAddrs = []string{"/ip4/127.0.0.1/tcp/9094"} - DefaultHeaders = map[string][]string{} -) - -// CORS defaults. -var ( - DefaultCORSAllowedOrigins = []string{"*"} - DefaultCORSAllowedMethods = []string{ - http.MethodGet, - } - // rs/cors this will set sensible defaults when empty: - // {"Origin", "Accept", "Content-Type", "X-Requested-With"} - DefaultCORSAllowedHeaders = []string{} - DefaultCORSExposedHeaders = []string{ - "Content-Type", - "X-Stream-Output", - "X-Chunked-Output", - "X-Content-Length", - } - DefaultCORSAllowCredentials = true - DefaultCORSMaxAge time.Duration // 0. Means always. -) - -// Config fully implements the config.ComponentConfig interface. Use -// NewConfig() to instantiate. Config embeds a common.Config object. -type Config struct { - common.Config -} - -// NewConfig creates a Config object setting the necessary meta-fields in the -// common.Config embedded object. -func NewConfig() *Config { - cfg := Config{} - cfg.Config.ConfigKey = configKey - cfg.EnvConfigKey = envConfigKey - cfg.Logger = logger - cfg.RequestLogger = apiLogger - cfg.DefaultFunc = defaultFunc - cfg.APIErrorFunc = func(err error, status int) error { - return &api.Error{ - Code: status, - Message: err.Error(), - } - } - return &cfg -} - -// ConfigKey returns a human-friendly identifier for this type of -// Config. -func (cfg *Config) ConfigKey() string { - return configKey -} - -// Default initializes this Config with working values. -func (cfg *Config) Default() error { - return defaultFunc(&cfg.Config) -} - -// Sets all defaults for this config. -func defaultFunc(cfg *common.Config) error { - // http - addrs := make([]ma.Multiaddr, 0, len(DefaultHTTPListenAddrs)) - for _, def := range DefaultHTTPListenAddrs { - httpListen, err := ma.NewMultiaddr(def) - if err != nil { - return err - } - addrs = append(addrs, httpListen) - } - cfg.HTTPListenAddr = addrs - cfg.PathSSLCertFile = "" - cfg.PathSSLKeyFile = "" - cfg.ReadTimeout = DefaultReadTimeout - cfg.ReadHeaderTimeout = DefaultReadHeaderTimeout - cfg.WriteTimeout = DefaultWriteTimeout - cfg.IdleTimeout = DefaultIdleTimeout - cfg.MaxHeaderBytes = DefaultMaxHeaderBytes - - // libp2p - cfg.ID = "" - cfg.PrivateKey = nil - cfg.Libp2pListenAddr = nil - - // Auth - cfg.BasicAuthCredentials = nil - - // Logs - cfg.HTTPLogFile = "" - - // Headers - cfg.Headers = DefaultHeaders - - cfg.CORSAllowedOrigins = DefaultCORSAllowedOrigins - cfg.CORSAllowedMethods = DefaultCORSAllowedMethods - cfg.CORSAllowedHeaders = DefaultCORSAllowedHeaders - cfg.CORSExposedHeaders = DefaultCORSExposedHeaders - cfg.CORSAllowCredentials = DefaultCORSAllowCredentials - cfg.CORSMaxAge = DefaultCORSMaxAge - - return nil -} diff --git a/packages/networking/ipfs-cluster/api/rest/restapi.go b/packages/networking/ipfs-cluster/api/rest/restapi.go deleted file mode 100644 index 14afc2e..0000000 --- a/packages/networking/ipfs-cluster/api/rest/restapi.go +++ /dev/null @@ -1,856 +0,0 @@ -// Package rest implements an IPFS Cluster API component. It provides -// a REST-ish API to interact with Cluster. -// -// The implented API is based on the common.API component (refer to module -// description there). The only thing this module does is to provide route -// handling for the otherwise common API component. 
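// Illustrative requests against the routes registered below (not from the
// original file):
//
//     GET    /pins        -> statusAllHandler (streams GlobalPinInfo)
//     POST   /pins/{cid}  -> pinHandler
//     DELETE /pins/{cid}  -> unpinHandler
//     POST   /ipfs/gc     -> repoGCHandler (cluster-wide repo GC)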
-package rest - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "math/rand" - "net/http" - "strings" - "sync" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/adder/adderutils" - types "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/api/common" - - logging "github.com/ipfs/go-log/v2" - rpc "github.com/libp2p/go-libp2p-gorpc" - "github.com/libp2p/go-libp2p/core/host" - peer "github.com/libp2p/go-libp2p/core/peer" - - mux "github.com/gorilla/mux" -) - -func init() { - rand.Seed(time.Now().UnixNano()) -} - -var ( - logger = logging.Logger("restapi") - apiLogger = logging.Logger("restapilog") -) - -type peerAddBody struct { - PeerID string `json:"peer_id"` -} - -// API implements the REST API Component. -// It embeds a common.API. -type API struct { - *common.API - - rpcClient *rpc.Client - config *Config -} - -// NewAPI creates a new REST API component. -func NewAPI(ctx context.Context, cfg *Config) (*API, error) { - return NewAPIWithHost(ctx, cfg, nil) -} - -// NewAPIWithHost creates a new REST API component using the given libp2p Host. -func NewAPIWithHost(ctx context.Context, cfg *Config, h host.Host) (*API, error) { - api := API{ - config: cfg, - } - capi, err := common.NewAPIWithHost(ctx, &cfg.Config, h, api.routes) - api.API = capi - return &api, err -} - -// Routes returns endpoints supported by this API. -func (api *API) routes(c *rpc.Client) []common.Route { - api.rpcClient = c - return []common.Route{ - { - Name: "ID", - Method: "GET", - Pattern: "/id", - HandlerFunc: api.idHandler, - }, - - { - Name: "Version", - Method: "GET", - Pattern: "/version", - HandlerFunc: api.versionHandler, - }, - - { - Name: "Peers", - Method: "GET", - Pattern: "/peers", - HandlerFunc: api.peerListHandler, - }, - { - Name: "PeerAdd", - Method: "POST", - Pattern: "/peers", - HandlerFunc: api.peerAddHandler, - }, - { - Name: "PeerRemove", - Method: "DELETE", - Pattern: "/peers/{peer}", - HandlerFunc: api.peerRemoveHandler, - }, - { - Name: "Add", - Method: "POST", - Pattern: "/add", - HandlerFunc: api.addHandler, - }, - { - Name: "Allocations", - Method: "GET", - Pattern: "/allocations", - HandlerFunc: api.allocationsHandler, - }, - { - Name: "Allocation", - Method: "GET", - Pattern: "/allocations/{hash}", - HandlerFunc: api.allocationHandler, - }, - { - Name: "StatusAll", - Method: "GET", - Pattern: "/pins", - HandlerFunc: api.statusAllHandler, - }, - { - Name: "Recover", - Method: "POST", - Pattern: "/pins/{hash}/recover", - HandlerFunc: api.recoverHandler, - }, - { - Name: "RecoverAll", - Method: "POST", - Pattern: "/pins/recover", - HandlerFunc: api.recoverAllHandler, - }, - { - Name: "Status", - Method: "GET", - Pattern: "/pins/{hash}", - HandlerFunc: api.statusHandler, - }, - { - Name: "Pin", - Method: "POST", - Pattern: "/pins/{hash}", - HandlerFunc: api.pinHandler, - }, - { - Name: "PinPath", - Method: "POST", - Pattern: "/pins/{keyType:ipfs|ipns|ipld}/{path:.*}", - HandlerFunc: api.pinPathHandler, - }, - { - Name: "Unpin", - Method: "DELETE", - Pattern: "/pins/{hash}", - HandlerFunc: api.unpinHandler, - }, - { - Name: "UnpinPath", - Method: "DELETE", - Pattern: "/pins/{keyType:ipfs|ipns|ipld}/{path:.*}", - HandlerFunc: api.unpinPathHandler, - }, - { - Name: "RepoGC", - Method: "POST", - Pattern: "/ipfs/gc", - HandlerFunc: api.repoGCHandler, - }, - { - Name: "ConnectionGraph", - Method: "GET", - Pattern: "/health/graph", - HandlerFunc: api.graphHandler, - }, - { - Name: "Alerts", - Method: "GET", - Pattern: "/health/alerts", - HandlerFunc: 
api.alertsHandler, - }, - { - Name: "Metrics", - Method: "GET", - Pattern: "/monitor/metrics/{name}", - HandlerFunc: api.metricsHandler, - }, - { - Name: "MetricNames", - Method: "GET", - Pattern: "/monitor/metrics", - HandlerFunc: api.metricNamesHandler, - }, - { - Name: "GetToken", - Method: "POST", - Pattern: "/token", - HandlerFunc: api.GenerateTokenHandler, - }, - } -} - -func (api *API) idHandler(w http.ResponseWriter, r *http.Request) { - var id types.ID - err := api.rpcClient.CallContext( - r.Context(), - "", - "Cluster", - "ID", - struct{}{}, - &id, - ) - - api.SendResponse(w, common.SetStatusAutomatically, err, &id) -} - -func (api *API) versionHandler(w http.ResponseWriter, r *http.Request) { - var v types.Version - err := api.rpcClient.CallContext( - r.Context(), - "", - "Cluster", - "Version", - struct{}{}, - &v, - ) - - api.SendResponse(w, common.SetStatusAutomatically, err, v) -} - -func (api *API) graphHandler(w http.ResponseWriter, r *http.Request) { - var graph types.ConnectGraph - err := api.rpcClient.CallContext( - r.Context(), - "", - "Cluster", - "ConnectGraph", - struct{}{}, - &graph, - ) - api.SendResponse(w, common.SetStatusAutomatically, err, graph) -} - -func (api *API) metricsHandler(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - name := vars["name"] - - var metrics []types.Metric - err := api.rpcClient.CallContext( - r.Context(), - "", - "PeerMonitor", - "LatestMetrics", - name, - &metrics, - ) - api.SendResponse(w, common.SetStatusAutomatically, err, metrics) -} - -func (api *API) metricNamesHandler(w http.ResponseWriter, r *http.Request) { - var metricNames []string - err := api.rpcClient.CallContext( - r.Context(), - "", - "PeerMonitor", - "MetricNames", - struct{}{}, - &metricNames, - ) - api.SendResponse(w, common.SetStatusAutomatically, err, metricNames) -} - -func (api *API) alertsHandler(w http.ResponseWriter, r *http.Request) { - var alerts []types.Alert - err := api.rpcClient.CallContext( - r.Context(), - "", - "Cluster", - "Alerts", - struct{}{}, - &alerts, - ) - api.SendResponse(w, common.SetStatusAutomatically, err, alerts) -} - -func (api *API) addHandler(w http.ResponseWriter, r *http.Request) { - reader, err := r.MultipartReader() - if err != nil { - api.SendResponse(w, http.StatusBadRequest, err, nil) - return - } - - params, err := types.AddParamsFromQuery(r.URL.Query()) - if err != nil { - api.SendResponse(w, http.StatusBadRequest, err, nil) - return - } - - api.SetHeaders(w) - - // any errors sent as trailer - adderutils.AddMultipartHTTPHandler( - r.Context(), - api.rpcClient, - params, - reader, - w, - nil, - ) -} - -func (api *API) peerListHandler(w http.ResponseWriter, r *http.Request) { - in := make(chan struct{}) - close(in) - out := make(chan types.ID, common.StreamChannelSize) - errCh := make(chan error, 1) - go func() { - defer close(errCh) - - errCh <- api.rpcClient.Stream( - r.Context(), - "", - "Cluster", - "Peers", - in, - out, - ) - }() - - iter := func() (interface{}, bool, error) { - p, ok := <-out - return p, ok, nil - } - api.StreamResponse(w, iter, errCh) -} - -func (api *API) peerAddHandler(w http.ResponseWriter, r *http.Request) { - dec := json.NewDecoder(r.Body) - defer r.Body.Close() - - var addInfo peerAddBody - err := dec.Decode(&addInfo) - if err != nil { - api.SendResponse(w, http.StatusBadRequest, errors.New("error decoding request body"), nil) - return - } - - pid, err := peer.Decode(addInfo.PeerID) - if err != nil { - api.SendResponse(w, http.StatusBadRequest, errors.New("error decoding 
peer_id"), nil) - return - } - - var id types.ID - err = api.rpcClient.CallContext( - r.Context(), - "", - "Cluster", - "PeerAdd", - pid, - &id, - ) - api.SendResponse(w, common.SetStatusAutomatically, err, &id) -} - -func (api *API) peerRemoveHandler(w http.ResponseWriter, r *http.Request) { - if p := api.ParsePidOrFail(w, r); p != "" { - err := api.rpcClient.CallContext( - r.Context(), - "", - "Cluster", - "PeerRemove", - p, - &struct{}{}, - ) - api.SendResponse(w, common.SetStatusAutomatically, err, nil) - } -} - -func (api *API) pinHandler(w http.ResponseWriter, r *http.Request) { - if pin := api.ParseCidOrFail(w, r); pin.Defined() { - api.config.Logger.Debugf("rest api pinHandler: %s", pin.Cid) - // span.AddAttributes(trace.StringAttribute("cid", pin.Cid)) - var pinObj types.Pin - err := api.rpcClient.CallContext( - r.Context(), - "", - "Cluster", - "Pin", - pin, - &pinObj, - ) - api.SendResponse(w, common.SetStatusAutomatically, err, pinObj) - api.config.Logger.Debug("rest api pinHandler done") - } -} - -func (api *API) unpinHandler(w http.ResponseWriter, r *http.Request) { - if pin := api.ParseCidOrFail(w, r); pin.Defined() { - api.config.Logger.Debugf("rest api unpinHandler: %s", pin.Cid) - // span.AddAttributes(trace.StringAttribute("cid", pin.Cid)) - var pinObj types.Pin - err := api.rpcClient.CallContext( - r.Context(), - "", - "Cluster", - "Unpin", - pin, - &pinObj, - ) - api.SendResponse(w, common.SetStatusAutomatically, err, pinObj) - api.config.Logger.Debug("rest api unpinHandler done") - } -} - -func (api *API) pinPathHandler(w http.ResponseWriter, r *http.Request) { - var pin types.Pin - if pinpath := api.ParsePinPathOrFail(w, r); pinpath.Defined() { - api.config.Logger.Debugf("rest api pinPathHandler: %s", pinpath.Path) - err := api.rpcClient.CallContext( - r.Context(), - "", - "Cluster", - "PinPath", - pinpath, - &pin, - ) - - api.SendResponse(w, common.SetStatusAutomatically, err, pin) - api.config.Logger.Debug("rest api pinPathHandler done") - } -} - -func (api *API) unpinPathHandler(w http.ResponseWriter, r *http.Request) { - var pin types.Pin - if pinpath := api.ParsePinPathOrFail(w, r); pinpath.Defined() { - api.config.Logger.Debugf("rest api unpinPathHandler: %s", pinpath.Path) - err := api.rpcClient.CallContext( - r.Context(), - "", - "Cluster", - "UnpinPath", - pinpath, - &pin, - ) - api.SendResponse(w, common.SetStatusAutomatically, err, pin) - api.config.Logger.Debug("rest api unpinPathHandler done") - } -} - -func (api *API) allocationsHandler(w http.ResponseWriter, r *http.Request) { - queryValues := r.URL.Query() - filterStr := queryValues.Get("filter") - var filter types.PinType - for _, f := range strings.Split(filterStr, ",") { - filter |= types.PinTypeFromString(f) - } - - if filter == types.BadType { - api.SendResponse(w, http.StatusBadRequest, errors.New("invalid filter value"), nil) - return - } - - in := make(chan struct{}) - close(in) - - out := make(chan types.Pin, common.StreamChannelSize) - errCh := make(chan error, 1) - - ctx, cancel := context.WithCancel(r.Context()) - defer cancel() - - go func() { - defer close(errCh) - - errCh <- api.rpcClient.Stream( - r.Context(), - "", - "Cluster", - "Pins", - in, - out, - ) - }() - - iter := func() (interface{}, bool, error) { - var p types.Pin - var ok bool - iterloop: - for { - - select { - case <-ctx.Done(): - break iterloop - case p, ok = <-out: - if !ok { - break iterloop - } - // this means we keep iterating if no filter - // matched - if filter == types.AllType || filter&p.Type > 0 { - break 
iterloop - } - } - } - return p, ok, ctx.Err() - } - - api.StreamResponse(w, iter, errCh) -} - -func (api *API) allocationHandler(w http.ResponseWriter, r *http.Request) { - if pin := api.ParseCidOrFail(w, r); pin.Defined() { - var pinResp types.Pin - err := api.rpcClient.CallContext( - r.Context(), - "", - "Cluster", - "PinGet", - pin.Cid, - &pinResp, - ) - api.SendResponse(w, common.SetStatusAutomatically, err, pinResp) - } -} - -func (api *API) statusAllHandler(w http.ResponseWriter, r *http.Request) { - ctx, cancel := context.WithCancel(r.Context()) - defer cancel() - - queryValues := r.URL.Query() - if queryValues.Get("cids") != "" { - api.statusCidsHandler(w, r) - return - } - - local := queryValues.Get("local") - - filterStr := queryValues.Get("filter") - filter := types.TrackerStatusFromString(filterStr) - // FIXME: This is a bit lazy, as "invalidxx,pinned" would result in a - // valid "pinned" filter. - if filter == types.TrackerStatusUndefined && filterStr != "" { - api.SendResponse(w, http.StatusBadRequest, errors.New("invalid filter value"), nil) - return - } - - var iter common.StreamIterator - in := make(chan types.TrackerStatus, 1) - in <- filter - close(in) - errCh := make(chan error, 1) - - if local == "true" { - out := make(chan types.PinInfo, common.StreamChannelSize) - iter = func() (interface{}, bool, error) { - select { - case <-ctx.Done(): - return nil, false, ctx.Err() - case p, ok := <-out: - return p.ToGlobal(), ok, nil - } - } - - go func() { - defer close(errCh) - - errCh <- api.rpcClient.Stream( - r.Context(), - "", - "Cluster", - "StatusAllLocal", - in, - out, - ) - }() - - } else { - out := make(chan types.GlobalPinInfo, common.StreamChannelSize) - iter = func() (interface{}, bool, error) { - select { - case <-ctx.Done(): - return nil, false, ctx.Err() - case p, ok := <-out: - return p, ok, nil - } - } - go func() { - defer close(errCh) - - errCh <- api.rpcClient.Stream( - r.Context(), - "", - "Cluster", - "StatusAll", - in, - out, - ) - }() - } - - api.StreamResponse(w, iter, errCh) -} - -// request statuses for multiple CIDs in parallel. 
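// Illustrative query (not from the original file): GET /pins?cids=<c1>,<c2>
// issues one Status RPC per CID concurrently (StatusLocal when local=true)
// and streams the results back in completion order.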
-func (api *API) statusCidsHandler(w http.ResponseWriter, r *http.Request) { - ctx, cancel := context.WithCancel(r.Context()) - defer cancel() - - queryValues := r.URL.Query() - filterCidsStr := strings.Split(queryValues.Get("cids"), ",") - var cids []types.Cid - - for _, cidStr := range filterCidsStr { - c, err := types.DecodeCid(cidStr) - if err != nil { - api.SendResponse(w, http.StatusBadRequest, fmt.Errorf("error decoding Cid: %w", err), nil) - return - } - cids = append(cids, c) - } - - local := queryValues.Get("local") - - gpiCh := make(chan types.GlobalPinInfo, len(cids)) - errCh := make(chan error, len(cids)) - var wg sync.WaitGroup - wg.Add(len(cids)) - - // Close channel when done - go func() { - wg.Wait() - close(errCh) - close(gpiCh) - }() - - if local == "true" { - for _, ci := range cids { - go func(c types.Cid) { - defer wg.Done() - var pinInfo types.PinInfo - err := api.rpcClient.CallContext( - ctx, - "", - "Cluster", - "StatusLocal", - c, - &pinInfo, - ) - if err != nil { - errCh <- err - return - } - gpiCh <- pinInfo.ToGlobal() - }(ci) - } - } else { - for _, ci := range cids { - go func(c types.Cid) { - defer wg.Done() - var pinInfo types.GlobalPinInfo - err := api.rpcClient.CallContext( - ctx, - "", - "Cluster", - "Status", - c, - &pinInfo, - ) - if err != nil { - errCh <- err - return - } - gpiCh <- pinInfo - }(ci) - } - } - - iter := func() (interface{}, bool, error) { - gpi, ok := <-gpiCh - return gpi, ok, nil - } - - api.StreamResponse(w, iter, errCh) -} - -func (api *API) statusHandler(w http.ResponseWriter, r *http.Request) { - queryValues := r.URL.Query() - local := queryValues.Get("local") - - if pin := api.ParseCidOrFail(w, r); pin.Defined() { - if local == "true" { - var pinInfo types.PinInfo - err := api.rpcClient.CallContext( - r.Context(), - "", - "Cluster", - "StatusLocal", - pin.Cid, - &pinInfo, - ) - api.SendResponse(w, common.SetStatusAutomatically, err, pinInfo.ToGlobal()) - } else { - var pinInfo types.GlobalPinInfo - err := api.rpcClient.CallContext( - r.Context(), - "", - "Cluster", - "Status", - pin.Cid, - &pinInfo, - ) - api.SendResponse(w, common.SetStatusAutomatically, err, pinInfo) - } - } -} - -func (api *API) recoverAllHandler(w http.ResponseWriter, r *http.Request) { - ctx, cancel := context.WithCancel(r.Context()) - defer cancel() - - queryValues := r.URL.Query() - local := queryValues.Get("local") - - var iter common.StreamIterator - in := make(chan struct{}) - close(in) - errCh := make(chan error, 1) - - if local == "true" { - out := make(chan types.PinInfo, common.StreamChannelSize) - iter = func() (interface{}, bool, error) { - select { - case <-ctx.Done(): - return nil, false, ctx.Err() - case p, ok := <-out: - return p.ToGlobal(), ok, nil - } - } - - go func() { - defer close(errCh) - - errCh <- api.rpcClient.Stream( - r.Context(), - "", - "Cluster", - "RecoverAllLocal", - in, - out, - ) - }() - - } else { - out := make(chan types.GlobalPinInfo, common.StreamChannelSize) - iter = func() (interface{}, bool, error) { - select { - case <-ctx.Done(): - return nil, false, ctx.Err() - case p, ok := <-out: - return p, ok, nil - } - } - go func() { - defer close(errCh) - - errCh <- api.rpcClient.Stream( - r.Context(), - "", - "Cluster", - "RecoverAll", - in, - out, - ) - }() - } - - api.StreamResponse(w, iter, errCh) -} - -func (api *API) recoverHandler(w http.ResponseWriter, r *http.Request) { - queryValues := r.URL.Query() - local := queryValues.Get("local") - - if pin := api.ParseCidOrFail(w, r); pin.Defined() { - if local == "true" { - 
var pinInfo types.PinInfo - err := api.rpcClient.CallContext( - r.Context(), - "", - "Cluster", - "RecoverLocal", - pin.Cid, - &pinInfo, - ) - api.SendResponse(w, common.SetStatusAutomatically, err, pinInfo.ToGlobal()) - } else { - var pinInfo types.GlobalPinInfo - err := api.rpcClient.CallContext( - r.Context(), - "", - "Cluster", - "Recover", - pin.Cid, - &pinInfo, - ) - api.SendResponse(w, common.SetStatusAutomatically, err, pinInfo) - } - } -} - -func (api *API) repoGCHandler(w http.ResponseWriter, r *http.Request) { - queryValues := r.URL.Query() - local := queryValues.Get("local") - - if local == "true" { - var localRepoGC types.RepoGC - err := api.rpcClient.CallContext( - r.Context(), - "", - "Cluster", - "RepoGCLocal", - struct{}{}, - &localRepoGC, - ) - - api.SendResponse(w, common.SetStatusAutomatically, err, repoGCToGlobal(localRepoGC)) - return - } - - var repoGC types.GlobalRepoGC - err := api.rpcClient.CallContext( - r.Context(), - "", - "Cluster", - "RepoGC", - struct{}{}, - &repoGC, - ) - api.SendResponse(w, common.SetStatusAutomatically, err, repoGC) -} - -func repoGCToGlobal(r types.RepoGC) types.GlobalRepoGC { - return types.GlobalRepoGC{ - PeerMap: map[string]types.RepoGC{ - r.Peer.String(): r, - }, - } -} diff --git a/packages/networking/ipfs-cluster/api/rest/restapi_test.go b/packages/networking/ipfs-cluster/api/rest/restapi_test.go deleted file mode 100644 index d338d04..0000000 --- a/packages/networking/ipfs-cluster/api/rest/restapi_test.go +++ /dev/null @@ -1,846 +0,0 @@ -package rest - -import ( - "context" - "fmt" - "io" - "net/http" - "strings" - "testing" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/api" - test "github.com/ipfs-cluster/ipfs-cluster/api/common/test" - clustertest "github.com/ipfs-cluster/ipfs-cluster/test" - - libp2p "github.com/libp2p/go-libp2p" - peer "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" -) - -const ( - SSLCertFile = "test/server.crt" - SSLKeyFile = "test/server.key" - clientOrigin = "myorigin" - validUserName = "validUserName" - validUserPassword = "validUserPassword" - adminUserName = "adminUserName" - adminUserPassword = "adminUserPassword" - invalidUserName = "invalidUserName" - invalidUserPassword = "invalidUserPassword" -) - -func testAPIwithConfig(t *testing.T, cfg *Config, name string) *API { - ctx := context.Background() - apiMAddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0") - h, err := libp2p.New(libp2p.ListenAddrs(apiMAddr)) - if err != nil { - t.Fatal(err) - } - - cfg.HTTPListenAddr = []ma.Multiaddr{apiMAddr} - - rest, err := NewAPIWithHost(ctx, cfg, h) - if err != nil { - t.Fatalf("should be able to create a new %s API: %s", name, err) - } - - // No keep alive for tests - rest.SetKeepAlivesEnabled(false) - rest.SetClient(clustertest.NewMockRPCClient(t)) - - return rest -} - -func testAPI(t *testing.T) *API { - cfg := NewConfig() - cfg.Default() - cfg.CORSAllowedOrigins = []string{clientOrigin} - cfg.CORSAllowedMethods = []string{"GET", "POST", "DELETE"} - //cfg.CORSAllowedHeaders = []string{"Content-Type"} - cfg.CORSMaxAge = 10 * time.Minute - - return testAPIwithConfig(t, cfg, "basic") -} - -func TestRestAPIIDEndpoint(t *testing.T) { - ctx := context.Background() - rest := testAPI(t) - defer rest.Shutdown(ctx) - - tf := func(t *testing.T, url test.URLFunc) { - id := api.ID{} - test.MakeGet(t, rest, url(rest)+"/id", &id) - if id.ID.Pretty() != clustertest.PeerID1.Pretty() { - t.Error("expected correct id") - } - } - - test.BothEndpoints(t, tf) -} - -func 
TestAPIVersionEndpoint(t *testing.T) { - ctx := context.Background() - rest := testAPI(t) - defer rest.Shutdown(ctx) - - tf := func(t *testing.T, url test.URLFunc) { - ver := api.Version{} - test.MakeGet(t, rest, url(rest)+"/version", &ver) - if ver.Version != "0.0.mock" { - t.Error("expected correct version") - } - } - - test.BothEndpoints(t, tf) -} - -func TestAPIPeersEndpoint(t *testing.T) { - ctx := context.Background() - rest := testAPI(t) - defer rest.Shutdown(ctx) - - tf := func(t *testing.T, url test.URLFunc) { - var list []api.ID - test.MakeStreamingGet(t, rest, url(rest)+"/peers", &list, false) - if len(list) != 1 { - t.Fatal("expected 1 element") - } - if list[0].ID.Pretty() != clustertest.PeerID1.Pretty() { - t.Error("expected a different peer id list: ", list) - } - } - - test.BothEndpoints(t, tf) -} - -func TestAPIPeerAddEndpoint(t *testing.T) { - ctx := context.Background() - rest := testAPI(t) - defer rest.Shutdown(ctx) - - tf := func(t *testing.T, url test.URLFunc) { - id := api.ID{} - // post with valid body - body := fmt.Sprintf("{\"peer_id\":\"%s\"}", clustertest.PeerID1.Pretty()) - t.Log(body) - test.MakePost(t, rest, url(rest)+"/peers", []byte(body), &id) - if id.ID.Pretty() != clustertest.PeerID1.Pretty() { - t.Error("expected correct ID") - } - if id.Error != "" { - t.Error("did not expect an error") - } - - // Send invalid body - errResp := api.Error{} - test.MakePost(t, rest, url(rest)+"/peers", []byte("oeoeoeoe"), &errResp) - if errResp.Code != 400 { - t.Error("expected error with bad body") - } - // Send invalid peer id - test.MakePost(t, rest, url(rest)+"/peers", []byte("{\"peer_id\": \"ab\"}"), &errResp) - if errResp.Code != 400 { - t.Error("expected error with bad peer_id") - } - } - - test.BothEndpoints(t, tf) -} - -func TestAPIAddFileEndpointBadContentType(t *testing.T) { - ctx := context.Background() - rest := testAPI(t) - defer rest.Shutdown(ctx) - - tf := func(t *testing.T, url test.URLFunc) { - fmtStr1 := "/add?shard=true&repl_min=-1&repl_max=-1" - localURL := url(rest) + fmtStr1 - - errResp := api.Error{} - test.MakePost(t, rest, localURL, []byte("test"), &errResp) - - if errResp.Code != 400 { - t.Error("expected error with bad content-type") - } - } - - test.BothEndpoints(t, tf) -} - -func TestAPIAddFileEndpointLocal(t *testing.T) { - ctx := context.Background() - rest := testAPI(t) - defer rest.Shutdown(ctx) - - sth := clustertest.NewShardingTestHelper() - defer sth.Clean(t) - - // This generates the testing files and - // writes them to disk. - // This is necessary here because we run tests - // in parallel, and otherwise a write-race might happen. 
- _, closer := sth.GetTreeMultiReader(t) - closer.Close() - - tf := func(t *testing.T, url test.URLFunc) { - fmtStr1 := "/add?shard=false&repl_min=-1&repl_max=-1&stream-channels=true" - localURL := url(rest) + fmtStr1 - body, closer := sth.GetTreeMultiReader(t) - defer closer.Close() - resp := api.AddedOutput{} - mpContentType := "multipart/form-data; boundary=" + body.Boundary() - test.MakeStreamingPost(t, rest, localURL, body, mpContentType, &resp) - - // resp will contain the last object from the streaming - if resp.Cid.String() != clustertest.ShardingDirBalancedRootCID { - t.Error("Bad Cid after adding: ", resp.Cid) - } - } - - test.BothEndpoints(t, tf) -} - -func TestAPIAddFileEndpointShard(t *testing.T) { - ctx := context.Background() - rest := testAPI(t) - defer rest.Shutdown(ctx) - - sth := clustertest.NewShardingTestHelper() - defer sth.Clean(t) - - // This generates the testing files and - // writes them to disk. - // This is necessary here because we run tests - // in parallel, and otherwise a write-race might happen. - _, closer := sth.GetTreeMultiReader(t) - closer.Close() - - tf := func(t *testing.T, url test.URLFunc) { - body, closer := sth.GetTreeMultiReader(t) - defer closer.Close() - mpContentType := "multipart/form-data; boundary=" + body.Boundary() - resp := api.AddedOutput{} - fmtStr1 := "/add?shard=true&repl_min=-1&repl_max=-1&stream-channels=true&shard-size=1000000" - shardURL := url(rest) + fmtStr1 - test.MakeStreamingPost(t, rest, shardURL, body, mpContentType, &resp) - } - - test.BothEndpoints(t, tf) -} - -func TestAPIAddFileEndpoint_StreamChannelsFalse(t *testing.T) { - ctx := context.Background() - rest := testAPI(t) - defer rest.Shutdown(ctx) - - sth := clustertest.NewShardingTestHelper() - defer sth.Clean(t) - - // This generates the testing files and - // writes them to disk. - // This is necessary here because we run tests - // in parallel, and otherwise a write-race might happen. 
- _, closer := sth.GetTreeMultiReader(t) - closer.Close() - - tf := func(t *testing.T, url test.URLFunc) { - body, closer := sth.GetTreeMultiReader(t) - defer closer.Close() - fullBody, err := io.ReadAll(body) - if err != nil { - t.Fatal(err) - } - mpContentType := "multipart/form-data; boundary=" + body.Boundary() - resp := []api.AddedOutput{} - fmtStr1 := "/add?shard=false&repl_min=-1&repl_max=-1&stream-channels=false" - shardURL := url(rest) + fmtStr1 - - test.MakePostWithContentType(t, rest, shardURL, fullBody, mpContentType, &resp) - lastHash := resp[len(resp)-1] - if lastHash.Cid.String() != clustertest.ShardingDirBalancedRootCID { - t.Error("Bad Cid after adding: ", lastHash.Cid) - } - } - - test.BothEndpoints(t, tf) -} - -func TestAPIPeerRemoveEndpoint(t *testing.T) { - ctx := context.Background() - rest := testAPI(t) - defer rest.Shutdown(ctx) - - tf := func(t *testing.T, url test.URLFunc) { - test.MakeDelete(t, rest, url(rest)+"/peers/"+clustertest.PeerID1.Pretty(), &struct{}{}) - } - - test.BothEndpoints(t, tf) -} - -func TestConnectGraphEndpoint(t *testing.T) { - ctx := context.Background() - rest := testAPI(t) - defer rest.Shutdown(ctx) - - tf := func(t *testing.T, url test.URLFunc) { - var cg api.ConnectGraph - test.MakeGet(t, rest, url(rest)+"/health/graph", &cg) - if cg.ClusterID.Pretty() != clustertest.PeerID1.Pretty() { - t.Error("unexpected cluster id") - } - if len(cg.IPFSLinks) != 3 { - t.Error("unexpected number of ipfs peers") - } - if len(cg.ClusterLinks) != 3 { - t.Error("unexpected number of cluster peers") - } - if len(cg.ClustertoIPFS) != 3 { - t.Error("unexpected number of cluster to ipfs links") - } - // test a few link values - pid1 := clustertest.PeerID1 - pid4 := clustertest.PeerID4 - if _, ok := cg.ClustertoIPFS[pid1.String()]; !ok { - t.Fatal("missing cluster peer 1 from cluster to peer links map") - } - if cg.ClustertoIPFS[pid1.String()] != pid4 { - t.Error("unexpected ipfs peer mapped to cluster peer 1 in graph") - } - } - - test.BothEndpoints(t, tf) -} - -func TestAPIPinEndpoint(t *testing.T) { - ctx := context.Background() - rest := testAPI(t) - defer rest.Shutdown(ctx) - - tf := func(t *testing.T, url test.URLFunc) { - // test regular post - test.MakePost(t, rest, url(rest)+"/pins/"+clustertest.Cid1.String(), []byte{}, &struct{}{}) - - errResp := api.Error{} - test.MakePost(t, rest, url(rest)+"/pins/"+clustertest.ErrorCid.String(), []byte{}, &errResp) - if errResp.Message != clustertest.ErrBadCid.Error() { - t.Error("expected different error: ", errResp.Message) - } - - test.MakePost(t, rest, url(rest)+"/pins/abcd", []byte{}, &errResp) - if errResp.Code != 400 { - t.Error("should fail with bad Cid") - } - } - - test.BothEndpoints(t, tf) -} - -type pathCase struct { - path string - opts api.PinOptions - wantErr bool - code int - expectedCid string -} - -func (p *pathCase) WithQuery(t *testing.T) string { - query, err := p.opts.ToQuery() - if err != nil { - t.Fatal(err) - } - return p.path + "?" 
+ query -} - -var testPinOpts = api.PinOptions{ - ReplicationFactorMax: 7, - ReplicationFactorMin: 6, - Name: "hello there", - UserAllocations: []peer.ID{clustertest.PeerID1, clustertest.PeerID2}, - ExpireAt: time.Now().Add(30 * time.Second), -} - -var pathTestCases = []pathCase{ - { - "/ipfs/QmaNJ5acV31sx8jq626qTpAWW4DXKw34aGhx53dECLvXbY", - testPinOpts, - false, - http.StatusOK, - "QmaNJ5acV31sx8jq626qTpAWW4DXKw34aGhx53dECLvXbY", - }, - { - "/ipfs/QmbUNM297ZwxB8CfFAznK7H9YMesDoY6Tt5bPgt5MSCB2u/im.gif", - testPinOpts, - false, - http.StatusOK, - clustertest.CidResolved.String(), - }, - { - "/ipfs/invalidhash", - testPinOpts, - true, - http.StatusBadRequest, - "", - }, - { - "/ipfs/bafyreiay3jpjk74dkckv2r74eyvf3lfnxujefay2rtuluintasq2zlapv4", - testPinOpts, - true, - http.StatusNotFound, - "", - }, - // TODO: A case with trailing slash with paths - // clustertest.PathIPNS2, clustertest.PathIPLD2, clustertest.InvalidPath1 -} - -func TestAPIPinEndpointWithPath(t *testing.T) { - ctx := context.Background() - rest := testAPI(t) - defer rest.Shutdown(ctx) - - tf := func(t *testing.T, url test.URLFunc) { - for _, testCase := range pathTestCases[:3] { - c, _ := api.DecodeCid(testCase.expectedCid) - resultantPin := api.PinWithOpts( - c, - testPinOpts, - ) - - if testCase.wantErr { - errResp := api.Error{} - q := testCase.WithQuery(t) - test.MakePost(t, rest, url(rest)+"/pins"+q, []byte{}, &errResp) - if errResp.Code != testCase.code { - t.Errorf( - "status code: expected: %d, got: %d, path: %s\n", - testCase.code, - errResp.Code, - testCase.path, - ) - } - continue - } - pin := api.Pin{} - q := testCase.WithQuery(t) - test.MakePost(t, rest, url(rest)+"/pins"+q, []byte{}, &pin) - if !pin.Equals(resultantPin) { - t.Errorf("pin: expected: %+v", resultantPin) - t.Errorf("pin: got: %+v", pin) - t.Errorf("path: %s", testCase.path) - } - } - } - - test.BothEndpoints(t, tf) -} - -func TestAPIUnpinEndpoint(t *testing.T) { - ctx := context.Background() - rest := testAPI(t) - defer rest.Shutdown(ctx) - - tf := func(t *testing.T, url test.URLFunc) { - // test regular delete - test.MakeDelete(t, rest, url(rest)+"/pins/"+clustertest.Cid1.String(), &struct{}{}) - - errResp := api.Error{} - test.MakeDelete(t, rest, url(rest)+"/pins/"+clustertest.ErrorCid.String(), &errResp) - if errResp.Message != clustertest.ErrBadCid.Error() { - t.Error("expected different error: ", errResp.Message) - } - - test.MakeDelete(t, rest, url(rest)+"/pins/"+clustertest.NotFoundCid.String(), &errResp) - if errResp.Code != http.StatusNotFound { - t.Error("expected different error code: ", errResp.Code) - } - - test.MakeDelete(t, rest, url(rest)+"/pins/abcd", &errResp) - if errResp.Code != 400 { - t.Error("expected different error code: ", errResp.Code) - } - } - - test.BothEndpoints(t, tf) -} - -func TestAPIUnpinEndpointWithPath(t *testing.T) { - ctx := context.Background() - rest := testAPI(t) - defer rest.Shutdown(ctx) - - tf := func(t *testing.T, url test.URLFunc) { - for _, testCase := range pathTestCases { - if testCase.wantErr { - errResp := api.Error{} - test.MakeDelete(t, rest, url(rest)+"/pins"+testCase.path, &errResp) - if errResp.Code != testCase.code { - t.Errorf( - "status code: expected: %d, got: %d, path: %s\n", - testCase.code, - errResp.Code, - testCase.path, - ) - } - continue - } - pin := api.Pin{} - test.MakeDelete(t, rest, url(rest)+"/pins"+testCase.path, &pin) - if pin.Cid.String() != testCase.expectedCid { - t.Errorf( - "cid: expected: %s, got: %s, path: %s\n", - clustertest.CidResolved, - pin.Cid, - 
testCase.path, - ) - } - } - } - - test.BothEndpoints(t, tf) -} - -func TestAPIAllocationsEndpoint(t *testing.T) { - ctx := context.Background() - rest := testAPI(t) - defer rest.Shutdown(ctx) - - tf := func(t *testing.T, url test.URLFunc) { - var resp []api.Pin - test.MakeStreamingGet(t, rest, url(rest)+"/allocations?filter=pin,meta-pin", &resp, false) - if len(resp) != 3 || - !resp[0].Cid.Equals(clustertest.Cid1) || !resp[1].Cid.Equals(clustertest.Cid2) || - !resp[2].Cid.Equals(clustertest.Cid3) { - t.Error("unexpected pin list: ", resp) - } - - test.MakeStreamingGet(t, rest, url(rest)+"/allocations", &resp, false) - if len(resp) != 3 || - !resp[0].Cid.Equals(clustertest.Cid1) || !resp[1].Cid.Equals(clustertest.Cid2) || - !resp[2].Cid.Equals(clustertest.Cid3) { - t.Error("unexpected pin list: ", resp) - } - - errResp := api.Error{} - test.MakeStreamingGet(t, rest, url(rest)+"/allocations?filter=invalid", &errResp, false) - if errResp.Code != http.StatusBadRequest { - t.Error("an invalid filter value should 400") - } - } - - test.BothEndpoints(t, tf) -} - -func TestAPIAllocationEndpoint(t *testing.T) { - ctx := context.Background() - rest := testAPI(t) - defer rest.Shutdown(ctx) - - tf := func(t *testing.T, url test.URLFunc) { - var resp api.Pin - test.MakeGet(t, rest, url(rest)+"/allocations/"+clustertest.Cid1.String(), &resp) - if !resp.Cid.Equals(clustertest.Cid1) { - t.Errorf("cid should be the same: %s %s", resp.Cid, clustertest.Cid1) - } - - errResp := api.Error{} - test.MakeGet(t, rest, url(rest)+"/allocations/"+clustertest.Cid4.String(), &errResp) - if errResp.Code != 404 { - t.Error("a non-pinned cid should 404") - } - } - - test.BothEndpoints(t, tf) -} - -func TestAPIMetricsEndpoint(t *testing.T) { - ctx := context.Background() - rest := testAPI(t) - defer rest.Shutdown(ctx) - - tf := func(t *testing.T, url test.URLFunc) { - var resp []api.Metric - test.MakeGet(t, rest, url(rest)+"/monitor/metrics/somemetricstype", &resp) - if len(resp) == 0 { - t.Fatal("No metrics found") - } - for _, m := range resp { - if m.Name != "test" { - t.Error("Unexpected metric name: ", m.Name) - } - if m.Peer.Pretty() != clustertest.PeerID1.Pretty() { - t.Error("Unexpected peer id: ", m.Peer) - } - } - } - - test.BothEndpoints(t, tf) -} - -func TestAPIMetricNamesEndpoint(t *testing.T) { - ctx := context.Background() - rest := testAPI(t) - defer rest.Shutdown(ctx) - - tf := func(t *testing.T, url test.URLFunc) { - var resp []string - test.MakeGet(t, rest, url(rest)+"/monitor/metrics", &resp) - if len(resp) == 0 { - t.Fatal("No metric names found") - } - } - - test.BothEndpoints(t, tf) -} - -func TestAPIAlertsEndpoint(t *testing.T) { - ctx := context.Background() - rest := testAPI(t) - defer rest.Shutdown(ctx) - - tf := func(t *testing.T, url test.URLFunc) { - var resp []api.Alert - test.MakeGet(t, rest, url(rest)+"/health/alerts", &resp) - if len(resp) != 1 { - t.Error("expected one alert") - } - } - - test.BothEndpoints(t, tf) -} - -func TestAPIStatusAllEndpoint(t *testing.T) { - ctx := context.Background() - rest := testAPI(t) - defer rest.Shutdown(ctx) - - tf := func(t *testing.T, url test.URLFunc) { - var resp []api.GlobalPinInfo - - test.MakeStreamingGet(t, rest, url(rest)+"/pins", &resp, false) - - // mockPinTracker returns 3 items for Cluster.StatusAll - if len(resp) != 3 || - !resp[0].Cid.Equals(clustertest.Cid1) || - resp[1].PeerMap[clustertest.PeerID1.String()].Status.String() != "pinning" { - t.Errorf("unexpected statusAll resp") - } - - // Test local=true - var resp2 []api.GlobalPinInfo - 
test.MakeStreamingGet(t, rest, url(rest)+"/pins?local=true", &resp2, false) - // mockPinTracker calls pintracker.StatusAll which returns 2 - // items. - if len(resp2) != 2 { - t.Errorf("unexpected statusAll+local resp:\n %+v", resp2) - } - - // Test with filter - var resp3 []api.GlobalPinInfo - test.MakeStreamingGet(t, rest, url(rest)+"/pins?filter=queued", &resp3, false) - if len(resp3) != 0 { - t.Errorf("unexpected statusAll+filter=queued resp:\n %+v", resp3) - } - - var resp4 []api.GlobalPinInfo - test.MakeStreamingGet(t, rest, url(rest)+"/pins?filter=pinned", &resp4, false) - if len(resp4) != 1 { - t.Errorf("unexpected statusAll+filter=pinned resp:\n %+v", resp4) - } - - var resp5 []api.GlobalPinInfo - test.MakeStreamingGet(t, rest, url(rest)+"/pins?filter=pin_error", &resp5, false) - if len(resp5) != 1 { - t.Errorf("unexpected statusAll+filter=pin_error resp:\n %+v", resp5) - } - - var resp6 []api.GlobalPinInfo - test.MakeStreamingGet(t, rest, url(rest)+"/pins?filter=error", &resp6, false) - if len(resp6) != 1 { - t.Errorf("unexpected statusAll+filter=error resp:\n %+v", resp6) - } - - var resp7 []api.GlobalPinInfo - test.MakeStreamingGet(t, rest, url(rest)+"/pins?filter=error,pinned", &resp7, false) - if len(resp7) != 2 { - t.Errorf("unexpected statusAll+filter=error,pinned resp:\n %+v", resp7) - } - - var errorResp api.Error - test.MakeStreamingGet(t, rest, url(rest)+"/pins?filter=invalid", &errorResp, false) - if errorResp.Code != http.StatusBadRequest { - t.Error("an invalid filter value should 400") - } - } - - test.BothEndpoints(t, tf) -} - -func TestAPIStatusAllWithCidsEndpoint(t *testing.T) { - ctx := context.Background() - rest := testAPI(t) - defer rest.Shutdown(ctx) - - tf := func(t *testing.T, url test.URLFunc) { - var resp []api.GlobalPinInfo - cids := []string{ - clustertest.Cid1.String(), - clustertest.Cid2.String(), - clustertest.Cid3.String(), - clustertest.Cid4.String(), - } - test.MakeStreamingGet(t, rest, url(rest)+"/pins/?cids="+strings.Join(cids, ","), &resp, false) - - if len(resp) != 4 { - t.Error("wrong number of responses") - } - - // Test local=true - var resp2 []api.GlobalPinInfo - test.MakeStreamingGet(t, rest, url(rest)+"/pins/?local=true&cids="+strings.Join(cids, ","), &resp2, false) - if len(resp2) != 4 { - t.Error("wrong number of responses") - } - - // Test with an error. This should produce a trailer error. 
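// (Illustrative, not from the original file: the error is delivered as an
// X-Stream-Error trailer, which the client collects in handleStreamResponse;
// hence the final `true` passed to MakeStreamingGet below.)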
- cids = append(cids, clustertest.ErrorCid.String()) - var resp3 []api.GlobalPinInfo - test.MakeStreamingGet(t, rest, url(rest)+"/pins/?local=true&cids="+strings.Join(cids, ","), &resp3, true) - if len(resp3) != 4 { - t.Error("wrong number of responses") - } - } - - test.BothEndpoints(t, tf) -} - -func TestAPIStatusEndpoint(t *testing.T) { - ctx := context.Background() - rest := testAPI(t) - defer rest.Shutdown(ctx) - - tf := func(t *testing.T, url test.URLFunc) { - var resp api.GlobalPinInfo - test.MakeGet(t, rest, url(rest)+"/pins/"+clustertest.Cid1.String(), &resp) - - if !resp.Cid.Equals(clustertest.Cid1) { - t.Error("expected the same cid") - } - info, ok := resp.PeerMap[clustertest.PeerID1.String()] - if !ok { - t.Fatal("expected info for clustertest.PeerID1") - } - if info.Status.String() != "pinned" { - t.Error("expected different status") - } - - // Test local=true - var resp2 api.GlobalPinInfo - test.MakeGet(t, rest, url(rest)+"/pins/"+clustertest.Cid1.String()+"?local=true", &resp2) - - if !resp2.Cid.Equals(clustertest.Cid1) { - t.Error("expected the same cid") - } - info, ok = resp2.PeerMap[clustertest.PeerID2.String()] - if !ok { - t.Fatal("expected info for clustertest.PeerID2") - } - if info.Status.String() != "pinned" { - t.Error("expected different status") - } - } - - test.BothEndpoints(t, tf) -} - -func TestAPIRecoverEndpoint(t *testing.T) { - ctx := context.Background() - rest := testAPI(t) - defer rest.Shutdown(ctx) - - tf := func(t *testing.T, url test.URLFunc) { - var resp api.GlobalPinInfo - test.MakePost(t, rest, url(rest)+"/pins/"+clustertest.Cid1.String()+"/recover", []byte{}, &resp) - - if !resp.Cid.Equals(clustertest.Cid1) { - t.Error("expected the same cid") - } - info, ok := resp.PeerMap[clustertest.PeerID1.String()] - if !ok { - t.Fatal("expected info for clustertest.PeerID1") - } - if info.Status.String() != "pinned" { - t.Error("expected different status") - } - } - - test.BothEndpoints(t, tf) -} - -func TestAPIRecoverAllEndpoint(t *testing.T) { - ctx := context.Background() - rest := testAPI(t) - defer rest.Shutdown(ctx) - - tf := func(t *testing.T, url test.URLFunc) { - var resp []api.GlobalPinInfo - test.MakeStreamingPost(t, rest, url(rest)+"/pins/recover?local=true", nil, "", &resp) - if len(resp) != 0 { - t.Fatal("bad response length") - } - - var resp1 []api.GlobalPinInfo - test.MakeStreamingPost(t, rest, url(rest)+"/pins/recover", nil, "", &resp1) - if len(resp1) == 0 { - t.Fatal("bad response length") - } - } - - test.BothEndpoints(t, tf) -} - -func TestAPIIPFSGCEndpoint(t *testing.T) { - ctx := context.Background() - rest := testAPI(t) - defer rest.Shutdown(ctx) - - testGlobalRepoGC := func(t *testing.T, gRepoGC api.GlobalRepoGC) { - if gRepoGC.PeerMap == nil { - t.Fatal("expected a non-nil peer map") - } - - if len(gRepoGC.PeerMap) != 1 { - t.Error("expected repo gc information for one peer") - } - - for _, repoGC := range gRepoGC.PeerMap { - if repoGC.Peer == "" { - t.Error("expected a cluster ID") - } - if repoGC.Error != "" { - t.Error("did not expect any error") - } - if repoGC.Keys == nil { - t.Fatal("expected a non-nil array of IPFSRepoGC") - } - if len(repoGC.Keys) == 0 { - t.Fatal("expected at least one key, but found none") - } - if !repoGC.Keys[0].Key.Equals(clustertest.Cid1) { - t.Errorf("expected a different cid, expected: %s, found: %s", clustertest.Cid1, repoGC.Keys[0].Key) - } - - } - } - - tf := func(t *testing.T, url test.URLFunc) { - var resp api.GlobalRepoGC - test.MakePost(t, rest, url(rest)+"/ipfs/gc?local=true", []byte{}, 
&resp) - testGlobalRepoGC(t, resp) - - var resp1 api.GlobalRepoGC - test.MakePost(t, rest, url(rest)+"/ipfs/gc", []byte{}, &resp1) - testGlobalRepoGC(t, resp1) - } - - test.BothEndpoints(t, tf) -} diff --git a/packages/networking/ipfs-cluster/api/types.go b/packages/networking/ipfs-cluster/api/types.go deleted file mode 100644 index 5b2951d..0000000 --- a/packages/networking/ipfs-cluster/api/types.go +++ /dev/null @@ -1,1449 +0,0 @@ -// Package api holds declarations for types used in ipfs-cluster APIs to make -// them re-usable across differen tools. This include RPC API "Serial[izable]" -// versions for types. The Go API uses natives types, while RPC API, -// REST APIs etc use serializable types (i.e. json format). Conversion methods -// exists between types. -// -// Note that all conversion methods ignore any parsing errors. All values must -// be validated first before initializing any of the types defined here. -package api - -import ( - "encoding/json" - "fmt" - "net/url" - "sort" - "strconv" - "strings" - "time" - - pb "github.com/ipfs-cluster/ipfs-cluster/api/pb" - - cid "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - peer "github.com/libp2p/go-libp2p/core/peer" - protocol "github.com/libp2p/go-libp2p/core/protocol" - multiaddr "github.com/multiformats/go-multiaddr" - - // needed to parse /ws multiaddresses - _ "github.com/libp2p/go-libp2p/p2p/transport/websocket" - // needed to parse /dns* multiaddresses - _ "github.com/multiformats/go-multiaddr-dns" - - "github.com/pkg/errors" - proto "google.golang.org/protobuf/proto" -) - -var logger = logging.Logger("apitypes") - -var unixZero = time.Unix(0, 0) - -func init() { - // intialize trackerStatusString - stringTrackerStatus = make(map[string]TrackerStatus) - for k, v := range trackerStatusString { - stringTrackerStatus[v] = k - } -} - -// TrackerStatus values -const ( - // IPFSStatus should never take this value. - // When used as a filter. It means "all". - TrackerStatusUndefined TrackerStatus = 0 - // The cluster node is offline or not responding - TrackerStatusClusterError TrackerStatus = 1 << iota - // An error occurred pinning - TrackerStatusPinError - // An error occurred unpinning - TrackerStatusUnpinError - // The IPFS daemon has pinned the item - TrackerStatusPinned - // The IPFS daemon is currently pinning the item - TrackerStatusPinning - // The IPFS daemon is currently unpinning the item - TrackerStatusUnpinning - // The IPFS daemon is not pinning the item - TrackerStatusUnpinned - // The IPFS daemon is not pinning the item but it is being tracked - TrackerStatusRemote - // The item has been queued for pinning on the IPFS daemon - TrackerStatusPinQueued - // The item has been queued for unpinning on the IPFS daemon - TrackerStatusUnpinQueued - // The IPFS daemon is not pinning the item through this cid but it is - // tracked in a cluster dag - TrackerStatusSharded - // The item is in the state and should be pinned, but - // it is however not pinned and not queued/pinning. - TrackerStatusUnexpectedlyUnpinned -) - -// Composite TrackerStatus. 
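// Illustrative note (not from the original file): the composites below are
// bitwise ORs of the flags above, and Match treats any TrackerStatus as a
// filter, e.g.
//
//     TrackerStatusPinError.Match(TrackerStatusError) // true
//     TrackerStatusPinned.Match(TrackerStatusQueued)  // false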
-const ( - TrackerStatusError = TrackerStatusClusterError | TrackerStatusPinError | TrackerStatusUnpinError - TrackerStatusQueued = TrackerStatusPinQueued | TrackerStatusUnpinQueued -) - -// TrackerStatus represents the status of a tracked Cid in the PinTracker -type TrackerStatus int - -var trackerStatusString = map[TrackerStatus]string{ - TrackerStatusUndefined: "undefined", - TrackerStatusClusterError: "cluster_error", - TrackerStatusPinError: "pin_error", - TrackerStatusUnpinError: "unpin_error", - TrackerStatusError: "error", - TrackerStatusPinned: "pinned", - TrackerStatusPinning: "pinning", - TrackerStatusUnpinning: "unpinning", - TrackerStatusUnpinned: "unpinned", - TrackerStatusRemote: "remote", - TrackerStatusPinQueued: "pin_queued", - TrackerStatusUnpinQueued: "unpin_queued", - TrackerStatusQueued: "queued", - TrackerStatusSharded: "sharded", - TrackerStatusUnexpectedlyUnpinned: "unexpectedly_unpinned", -} - -// values autofilled in init() -var stringTrackerStatus map[string]TrackerStatus - -// String converts a TrackerStatus into a readable string. -// If the given TrackerStatus is a filter (with several -// bits set), it will return a comma-separated list. -func (st TrackerStatus) String() string { - var values []string - - // simple and known composite values - if v, ok := trackerStatusString[st]; ok { - return v - } - - // other filters - for k, v := range trackerStatusString { - if st&k > 0 { - values = append(values, v) - } - } - - return strings.Join(values, ",") -} - -// Match returns true if the tracker status matches the given filter. -// For example TrackerStatusPinError will match TrackerStatusPinError -// and TrackerStatusError. -func (st TrackerStatus) Match(filter TrackerStatus) bool { - return filter == TrackerStatusUndefined || - st == TrackerStatusUndefined || - st&filter > 0 -} - -// MarshalJSON uses the string representation of TrackerStatus for JSON -// encoding. -func (st TrackerStatus) MarshalJSON() ([]byte, error) { - return json.Marshal(st.String()) -} - -// UnmarshalJSON sets a tracker status from its JSON representation. -func (st *TrackerStatus) UnmarshalJSON(data []byte) error { - var v string - err := json.Unmarshal(data, &v) - if err != nil { - return err - } - *st = TrackerStatusFromString(v) - return nil -} - -// TrackerStatusFromString parses a string and returns the matching -// TrackerStatus value. The string can be a comma-separated list -// representing a TrackerStatus filter. Unknown status names are -// ignored. -func TrackerStatusFromString(str string) TrackerStatus { - values := strings.Split(strings.Replace(str, " ", "", -1), ",") - var status TrackerStatus - for _, v := range values { - st, ok := stringTrackerStatus[v] - if ok { - status |= st - } - } - return status -} - -// TrackerStatusAll all known TrackerStatus values. -func TrackerStatusAll() []TrackerStatus { - var list []TrackerStatus - for k := range trackerStatusString { - if k != TrackerStatusUndefined { - list = append(list, k) - } - } - - return list -} - -// IPFSPinStatus values -// FIXME include maxdepth -const ( - IPFSPinStatusBug IPFSPinStatus = iota - IPFSPinStatusError - IPFSPinStatusDirect - IPFSPinStatusRecursive - IPFSPinStatusIndirect - IPFSPinStatusUnpinned -) - -// IPFSPinStatus represents the status of a pin in IPFS (direct, recursive etc.) -type IPFSPinStatus int - -// IPFSPinStatusFromString parses a string and returns the matching -// IPFSPinStatus. 
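// Illustrative examples (not from the original file), following the prefix
// matching below:
//
//     IPFSPinStatusFromString("recursive")              // IPFSPinStatusRecursive
//     IPFSPinStatusFromString("indirect through <cid>") // IPFSPinStatusIndirect
//     IPFSPinStatusFromString("bogus")                  // IPFSPinStatusBug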
-func IPFSPinStatusFromString(t string) IPFSPinStatus { - // Since indirect statuses are of the form "indirect through " - // use a prefix match - - switch { - case strings.HasPrefix(t, "indirect"): - return IPFSPinStatusIndirect - case strings.HasPrefix(t, "recursive"): - // FIXME: Maxdepth? - return IPFSPinStatusRecursive - case t == "direct": - return IPFSPinStatusDirect - default: - return IPFSPinStatusBug - } -} - -// String returns the string form of the status as written by IPFS. -func (ips IPFSPinStatus) String() string { - switch ips { - case IPFSPinStatusDirect: - return "direct" - case IPFSPinStatusRecursive: - return "recursive" - case IPFSPinStatusIndirect: - return "indirect" - default: - return "" - } -} - -// UnmarshalJSON parses a status from JSON -func (ips *IPFSPinStatus) UnmarshalJSON(b []byte) error { - var str string - err := json.Unmarshal(b, &str) - if err != nil { - return err - } - *ips = IPFSPinStatusFromString(str) - return nil -} - -// MarshalJSON converts a status to JSON. -func (ips IPFSPinStatus) MarshalJSON() ([]byte, error) { - return json.Marshal(ips.String()) -} - -// IsPinned returns true if the item is pinned as expected by the -// maxDepth parameter. -func (ips IPFSPinStatus) IsPinned(maxDepth PinDepth) bool { - switch { - case maxDepth < 0: - return ips == IPFSPinStatusRecursive - case maxDepth == 0: - return ips == IPFSPinStatusDirect - case maxDepth > 0: - // FIXME: when we know how ipfs returns partial pins. - return ips == IPFSPinStatusRecursive - } - return false -} - -// ToTrackerStatus converts the IPFSPinStatus value to the -// appropriate TrackerStatus value. -func (ips IPFSPinStatus) ToTrackerStatus() TrackerStatus { - return ipfsPinStatus2TrackerStatusMap[ips] -} - -var ipfsPinStatus2TrackerStatusMap = map[IPFSPinStatus]TrackerStatus{ - IPFSPinStatusDirect: TrackerStatusPinned, - IPFSPinStatusRecursive: TrackerStatusPinned, - IPFSPinStatusIndirect: TrackerStatusUnpinned, - IPFSPinStatusUnpinned: TrackerStatusUnpinned, - IPFSPinStatusBug: TrackerStatusUndefined, - IPFSPinStatusError: TrackerStatusClusterError, //TODO(ajl): check suitability -} - -// Cid embeds a cid.Cid with the MarshalJSON/UnmarshalJSON methods overwritten. -type Cid struct { - cid.Cid -} - -// CidUndef is an Undefined CID. -var CidUndef = Cid{cid.Undef} - -// NewCid wraps a cid.Cid in a Cid. -func NewCid(c cid.Cid) Cid { - return Cid{ - Cid: c, - } -} - -// DecodeCid parses a CID from its string form. -func DecodeCid(str string) (Cid, error) { - c, err := cid.Decode(str) - return Cid{c}, err -} - -// CastCid returns a CID from its bytes. -func CastCid(bs []byte) (Cid, error) { - c, err := cid.Cast(bs) - return Cid{c}, err -} - -// MarshalJSON marshals a CID as JSON as a normal CID string. -func (c Cid) MarshalJSON() ([]byte, error) { - if !c.Defined() { - return []byte("null"), nil - } - return []byte(`"` + c.String() + `"`), nil -} - -// UnmarshalJSON reads a CID from its representation as JSON string. -func (c *Cid) UnmarshalJSON(b []byte) error { - if string(b) == "null" { - *c = CidUndef - return nil - } - - var cidStr string - err := json.Unmarshal(b, &cidStr) - if err != nil { - return err - } - cc, err := DecodeCid(cidStr) - if err != nil { - return err - } - *c = cc - return nil -} - -// Equals returns true if two Cids are equal. -func (c Cid) Equals(c2 Cid) bool { - return c.Cid.Equals(c2.Cid) -} - -// IPFSPinInfo represents an IPFS Pin, which only has a CID and type. -// Its JSON form is what IPFS returns when querying a pinset. 
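// Illustrative JSON form (not from the original file), given the MarshalJSON
// methods of Cid above and of IPFSPinStatus:
//
//     {"Cid":"QmExample...","Type":"recursive"}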
-type IPFSPinInfo struct { - Cid Cid `json:"Cid" codec:"c"` - Type IPFSPinStatus `json:"Type" codec:"t"` -} - -// GlobalPinInfo contains cluster-wide status information about a tracked Cid, -// indexed by cluster peer. -type GlobalPinInfo struct { - Cid Cid `json:"cid" codec:"c"` - Name string `json:"name" codec:"n"` - Allocations []peer.ID `json:"allocations" codec:"a,omitempty"` - Origins []Multiaddr `json:"origins" codec:"g,omitempty"` - Created time.Time `json:"created" codec:"t,omitempty"` - Metadata map[string]string `json:"metadata" codec:"m,omitempty"` - - // https://github.com/golang/go/issues/28827 - // Peer IDs are of string Kind(). We can't use peer IDs here - // as Go ignores TextMarshaler. - PeerMap map[string]PinInfoShort `json:"peer_map" codec:"pm,omitempty"` -} - -// String returns the string representation of a GlobalPinInfo. -func (gpi GlobalPinInfo) String() string { - str := fmt.Sprintf("Cid: %s\n", gpi.Cid) - str = str + "Peers:\n" - for pid, p := range gpi.PeerMap { - str = str + fmt.Sprintf("\t%s: %+v\n", pid, p) - } - return str -} - -// Add adds a PinInfo object to a GlobalPinInfo -func (gpi *GlobalPinInfo) Add(pi PinInfo) { - if !gpi.Cid.Defined() || !pi.Status.Match(TrackerStatusClusterError) { - gpi.Cid = pi.Cid - gpi.Name = pi.Name - gpi.Allocations = pi.Allocations - gpi.Origins = pi.Origins - gpi.Created = pi.Created - gpi.Metadata = pi.Metadata - } - - if gpi.PeerMap == nil { - gpi.PeerMap = make(map[string]PinInfoShort) - } - - gpi.PeerMap[pi.Peer.String()] = pi.PinInfoShort -} - -// Defined returns if the object is not empty. -func (gpi *GlobalPinInfo) Defined() bool { - return gpi.Cid.Defined() -} - -// Match returns true if one of the statuses in GlobalPinInfo matches -// the given filter. -func (gpi GlobalPinInfo) Match(filter TrackerStatus) bool { - for _, pi := range gpi.PeerMap { - if pi.Status.Match(filter) { - return true - } - } - return false -} - -// PinInfoShort is a subset of PinInfo which is embedded in GlobalPinInfo -// objects and does not carry redundant information as PinInfo would. -type PinInfoShort struct { - PeerName string `json:"peername" codec:"pn,omitempty"` - IPFS peer.ID `json:"ipfs_peer_id,omitempty" codec:"i,omitempty"` - IPFSAddresses []Multiaddr `json:"ipfs_peer_addresses,omitempty" codec:"ia,omitempty"` - Status TrackerStatus `json:"status" codec:"st,omitempty"` - TS time.Time `json:"timestamp" codec:"ts,omitempty"` - Error string `json:"error" codec:"e,omitempty"` - AttemptCount int `json:"attempt_count" codec:"a,omitempty"` - PriorityPin bool `json:"priority_pin" codec:"y,omitempty"` -} - -// String provides a string representation of PinInfoShort. -func (pis PinInfoShort) String() string { - var b strings.Builder - fmt.Fprintf(&b, "status: %s\n", pis.Status) - fmt.Fprintf(&b, "peername: %s\n", pis.PeerName) - fmt.Fprintf(&b, "ipfs: %s\n", pis.IPFS) - fmt.Fprintf(&b, "ipfsAddresses: %v\n", pis.IPFSAddresses) - fmt.Fprintf(&b, "error: %s\n", pis.Error) - fmt.Fprintf(&b, "attemptCount: %d\n", pis.AttemptCount) - fmt.Fprintf(&b, "priority: %t\n", pis.PriorityPin) - return b.String() -} - -// PinInfo holds information about local pins. This is used by the Pin -// Trackers. 
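
GlobalPinInfo.Match above answers "does any peer report this status?", which is how a CID can be filtered as being in error cluster-wide when a single peer fails. A stdlib-only sketch of that any-peer semantics (the types are illustrative stand-ins):

package main

import "fmt"

type Status int

const (
	Pinned Status = 1 << iota
	PinError
)

// Global indexes per-peer statuses by peer ID rendered as a string,
// like GlobalPinInfo.PeerMap.
type Global struct {
	PeerMap map[string]Status
}

// Match reports whether any peer's status matches the filter.
func (g Global) Match(filter Status) bool {
	for _, st := range g.PeerMap {
		if st&filter > 0 {
			return true
		}
	}
	return false
}

func main() {
	g := Global{PeerMap: map[string]Status{
		"peerA": Pinned,
		"peerB": PinError,
	}}
	fmt.Println(g.Match(PinError)) // true: one failing peer is enough
}
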
-type PinInfo struct { - Cid Cid `json:"cid" codec:"c"` - Name string `json:"name" codec:"m,omitempty"` - Peer peer.ID `json:"peer" codec:"p,omitempty"` - Allocations []peer.ID `json:"allocations" codec:"o,omitempty"` - Origins []Multiaddr `json:"origins" codec:"g,omitempty"` - Created time.Time `json:"created" codec:"t,omitempty"` - Metadata map[string]string `json:"metadata" codec:"md,omitempty"` - - PinInfoShort -} - -// ToGlobal converts a PinInfo object to a GlobalPinInfo with -// a single peer corresponding to the given PinInfo. -func (pi PinInfo) ToGlobal() GlobalPinInfo { - gpi := GlobalPinInfo{} - gpi.Add(pi) - return gpi -} - -// Defined returns if the PinInfo is not zero. -func (pi PinInfo) Defined() bool { - return pi.Cid.Defined() -} - -// String provides a string representation of PinInfo. -func (pi PinInfo) String() string { - var b strings.Builder - fmt.Fprintf(&b, "cid: %s\n", pi.Cid) - fmt.Fprintf(&b, "name: %s\n", pi.Name) - fmt.Fprintf(&b, "peer: %s\n", pi.Peer) - fmt.Fprintf(&b, "allocations: %v\n", pi.Allocations) - fmt.Fprintf(&b, "%s\n", pi.PinInfoShort) - return b.String() -} - -// Version holds version information -type Version struct { - Version string `json:"version" codec:"v"` -} - -// ConnectGraph holds information about the connectivity of the cluster To -// read, traverse the keys of ClusterLinks. Each such id is one of the peers -// of the "ClusterID" peer running the query. ClusterLinks[id] in turn lists -// the ids that peer "id" sees itself connected to. It is possible that id is -// a peer of ClusterID, but ClusterID can not reach id over rpc, in which case -// ClusterLinks[id] == [], as id's view of its connectivity can not be -// retrieved. -// -// Iff there was an error reading the IPFSID of the peer then id will not be a -// key of ClustertoIPFS or IPFSLinks. Finally iff id is a key of ClustertoIPFS -// then id will be a key of IPFSLinks. In the event of a SwarmPeers error -// IPFSLinks[id] == []. -type ConnectGraph struct { - ClusterID peer.ID `json:"cluster_id" codec:"id"` - IDtoPeername map[string]string `json:"id_to_peername" codec:"ip,omitempty"` - // ipfs to ipfs links - IPFSLinks map[string][]peer.ID `json:"ipfs_links" codec:"il,omitempty"` - // cluster to cluster links - ClusterLinks map[string][]peer.ID `json:"cluster_links" codec:"cl,omitempty"` - // cluster trust links - ClusterTrustLinks map[string]bool `json:"cluster_trust_links" codec:"ctl,omitempty"` - // cluster to ipfs links - ClustertoIPFS map[string]peer.ID `json:"cluster_to_ipfs" codec:"ci,omitempty"` -} - -// Multiaddr is a concrete type to wrap a Multiaddress so that it knows how to -// serialize and deserialize itself. -type Multiaddr struct { - multiaddr.Multiaddr -} - -// NewMultiaddr returns a cluster Multiaddr wrapper creating the -// multiaddr.Multiaddr with the given string. -func NewMultiaddr(mstr string) (Multiaddr, error) { - m, err := multiaddr.NewMultiaddr(mstr) - return Multiaddr{Multiaddr: m}, err -} - -// NewMultiaddrWithValue returns a new cluster Multiaddr wrapper using the -// given multiaddr.Multiaddr. -func NewMultiaddrWithValue(ma multiaddr.Multiaddr) Multiaddr { - return Multiaddr{Multiaddr: ma} -} - -// MarshalJSON returns a JSON-formatted multiaddress. -func (maddr Multiaddr) MarshalJSON() ([]byte, error) { - return maddr.Multiaddr.MarshalJSON() -} - -// UnmarshalJSON parses a cluster Multiaddr from the JSON representation. 
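
The binary decoder just below copies its input before use. A stdlib-only sketch of why that defensive copy matters when the decoded value retains the slice (Holder is an illustrative stand-in for the multiaddr wrapper):

package main

import (
	"bytes"
	"fmt"
)

// Holder retains the bytes it decodes, as a multiaddr retains its
// binary form.
type Holder struct{ raw []byte }

func (h *Holder) UnmarshalBinary(data []byte) error {
	h.raw = make([]byte, len(data))
	copy(h.raw, data) // do not alias the caller's buffer
	return nil
}

func main() {
	buf := []byte("addr-1")
	var h Holder
	_ = h.UnmarshalBinary(buf)
	copy(buf, "addr-2") // the caller (e.g. a codec) reuses its buffer
	fmt.Println(string(h.raw), bytes.Equal(h.raw, buf)) // addr-1 false
}
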
-func (maddr *Multiaddr) UnmarshalJSON(data []byte) error {
-	maddr.Multiaddr, _ = multiaddr.NewMultiaddr("/ip4/127.0.0.1") // null multiaddresses not allowed
-	return maddr.Multiaddr.UnmarshalJSON(data)
-}
-
-// MarshalBinary returns the bytes of the wrapped multiaddress.
-func (maddr Multiaddr) MarshalBinary() ([]byte, error) {
-	return maddr.Multiaddr.MarshalBinary()
-}
-
-// UnmarshalBinary casts some bytes as a multiaddress and wraps it in a
-// cluster Multiaddr.
-func (maddr *Multiaddr) UnmarshalBinary(data []byte) error {
-	datacopy := make([]byte, len(data)) // copy first: the caller may reuse this buffer
-	copy(datacopy, data)
-	maddr.Multiaddr, _ = multiaddr.NewMultiaddr("/ip4/127.0.0.1") // null multiaddresses not allowed
-	return maddr.Multiaddr.UnmarshalBinary(datacopy)
-}
-
-// Value returns the wrapped multiaddr.Multiaddr.
-func (maddr Multiaddr) Value() multiaddr.Multiaddr {
-	return maddr.Multiaddr
-}
-
-// ID holds information about the Cluster peer
-type ID struct {
-	ID                    peer.ID     `json:"id" codec:"i,omitempty"`
-	Addresses             []Multiaddr `json:"addresses" codec:"a,omitempty"`
-	ClusterPeers          []peer.ID   `json:"cluster_peers" codec:"cp,omitempty"`
-	ClusterPeersAddresses []Multiaddr `json:"cluster_peers_addresses" codec:"cpa,omitempty"`
-	Version               string      `json:"version" codec:"v,omitempty"`
-	Commit                string      `json:"commit" codec:"c,omitempty"`
-	RPCProtocolVersion    protocol.ID `json:"rpc_protocol_version" codec:"rv,omitempty"`
-	Error                 string      `json:"error" codec:"e,omitempty"`
-	IPFS                  IPFSID      `json:"ipfs,omitempty" codec:"ip,omitempty"`
-	Peername              string      `json:"peername" codec:"pn,omitempty"`
-	//PublicKey crypto.PubKey
-}
-
-// IPFSID is used to store information about the underlying IPFS daemon
-type IPFSID struct {
-	ID        peer.ID     `json:"id,omitempty" codec:"i,omitempty"`
-	Addresses []Multiaddr `json:"addresses" codec:"a,omitempty"`
-	Error     string      `json:"error" codec:"e,omitempty"`
-}
-
-// PinType specifies which sort of Pin object we are dealing with.
-// In practice, the PinType decides how a Pin object is treated by the
-// PinTracker.
-// See descriptions above.
-// A sharded Pin would look like:
-//
-// [ Meta ] (not pinned on IPFS, only present in cluster state)
-//
-//	|
-//	v
-//
-// [ Cluster DAG ] (pinned everywhere in "direct")
-//
-//	|  ..  |
-//	v      v
-//
-// [Shard1] .. [ShardN] (allocated to peers and pinned with max-depth=1)
-// |  | .. |        | | .. |
-// v  v .. v        v v .. v
-// [][]..[]         [][]..[] Blocks (indirectly pinned on ipfs, not tracked in cluster)
-type PinType uint64
-
-// PinType values. See PinType documentation for further explanation.
-const (
-	// BadType type showing up anywhere indicates a bug
-	BadType PinType = 1 << iota
-	// DataType is a regular, non-sharded pin. It is pinned recursively.
-	// It has no associated reference.
-	DataType
-	// MetaType tracks the original CID of a sharded DAG. Its Reference
-	// points to the Cluster DAG CID.
-	MetaType
-	// ClusterDAGType pins carry the CID of the root node that points to
-	// all the shard-root-nodes of the shards in which a DAG has been
-	// divided. Its Reference carries the MetaType CID.
-	// ClusterDAGType pins are pinned directly everywhere.
-	ClusterDAGType
-	// ShardType pins carry the root CID of a shard, which points
-	// to individual blocks on the original DAG that the user is adding,
-	// which has been sharded.
-	// They carry a Reference to the previous shard.
-	// ShardTypes are pinned with MaxDepth=1 (root and
-	// direct children only).
- ShardType -) - -// AllType is a PinType used for filtering all pin types -const AllType PinType = DataType | MetaType | ClusterDAGType | ShardType - -// PinTypeFromString is the inverse of String. It returns the PinType value -// corresponding to the input string -func PinTypeFromString(str string) PinType { - switch str { - case "pin": - return DataType - case "meta-pin": - return MetaType - case "clusterdag-pin": - return ClusterDAGType - case "shard-pin": - return ShardType - case "all": - return AllType - case "": - return AllType - default: - return BadType - } -} - -// String returns a printable value to identify the PinType -func (pT PinType) String() string { - switch pT { - case DataType: - return "pin" - case MetaType: - return "meta-pin" - case ClusterDAGType: - return "clusterdag-pin" - case ShardType: - return "shard-pin" - case AllType: - return "all" - default: - return "bad-type" - } -} - -// MarshalJSON provides json-representation of the pin type. -func (pT PinType) MarshalJSON() ([]byte, error) { - return json.Marshal(pT.String()) -} - -// UnmarshalJSON provides json-representation of the pin type. -func (pT *PinType) UnmarshalJSON(b []byte) error { - var str string - err := json.Unmarshal(b, &str) - if err != nil { - return err - } - t := PinTypeFromString(str) - *pT = t - return nil -} - -var pinOptionsMetaPrefix = "meta-" - -// PinMode is a PinOption that indicates how to pin something on IPFS, -// recursively or direct. -type PinMode int - -// PinMode values -const ( - PinModeRecursive PinMode = 0 - PinModeDirect PinMode = 1 -) - -// PinModeFromString converts a string to PinMode. -func PinModeFromString(s string) PinMode { - switch s { - case "recursive", "": - return PinModeRecursive - case "direct": - return PinModeDirect - default: - logger.Warn("unknown pin mode string. Defaulting to recursive") - return PinModeRecursive - } -} - -// String returns a human-readable value for PinMode. -func (pm PinMode) String() string { - switch pm { - case PinModeRecursive: - return "recursive" - case PinModeDirect: - return "direct" - default: - return "recursive" - } -} - -// ToIPFSPinStatus converts a PinMode to IPFSPinStatus. -func (pm PinMode) ToIPFSPinStatus() IPFSPinStatus { - if pm == PinModeDirect { - return IPFSPinStatusDirect - } - if pm == PinModeRecursive { - return IPFSPinStatusRecursive - } - return IPFSPinStatusBug -} - -// MarshalJSON converts the PinMode into a readable string in JSON. -func (pm PinMode) MarshalJSON() ([]byte, error) { - return json.Marshal(pm.String()) -} - -// UnmarshalJSON takes a JSON value and parses it into PinMode. -func (pm *PinMode) UnmarshalJSON(b []byte) error { - var s string - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - *pm = PinModeFromString(s) - return nil -} - -// ToPinDepth converts the Mode to Depth. -func (pm PinMode) ToPinDepth() PinDepth { - switch pm { - case PinModeRecursive: - return -1 - case PinModeDirect: - return 0 - default: - logger.Warn("unknown pin mode %d. 
Defaulting to -1 depth", pm) - return -1 - } -} - -// PinOptions wraps user-defined options for Pins -type PinOptions struct { - ReplicationFactorMin int `json:"replication_factor_min" codec:"rn,omitempty"` - ReplicationFactorMax int `json:"replication_factor_max" codec:"rx,omitempty"` - Name string `json:"name" codec:"n,omitempty"` - Mode PinMode `json:"mode" codec:"o,omitempty"` - ShardSize uint64 `json:"shard_size" codec:"s,omitempty"` - UserAllocations []peer.ID `json:"user_allocations" codec:"ua,omitempty"` - ExpireAt time.Time `json:"expire_at" codec:"e,omitempty"` - Metadata map[string]string `json:"metadata" codec:"m,omitempty"` - PinUpdate Cid `json:"pin_update,omitempty" codec:"pu,omitempty"` - Origins []Multiaddr `json:"origins" codec:"g,omitempty"` -} - -// Equals returns true if two PinOption objects are equivalent. po and po2 may -// be nil. -func (po PinOptions) Equals(po2 PinOptions) bool { - if po.Name != po2.Name { - return false - } - - if po.Mode != po2.Mode { - return false - } - - if po.ReplicationFactorMax != po2.ReplicationFactorMax { - return false - } - - if po.ReplicationFactorMin != po2.ReplicationFactorMin { - return false - } - - if po.ShardSize != po2.ShardSize { - return false - } - - lenAllocs1 := len(po.UserAllocations) - lenAllocs2 := len(po2.UserAllocations) - if lenAllocs1 != lenAllocs2 { - return false - } - - // avoid side effects in the original objects - allocs1 := PeersToStrings(po.UserAllocations) - allocs2 := PeersToStrings(po2.UserAllocations) - sort.Strings(allocs1) - sort.Strings(allocs2) - if strings.Join(allocs1, ",") != strings.Join(allocs2, ",") { - return false - } - - if !po.ExpireAt.Equal(po2.ExpireAt) { - return false - } - - for k, v := range po.Metadata { - v2 := po2.Metadata[k] - if k != "" && v != v2 { - return false - } - } - - // deliberately ignore Update - - lenOrigins1 := len(po.Origins) - lenOrigins2 := len(po2.Origins) - if lenOrigins1 != lenOrigins2 { - return false - } - - for _, o1 := range po.Origins { - found := false - for _, o2 := range po2.Origins { - if o1.Value().Equal(o2.Value()) { - found = true - } - } - if !found { - return false - } - } - - return true -} - -// ToQuery returns the PinOption as query arguments. -func (po PinOptions) ToQuery() (string, error) { - q := url.Values{} - q.Set("replication-min", fmt.Sprintf("%d", po.ReplicationFactorMin)) - q.Set("replication-max", fmt.Sprintf("%d", po.ReplicationFactorMax)) - q.Set("name", po.Name) - q.Set("mode", po.Mode.String()) - q.Set("shard-size", fmt.Sprintf("%d", po.ShardSize)) - q.Set("user-allocations", strings.Join(PeersToStrings(po.UserAllocations), ",")) - if !po.ExpireAt.IsZero() { - v, err := po.ExpireAt.MarshalText() - if err != nil { - return "", err - } - q.Set("expire-at", string(v)) - } - for k, v := range po.Metadata { - if k == "" { - continue - } - q.Set(fmt.Sprintf("%s%s", pinOptionsMetaPrefix, k), v) - } - if po.PinUpdate.Defined() { - q.Set("pin-update", po.PinUpdate.String()) - } - - if len(po.Origins) > 0 { - origins := make([]string, len(po.Origins)) - for i, o := range po.Origins { - origins[i] = o.String() - } - q.Set("origins", strings.Join(origins, ",")) - } - - return q.Encode(), nil -} - -// FromQuery is the inverse of ToQuery(). 
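
ToQuery above flattens every pin option into flat query parameters so they survive an HTTP round trip, and FromQuery below reverses it. A stdlib-only sketch of the round trip with a reduced, illustrative options struct:

package main

import (
	"fmt"
	"net/url"
	"strconv"
)

// Opts is an illustrative stand-in for PinOptions.
type Opts struct {
	Name           string
	ReplicationMin int
	ReplicationMax int
}

// ToQuery flattens the options into URL query parameters.
func (o Opts) ToQuery() string {
	q := url.Values{}
	q.Set("name", o.Name)
	q.Set("replication-min", strconv.Itoa(o.ReplicationMin))
	q.Set("replication-max", strconv.Itoa(o.ReplicationMax))
	return q.Encode()
}

// FromQuery is the inverse of ToQuery.
func (o *Opts) FromQuery(q url.Values) error {
	o.Name = q.Get("name")
	var err error
	if o.ReplicationMin, err = strconv.Atoi(q.Get("replication-min")); err != nil {
		return err
	}
	o.ReplicationMax, err = strconv.Atoi(q.Get("replication-max"))
	return err
}

func main() {
	in := Opts{Name: "abc", ReplicationMin: 2, ReplicationMax: 3}
	q, _ := url.ParseQuery(in.ToQuery())
	var out Opts
	if err := out.FromQuery(q); err == nil && in == out {
		fmt.Println("round trip ok:", out)
	}
}
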
-func (po *PinOptions) FromQuery(q url.Values) error { - po.Name = q.Get("name") - - po.Mode = PinModeFromString(q.Get("mode")) - - rplStr := q.Get("replication") - if rplStr != "" { // override - q.Set("replication-min", rplStr) - q.Set("replication-max", rplStr) - } - - err := parseIntParam(q, "replication-min", &po.ReplicationFactorMin) - if err != nil { - return err - } - - err = parseIntParam(q, "replication-max", &po.ReplicationFactorMax) - if err != nil { - return err - } - - if v := q.Get("shard-size"); v != "" { - shardSize, err := strconv.ParseUint(v, 10, 64) - if err != nil { - return errors.New("parameter shard_size is invalid") - } - po.ShardSize = shardSize - } - - if allocs := q.Get("user-allocations"); allocs != "" { - po.UserAllocations = StringsToPeers(strings.Split(allocs, ",")) - } - - if v := q.Get("expire-at"); v != "" { - var tm time.Time - err := tm.UnmarshalText([]byte(v)) - if err != nil { - return errors.Wrap(err, "expire-at cannot be parsed") - } - po.ExpireAt = tm - } else if v = q.Get("expire-in"); v != "" { - d, err := time.ParseDuration(v) - if err != nil { - return errors.Wrap(err, "expire-in cannot be parsed") - } - if d < time.Second { - return errors.New("expire-in duration too short") - } - po.ExpireAt = time.Now().Add(d) - } - - po.Metadata = make(map[string]string) - for k := range q { - if !strings.HasPrefix(k, pinOptionsMetaPrefix) { - continue - } - metaKey := strings.TrimPrefix(k, pinOptionsMetaPrefix) - if metaKey == "" { - continue - } - po.Metadata[metaKey] = q.Get(k) - } - - updateStr := q.Get("pin-update") - if updateStr != "" { - updateCid, err := DecodeCid(updateStr) - if err != nil { - return fmt.Errorf("error decoding update option parameter: %s", err) - } - po.PinUpdate = updateCid - } - - originsStr := q.Get("origins") - if originsStr != "" { - origins := strings.Split(originsStr, ",") - maOrigins := make([]Multiaddr, len(origins)) - for i, ostr := range origins { - maOrig, err := NewMultiaddr(ostr) - if err != nil { - return fmt.Errorf("error decoding multiaddress: %w", err) - } - _, err = maOrig.ValueForProtocol(multiaddr.P_P2P) - if err != nil { - return fmt.Errorf("multiaddress does not contain peer ID: %w", err) - } - - maOrigins[i] = maOrig - } - po.Origins = maOrigins - } - - return nil -} - -// PinDepth indicates how deep a pin should be pinned, with -// -1 meaning "to the bottom", or "recursive". -type PinDepth int - -// ToPinMode converts PinDepth to PinMode -func (pd PinDepth) ToPinMode() PinMode { - switch pd { - case -1: - return PinModeRecursive - case 0: - return PinModeDirect - default: - logger.Warnf("bad pin depth: %d", pd) - return PinModeRecursive - } -} - -// Pin carries all the information associated to a CID that is pinned -// in IPFS Cluster. It also carries transient information (that may not -// get protobuffed, like UserAllocations). -type Pin struct { - PinOptions - - Cid Cid `json:"cid" codec:"c"` - - // See PinType comments - Type PinType `json:"type" codec:"t,omitempty"` - - // The peers to which this pin is allocated - Allocations []peer.ID `json:"allocations" codec:"a,omitempty"` - - // MaxDepth associated to this pin. -1 means - // recursive. - MaxDepth PinDepth `json:"max_depth" codec:"d,omitempty"` - - // We carry a reference CID to this pin. For - // ClusterDAGs, it is the MetaPin CID. For the - // MetaPin it is the ClusterDAG CID. For Shards, - // it is the previous shard CID. 
-	// When not needed, the pointer is nil.
-	Reference *Cid `json:"reference" codec:"r,omitempty"`
-
-	// The time that the pin was submitted to the consensus layer.
-	Timestamp time.Time `json:"timestamp" codec:"i,omitempty"`
-}
-
-// String is a string representation of a Pin.
-func (pin Pin) String() string {
-	var b strings.Builder
-	fmt.Fprintf(&b, "cid: %s\n", pin.Cid.String())
-	fmt.Fprintf(&b, "type: %s\n", pin.Type)
-	fmt.Fprintf(&b, "allocations: %v\n", pin.Allocations)
-	fmt.Fprintf(&b, "maxdepth: %d\n", pin.MaxDepth)
-	if pin.Reference != nil {
-		fmt.Fprintf(&b, "reference: %s\n", pin.Reference)
-	}
-	return b.String()
-}
-
-// IsPinEverywhere returns true when both replication factors are set to -1.
-func (pin Pin) IsPinEverywhere() bool {
-	return pin.ReplicationFactorMin == -1 && pin.ReplicationFactorMax == -1
-}
-
-// PinPath is a wrapper for holding pin options and the path of the content.
-type PinPath struct {
-	PinOptions
-	Path string `json:"path"`
-}
-
-// Defined returns true if the path has a value.
-func (pp PinPath) Defined() bool {
-	return pp.Path != ""
-}
-
-// PinCid is a shortcut to create a Pin only with a Cid. The default is a
-// recursive pin of DataType.
-func PinCid(c Cid) Pin {
-	return Pin{
-		Cid:         c,
-		Type:        DataType,
-		Allocations: []peer.ID{},
-		MaxDepth:    -1, // Recursive
-		Timestamp:   time.Now(),
-	}
-}
-
-// PinWithOpts creates a new Pin calling PinCid(c) and then sets its
-// PinOptions fields with the given options. Pin fields that are linked to
-// options are set accordingly (MaxDepth from Mode).
-func PinWithOpts(c Cid, opts PinOptions) Pin {
-	p := PinCid(c)
-	p.PinOptions = opts
-	p.MaxDepth = p.Mode.ToPinDepth()
-	return p
-}
-
-// convertPinType maps a PinType bit flag to its protobuf enum value
-// (the position of the set bit).
-func convertPinType(t PinType) pb.Pin_PinType {
-	var i pb.Pin_PinType
-	for t != 1 {
-		if t == 0 {
-			return pb.Pin_BadType
-		}
-		t = t >> 1
-		i++
-	}
-	return i
-}
-
-// ProtoMarshal marshals this Pin using protobuf.
-func (pin Pin) ProtoMarshal() ([]byte, error) {
-	allocs := make([][]byte, len(pin.Allocations))
-	for i, pid := range pin.Allocations {
-		bs, err := pid.Marshal()
-		if err != nil {
-			return nil, err
-		}
-		allocs[i] = bs
-	}
-
-	// Zero-length slices are decoded as null, which is fine.
-	origins := make([][]byte, len(pin.Origins))
-	for i, orig := range pin.Origins {
-		origins[i] = orig.Bytes()
-	}
-
-	var expireAtProto uint64
-	// Only set the protobuf field with non-zero times.
-	if !(pin.ExpireAt.IsZero() || pin.ExpireAt.Equal(unixZero)) {
-		expireAtProto = uint64(pin.ExpireAt.Unix())
-	}
-
-	var timestampProto uint64
-	// Only set the protobuf field with non-zero times.
-	if !(pin.Timestamp.IsZero() || pin.Timestamp.Equal(unixZero)) {
-		timestampProto = uint64(pin.Timestamp.Unix())
-	}
-
-	// Our metadata needs to always be serialized in exactly the same way,
-	// which is why we use an array sorted by key and deprecated the use of
-	// a protobuf map.
- var sortedMetadata []*pb.Metadata - var metaKeys []string - for k := range pin.Metadata { - metaKeys = append(metaKeys, k) - } - sort.Strings(metaKeys) - - for _, k := range metaKeys { - metadata := &pb.Metadata{ - Key: k, - Value: pin.Metadata[k], - } - sortedMetadata = append(sortedMetadata, metadata) - } - - opts := &pb.PinOptions{ - ReplicationFactorMin: int32(pin.ReplicationFactorMin), - ReplicationFactorMax: int32(pin.ReplicationFactorMax), - Name: pin.Name, - ShardSize: pin.ShardSize, - // Metadata: pin.Metadata, - PinUpdate: pin.PinUpdate.Bytes(), - ExpireAt: expireAtProto, - // Mode: pin.Mode, - // UserAllocations: pin.UserAllocations, - Origins: origins, - SortedMetadata: sortedMetadata, - } - - pbPin := &pb.Pin{ - Cid: pin.Cid.Bytes(), - Type: convertPinType(pin.Type), - Allocations: allocs, - MaxDepth: int32(pin.MaxDepth), - Options: opts, - Timestamp: timestampProto, - } - if ref := pin.Reference; ref != nil { - pbPin.Reference = ref.Bytes() - } - return proto.Marshal(pbPin) -} - -// ProtoUnmarshal unmarshals this fields from protobuf-encoded bytes. -func (pin *Pin) ProtoUnmarshal(data []byte) error { - pbPin := pb.Pin{} - err := proto.Unmarshal(data, &pbPin) - if err != nil { - return err - } - ci, err := CastCid(pbPin.GetCid()) - if err != nil { - pin.Cid = CidUndef - } else { - pin.Cid = ci - } - - pin.Type = 1 << uint64(pbPin.GetType()) - - pbAllocs := pbPin.GetAllocations() - lenAllocs := len(pbAllocs) - allocs := make([]peer.ID, lenAllocs) - for i, pidb := range pbAllocs { - pid, err := peer.IDFromBytes(pidb) - if err != nil { - return err - } - allocs[i] = pid - } - - pin.Allocations = allocs - pin.MaxDepth = PinDepth(pbPin.GetMaxDepth()) - ref, err := CastCid(pbPin.GetReference()) - if err != nil { - pin.Reference = nil - - } else { - pin.Reference = &ref - } - - ts := pbPin.GetTimestamp() - if ts > 0 { - pin.Timestamp = time.Unix(int64(ts), 0) - } - - opts := pbPin.GetOptions() - pin.ReplicationFactorMin = int(opts.GetReplicationFactorMin()) - pin.ReplicationFactorMax = int(opts.GetReplicationFactorMax()) - pin.Name = opts.GetName() - pin.ShardSize = opts.GetShardSize() - - // pin.UserAllocations = opts.GetUserAllocations() - exp := opts.GetExpireAt() - if exp > 0 { - pin.ExpireAt = time.Unix(int64(exp), 0) - } - - // Use whatever metadata is available. - //lint:ignore SA1019 we keed to keep backwards compat - pin.Metadata = opts.GetMetadata() - sortedMetadata := opts.GetSortedMetadata() - if len(sortedMetadata) > 0 && pin.Metadata == nil { - pin.Metadata = make(map[string]string, len(sortedMetadata)) - } - for _, md := range opts.GetSortedMetadata() { - pin.Metadata[md.Key] = md.Value - } - - pinUpdate, err := CastCid(opts.GetPinUpdate()) - if err == nil { - pin.PinUpdate = pinUpdate - } - - // We do not store the PinMode option but we can - // derive it from the MaxDepth setting. - pin.Mode = pin.MaxDepth.ToPinMode() - - pbOrigins := opts.GetOrigins() - origins := make([]Multiaddr, len(pbOrigins)) - for i, orig := range pbOrigins { - maOrig, err := multiaddr.NewMultiaddrBytes(orig) - if err != nil { - return err - } - origins[i] = NewMultiaddrWithValue(maOrig) - } - pin.Origins = origins - - return nil -} - -// Equals checks if two pins are the same (with the same allocations). -// If allocations are the same but in different order, they are still -// considered equivalent. 
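
Equals below compares allocations as unordered sets by sorting copies first. A stdlib-only sketch of that order-insensitive comparison, with peer IDs reduced to plain strings:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// sameAllocations reports whether two peer lists hold the same members,
// ignoring order, the way Pin.Equals treats Allocations.
func sameAllocations(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	// sort copies to avoid reordering the caller's slices as a side effect
	a2 := append([]string(nil), a...)
	b2 := append([]string(nil), b...)
	sort.Strings(a2)
	sort.Strings(b2)
	return strings.Join(a2, ",") == strings.Join(b2, ",")
}

func main() {
	fmt.Println(sameAllocations([]string{"p1", "p2"}, []string{"p2", "p1"})) // true
	fmt.Println(sameAllocations([]string{"p1"}, []string{"p2"}))             // false
}
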
-func (pin Pin) Equals(pin2 Pin) bool { - if !pin.Cid.Equals(pin2.Cid) { - return false - } - - if pin.Type != pin2.Type { - return false - } - - if pin.MaxDepth != pin2.MaxDepth { - return false - } - - if pin.Reference != nil && pin2.Reference == nil || - pin.Reference == nil && pin2.Reference != nil { - return false - } - - if pin.Reference != nil && pin2.Reference != nil && - !pin.Reference.Equals(*pin2.Reference) { - return false - } - - allocs1 := PeersToStrings(pin.Allocations) - sort.Strings(allocs1) - allocs2 := PeersToStrings(pin2.Allocations) - sort.Strings(allocs2) - - if strings.Join(allocs1, ",") != strings.Join(allocs2, ",") { - return false - } - - return pin.PinOptions.Equals(pin2.PinOptions) -} - -// IsRemotePin determines whether a Pin's ReplicationFactor has -// been met, so as to either pin or unpin it from the peer. -func (pin Pin) IsRemotePin(pid peer.ID) bool { - if pin.IsPinEverywhere() { - return false - } - - for _, p := range pin.Allocations { - if p == pid { - return false - } - } - return true -} - -// ExpiredAt returns whether the pin has expired at the given time. -func (pin Pin) ExpiredAt(t time.Time) bool { - if pin.ExpireAt.IsZero() || pin.ExpireAt.Equal(unixZero) { - return false - } - - return pin.ExpireAt.Before(t) -} - -// Defined returns true if this is not a zero-object pin (the CID must be set). -func (pin Pin) Defined() bool { - return pin.Cid.Defined() -} - -// NodeWithMeta specifies a block of data and a set of optional metadata fields -// carrying information about the encoded ipld node -type NodeWithMeta struct { - Data []byte `codec:"d,omitempty"` - Cid Cid `codec:"c,omitempty"` - CumSize uint64 `codec:"s,omitempty"` // Cumulative size -} - -// Size returns how big is the block. It is different from CumSize, which -// records the size of the underlying tree. -func (n *NodeWithMeta) Size() uint64 { - return uint64(len(n.Data)) -} - -// MetricsSet is a map to carry slices of metrics indexed by type. -type MetricsSet map[string][]Metric - -// Metric transports information about a peer.ID. It is used to decide -// pin allocations by a PinAllocator. IPFS cluster is agnostic to -// the Value, which should be interpreted by the PinAllocator. -// The ReceivedAt value is a timestamp representing when a peer has received -// the metric value. -type Metric struct { - Name string `json:"name" codec:"n,omitempty"` - Peer peer.ID `json:"peer" codec:"p,omitempty"` - Value string `json:"value" codec:"v,omitempty"` - Expire int64 `json:"expire" codec:"e,omitempty"` - Valid bool `json:"valid" codec:"d,omitempty"` - Weight int64 `json:"weight" codec:"w,omitempty"` - Partitionable bool `json:"partitionable" codec:"o,omitempty"` - ReceivedAt int64 `json:"received_at" codec:"t,omitempty"` // ReceivedAt contains a UnixNano timestamp -} - -func (m Metric) String() string { - return fmt.Sprintf("%s | %s | %s | Recv: %d | Exp: %d | W: %d | Part: %t | Valid: %t", - m.Name, - m.Peer, - m.Value, - m.ReceivedAt, - m.Expire, - m.Weight, - m.Partitionable, - m.Valid, - ) -} - -// Defined returns true if the metric name is set. 
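
Metric.Expire above is a UnixNano timestamp, and the TTL helpers below convert it to and from durations. A stdlib-only sketch of the same expiry bookkeeping (metric is an illustrative stand-in):

package main

import (
	"fmt"
	"time"
)

// metric stores its expiry as a UnixNano timestamp, like api.Metric.
type metric struct{ expire int64 }

func (m *metric) SetTTL(d time.Duration) { m.expire = time.Now().Add(d).UnixNano() }

// GetTTL never goes negative: an expired metric simply has no time left.
func (m metric) GetTTL() time.Duration {
	ttl := time.Until(time.Unix(0, m.expire))
	if ttl < 0 {
		return 0
	}
	return ttl
}

func (m metric) Expired() bool { return time.Now().After(time.Unix(0, m.expire)) }

func main() {
	var m metric
	m.SetTTL(30 * time.Second)
	fmt.Println(m.GetTTL() > 29*time.Second, m.Expired()) // true false
}
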
-func (m Metric) Defined() bool {
-	return m.Name != ""
-}
-
-// SetTTL sets Metric to expire after the given time.Duration
-func (m *Metric) SetTTL(d time.Duration) {
-	exp := time.Now().Add(d)
-	m.Expire = exp.UnixNano()
-}
-
-// GetTTL returns the time left before the Metric expires
-func (m Metric) GetTTL() time.Duration {
-	expDate := time.Unix(0, m.Expire)
-	ttl := time.Until(expDate)
-	if ttl < 0 {
-		ttl = 0
-	}
-	return ttl
-}
-
-// Expired returns true if the Metric has expired
-func (m Metric) Expired() bool {
-	expDate := time.Unix(0, m.Expire)
-	return time.Now().After(expDate)
-}
-
-// Discard returns true if the metric is not valid or has expired
-func (m Metric) Discard() bool {
-	return !m.Valid || m.Expired()
-}
-
-// GetWeight returns the weight of the metric.
-// This is for compatibility.
-func (m Metric) GetWeight() int64 {
-	return m.Weight
-}
-
-// MetricSlice is a sortable Metric array.
-type MetricSlice []Metric
-
-func (es MetricSlice) Len() int      { return len(es) }
-func (es MetricSlice) Swap(i, j int) { es[i], es[j] = es[j], es[i] }
-func (es MetricSlice) Less(i, j int) bool {
-	if es[i].Peer == es[j].Peer {
-		return es[i].Expire < es[j].Expire
-	}
-	return es[i].Peer < es[j].Peer
-}
-
-// Alert carries alerting information about a peer.
-type Alert struct {
-	Metric
-	TriggeredAt time.Time `json:"triggered_at" codec:"r,omitempty"`
-}
-
-// Error can be used by APIs to return errors.
-type Error struct {
-	Code    int    `json:"code" codec:"o,omitempty"`
-	Message string `json:"message" codec:"m,omitempty"`
-}
-
-// Error implements the error interface and returns the error's message.
-func (e Error) Error() string {
-	return fmt.Sprintf("%s (%d)", e.Message, e.Code)
-}
-
-// IPFSRepoStat wraps information about the IPFS repository.
-type IPFSRepoStat struct {
-	RepoSize   uint64 `codec:"r,omitempty"`
-	StorageMax uint64 `codec:"s,omitempty"`
-}
-
-// IPFSRepoGC represents the streaming response sent from the repo gc API of IPFS.
-type IPFSRepoGC struct {
-	Key   Cid    `json:"key,omitempty" codec:"k,omitempty"`
-	Error string `json:"error,omitempty" codec:"e,omitempty"`
-}
-
-// RepoGC contains garbage collected CIDs from a cluster peer's IPFS daemon.
-type RepoGC struct {
-	Peer     peer.ID      `json:"peer" codec:"p,omitempty"` // the Cluster peer ID
-	Peername string       `json:"peername" codec:"pn,omitempty"`
-	Keys     []IPFSRepoGC `json:"keys" codec:"k"`
-	Error    string       `json:"error,omitempty" codec:"e,omitempty"`
-}
-
-// GlobalRepoGC contains cluster-wide information about garbage collected CIDs
-// from IPFS.
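
GlobalRepoGC below aggregates per-peer garbage-collection results into a peer-indexed map, so a failing peer records an error in place instead of aborting the cluster-wide operation. A stdlib-only sketch with reduced, illustrative types:

package main

import "fmt"

type RepoGC struct {
	Peer string
	Keys []string
	Err  string
}

// collect keys results by peer, mirroring GlobalRepoGC.PeerMap.
func collect(results []RepoGC) map[string]RepoGC {
	m := make(map[string]RepoGC, len(results))
	for _, r := range results {
		m[r.Peer] = r
	}
	return m
}

func main() {
	g := collect([]RepoGC{
		{Peer: "peerA", Keys: []string{"QmX"}},
		{Peer: "peerB", Err: "ipfs daemon unreachable"},
	})
	fmt.Println(len(g), g["peerB"].Err) // 2 ipfs daemon unreachable
}
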
-type GlobalRepoGC struct { - PeerMap map[string]RepoGC `json:"peer_map" codec:"pm,omitempty"` -} diff --git a/packages/networking/ipfs-cluster/api/types_test.go b/packages/networking/ipfs-cluster/api/types_test.go deleted file mode 100644 index ab09f6d..0000000 --- a/packages/networking/ipfs-cluster/api/types_test.go +++ /dev/null @@ -1,283 +0,0 @@ -package api - -import ( - "bytes" - "net/url" - "reflect" - "strings" - "testing" - "time" - - peer "github.com/libp2p/go-libp2p/core/peer" - multiaddr "github.com/multiformats/go-multiaddr" - - "github.com/ugorji/go/codec" -) - -func TestTrackerFromString(t *testing.T) { - testcases := []string{"cluster_error", "pin_error", "unpin_error", "pinned", "pinning", "unpinning", "unpinned", "remote"} - for i, tc := range testcases { - if TrackerStatusFromString(tc).String() != TrackerStatus(1< 30*time.Second || ttl < 29*time.Second { - t.Error("looks like a bad ttl") - } -} - -func TestConvertPinType(t *testing.T) { - for _, t1 := range []PinType{BadType, ShardType} { - i := convertPinType(t1) - t2 := PinType(1 << uint64(i)) - if t2 != t1 { - t.Error("bad conversion") - } - } -} - -func checkDupTags(t *testing.T, name string, typ reflect.Type, tags map[string]struct{}) { - if tags == nil { - tags = make(map[string]struct{}) - } - for i := 0; i < typ.NumField(); i++ { - f := typ.Field(i) - - if f.Type.Kind() == reflect.Struct && f.Anonymous { - checkDupTags(t, name, f.Type, tags) - continue - } - - tag := f.Tag.Get(name) - if tag == "" { - continue - } - val := strings.Split(tag, ",")[0] - - t.Logf("%s: '%s:%s'", f.Name, name, val) - _, ok := tags[val] - if ok { - t.Errorf("%s: tag %s already used", f.Name, val) - } - tags[val] = struct{}{} - } -} - -// TestDupTags checks that we are not re-using the same codec tag for -// different fields in the types objects. 
-func TestDupTags(t *testing.T) { - typ := reflect.TypeOf(Pin{}) - checkDupTags(t, "codec", typ, nil) - - typ = reflect.TypeOf(ID{}) - checkDupTags(t, "codec", typ, nil) - - typ = reflect.TypeOf(GlobalPinInfo{}) - checkDupTags(t, "codec", typ, nil) - - typ = reflect.TypeOf(PinInfo{}) - checkDupTags(t, "codec", typ, nil) - - typ = reflect.TypeOf(ConnectGraph{}) - checkDupTags(t, "codec", typ, nil) - - typ = reflect.TypeOf(ID{}) - checkDupTags(t, "codec", typ, nil) - - typ = reflect.TypeOf(NodeWithMeta{}) - checkDupTags(t, "codec", typ, nil) - - typ = reflect.TypeOf(Metric{}) - checkDupTags(t, "codec", typ, nil) - - typ = reflect.TypeOf(Error{}) - checkDupTags(t, "codec", typ, nil) - - typ = reflect.TypeOf(IPFSRepoStat{}) - checkDupTags(t, "codec", typ, nil) - - typ = reflect.TypeOf(AddedOutput{}) - checkDupTags(t, "codec", typ, nil) -} - -func TestPinOptionsQuery(t *testing.T) { - testcases := []*PinOptions{ - { - ReplicationFactorMax: 3, - ReplicationFactorMin: 2, - Name: "abc", - ShardSize: 33, - UserAllocations: StringsToPeers([]string{ - "QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabc", - "QmUZ13osndQ5uL4tPWHXe3iBgBgq9gfewcBMSCAuMBsDJ6", - }), - ExpireAt: time.Now().Add(12 * time.Hour), - Metadata: map[string]string{ - "hello": "bye", - "hello2": "bye2", - }, - Origins: []Multiaddr{ - NewMultiaddrWithValue(multiaddr.StringCast("/ip4/1.2.3.4/tcp/1234/p2p/12D3KooWKewdAMAU3WjYHm8qkAJc5eW6KHbHWNigWraXXtE1UCng")), - NewMultiaddrWithValue(multiaddr.StringCast("/ip4/2.3.3.4/tcp/1234/p2p/12D3KooWF6BgwX966ge5AVFs9Gd2wVTBmypxZVvaBR12eYnUmXkR")), - }, - }, - { - ReplicationFactorMax: -1, - ReplicationFactorMin: 0, - Name: "", - ShardSize: 0, - UserAllocations: []peer.ID{}, - Metadata: nil, - }, - { - ReplicationFactorMax: -1, - ReplicationFactorMin: 0, - Name: "", - ShardSize: 0, - UserAllocations: nil, - Metadata: map[string]string{ - "": "bye", - }, - }, - } - - for _, tc := range testcases { - queryStr, err := tc.ToQuery() - if err != nil { - t.Fatal("error converting to query", err) - } - q, err := url.ParseQuery(queryStr) - if err != nil { - t.Error("error parsing query", err) - } - po2 := PinOptions{} - err = po2.FromQuery(q) - if err != nil { - t.Fatal("error parsing options", err) - } - if !tc.Equals(po2) { - t.Error("expected equal PinOptions") - t.Error(queryStr) - t.Errorf("%+v\n", tc) - t.Errorf("%+v\n", po2) - } - } -} - -func TestIDCodec(t *testing.T) { - TestPeerID1, _ := peer.Decode("QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabc") - TestPeerID2, _ := peer.Decode("QmUZ13osndQ5uL4tPWHXe3iBgBgq9gfewcBMSCAuMBsDJ6") - TestPeerID3, _ := peer.Decode("QmPGDFvBkgWhvzEK9qaTWrWurSwqXNmhnK3hgELPdZZNPa") - addr, _ := NewMultiaddr("/ip4/1.2.3.4") - id := &ID{ - ID: TestPeerID1, - Addresses: []Multiaddr{addr}, - ClusterPeers: []peer.ID{TestPeerID2}, - ClusterPeersAddresses: []Multiaddr{addr}, - Version: "2", - Commit: "", - RPCProtocolVersion: "abc", - Error: "", - IPFS: IPFSID{ - ID: TestPeerID3, - Addresses: []Multiaddr{addr}, - Error: "", - }, - Peername: "hi", - } - - var buf bytes.Buffer - enc := codec.NewEncoder(&buf, &codec.MsgpackHandle{}) - err := enc.Encode(id) - if err != nil { - t.Fatal(err) - } - - var buf2 = bytes.NewBuffer(buf.Bytes()) - dec := codec.NewDecoder(buf2, &codec.MsgpackHandle{}) - - var id2 ID - - err = dec.Decode(&id2) - if err != nil { - t.Fatal(err) - } -} - -func TestPinCodec(t *testing.T) { - ci, _ := DecodeCid("QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabc") - pin := PinCid(ci) - var buf bytes.Buffer - enc := codec.NewEncoder(&buf, &codec.MsgpackHandle{}) - err 
:= enc.Encode(pin) - if err != nil { - t.Fatal(err) - } - - var buf2 = bytes.NewBuffer(buf.Bytes()) - dec := codec.NewDecoder(buf2, &codec.MsgpackHandle{}) - - var pin2 Pin - - err = dec.Decode(&pin2) - if err != nil { - t.Fatal(err) - } -} diff --git a/packages/networking/ipfs-cluster/api/util.go b/packages/networking/ipfs-cluster/api/util.go deleted file mode 100644 index e1128ef..0000000 --- a/packages/networking/ipfs-cluster/api/util.go +++ /dev/null @@ -1,29 +0,0 @@ -package api - -import ( - peer "github.com/libp2p/go-libp2p/core/peer" -) - -// PeersToStrings Encodes a list of peers. -func PeersToStrings(peers []peer.ID) []string { - strs := make([]string, len(peers)) - for i, p := range peers { - if p != "" { - strs[i] = p.String() - } - } - return strs -} - -// StringsToPeers decodes peer.IDs from strings. -func StringsToPeers(strs []string) []peer.ID { - peers := []peer.ID{} - for _, p := range strs { - pid, err := peer.Decode(p) - if err != nil { - continue - } - peers = append(peers, pid) - } - return peers -} diff --git a/packages/networking/ipfs-cluster/cluster.go b/packages/networking/ipfs-cluster/cluster.go deleted file mode 100644 index b6f48a7..0000000 --- a/packages/networking/ipfs-cluster/cluster.go +++ /dev/null @@ -1,2302 +0,0 @@ -package ipfscluster - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "mime/multipart" - "sync" - "time" - - "github.com/coreos/go-systemd/v22/daemon" - "github.com/ipfs-cluster/ipfs-cluster/adder" - "github.com/ipfs-cluster/ipfs-cluster/adder/sharding" - "github.com/ipfs-cluster/ipfs-cluster/adder/single" - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/pstoremgr" - "github.com/ipfs-cluster/ipfs-cluster/rpcutil" - "github.com/ipfs-cluster/ipfs-cluster/state" - "github.com/ipfs-cluster/ipfs-cluster/version" - "go.uber.org/multierr" - - ds "github.com/ipfs/go-datastore" - rpc "github.com/libp2p/go-libp2p-gorpc" - dual "github.com/libp2p/go-libp2p-kad-dht/dual" - host "github.com/libp2p/go-libp2p/core/host" - peer "github.com/libp2p/go-libp2p/core/peer" - peerstore "github.com/libp2p/go-libp2p/core/peerstore" - mdns "github.com/libp2p/go-libp2p/p2p/discovery/mdns" - ma "github.com/multiformats/go-multiaddr" - - ocgorpc "github.com/lanzafame/go-libp2p-ocgorpc" - trace "go.opencensus.io/trace" -) - -// ReadyTimeout specifies the time before giving up -// during startup (waiting for consensus to be ready) -// It may need adjustment according to timeouts in the -// consensus layer. -var ReadyTimeout = 30 * time.Second - -const ( - pingMetricName = "ping" - bootstrapCount = 3 - reBootstrapInterval = 30 * time.Second - mdnsServiceTag = "_ipfs-cluster-discovery._udp" - maxAlerts = 1000 -) - -var errFollowerMode = errors.New("this peer is configured to be in follower mode. Write operations are disabled") - -// Cluster is the main IPFS cluster component. It provides -// the go-API for it and orchestrates the components that make up the system. 
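
The Cluster struct below pairs a cancelable context with a WaitGroup: every long-lived goroutine registers on the group before it starts, and shutdown is "cancel the context, then wait for the group". A stdlib-only sketch of that lifecycle pattern (runner is illustrative):

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type runner struct {
	ctx    context.Context
	cancel context.CancelFunc
	wg     sync.WaitGroup
}

func newRunner() *runner {
	ctx, cancel := context.WithCancel(context.Background())
	return &runner{ctx: ctx, cancel: cancel}
}

// spawn registers on the WaitGroup before starting the goroutine, in a
// non-racy place, so Wait() can never miss it.
func (r *runner) spawn(f func(ctx context.Context)) {
	r.wg.Add(1)
	go func() {
		defer r.wg.Done()
		f(r.ctx)
	}()
}

// shutdown cancels the shared context and collects all goroutines.
func (r *runner) shutdown() {
	r.cancel()
	r.wg.Wait()
}

func main() {
	r := newRunner()
	r.spawn(func(ctx context.Context) {
		<-ctx.Done()
		fmt.Println("worker stopped")
	})
	time.Sleep(10 * time.Millisecond)
	r.shutdown()
	fmt.Println("all goroutines collected")
}
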
-type Cluster struct { - ctx context.Context - cancel func() - - id peer.ID - config *Config - host host.Host - dht *dual.DHT - discovery mdns.Service - datastore ds.Datastore - - rpcServer *rpc.Server - rpcClient *rpc.Client - peerManager *pstoremgr.Manager - - consensus Consensus - apis []API - ipfs IPFSConnector - tracker PinTracker - monitor PeerMonitor - allocator PinAllocator - informers []Informer - tracer Tracer - - alerts []api.Alert - alertsMux sync.Mutex - - doneCh chan struct{} - readyCh chan struct{} - readyB bool - wg sync.WaitGroup - - // peerAdd - paMux sync.Mutex - - // shutdown function and related variables - shutdownLock sync.Mutex - shutdownB bool - removed bool - - curPingVal pingValue -} - -// NewCluster builds a new IPFS Cluster peer. It initializes a LibP2P host, -// creates and RPC Server and client and sets up all components. -// -// The new cluster peer may still be performing initialization tasks when -// this call returns (consensus may still be bootstrapping). Use Cluster.Ready() -// if you need to wait until the peer is fully up. -func NewCluster( - ctx context.Context, - host host.Host, - dht *dual.DHT, - cfg *Config, - datastore ds.Datastore, - consensus Consensus, - apis []API, - ipfs IPFSConnector, - tracker PinTracker, - monitor PeerMonitor, - allocator PinAllocator, - informers []Informer, - tracer Tracer, -) (*Cluster, error) { - err := cfg.Validate() - if err != nil { - return nil, err - } - - if host == nil { - return nil, errors.New("cluster host is nil") - } - - if len(informers) == 0 { - return nil, errors.New("no informers are passed") - } - - ctx, cancel := context.WithCancel(ctx) - - listenAddrs := "" - for _, addr := range host.Addrs() { - listenAddrs += fmt.Sprintf(" %s/p2p/%s\n", addr, host.ID().Pretty()) - } - - logger.Infof("IPFS Cluster v%s listening on:\n%s\n", version.Version, listenAddrs) - - peerManager := pstoremgr.New(ctx, host, cfg.GetPeerstorePath()) - - var mdnsSvc mdns.Service - if cfg.MDNSInterval > 0 { - mdnsSvc = mdns.NewMdnsService(host, mdnsServiceTag, peerManager) - err = mdnsSvc.Start() - if err != nil { - logger.Warnf("mDNS could not be started: %s", err) - } - } - - c := &Cluster{ - ctx: ctx, - cancel: cancel, - id: host.ID(), - config: cfg, - host: host, - dht: dht, - discovery: mdnsSvc, - datastore: datastore, - consensus: consensus, - apis: apis, - ipfs: ipfs, - tracker: tracker, - monitor: monitor, - allocator: allocator, - informers: informers, - tracer: tracer, - alerts: []api.Alert{}, - peerManager: peerManager, - shutdownB: false, - removed: false, - doneCh: make(chan struct{}), - readyCh: make(chan struct{}), - readyB: false, - } - - // Import known cluster peers from peerstore file and config. Set - // a non permanent TTL. - c.peerManager.ImportPeersFromPeerstore(false, peerstore.AddressTTL) - c.peerManager.ImportPeers(c.config.PeerAddresses, false, peerstore.AddressTTL) - // Attempt to connect to some peers (up to bootstrapCount) - connectedPeers := c.peerManager.Bootstrap(bootstrapCount) - // We cannot warn when count is low as this as this is normal if going - // to Join() later. - logger.Debugf("bootstrap count %d", len(connectedPeers)) - // Log a ping metric for every connected peer. This will make them - // visible as peers without having to wait for them to send one. 
- for _, p := range connectedPeers { - if err := c.logPingMetric(ctx, p); err != nil { - logger.Warn(err) - } - } - - // After setupRPC components can do their tasks with a fully operative - // routed libp2p host with some connections and a working DHT (hopefully). - err = c.setupRPC() - if err != nil { - c.Shutdown(ctx) - return nil, err - } - c.setupRPCClients() - - // Note: It is very important to first call Add() once in a non-racy - // place - c.wg.Add(1) - go func() { - defer c.wg.Done() - c.ready(ReadyTimeout) - c.run() - }() - - return c, nil -} - -func (c *Cluster) setupRPC() error { - rpcServer, err := newRPCServer(c) - if err != nil { - return err - } - c.rpcServer = rpcServer - - var rpcClient *rpc.Client - if c.config.Tracing { - csh := &ocgorpc.ClientHandler{} - rpcClient = rpc.NewClientWithServer( - c.host, - version.RPCProtocol, - rpcServer, - rpc.WithClientStatsHandler(csh), - ) - } else { - rpcClient = rpc.NewClientWithServer(c.host, version.RPCProtocol, rpcServer) - } - c.rpcClient = rpcClient - return nil -} - -func (c *Cluster) setupRPCClients() { - c.ipfs.SetClient(c.rpcClient) - c.tracker.SetClient(c.rpcClient) - for _, api := range c.apis { - api.SetClient(c.rpcClient) - } - c.consensus.SetClient(c.rpcClient) - c.monitor.SetClient(c.rpcClient) - c.allocator.SetClient(c.rpcClient) - for _, informer := range c.informers { - informer.SetClient(c.rpcClient) - } -} - -// watchPinset triggers recurrent operations that loop on the pinset. -func (c *Cluster) watchPinset() { - ctx, span := trace.StartSpan(c.ctx, "cluster/watchPinset") - defer span.End() - - stateSyncTimer := time.NewTimer(c.config.StateSyncInterval) - - // Upon start, every item in the state that is not pinned will appear - // as PinError when doing a Status, we should proceed to recover - // (try pinning) all of those right away. - recoverTimer := time.NewTimer(0) // 0 so that it does an initial recover right away - - // This prevents doing an StateSync while doing a RecoverAllLocal, - // which is intended behavior as for very large pinsets - for { - select { - case <-stateSyncTimer.C: - logger.Debug("auto-triggering StateSync()") - c.StateSync(ctx) - stateSyncTimer.Reset(c.config.StateSyncInterval) - case <-recoverTimer.C: - logger.Debug("auto-triggering RecoverAllLocal()") - - out := make(chan api.PinInfo, 1024) - go func() { - for range out { - } - }() - err := c.RecoverAllLocal(ctx, out) - if err != nil { - logger.Error(err) - } - recoverTimer.Reset(c.config.PinRecoverInterval) - case <-c.ctx.Done(): - if !stateSyncTimer.Stop() { - <-stateSyncTimer.C - } - if !recoverTimer.Stop() { - <-recoverTimer.C - } - return - } - } -} - -// returns the smallest ttl from the metrics pushed by the informer. -func (c *Cluster) sendInformerMetrics(ctx context.Context, informer Informer) (time.Duration, error) { - ctx, span := trace.StartSpan(ctx, "cluster/sendInformerMetric") - defer span.End() - - var minTTL time.Duration - var errors error - metrics := informer.GetMetrics(ctx) - if len(metrics) == 0 { - logger.Errorf("informer %s produced no metrics", informer.Name()) - return minTTL, nil - } - - for _, metric := range metrics { - if metric.Discard() { // do not publish invalid metrics - // the tags informer creates an invalid metric - // when no tags are defined. 
- continue - } - metric.Peer = c.id - ttl := metric.GetTTL() - if ttl > 0 && (ttl < minTTL || minTTL == 0) { - minTTL = ttl - } - err := c.monitor.PublishMetric(ctx, metric) - - if multierr.AppendInto(&errors, err) { - logger.Warnf("error sending metric %s: %s", metric.Name, err) - } - } - return minTTL, errors -} - -func (c *Cluster) sendInformersMetrics(ctx context.Context) error { - ctx, span := trace.StartSpan(ctx, "cluster/sendInformersMetrics") - defer span.End() - - var errors error - for _, informer := range c.informers { - _, err := c.sendInformerMetrics(ctx, informer) - if multierr.AppendInto(&errors, err) { - logger.Warnf("informer %s did not send all metrics", informer.Name()) - } - } - return errors -} - -// pushInformerMetrics loops and publishes informers metrics using the -// cluster monitor. Metrics are pushed normally at a TTL/2 rate. If an error -// occurs, they are pushed at a TTL/4 rate. -func (c *Cluster) pushInformerMetrics(ctx context.Context, informer Informer) { - ctx, span := trace.StartSpan(ctx, "cluster/pushInformerMetrics") - defer span.End() - - timer := time.NewTimer(0) // fire immediately first - - // retries counts how many retries we have made - retries := 0 - // retryWarnMod controls how often do we log - // "error broadcasting metric". - // It will do it in the first error, and then on every - // 10th. - retryWarnMod := 10 - - for { - select { - case <-ctx.Done(): - return - case <-timer.C: - // wait - } - - minTTL, err := c.sendInformerMetrics(ctx, informer) - if minTTL == 0 { - minTTL = 30 * time.Second - } - if err != nil { - if (retries % retryWarnMod) == 0 { - logger.Errorf("error broadcasting metric: %s", err) - retries++ - } - // retry sooner - timer.Reset(minTTL / 4) - continue - } - - retries = 0 - // send metric again in TTL/2 - timer.Reset(minTTL / 2) - } -} - -func (c *Cluster) sendPingMetric(ctx context.Context) (api.Metric, error) { - ctx, span := trace.StartSpan(ctx, "cluster/sendPingMetric") - defer span.End() - - id := c.ID(ctx) - newPingVal := pingValue{ - Peername: id.Peername, - IPFSID: id.IPFS.ID, - IPFSAddresses: publicIPFSAddresses(id.IPFS.Addresses), - } - if c.curPingVal.Valid() && - !newPingVal.Valid() { // i.e. ipfs down - newPingVal = c.curPingVal // use last good value - } - c.curPingVal = newPingVal - - v, err := json.Marshal(newPingVal) - if err != nil { - logger.Error(err) - // continue anyways - } - - metric := api.Metric{ - Name: pingMetricName, - Peer: c.id, - Valid: true, - Value: string(v), - } - metric.SetTTL(c.config.MonitorPingInterval * 2) - return metric, c.monitor.PublishMetric(ctx, metric) -} - -// logPingMetric logs a ping metric as if it had been sent from PID. It is -// used to make peers appear available as soon as we connect to them (without -// having to wait for them to broadcast a metric). -// -// We avoid specifically sending a metric to a peer when we "connect" to it -// because: a) this requires an extra. OPEN RPC endpoint (LogMetric) that can -// be called by everyone b) We have no way of verifying that the peer ID in a -// metric pushed is actually the issuer of the metric (something the regular -// "pubsub" way of pushing metrics allows (by verifying the signature on the -// message). Thus, this reduces chances of abuse until we have something -// better. 
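
pushInformerMetrics above re-broadcasts each metric at TTL/2, dropping to TTL/4 after a failure, so a value is always refreshed well before the previous one expires. A stdlib-only sketch of that timer-reset loop with an illustrative publish callback:

package main

import (
	"errors"
	"fmt"
	"time"
)

// pushLoop publishes, then sleeps for TTL/2 on success or TTL/4 on error.
func pushLoop(done <-chan struct{}, publish func() (time.Duration, error)) {
	timer := time.NewTimer(0) // fire immediately the first time
	for {
		select {
		case <-done:
			return
		case <-timer.C:
		}
		ttl, err := publish()
		if ttl == 0 {
			ttl = 30 * time.Second // fallback, as in the original
		}
		if err != nil {
			timer.Reset(ttl / 4) // retry sooner after a failure
			continue
		}
		timer.Reset(ttl / 2)
	}
}

func main() {
	done := make(chan struct{})
	n := 0
	go pushLoop(done, func() (time.Duration, error) {
		n++
		fmt.Println("publish attempt", n)
		if n < 2 {
			return 20 * time.Millisecond, errors.New("transient")
		}
		return 20 * time.Millisecond, nil
	})
	time.Sleep(60 * time.Millisecond)
	close(done)
}
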
-func (c *Cluster) logPingMetric(ctx context.Context, pid peer.ID) error { - m := api.Metric{ - Name: pingMetricName, - Peer: pid, - Valid: true, - } - m.SetTTL(c.config.MonitorPingInterval * 2) - return c.monitor.LogMetric(ctx, m) -} - -func (c *Cluster) pushPingMetrics(ctx context.Context) { - ctx, span := trace.StartSpan(ctx, "cluster/pushPingMetrics") - defer span.End() - - ticker := time.NewTicker(c.config.MonitorPingInterval) - for { - select { - case <-ctx.Done(): - return - default: - } - - c.sendPingMetric(ctx) - - select { - case <-ctx.Done(): - return - case <-ticker.C: - } - } -} - -// Alerts returns the last alerts recorded by this cluster peer with the most -// recent first. -func (c *Cluster) Alerts() []api.Alert { - c.alertsMux.Lock() - alerts := make([]api.Alert, len(c.alerts)) - { - total := len(alerts) - for i, a := range c.alerts { - alerts[total-1-i] = a - } - } - c.alertsMux.Unlock() - - return alerts -} - -// read the alerts channel from the monitor and triggers repins -func (c *Cluster) alertsHandler() { - for { - select { - case <-c.ctx.Done(): - return - case alrt := <-c.monitor.Alerts(): - // Follower peers do not care about alerts. - // They can do nothing about them. - if c.config.FollowerMode { - continue - } - - logger.Warnf("metric alert for %s: Peer: %s.", alrt.Name, alrt.Peer) - c.alertsMux.Lock() - { - if len(c.alerts) > maxAlerts { - c.alerts = c.alerts[:0] - } - - c.alerts = append(c.alerts, alrt) - } - c.alertsMux.Unlock() - - if alrt.Name != pingMetricName { - continue // only handle ping alerts - } - - if c.config.DisableRepinning { - logger.Debugf("repinning is disabled. Will not re-allocate pins on alerts") - return - } - - cState, err := c.consensus.State(c.ctx) - if err != nil { - logger.Warn(err) - return - } - - distance, err := c.distances(c.ctx, alrt.Peer) - if err != nil { - logger.Warn(err) - return - } - - pinCh := make(chan api.Pin, 1024) - go func() { - err = cState.List(c.ctx, pinCh) - if err != nil { - logger.Warn(err) - } - }() - - for pin := range pinCh { - if containsPeer(pin.Allocations, alrt.Peer) && distance.isClosest(pin.Cid) { - c.repinFromPeer(c.ctx, alrt.Peer, pin) - } - } - } - } -} - -// detects any changes in the peerset and saves the configuration. When it -// detects that we have been removed from the peerset, it shuts down this peer. -func (c *Cluster) watchPeers() { - ticker := time.NewTicker(c.config.PeerWatchInterval) - defer ticker.Stop() - - for { - select { - case <-c.ctx.Done(): - return - default: - } - - select { - case <-c.ctx.Done(): - return - case <-ticker.C: - //logger.Debugf("%s watching peers", c.id) - hasMe := false - peers, err := c.consensus.Peers(c.ctx) - if err != nil { - logger.Error(err) - continue - } - for _, p := range peers { - if p == c.id { - hasMe = true - break - } - } - - if !hasMe { - c.shutdownLock.Lock() - defer c.shutdownLock.Unlock() - logger.Info("peer no longer in peerset. Initiating shutdown") - c.removed = true - go c.Shutdown(c.ctx) - return - } - } - } -} - -// reBootstrap regularly attempts to bootstrap (re-connect to peers from the -// peerstore). This should ensure that we auto-recover from situations in -// which the network was completely gone and we lost all peers. 
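
Alerts() above returns a most-recent-first copy of a bounded, mutex-guarded history, so callers never hold references into the live slice. A stdlib-only sketch of the same snapshot pattern, with strings standing in for api.Alert:

package main

import (
	"fmt"
	"sync"
)

type alertLog struct {
	mu    sync.Mutex
	items []string
	max   int
}

func (l *alertLog) Add(s string) {
	l.mu.Lock()
	defer l.mu.Unlock()
	if len(l.items) > l.max {
		l.items = l.items[:0] // reset once the buffer overflows
	}
	l.items = append(l.items, s)
}

// Snapshot copies the history in reverse (most recent first).
func (l *alertLog) Snapshot() []string {
	l.mu.Lock()
	defer l.mu.Unlock()
	out := make([]string, len(l.items))
	for i, it := range l.items {
		out[len(out)-1-i] = it
	}
	return out
}

func main() {
	l := &alertLog{max: 1000}
	l.Add("peerA down")
	l.Add("peerB down")
	fmt.Println(l.Snapshot()) // [peerB down peerA down]
}
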
-func (c *Cluster) reBootstrap() { - ticker := time.NewTicker(reBootstrapInterval) - defer ticker.Stop() - - for { - select { - case <-c.ctx.Done(): - return - case <-ticker.C: - connected := c.peerManager.Bootstrap(bootstrapCount) - for _, p := range connected { - logger.Infof("reconnected to %s", p) - } - } - } -} - -// find all Cids pinned to a given peer and triggers re-pins on them. -func (c *Cluster) vacatePeer(ctx context.Context, p peer.ID) { - ctx, span := trace.StartSpan(ctx, "cluster/vacatePeer") - defer span.End() - - if c.config.DisableRepinning { - logger.Warnf("repinning is disabled. Will not re-allocate cids from %s", p.Pretty()) - return - } - - cState, err := c.consensus.State(ctx) - if err != nil { - logger.Warn(err) - return - } - - pinCh := make(chan api.Pin, 1024) - go func() { - err = cState.List(ctx, pinCh) - if err != nil { - logger.Warn(err) - } - }() - - for pin := range pinCh { - if containsPeer(pin.Allocations, p) { - c.repinFromPeer(ctx, p, pin) - } - } -} - -// repinFromPeer triggers a repin on a given pin object blacklisting one of the -// allocations. -func (c *Cluster) repinFromPeer(ctx context.Context, p peer.ID, pin api.Pin) { - ctx, span := trace.StartSpan(ctx, "cluster/repinFromPeer") - defer span.End() - - pin.Allocations = nil // force re-allocations - // note that pin() should not result in different allocations - // if we are not under the replication-factor min. - _, ok, err := c.pin(ctx, pin, []peer.ID{p}) - if ok && err == nil { - logger.Infof("repinned %s out of %s", pin.Cid, p.Pretty()) - } -} - -// run launches some go-routines which live throughout the cluster's life -func (c *Cluster) run() { - c.wg.Add(1) - go func() { - defer c.wg.Done() - c.watchPinset() - }() - - c.wg.Add(1) - go func() { - defer c.wg.Done() - c.pushPingMetrics(c.ctx) - }() - - c.wg.Add(len(c.informers)) - for _, informer := range c.informers { - go func(inf Informer) { - defer c.wg.Done() - c.pushInformerMetrics(c.ctx, inf) - }(informer) - } - - c.wg.Add(1) - go func() { - defer c.wg.Done() - c.watchPeers() - }() - - c.wg.Add(1) - go func() { - defer c.wg.Done() - c.alertsHandler() - }() - - c.wg.Add(1) - go func() { - defer c.wg.Done() - c.reBootstrap() - }() -} - -func (c *Cluster) ready(timeout time.Duration) { - ctx, span := trace.StartSpan(c.ctx, "cluster/ready") - defer span.End() - - // We bootstrapped first because with dirty state consensus - // may have a peerset and not find a leader so we cannot wait - // for it. - timer := time.NewTimer(timeout) - select { - case <-timer.C: - logger.Error("***** ipfs-cluster consensus start timed out (tips below) *****") - logger.Error(` -************************************************** -This peer was not able to become part of the cluster. -This might be due to one or several causes: - - Check the logs above this message for errors - - Check that there is connectivity to the "peers" multiaddresses - - Check that all cluster peers are using the same "secret" - - Check that this peer is reachable on its "listen_multiaddress" by all peers - - Check that the current cluster is healthy (has a leader). Otherwise make - sure to start enough peers so that a leader election can happen. - - Check that the peer(s) you are trying to connect to is running the - same version of IPFS-cluster. -************************************************** -`) - c.Shutdown(ctx) - return - case <-c.consensus.Ready(ctx): - // Consensus ready means the state is up to date. - case <-c.ctx.Done(): - return - } - - // Cluster is ready. 
- - peers, err := c.consensus.Peers(ctx) - if err != nil { - logger.Error(err) - c.Shutdown(ctx) - return - } - - logger.Info("Cluster Peers (without including ourselves):") - if len(peers) == 1 { - logger.Info(" - No other peers") - } - - for _, p := range peers { - if p != c.id { - logger.Infof(" - %s", p.Pretty()) - } - } - - close(c.readyCh) - c.shutdownLock.Lock() - c.readyB = true - c.shutdownLock.Unlock() - logger.Info("** IPFS Cluster is READY **") - daemon.SdNotify(false, daemon.SdNotifyReady) -} - -// Ready returns a channel which signals when this peer is -// fully initialized (including consensus). -func (c *Cluster) Ready() <-chan struct{} { - return c.readyCh -} - -// Shutdown performs all the necessary operations to shutdown -// the IPFS Cluster peer: -// * Save peerstore with the current peers -// * Remove itself from consensus when LeaveOnShutdown is set -// * It Shutdowns all the components -// * Collects all goroutines -// -// Shutdown does not close the libp2p host, the DHT, the datastore or -// generally anything that Cluster did not create. -func (c *Cluster) Shutdown(ctx context.Context) error { - _, span := trace.StartSpan(ctx, "cluster/Shutdown") - defer span.End() - ctx = trace.NewContext(c.ctx, span) - - c.shutdownLock.Lock() - defer c.shutdownLock.Unlock() - - if c.shutdownB { - logger.Debug("Cluster is already shutdown") - return nil - } - - logger.Info("shutting down Cluster") - - // Shutdown APIs first, avoids more requests coming through. - for _, api := range c.apis { - if err := api.Shutdown(ctx); err != nil { - logger.Errorf("error stopping API: %s", err) - return err - } - } - - // Cancel discovery service (this shutdowns announcing). Handling - // entries is canceled along with the context below. - if c.discovery != nil { - c.discovery.Close() - } - - // Try to store peerset file for all known peers whatsoever - // if we got ready (otherwise, don't overwrite anything) - if c.readyB { - // Ignoring error since it's a best-effort - c.peerManager.SavePeerstoreForPeers(c.host.Peerstore().Peers()) - } - - // Only attempt to leave if: - // - consensus is initialized - // - cluster was ready (no bootstrapping error) - // - We are not removed already (means watchPeers() called us) - if c.consensus != nil && c.config.LeaveOnShutdown && c.readyB && !c.removed { - c.removed = true - _, err := c.consensus.Peers(ctx) - if err == nil { - // best effort - logger.Warn("attempting to leave the cluster. This may take some seconds") - err := c.consensus.RmPeer(ctx, c.id) - if err != nil { - logger.Error("leaving cluster: " + err.Error()) - } - } - } - - if con := c.consensus; con != nil { - if err := con.Shutdown(ctx); err != nil { - logger.Errorf("error stopping consensus: %s", err) - return err - } - } - - // We left the cluster or were removed. Remove any consensus-specific - // state. 
-	if c.removed && c.readyB {
-		err := c.consensus.Clean(ctx)
-		if err != nil {
-			logger.Error("cleaning consensus: ", err)
-		}
-	}
-
-	if err := c.monitor.Shutdown(ctx); err != nil {
-		logger.Errorf("error stopping monitor: %s", err)
-		return err
-	}
-
-	if err := c.ipfs.Shutdown(ctx); err != nil {
-		logger.Errorf("error stopping IPFS Connector: %s", err)
-		return err
-	}
-
-	if err := c.tracker.Shutdown(ctx); err != nil {
-		logger.Errorf("error stopping PinTracker: %s", err)
-		return err
-	}
-
-	for _, inf := range c.informers {
-		if err := inf.Shutdown(ctx); err != nil {
-			logger.Errorf("error stopping informer: %s", err)
-			return err
-		}
-	}
-
-	if err := c.tracer.Shutdown(ctx); err != nil {
-		logger.Errorf("error stopping Tracer: %s", err)
-		return err
-	}
-
-	c.cancel()
-	c.wg.Wait()
-
-	c.shutdownB = true
-	close(c.doneCh)
-	return nil
-}
-
-// Done provides a way to learn if the Peer has been shut down
-// (for example, because it has been removed from the Cluster).
-func (c *Cluster) Done() <-chan struct{} {
-	return c.doneCh
-}
-
-// ID returns information about the Cluster peer.
-func (c *Cluster) ID(ctx context.Context) api.ID {
-	_, span := trace.StartSpan(ctx, "cluster/ID")
-	defer span.End()
-	ctx = trace.NewContext(c.ctx, span)
-
-	// Ignore the error since it is included in the response object.
-	ipfsID, err := c.ipfs.ID(ctx)
-	if err != nil {
-		ipfsID = api.IPFSID{
-			Error: err.Error(),
-		}
-	}
-
-	var addrs []api.Multiaddr
-	mAddrs, err := peer.AddrInfoToP2pAddrs(&peer.AddrInfo{ID: c.id, Addrs: c.host.Addrs()})
-	if err == nil {
-		for _, mAddr := range mAddrs {
-			addrs = append(addrs, api.NewMultiaddrWithValue(mAddr))
-		}
-	}
-
-	peers := []peer.ID{}
-	// This method might get called very early by a remote peer
-	// and might catch us when consensus is not set.
-	if c.consensus != nil {
-		peers, _ = c.consensus.Peers(ctx)
-	}
-
-	clusterPeerInfos := c.peerManager.PeerInfos(peers)
-	addresses := []api.Multiaddr{}
-	for _, pinfo := range clusterPeerInfos {
-		addrs, err := peer.AddrInfoToP2pAddrs(&pinfo)
-		if err != nil {
-			continue
-		}
-		for _, a := range addrs {
-			addresses = append(addresses, api.NewMultiaddrWithValue(a))
-		}
-	}
-
-	id := api.ID{
-		ID: c.id,
-		// PublicKey:          c.host.Peerstore().PubKey(c.id),
-		Addresses:             addrs,
-		ClusterPeers:          peers,
-		ClusterPeersAddresses: addresses,
-		Version:               version.Version.String(),
-		RPCProtocolVersion:    version.RPCProtocol,
-		IPFS:                  ipfsID,
-		Peername:              c.config.Peername,
-	}
-	if err != nil {
-		id.Error = err.Error()
-	}
-
-	return id
-}
-
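// Illustrative sketch (an assumption, not part of the deleted file): a typical
// caller-side teardown relying on the idempotent Shutdown implemented above.
func shutdownExample(c *Cluster) {
	if err := c.Shutdown(context.Background()); err != nil {
		logger.Errorf("shutdown failed: %s", err)
	}
	// Done() is closed once Shutdown has collected all goroutines.
	<-c.Done()
}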
-// PeerAdd adds a new peer to this Cluster.
-//
-// For it to work well, the new peer should be discoverable
-// (part of our peerstore or connected to one of the existing peers)
-// and reachable. Since PeerAdd allows adding peers which are
-// not running, or not reachable, it is recommended to call Join() from the
-// new peer instead.
-//
-// The new peer ID will be passed to the consensus
-// component to be added to the peerset.
-func (c *Cluster) PeerAdd(ctx context.Context, pid peer.ID) (*api.ID, error) {
-	_, span := trace.StartSpan(ctx, "cluster/PeerAdd")
-	defer span.End()
-	ctx = trace.NewContext(c.ctx, span)
-
-	// Starting 10 nodes on the same box for testing
-	// causes a deadlock, and a global lock here
-	// seems to help.
-	c.paMux.Lock()
-	defer c.paMux.Unlock()
-	logger.Debugf("peerAdd called with %s", pid.Pretty())
-
-	// Let the consensus layer be aware of this peer.
-	err := c.consensus.AddPeer(ctx, pid)
-	if err != nil {
-		logger.Error(err)
-		id := &api.ID{ID: pid, Error: err.Error()}
-		return id, err
-	}
-
-	logger.Info("Peer added ", pid.Pretty())
-	addedID, err := c.getIDForPeer(ctx, pid)
-	if err != nil {
-		return addedID, err
-	}
-	if !containsPeer(addedID.ClusterPeers, c.id) {
-		addedID.ClusterPeers = append(addedID.ClusterPeers, c.id)
-	}
-	return addedID, nil
-}
-
-// PeerRemove removes a peer from this Cluster.
-//
-// The peer will be removed from the consensus peerset.
-// This may first trigger repinnings for all content if not disabled.
-func (c *Cluster) PeerRemove(ctx context.Context, pid peer.ID) error {
-	_, span := trace.StartSpan(ctx, "cluster/PeerRemove")
-	defer span.End()
-	ctx = trace.NewContext(c.ctx, span)
-
-	// We need to repin before removing the peer, otherwise it won't
-	// be able to submit the pins.
-	logger.Infof("re-allocating all CIDs directly associated to %s", pid)
-	c.vacatePeer(ctx, pid)
-
-	err := c.consensus.RmPeer(ctx, pid)
-	if err != nil {
-		logger.Error(err)
-		return err
-	}
-	logger.Info("Peer removed ", pid.Pretty())
-	return nil
-}
-
-// Join adds this peer to an existing cluster by bootstrapping to a
-// given multiaddress. It works by calling PeerAdd on the destination
-// cluster and making sure that the new peer is ready to discover and contact
-// the rest.
-func (c *Cluster) Join(ctx context.Context, addr ma.Multiaddr) error {
-	_, span := trace.StartSpan(ctx, "cluster/Join")
-	defer span.End()
-	ctx = trace.NewContext(c.ctx, span)
-
-	logger.Debugf("Join(%s)", addr)
-
-	// Add the peer to the peerstore so we can talk to it.
-	pid, err := c.peerManager.ImportPeer(addr, false, peerstore.PermanentAddrTTL)
-	if err != nil {
-		return err
-	}
-	if pid == c.id {
-		return nil
-	}
-
-	// Note that PeerAdd() on the remote peer will
-	// figure out what our real address is (obviously not
-	// ListenAddr).
-	var myID api.ID
-	err = c.rpcClient.CallContext(
-		ctx,
-		pid,
-		"Cluster",
-		"PeerAdd",
-		c.id,
-		&myID,
-	)
-	if err != nil {
-		logger.Error(err)
-		return err
-	}
-
-	// Log a fake but valid metric from the peer we are
-	// contacting. This will signal a CRDT component that
-	// we know that peer since we have metrics for it, without
-	// having to wait for the next metric round.
-	if err := c.logPingMetric(ctx, pid); err != nil {
-		logger.Warn(err)
-	}
-
-	// Broadcast our metrics to the world.
-	err = c.sendInformersMetrics(ctx)
-	if err != nil {
-		logger.Warn(err)
-	}
-
-	_, err = c.sendPingMetric(ctx)
-	if err != nil {
-		logger.Warn(err)
-	}
-
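	// Illustrative caller-side sketch (an assumption, not part of the
	// deleted file): joining an existing cluster from a fresh peer; the
	// bootstrap multiaddress is hypothetical and must carry a real peer ID.
	//
	//	addr, err := ma.NewMultiaddr("/ip4/192.0.2.10/tcp/9096/p2p/<peer-id>")
	//	if err != nil {
	//		return err
	//	}
	//	err = c.Join(context.Background(), addr)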
-	// We need to trigger a DHT bootstrap asap for this peer to not be
-	// lost if the peer it bootstrapped to goes down. We do this manually
-	// by triggering 1 round of bootstrap in the background.
-	// Note that our regular bootstrap process is still running in the
-	// background since we created the cluster.
-	c.wg.Add(1)
-	go func() {
-		defer c.wg.Done()
-		select {
-		case err := <-c.dht.LAN.RefreshRoutingTable():
-			if err != nil {
-				// This error is quite chatty
-				// on single-peer clusters.
-				logger.Debug(err)
-			}
-		case <-c.ctx.Done():
-			return
-		}
-
-		select {
-		case err := <-c.dht.WAN.RefreshRoutingTable():
-			if err != nil {
-				// This error is quite chatty
-				// on single-peer clusters.
-				logger.Debug(err)
-			}
-		case <-c.ctx.Done():
-			return
-		}
-	}()
-
-	// ConnectSwarms in the background after a while, when we have likely
-	// received some metrics.
-	time.AfterFunc(c.config.MonitorPingInterval, func() {
-		c.ipfs.ConnectSwarms(ctx)
-	})
-
-	// Wait for the leader and for the state to catch up,
-	// then sync.
-	err = c.consensus.WaitForSync(ctx)
-	if err != nil {
-		logger.Error(err)
-		return err
-	}
-
-	// Start pinning items in the state that are not on IPFS yet.
-	out := make(chan api.PinInfo, 1024)
-	// Discard outputs.
-	go func() {
-		for range out {
-		}
-	}()
-	go c.RecoverAllLocal(ctx, out)
-
-	logger.Infof("%s: joined %s's cluster", c.id.Pretty(), pid.Pretty())
-	return nil
-}
-
-// distances returns a distance checker using current trusted peers.
-// It can optionally receive a peer ID to exclude from the checks.
-func (c *Cluster) distances(ctx context.Context, exclude peer.ID) (*distanceChecker, error) {
-	trustedPeers, err := c.getTrustedPeers(ctx, exclude)
-	if err != nil {
-		logger.Error("could not get trusted peers:", err)
-		return nil, err
-	}
-
-	return &distanceChecker{
-		local:      c.id,
-		otherPeers: trustedPeers,
-		cache:      make(map[peer.ID]distance, len(trustedPeers)+1),
-	}, nil
-}
-
-// StateSync performs maintenance tasks on the global state that require
-// looping through all the items. It is triggered automatically on
-// StateSyncInterval. Currently it:
-//   - Sends unpin for expired items for which this peer is "closest"
-//     (skipped for follower peers)
-func (c *Cluster) StateSync(ctx context.Context) error {
-	_, span := trace.StartSpan(ctx, "cluster/StateSync")
-	defer span.End()
-	logger.Debug("StateSync")
-
-	ctx = trace.NewContext(c.ctx, span)
-
-	if c.config.FollowerMode {
-		return nil
-	}
-
-	cState, err := c.consensus.State(ctx)
-	if err != nil {
-		return err
-	}
-
-	timeNow := time.Now()
-
-	// Only trigger pin operations if we are the closest with respect to
-	// other trusted peers. We cannot know if our peer ID is trusted by
-	// other peers in the Cluster. This assumes yes. Setting FollowerMode
-	// is a way to assume the opposite and skip this completely.
-	distance, err := c.distances(ctx, "")
-	if err != nil {
-		return err // could not list peers
-	}
-
-	clusterPins := make(chan api.Pin, 1024)
-	go func() {
-		err = cState.List(ctx, clusterPins)
-		if err != nil {
-			logger.Error(err)
-		}
-	}()
-
-	// Unpin expired items when we are the closest peer to them.
-	for p := range clusterPins {
-		if p.ExpiredAt(timeNow) && distance.isClosest(p.Cid) {
-			logger.Infof("Unpinning %s: pin expired at %s", p.Cid, p.ExpireAt)
-			if _, err := c.Unpin(ctx, p.Cid); err != nil {
-				logger.Error(err)
-			}
-		}
-	}
-
-	return nil
-}
-
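// Illustrative sketch (an assumption, not part of the deleted file): draining
// the streaming status API documented just below; the filter value is an
// example meaning "all statuses".
func statusAllExample(ctx context.Context, c *Cluster) {
	out := make(chan api.GlobalPinInfo, 1024)
	go func() {
		for gpi := range out {
			logger.Infof("collected status for %s", gpi.Cid)
		}
	}()
	if err := c.StatusAll(ctx, api.TrackerStatusUndefined, out); err != nil {
		logger.Error(err)
	}
}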
-// StatusAll returns the GlobalPinInfo for all tracked Cids in all peers on
-// the out channel. This is done by broadcasting a StatusAll to all peers. If
-// an error happens, it is returned. This method blocks until it finishes. The
-// operation can be aborted by canceling the context.
-func (c *Cluster) StatusAll(ctx context.Context, filter api.TrackerStatus, out chan<- api.GlobalPinInfo) error {
-	_, span := trace.StartSpan(ctx, "cluster/StatusAll")
-	defer span.End()
-	ctx = trace.NewContext(c.ctx, span)
-
-	in := make(chan api.TrackerStatus, 1)
-	in <- filter
-	close(in)
-	return c.globalPinInfoStream(ctx, "PinTracker", "StatusAll", in, out)
-}
-
-// StatusAllLocal returns the PinInfo for all the tracked Cids in this peer on
-// the out channel. It blocks until finished.
-func (c *Cluster) StatusAllLocal(ctx context.Context, filter api.TrackerStatus, out chan<- api.PinInfo) error {
-	_, span := trace.StartSpan(ctx, "cluster/StatusAllLocal")
-	defer span.End()
-	ctx = trace.NewContext(c.ctx, span)
-
-	return c.tracker.StatusAll(ctx, filter, out)
-}
-
-// Status returns the GlobalPinInfo for a given Cid as fetched from all
-// current peers. If an error happens, the GlobalPinInfo should contain
-// as much information as could be fetched from the other peers.
-func (c *Cluster) Status(ctx context.Context, h api.Cid) (api.GlobalPinInfo, error) {
-	_, span := trace.StartSpan(ctx, "cluster/Status")
-	defer span.End()
-	ctx = trace.NewContext(c.ctx, span)
-
-	return c.globalPinInfoCid(ctx, "PinTracker", "Status", h)
-}
-
-// StatusLocal returns this peer's PinInfo for a given Cid.
-func (c *Cluster) StatusLocal(ctx context.Context, h api.Cid) api.PinInfo {
-	_, span := trace.StartSpan(ctx, "cluster/StatusLocal")
-	defer span.End()
-	ctx = trace.NewContext(c.ctx, span)
-
-	return c.tracker.Status(ctx, h)
-}
-
-// localPinInfoOp is used for RecoverLocal and SyncLocal.
-func (c *Cluster) localPinInfoOp(
-	ctx context.Context,
-	h api.Cid,
-	f func(context.Context, api.Cid) (api.PinInfo, error),
-) (pInfo api.PinInfo, err error) {
-	ctx, span := trace.StartSpan(ctx, "cluster/localPinInfoOp")
-	defer span.End()
-
-	cids, err := c.cidsFromMetaPin(ctx, h)
-	if err != nil {
-		return api.PinInfo{}, err
-	}
-
-	for _, ci := range cids {
-		pInfo, err = f(ctx, ci)
-		if err != nil {
-			logger.Error("tracker.SyncCid() returned with error: ", err)
-			logger.Error("Is the ipfs daemon running?")
-			break
-		}
-	}
-	// Return the last pInfo/err; it should be the root Cid if everything went ok.
-	return pInfo, err
-}
-
-// RecoverAll triggers a RecoverAllLocal operation on all peers and returns
-// GlobalPinInfo objects for all recovered items. This method blocks until
-// finished. The operation can be aborted by canceling the context.
-func (c *Cluster) RecoverAll(ctx context.Context, out chan<- api.GlobalPinInfo) error {
-	_, span := trace.StartSpan(ctx, "cluster/RecoverAll")
-	defer span.End()
-	ctx = trace.NewContext(c.ctx, span)
-
-	return c.globalPinInfoStream(ctx, "Cluster", "RecoverAllLocal", nil, out)
-}
-
-// RecoverAllLocal triggers a RecoverLocal operation for all Cids tracked
-// by this peer.
-//
-// Recover operations ask IPFS to pin or unpin items in error state. Recover
-// is faster than calling Pin on the same CID, as it avoids committing an
-// identical pin to the consensus layer.
-//
-// It returns the list of pins that were re-queued for pinning on the out
-// channel. It blocks until done.
-//
-// RecoverAllLocal is called automatically every PinRecoverInterval.
-func (c *Cluster) RecoverAllLocal(ctx context.Context, out chan<- api.PinInfo) error {
-	_, span := trace.StartSpan(ctx, "cluster/RecoverAllLocal")
-	defer span.End()
-	ctx = trace.NewContext(c.ctx, span)
-
-	return c.tracker.RecoverAll(ctx, out)
-}
-
-// Recover triggers a recover operation for a given Cid in all
-// cluster peers.
-//
-// Recover operations ask IPFS to pin or unpin items in error state. Recover
-// is faster than calling Pin on the same CID, as it avoids committing an
-// identical pin to the consensus layer.
-func (c *Cluster) Recover(ctx context.Context, h api.Cid) (api.GlobalPinInfo, error) {
-	_, span := trace.StartSpan(ctx, "cluster/Recover")
-	defer span.End()
-	ctx = trace.NewContext(c.ctx, span)
-
-	return c.globalPinInfoCid(ctx, "PinTracker", "Recover", h)
-}
-
-// RecoverLocal triggers a recover operation for a given Cid in this peer only.
-// It returns the updated PinInfo, after recovery.
-//
-// Recover operations ask IPFS to pin or unpin items in error state. Recover
-// is faster than calling Pin on the same CID, as it avoids committing an
-// identical pin to the consensus layer.
-func (c *Cluster) RecoverLocal(ctx context.Context, h api.Cid) (api.PinInfo, error) {
-	_, span := trace.StartSpan(ctx, "cluster/RecoverLocal")
-	defer span.End()
-	ctx = trace.NewContext(c.ctx, span)
-
-	return c.localPinInfoOp(ctx, h, c.tracker.Recover)
-}
-
-// Pins sends pins on the given out channel as it iterates the full
-// pinset (current global state). This is the source of truth as to which pins
-// are managed and their allocation, but it does not indicate if the item is
-// successfully pinned. For that, use the Status*() methods.
-//
-// The operation can be aborted by canceling the context. This method blocks
-// until the operation has completed.
-func (c *Cluster) Pins(ctx context.Context, out chan<- api.Pin) error {
-	_, span := trace.StartSpan(ctx, "cluster/Pins")
-	defer span.End()
-	ctx = trace.NewContext(c.ctx, span)
-
-	cState, err := c.consensus.State(ctx)
-	if err != nil {
-		logger.Error(err)
-		return err
-	}
-	return cState.List(ctx, out)
-}
-
-// pinsSlice returns the list of Cids managed by Cluster and which are part
-// of the current global state. This is the source of truth as to which
-// pins are managed and their allocation, but it does not indicate if
-// the item is successfully pinned. For that, use StatusAll().
-//
-// It is recommended to use PinsChannel(), as this method is equivalent to
-// loading the full pinset in memory!
-func (c *Cluster) pinsSlice(ctx context.Context) ([]api.Pin, error) {
-	out := make(chan api.Pin, 1024)
-	var err error
-	go func() {
-		err = c.Pins(ctx, out)
-	}()
-
-	var pins []api.Pin
-	for pin := range out {
-		pins = append(pins, pin)
-	}
-	return pins, err
-}
-
-// PinGet returns information for a single Cid managed by Cluster.
-// The information is obtained from the current global state. The
-// returned api.Pin provides information about the allocations
-// assigned for the requested Cid, but it does not indicate if
-// the item is successfully pinned. For that, use Status(). PinGet
-// returns an error if the given Cid is not part of the global state.
-func (c *Cluster) PinGet(ctx context.Context, h api.Cid) (api.Pin, error) {
-	_, span := trace.StartSpan(ctx, "cluster/PinGet")
-	defer span.End()
-	ctx = trace.NewContext(c.ctx, span)
-
-	st, err := c.consensus.State(ctx)
-	if err != nil {
-		return api.Pin{}, err
-	}
-	pin, err := st.Get(ctx, h)
-	if err != nil {
-		return api.Pin{}, err
-	}
-	return pin, nil
-}
-
-// Pin makes the cluster Pin a Cid. This implies adding the Cid
-// to the IPFS Cluster peers' shared state. Depending on the cluster
-// pinning strategy, the PinTracker may then request the IPFS daemon
-// to pin the Cid.
-//
-// Pin returns the Pin as stored in the global state (with the given
-// allocations) and an error if the operation could not be persisted. Pin does
-// not reflect the success or failure of underlying IPFS daemon pinning
-// operations, which happen in an async fashion.
-//
-// If the options UserAllocations are non-empty then these peers are pinned
-// with priority over other peers in the cluster. If the max repl factor is
-// less than the size of the specified peerset then peers are chosen from this
-// set in allocation order. If the minimum repl factor is greater than the
-// size of this set then the remaining peers are allocated in order from the
-// rest of the cluster. Priority allocations are best effort. If any priority
-// peers are unavailable then Pin will simply allocate from the rest of the
-// cluster.
-//
-// If the Update option is set, the pin options (including allocations) will
-// be copied from an existing one. This is equivalent to running PinUpdate.
-func (c *Cluster) Pin(ctx context.Context, h api.Cid, opts api.PinOptions) (api.Pin, error) {
-	_, span := trace.StartSpan(ctx, "cluster/Pin")
-	defer span.End()
-
-	ctx = trace.NewContext(c.ctx, span)
-	pin := api.PinWithOpts(h, opts)
-
-	result, _, err := c.pin(ctx, pin, []peer.ID{})
-	return result, err
-}
-
-// setupReplicationFactor sets the default replication factor in a pin when it is set to 0.
-func (c *Cluster) setupReplicationFactor(pin api.Pin) (api.Pin, error) {
-	rplMin := pin.ReplicationFactorMin
-	rplMax := pin.ReplicationFactorMax
-	if rplMin == 0 {
-		rplMin = c.config.ReplicationFactorMin
-		pin.ReplicationFactorMin = rplMin
-	}
-	if rplMax == 0 {
-		rplMax = c.config.ReplicationFactorMax
-		pin.ReplicationFactorMax = rplMax
-	}
-
-	// When pinning everywhere, remove all allocations.
-	// Allocations may have been preset by the adder
-	// for the cases when the replication factor is > -1.
-	// Fixes part of #1319: allocations when adding
-	// are kept.
-	if pin.IsPinEverywhere() {
-		pin.Allocations = nil
-	}
-
-	return pin, isReplicationFactorValid(rplMin, rplMax)
-}
-
-// checkPinType runs basic checks on the pin type to verify it is well-formed.
-func checkPinType(pin api.Pin) error {
-	switch pin.Type {
-	case api.DataType:
-		if pin.Reference != nil {
-			return errors.New("data pins should not reference other pins")
-		}
-	case api.ShardType:
-		if pin.MaxDepth != 1 {
-			return errors.New("must pin shards go depth 1")
-		}
-		// FIXME: indirect shard pins could have max-depth 2
-		// FIXME: repinning a shard type will overwrite replication
-		// factor from previous:
-		// if existing.ReplicationFactorMin != rplMin ||
-		//	existing.ReplicationFactorMax != rplMax {
-		//	return errors.New("shard update with wrong repl factors")
-		//}
-	case api.ClusterDAGType:
-		if pin.MaxDepth != 0 {
-			return errors.New("must pin roots directly")
-		}
-		if pin.Reference == nil {
-			return errors.New("clusterDAG pins should reference a Meta pin")
-		}
-	case api.MetaType:
-		if len(pin.Allocations) != 0 {
-			return errors.New("meta pin should not specify allocations")
-		}
-		if pin.Reference == nil {
-			return errors.New("metaPins should reference a ClusterDAG")
-		}
-
-	default:
-		return errors.New("unrecognized pin type")
-	}
-	return nil
-}
-
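// Illustrative sketch (an assumption, not part of the deleted file): pin
// options exercising the replication-factor defaulting implemented above.
// A factor of 0 falls back to the cluster-wide defaults; -1 means "pin
// everywhere". Name and factors here are examples.
func pinWithOptionsExample(ctx context.Context, c *Cluster, h api.Cid) {
	opts := api.PinOptions{
		Name:                 "my-dataset", // hypothetical pin name
		ReplicationFactorMin: 2,
		ReplicationFactorMax: 3,
	}
	if _, err := c.Pin(ctx, h, opts); err != nil {
		logger.Error(err)
	}
}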
-// setupPin ensures that the Pin object is fit for pinning. We check
-// and set the replication factors and ensure that the pinType matches the
-// metadata consistently.
-func (c *Cluster) setupPin(ctx context.Context, pin, existing api.Pin) (api.Pin, error) {
-	_, span := trace.StartSpan(ctx, "cluster/setupPin")
-	defer span.End()
-	var err error
-
-	pin, err = c.setupReplicationFactor(pin)
-	if err != nil {
-		return pin, err
-	}
-
-	if !pin.ExpireAt.IsZero() && pin.ExpireAt.Before(time.Now()) {
-		return pin, errors.New("pin.ExpireAt set before current time")
-	}
-
-	if !existing.Defined() {
-		return pin, nil
-	}
-
-	// If the CID is already pinned, we do a couple more checks.
-	if existing.Type != pin.Type {
-		msg := "cannot repin CID with different tracking method, "
-		msg += "clear state with pin rm to proceed. "
-		msg += "New: %s. Was: %s"
-		return pin, fmt.Errorf(msg, pin.Type, existing.Type)
-	}
-
-	if existing.Mode == api.PinModeRecursive && pin.Mode != api.PinModeRecursive {
-		msg := "cannot repin a CID which is already pinned in "
-		msg += "recursive mode (new pin is pinned as %s). Unpin it first."
-		return pin, fmt.Errorf(msg, pin.Mode)
-	}
-
-	return pin, checkPinType(pin)
-}
-
-// pin performs the actual pinning and supports a blacklist to be able to
-// evacuate a node. It returns the pin object that it tried to pin, whether
-// the pin was submitted to the consensus layer or skipped (due to an error or
-// to the fact that it was already valid), and an error.
-//
-// This is the method called by the Cluster.Pin RPC endpoint.
-func (c *Cluster) pin(
-	ctx context.Context,
-	pin api.Pin,
-	blacklist []peer.ID,
-) (api.Pin, bool, error) {
-	ctx, span := trace.StartSpan(ctx, "cluster/pin")
-	defer span.End()
-
-	if c.config.FollowerMode {
-		return api.Pin{}, false, errFollowerMode
-	}
-
-	if !pin.Cid.Defined() {
-		return pin, false, errors.New("bad pin object")
-	}
-
-	// Handle pin updates when the option is set.
-	if update := pin.PinUpdate; update.Defined() && !update.Equals(pin.Cid) {
-		pin, err := c.PinUpdate(ctx, update, pin.Cid, pin.PinOptions)
-		return pin, true, err
-	}
-
-	existing, err := c.PinGet(ctx, pin.Cid)
-	if err != nil && err != state.ErrNotFound {
-		return pin, false, err
-	}
-
-	pin, err = c.setupPin(ctx, pin, existing)
-	if err != nil {
-		return pin, false, err
-	}
-
-	// Set the Pin timestamp to now(). This is not a user-controllable
-	// "option".
-	pin.Timestamp = time.Now()
-
-	if pin.Type == api.MetaType {
-		return pin, true, c.consensus.LogPin(ctx, pin)
-	}
-
-	// We did not change ANY options and the pin exists, so we just repin
-	// what is there without doing new allocations. While this submits
-	// pins to the consensus layer even if they already exist, it triggers the
-	// pin tracker and allows users to get re-pin operations by re-adding
-	// without having to use recover, which is naturally expected.
-	//
-	// blacklist is set on repinFromPeer. Having any blacklisted peers
-	// means we are repinning and need to trigger allocate(), therefore we
-	// can't overwrite the incoming pin (which has Allocations set to
-	// nil).
-	if existing.Defined() &&
-		pin.PinOptions.Equals(existing.PinOptions) &&
-		len(blacklist) == 0 {
-		pin = existing
-	}
-
-	// Usually allocations are unset when pinning normally; however, the
-	// allocations may have been preset by the adder, in which case they
-	// need to be respected. Whenever allocations are set, we don't
-	// re-allocate. repinFromPeer() unsets allocations for this reason.
-	// allocate() will check which peers are currently allocated
-	// and try to respect them.
-	if len(pin.Allocations) == 0 {
-		// If the replication factor is -1, this will return empty
-		// allocations.
-		allocs, err := c.allocate(
-			ctx,
-			pin.Cid,
-			existing,
-			pin.ReplicationFactorMin,
-			pin.ReplicationFactorMax,
-			blacklist,
-			pin.UserAllocations,
-		)
-		if err != nil {
-			return pin, false, err
-		}
-		pin.Allocations = allocs
-	}
-
-	// If this is true, the replication factor should be -1.
-	if len(pin.Allocations) == 0 {
-		logger.Infof("pinning %s everywhere:", pin.Cid)
-	} else {
-		logger.Infof("pinning %s on %s:", pin.Cid, pin.Allocations)
-	}
-
-	return pin, true, c.consensus.LogPin(ctx, pin)
-}
-
-// Unpin removes a previously pinned Cid from Cluster. It returns
-// the global state Pin object as it was stored before removal, or
-// an error if it was not possible to update the global state.
-//
-// Unpin does not reflect the success or failure of underlying IPFS daemon
-// unpinning operations, which happen in an async fashion.
-func (c *Cluster) Unpin(ctx context.Context, h api.Cid) (api.Pin, error) {
-	_, span := trace.StartSpan(ctx, "cluster/Unpin")
-	defer span.End()
-	ctx = trace.NewContext(c.ctx, span)
-
-	if c.config.FollowerMode {
-		return api.Pin{}, errFollowerMode
-	}
-
-	logger.Info("IPFS cluster unpinning:", h)
-	pin, err := c.PinGet(ctx, h)
-	if err != nil {
-		return api.Pin{}, err
-	}
-
-	switch pin.Type {
-	case api.DataType:
-		return pin, c.consensus.LogUnpin(ctx, pin)
-	case api.ShardType:
-		err := "cannot unpin a shard directly. Unpin content root CID instead"
-		return pin, errors.New(err)
-	case api.MetaType:
-		// Unpin the cluster DAG and the referenced shards.
-		err := c.unpinClusterDag(pin)
-		if err != nil {
-			return pin, err
-		}
-		return pin, c.consensus.LogUnpin(ctx, pin)
-	case api.ClusterDAGType:
-		err := "cannot unpin a Cluster DAG directly. Unpin content root CID instead"
-		return pin, errors.New(err)
-	default:
-		return pin, errors.New("unrecognized pin type")
-	}
-}
-
-// unpinClusterDag unpins the clusterDAG metadata node and the shard metadata
-// nodes that it references. It handles the case where multiple parents
-// reference the same metadata node, only unpinning those nodes without
-// existing references.
-func (c *Cluster) unpinClusterDag(metaPin api.Pin) error {
-	ctx, span := trace.StartSpan(c.ctx, "cluster/unpinClusterDag")
-	defer span.End()
-
-	cids, err := c.cidsFromMetaPin(ctx, metaPin.Cid)
-	if err != nil {
-		return err
-	}
-
-	// TODO: FIXME: potentially unpinning shards which are referenced
-	// by other clusterDAGs.
-	for _, ci := range cids {
-		err = c.consensus.LogUnpin(ctx, api.PinCid(ci))
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// PinUpdate pins a new CID based on an existing cluster Pin. The allocations
-// and most pin options (replication factors) are copied from the existing
-// Pin. The options object can be used to set the Name for the new pin and
-// might support additional options in the future.
-//
-// The from pin is NOT unpinned upon completion. The new pin might take
-// advantage of the efficient pin/update operation on the IPFS side (if the
-// IPFSConnector supports it - the default one does). This may offer a
-// significant speedup when pinning items which are similar to previously
-// pinned content.
-func (c *Cluster) PinUpdate(ctx context.Context, from api.Cid, to api.Cid, opts api.PinOptions) (api.Pin, error) {
-	existing, err := c.PinGet(ctx, from)
-	if err != nil { // including when the existing pin is not found
-		return api.Pin{}, err
-	}
-
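	// Illustrative caller-side sketch (an assumption, not part of the
	// deleted file): updating a pin to a new CID while reusing its
	// allocations, per the PinUpdate contract documented above; fromCid
	// and toCid are hypothetical api.Cid values.
	//
	//	pin, err := c.PinUpdate(ctx, fromCid, toCid, api.PinOptions{Name: "v2"})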
-	// Hector: I am not sure whether there is any point in updating something
-	// like a MetaType.
-	if existing.Type != api.DataType {
-		return api.Pin{}, errors.New("this pin type cannot be updated")
-	}
-
-	existing.Cid = to
-	existing.PinUpdate = from
-	existing.Timestamp = time.Now()
-	if opts.Name != "" {
-		existing.Name = opts.Name
-	}
-	if !opts.ExpireAt.IsZero() && opts.ExpireAt.After(time.Now()) {
-		existing.ExpireAt = opts.ExpireAt
-	}
-	return existing, c.consensus.LogPin(ctx, existing)
-}
-
-// PinPath pins a CID resolved from its IPFS Path. It returns the resolved
-// Pin object.
-func (c *Cluster) PinPath(ctx context.Context, path string, opts api.PinOptions) (api.Pin, error) {
-	_, span := trace.StartSpan(ctx, "cluster/PinPath")
-	defer span.End()
-
-	ctx = trace.NewContext(c.ctx, span)
-	ci, err := c.ipfs.Resolve(ctx, path)
-	if err != nil {
-		return api.Pin{}, err
-	}
-
-	return c.Pin(ctx, ci, opts)
-}
-
-// UnpinPath unpins a CID resolved from its IPFS Path. It returns the
-// previously pinned Pin object.
-func (c *Cluster) UnpinPath(ctx context.Context, path string) (api.Pin, error) {
-	_, span := trace.StartSpan(ctx, "cluster/UnpinPath")
-	defer span.End()
-
-	ctx = trace.NewContext(c.ctx, span)
-	ci, err := c.ipfs.Resolve(ctx, path)
-	if err != nil {
-		return api.Pin{}, err
-	}
-
-	return c.Unpin(ctx, ci)
-}
-
-// AddFile adds a file to the ipfs daemons of the cluster. The ipfs importer
-// pipeline is used to DAGify the file. Depending on input parameters, this
-// DAG can be added locally to the calling cluster peer's ipfs repo, or
-// sharded across the entire cluster.
-func (c *Cluster) AddFile(ctx context.Context, reader *multipart.Reader, params api.AddParams) (api.Cid, error) {
-	// TODO: add context param and tracing
-
-	var dags adder.ClusterDAGService
-	if params.Shard {
-		dags = sharding.New(ctx, c.rpcClient, params, nil)
-	} else {
-		dags = single.New(ctx, c.rpcClient, params, params.Local)
-	}
-	add := adder.New(dags, params, nil)
-	return add.FromMultipart(ctx, reader)
-}
-
-// Version returns the current IPFS Cluster version.
-func (c *Cluster) Version() string {
-	return version.Version.String()
-}
-
-// Peers returns the IDs of the members of this Cluster on the out channel.
-// This method blocks until it has finished.
-func (c *Cluster) Peers(ctx context.Context, out chan<- api.ID) {
-	_, span := trace.StartSpan(ctx, "cluster/Peers")
-	defer span.End()
-	ctx = trace.NewContext(c.ctx, span)
-
-	peers, err := c.consensus.Peers(ctx)
-	if err != nil {
-		logger.Error(err)
-		logger.Error("an empty list of peers will be returned")
-		close(out)
-		return
-	}
-	c.peersWithFilter(ctx, peers, out)
-}
-
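// Illustrative sketch (an assumption, not part of the deleted file): consuming
// the streaming Peers API defined above.
func listPeersExample(ctx context.Context, c *Cluster) {
	out := make(chan api.ID, 16)
	go c.Peers(ctx, out)
	for id := range out {
		if id.Error != "" {
			logger.Warnf("peer %s errored: %s", id.ID, id.Error)
			continue
		}
		logger.Infof("peer %s (%s)", id.ID, id.Peername)
	}
}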
-// peersWithFilter requests IDs from the given peers.
-func (c *Cluster) peersWithFilter(ctx context.Context, peers []peer.ID, out chan<- api.ID) {
-	defer close(out)
-
-	// We should be done relatively quickly with this call. Otherwise,
-	// report errors.
-	timeout := 15 * time.Second
-	ctxCall, cancel := context.WithTimeout(ctx, timeout)
-	defer cancel()
-
-	in := make(chan struct{})
-	close(in)
-	idsOut := make(chan api.ID, len(peers))
-	errCh := make(chan []error, 1)
-
-	go func() {
-		defer close(errCh)
-
-		errCh <- c.rpcClient.MultiStream(
-			ctxCall,
-			peers,
-			"Cluster",
-			"IDStream",
-			in,
-			idsOut,
-		)
-	}()
-
-	// Unfortunately, we need to use idsOut as an intermediary channel
-	// because it is closed when MultiStream ends and we cannot keep
-	// adding things to it (the errors below).
-	for id := range idsOut {
-		select {
-		case <-ctx.Done():
-			logger.Errorf("Peers call aborted: %s", ctx.Err())
-			return
-		case out <- id:
-		}
-	}
-
-	// errCh will always be closed on context cancellation too.
-	errs := <-errCh
-	for i, err := range errs {
-		if err == nil {
-			continue
-		}
-		if rpc.IsAuthorizationError(err) {
-			continue
-		}
-		select {
-		case <-ctx.Done():
-			logger.Errorf("Peers call aborted: %s", ctx.Err())
-		case out <- api.ID{
-			ID:    peers[i],
-			Error: err.Error(),
-		}:
-		}
-	}
-}
-
-// getTrustedPeers returns the list of trusted peers, excluding the current
-// peer and the excluded peer if provided.
-func (c *Cluster) getTrustedPeers(ctx context.Context, exclude peer.ID) ([]peer.ID, error) {
-	peers, err := c.consensus.Peers(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	trustedPeers := make([]peer.ID, 0, len(peers))
-
-	for _, p := range peers {
-		if p == c.id || p == exclude || !c.consensus.IsTrustedPeer(ctx, p) {
-			continue
-		}
-		trustedPeers = append(trustedPeers, p)
-	}
-
-	return trustedPeers, nil
-}
-
-func (c *Cluster) setTrackerStatus(gpin *api.GlobalPinInfo, h api.Cid, peers []peer.ID, status api.TrackerStatus, pin api.Pin, t time.Time) {
-	for _, p := range peers {
-		pv := pingValueFromMetric(c.monitor.LatestForPeer(c.ctx, pingMetricName, p))
-		gpin.Add(api.PinInfo{
-			Cid:         h,
-			Name:        pin.Name,
-			Allocations: pin.Allocations,
-			Origins:     pin.Origins,
-			Created:     pin.Timestamp,
-			Metadata:    pin.Metadata,
-			Peer:        p,
-			PinInfoShort: api.PinInfoShort{
-				PeerName:      pv.Peername,
-				IPFS:          pv.IPFSID,
-				IPFSAddresses: pv.IPFSAddresses,
-				Status:        status,
-				TS:            t,
-			},
-		})
-	}
-}
-
-func (c *Cluster) globalPinInfoCid(ctx context.Context, comp, method string, h api.Cid) (api.GlobalPinInfo, error) {
-	ctx, span := trace.StartSpan(ctx, "cluster/globalPinInfoCid")
-	defer span.End()
-
-	// The object we will return.
-	gpin := api.GlobalPinInfo{}
-
-	// Allocated peers; we will contact them through RPC.
-	var dests []peer.ID
-	// Un-allocated peers; we will set remote status for them.
-	var remote []peer.ID
-
-	timeNow := time.Now()
-
-	// If the pin is not part of the pinset, mark it unpinned.
-	pin, err := c.PinGet(ctx, h)
-	if err != nil && err != state.ErrNotFound {
-		logger.Error(err)
-		return api.GlobalPinInfo{}, err
-	}
-
-	// When NotFound, return directly with an unpinned
-	// status.
-	if err == state.ErrNotFound {
-		var members []peer.ID
-		if c.config.FollowerMode {
-			members = []peer.ID{c.host.ID()}
-		} else {
-			members, err = c.consensus.Peers(ctx)
-			if err != nil {
-				logger.Error(err)
-				return api.GlobalPinInfo{}, err
-			}
-		}
-
-		c.setTrackerStatus(
-			&gpin,
-			h,
-			members,
-			api.TrackerStatusUnpinned,
-			api.PinCid(h),
-			timeNow,
-		)
-		return gpin, nil
-	}
-
-	// The pin exists.
-	gpin.Cid = h
-	gpin.Name = pin.Name
-
-	// Make the list of peers that will receive the request.
-	if c.config.FollowerMode {
-		// During follower mode, return only the local status.
-		dests = []peer.ID{c.host.ID()}
-		remote = []peer.ID{}
-	} else {
-		members, err := c.consensus.Peers(ctx)
-		if err != nil {
-			logger.Error(err)
-			return api.GlobalPinInfo{}, err
-		}
-
-		if !pin.IsPinEverywhere() {
-			dests = pin.Allocations
-			remote = peersSubtract(members, dests)
-		} else {
-			dests = members
-			remote = []peer.ID{}
-		}
-	}
-
-	// Set status remote on un-allocated peers.
-	c.setTrackerStatus(&gpin, h, remote, api.TrackerStatusRemote, pin, timeNow)
-
-	lenDests := len(dests)
-	replies := make([]api.PinInfo, lenDests)
-
-	// A globalPinInfo type of request should be relatively fast. We
-	// cannot block the response indefinitely due to an unresponsive node.
-	timeout := 15 * time.Second
-	ctxs, cancels := rpcutil.CtxsWithTimeout(ctx, lenDests, timeout)
-	defer rpcutil.MultiCancel(cancels)
-
-	errs := c.rpcClient.MultiCall(
-		ctxs,
-		dests,
-		comp,
-		method,
-		h,
-		rpcutil.CopyPinInfoToIfaces(replies),
-	)
-
-	for i, r := range replies {
-		e := errs[i]
-
-		// No error. Parse and continue.
-		if e == nil {
-			gpin.Add(r)
-			continue
-		}
-
-		if rpc.IsAuthorizationError(e) {
-			logger.Debug("rpc auth error:", e)
-			continue
-		}
-
-		// Deal with error cases (err != nil): wrap errors in PinInfo.
-		logger.Errorf("%s: error in broadcast response from %s: %s ", c.id, dests[i], e)
-
-		pv := pingValueFromMetric(c.monitor.LatestForPeer(ctx, pingMetricName, dests[i]))
-		gpin.Add(api.PinInfo{
-			Cid:         h,
-			Name:        pin.Name,
-			Peer:        dests[i],
-			Allocations: pin.Allocations,
-			Origins:     pin.Origins,
-			Created:     pin.Timestamp,
-			Metadata:    pin.Metadata,
-			PinInfoShort: api.PinInfoShort{
-				PeerName:      pv.Peername,
-				IPFS:          pv.IPFSID,
-				IPFSAddresses: pv.IPFSAddresses,
-				Status:        api.TrackerStatusClusterError,
-				TS:            timeNow,
-				Error:         e.Error(),
-			},
-		})
-	}
-
-	return gpin, nil
-}
-
-func (c *Cluster) globalPinInfoStream(ctx context.Context, comp, method string, inChan interface{}, out chan<- api.GlobalPinInfo) error {
-	defer close(out)
-
-	ctx, span := trace.StartSpan(ctx, "cluster/globalPinInfoStream")
-	defer span.End()
-
-	if inChan == nil {
-		emptyChan := make(chan struct{})
-		close(emptyChan)
-		inChan = emptyChan
-	}
-
-	fullMap := make(map[api.Cid]api.GlobalPinInfo)
-
-	var members []peer.ID
-	var err error
-	if c.config.FollowerMode {
-		members = []peer.ID{c.host.ID()}
-	} else {
-		members, err = c.consensus.Peers(ctx)
-		if err != nil {
-			logger.Error(err)
-			return err
-		}
-	}
-
-	// We don't have a good timeout proposal for this. Depending on the
-	// size of the state and the performance of IPFS and the network, this
-	// may take moderately long.
-	// If we did, this is the place to put it.
-	ctx, cancel := context.WithCancel(ctx)
-	defer cancel()
-
-	msOut := make(chan api.PinInfo)
-	errsCh := make(chan []error, 1)
-	go func() {
-		defer close(errsCh)
-		errsCh <- c.rpcClient.MultiStream(
-			ctx,
-			members,
-			comp,
-			method,
-			inChan,
-			msOut,
-		)
-	}()
-
-	setPinInfo := func(p api.PinInfo) {
-		if !p.Defined() {
-			return
-		}
-		info, ok := fullMap[p.Cid]
-		if !ok {
-			info = api.GlobalPinInfo{}
-		}
-		info.Add(p)
-		// Set the new/updated info.
-		fullMap[p.Cid] = info
-	}
-
-	// Make the big collection.
-	for pin := range msOut {
-		setPinInfo(pin)
-	}
-
-	// This waits until MultiStream is done.
-	erroredPeers := make(map[peer.ID]string)
-	errs, ok := <-errsCh
-	if ok {
-		for i, err := range errs {
-			if err == nil {
-				continue
-			}
-			if rpc.IsAuthorizationError(err) {
-				logger.Debug("rpc auth error", err)
-				continue
-			}
-			logger.Errorf("%s: error in broadcast response from %s: %s ", c.id, members[i], err)
-			erroredPeers[members[i]] = err.Error()
-		}
-	}
-
-	// Merge any errors.
-	for p, msg := range erroredPeers {
-		pv := pingValueFromMetric(c.monitor.LatestForPeer(ctx, pingMetricName, p))
-		for c := range fullMap {
-			setPinInfo(api.PinInfo{
-				Cid:         c,
-				Name:        "",
-				Peer:        p,
-				Allocations: nil,
-				Origins:     nil,
-				// Created: // leave uninitialized
-				Metadata: nil,
-				PinInfoShort: api.PinInfoShort{
-					PeerName:      pv.Peername,
-					IPFS:          pv.IPFSID,
-					IPFSAddresses: pv.IPFSAddresses,
-					Status:        api.TrackerStatusClusterError,
-					TS:            time.Now(),
-					Error:         msg,
-				},
-			})
-		}
-	}
-
-	for _, v := range fullMap {
-		select {
-		case <-ctx.Done():
-			err := fmt.Errorf("%s.%s aborted: %w", comp, method, ctx.Err())
-			logger.Error(err)
-			return err
-		case out <- v:
-		}
-	}
-
-	return nil
-}
-
-func (c *Cluster) getIDForPeer(ctx context.Context, pid peer.ID) (*api.ID, error) {
-	ctx, span := trace.StartSpan(ctx, "cluster/getIDForPeer")
-	defer span.End()
-
-	var id api.ID
-	err := c.rpcClient.CallContext(
-		ctx,
-		pid,
-		"Cluster",
-		"ID",
-		struct{}{},
-		&id,
-	)
-	if err != nil {
-		logger.Error(err)
-		id.ID = pid
-		id.Error = err.Error()
-	}
-	return &id, err
-}
-
-// cidsFromMetaPin expands a meta-pin and returns a list of Cids that
-// Cluster handles for it: the ShardPins, the ClusterDAG and the MetaPin, in
-// that order (the MetaPin is the last element).
-// It returns a slice with only the given Cid if it's not a known Cid or not a
-// MetaPin.
-func (c *Cluster) cidsFromMetaPin(ctx context.Context, h api.Cid) ([]api.Cid, error) {
-	ctx, span := trace.StartSpan(ctx, "cluster/cidsFromMetaPin")
-	defer span.End()
-
-	cState, err := c.consensus.State(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	list := []api.Cid{h}
-
-	pin, err := cState.Get(ctx, h)
-	if err != nil {
-		return nil, err
-	}
-
-	if pin.Type != api.MetaType {
-		return list, nil
-	}
-
-	if pin.Reference == nil {
-		return nil, errors.New("metaPin.Reference is unset")
-	}
-	list = append([]api.Cid{*pin.Reference}, list...)
-	clusterDagPin, err := c.PinGet(ctx, *pin.Reference)
-	if err != nil {
-		return list, fmt.Errorf("could not get clusterDAG pin from state. Malformed pin?: %s", err)
-	}
-
-	clusterDagBlock, err := c.ipfs.BlockGet(ctx, clusterDagPin.Cid)
-	if err != nil {
-		return list, fmt.Errorf("error reading clusterDAG block from ipfs: %s", err)
-	}
-
-	clusterDagNode, err := sharding.CborDataToNode(clusterDagBlock, "cbor")
-	if err != nil {
-		return list, fmt.Errorf("error parsing clusterDAG block: %s", err)
-	}
-	for _, l := range clusterDagNode.Links() {
-		list = append([]api.Cid{api.NewCid(l.Cid)}, list...)
-	}
-
-	return list, nil
-}
-
-// // diffPeers returns the peerIDs added and removed from peers2 in relation to
-// // peers1
-// func diffPeers(peers1, peers2 []peer.ID) (added, removed []peer.ID) {
-// 	m1 := make(map[peer.ID]struct{})
-// 	m2 := make(map[peer.ID]struct{})
-// 	added = make([]peer.ID, 0)
-// 	removed = make([]peer.ID, 0)
-// 	if peers1 == nil && peers2 == nil {
-// 		return
-// 	}
-// 	if peers1 == nil {
-// 		added = peers2
-// 		return
-// 	}
-// 	if peers2 == nil {
-// 		removed = peers1
-// 		return
-// 	}
-
-// 	for _, p := range peers1 {
-// 		m1[p] = struct{}{}
-// 	}
-// 	for _, p := range peers2 {
-// 		m2[p] = struct{}{}
-// 	}
-// 	for k := range m1 {
-// 		_, ok := m2[k]
-// 		if !ok {
-// 			removed = append(removed, k)
-// 		}
-// 	}
-// 	for k := range m2 {
-// 		_, ok := m1[k]
-// 		if !ok {
-// 			added = append(added, k)
-// 		}
-// 	}
-// 	return
-// }
-
-// RepoGC performs a garbage collection sweep on all peers' IPFS repos.
-func (c *Cluster) RepoGC(ctx context.Context) (api.GlobalRepoGC, error) {
-	_, span := trace.StartSpan(ctx, "cluster/RepoGC")
-	defer span.End()
-	ctx = trace.NewContext(c.ctx, span)
-
-	members, err := c.consensus.Peers(ctx)
-	if err != nil {
-		logger.Error(err)
-		return api.GlobalRepoGC{}, err
-	}
-
-	// Club the `RepoGCLocal` responses of all peers into one.
-	globalRepoGC := api.GlobalRepoGC{PeerMap: make(map[string]api.RepoGC)}
-
-	for _, member := range members {
-		var repoGC api.RepoGC
-		err = c.rpcClient.CallContext(
-			ctx,
-			member,
-			"Cluster",
-			"RepoGCLocal",
-			struct{}{},
-			&repoGC,
-		)
-		if err == nil {
-			globalRepoGC.PeerMap[member.String()] = repoGC
-			continue
-		}
-
-		if rpc.IsAuthorizationError(err) {
-			logger.Debug("rpc auth error:", err)
-			continue
-		}
-
-		logger.Errorf("%s: error in broadcast response from %s: %s ", c.id, member, err)
-
-		pv := pingValueFromMetric(c.monitor.LatestForPeer(c.ctx, pingMetricName, member))
-
-		globalRepoGC.PeerMap[member.String()] = api.RepoGC{
-			Peer:     member,
-			Peername: pv.Peername,
-			Keys:     []api.IPFSRepoGC{},
-			Error:    err.Error(),
-		}
-	}
-
-	return globalRepoGC, nil
-}
-
-// RepoGCLocal performs garbage collection only on the local IPFS daemon.
-func (c *Cluster) RepoGCLocal(ctx context.Context) (api.RepoGC, error) {
-	_, span := trace.StartSpan(ctx, "cluster/RepoGCLocal")
-	defer span.End()
-	ctx = trace.NewContext(c.ctx, span)
-
-	resp, err := c.ipfs.RepoGC(ctx)
-	if err != nil {
-		return api.RepoGC{}, err
-	}
-	resp.Peer = c.id
-	resp.Peername = c.config.Peername
-	return resp, nil
-}
diff --git a/packages/networking/ipfs-cluster/cluster_config.go b/packages/networking/ipfs-cluster/cluster_config.go
deleted file mode 100644
index bc261e0..0000000
--- a/packages/networking/ipfs-cluster/cluster_config.go
+++ /dev/null
@@ -1,576 +0,0 @@
-package ipfscluster
-
-import (
-	"crypto/rand"
-	"encoding/hex"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"os"
-	"path/filepath"
-	"reflect"
-	"time"
-
-	"github.com/ipfs-cluster/ipfs-cluster/config"
-
-	pnet "github.com/libp2p/go-libp2p/core/pnet"
-	ma "github.com/multiformats/go-multiaddr"
-
-	"github.com/kelseyhightower/envconfig"
-)
-
-const configKey = "cluster"
-
-// DefaultListenAddrs contains TCP and QUIC listen addresses.
-var DefaultListenAddrs = []string{
-	"/ip4/0.0.0.0/tcp/9096",
-	"/ip4/0.0.0.0/udp/9096/quic",
-}
-
-// Configuration defaults
-const (
-	DefaultEnableRelayHop        = true
-	DefaultStateSyncInterval     = 5 * time.Minute
-	DefaultPinRecoverInterval    = 12 * time.Minute
-	DefaultMonitorPingInterval   = 15 * time.Second
-	DefaultPeerWatchInterval     = 5 * time.Second
-	DefaultReplicationFactor     = -1
-	DefaultLeaveOnShutdown       = false
-	DefaultPinOnlyOnTrustedPeers = false
-	DefaultDisableRepinning      = true
-	DefaultPeerstoreFile         = "peerstore"
-	DefaultConnMgrHighWater      = 400
-	DefaultConnMgrLowWater       = 100
-	DefaultConnMgrGracePeriod    = 2 * time.Minute
-	DefaultDialPeerTimeout       = 3 * time.Second
-	DefaultFollowerMode          = false
-	DefaultMDNSInterval          = 10 * time.Second
-)
-
-// ConnMgrConfig configures the libp2p host connection manager.
-type ConnMgrConfig struct {
-	HighWater   int
-	LowWater    int
-	GracePeriod time.Duration
-}
-
-// Config is the configuration object containing customizable variables to
-// initialize the main ipfs-cluster component. It implements the
-// config.ComponentConfig interface.
-type Config struct {
-	config.Saver
-
-	// User-defined peername for use as human-readable identifier.
-	Peername string
-
-	// Cluster secret for private network. Peers will be in the same cluster if and
-	// only if they have the same ClusterSecret. The cluster secret must be exactly
-	// 64 characters and contain only hexadecimal characters (`[0-9a-f]`).
-	Secret pnet.PSK
-
-	// RPCPolicy defines access control to RPC endpoints.
-	RPCPolicy map[string]RPCEndpointType
-
-	// Leave Cluster on shutdown. Politely informs other peers
-	// of the departure and removes itself from the consensus
-	// peer set. The Cluster size will be reduced by one.
-	LeaveOnShutdown bool
-
-	// Listen parameters for the Cluster libp2p Host. Used by
-	// the RPC and Consensus components.
-	ListenAddr []ma.Multiaddr
-
-	// Enables HOP relay for the node. If this is enabled, the node will act as
-	// an intermediate (Hop Relay) node in relay circuits for connected peers.
-	EnableRelayHop bool
-
-	// ConnMgr holds configuration values for the connection manager for
-	// the libp2p host.
-	// FIXME: This only applies to ipfs-cluster-service.
-	ConnMgr ConnMgrConfig
-
-	// Sets the default dial timeout for libp2p connections to other
-	// peers.
-	DialPeerTimeout time.Duration
-
-	// Time between syncs of the consensus state to the
-	// tracker state. Normally states are synced anyway, but this helps
-	// when new nodes are joining the cluster. Reduce for faster
-	// consistency, increase with larger states.
-	StateSyncInterval time.Duration
-
-	// Time between automatic runs of the "recover" operation
-	// which will retry to pin/unpin items in error state.
-	PinRecoverInterval time.Duration
-
-	// ReplicationFactorMax indicates the target number of nodes
-	// that should pin content. For example, a replication_factor of
-	// 3 will have cluster allocate each pinned hash to 3 peers if
-	// possible.
-	// See also ReplicationFactorMin. A ReplicationFactorMax of -1
-	// will allocate to every available node.
-	ReplicationFactorMax int
-
-	// ReplicationFactorMin indicates the minimum number of healthy
-	// nodes pinning content. If the number of nodes available to pin
-	// is less than this threshold, an error will be returned.
-	// In the case of peer health issues, content pinned will be
-	// re-allocated if the threshold is crossed.
-	// For example, a ReplicationFactorMin of 2 will allocate at least
-	// two peers to hold content, and return an error if this is not
-	// possible.
-	ReplicationFactorMin int
-
-	// MonitorPingInterval is the frequency with which a cluster peer
-	// sends a "ping" metric. The metric has a TTL set to the double of
-	// this value. This metric sends information about this peer to other
-	// peers.
-	MonitorPingInterval time.Duration
-
-	// PeerWatchInterval is the frequency that we use to watch for changes
-	// in the consensus peerset and save new peers to the configuration
-	// file. This also affects how soon we realize that we have
-	// been removed from a cluster.
-	PeerWatchInterval time.Duration
-
-	// MDNSInterval controls the time between mDNS broadcasts to the
-	// network announcing the peer addresses. Set to 0 to disable
-	// mDNS.
-	MDNSInterval time.Duration
-
-	// PinOnlyOnTrustedPeers limits allocations to trusted peers only.
-	PinOnlyOnTrustedPeers bool
-
-	// If true, DisableRepinning ensures that no repinning happens
-	// when a node goes down.
-	// This is useful when doing certain types of maintenance, or simply
-	// when not wanting to rely on the monitoring system which needs a revamp.
-	DisableRepinning bool
-
-	// FollowerMode disables broadcast requests from this peer
-	// (sync, recover, status) and disallows pinset management
-	// operations (Pin/Unpin).
-	FollowerMode bool
-
-	// PeerstoreFile specifies the file on which we persist the
-	// libp2p host peerstore addresses. This file is regularly saved.
-	PeerstoreFile string
-
-	// PeerAddresses stores additional addresses for peers that may or may
-	// not be in the peerstore file. These are considered high priority
-	// when bootstrapping the initial cluster connections.
-	PeerAddresses []ma.Multiaddr
-
-	// Tracing flag used to skip tracing specific paths when not enabled.
-	Tracing bool
-}
-
-// configJSON represents a Cluster configuration as it will look when it is
-// saved using JSON. Most configuration keys are converted into simple types
-// like strings, and key names aim to be self-explanatory for the user.
-type configJSON struct {
-	ID                    string             `json:"id,omitempty"`
-	Peername              string             `json:"peername"`
-	PrivateKey            string             `json:"private_key,omitempty" hidden:"true"`
-	Secret                string             `json:"secret" hidden:"true"`
-	LeaveOnShutdown       bool               `json:"leave_on_shutdown"`
-	ListenMultiaddress    config.Strings     `json:"listen_multiaddress"`
-	EnableRelayHop        bool               `json:"enable_relay_hop"`
-	ConnectionManager     *connMgrConfigJSON `json:"connection_manager"`
-	DialPeerTimeout       string             `json:"dial_peer_timeout"`
-	StateSyncInterval     string             `json:"state_sync_interval"`
-	PinRecoverInterval    string             `json:"pin_recover_interval"`
-	ReplicationFactorMin  int                `json:"replication_factor_min"`
-	ReplicationFactorMax  int                `json:"replication_factor_max"`
-	MonitorPingInterval   string             `json:"monitor_ping_interval"`
-	PeerWatchInterval     string             `json:"peer_watch_interval"`
-	MDNSInterval          string             `json:"mdns_interval"`
-	PinOnlyOnTrustedPeers bool               `json:"pin_only_on_trusted_peers"`
-	DisableRepinning      bool               `json:"disable_repinning"`
-	FollowerMode          bool               `json:"follower_mode,omitempty"`
-	PeerstoreFile         string             `json:"peerstore_file,omitempty"`
-	PeerAddresses         []string           `json:"peer_addresses"`
-}
-
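// Illustrative sketch (an assumption, not part of the deleted file): the
// connection_manager section mirrored by connMgrConfigJSON just below,
// populated with the package defaults defined above.
func connMgrJSONExample() *connMgrConfigJSON {
	return &connMgrConfigJSON{
		HighWater:   DefaultConnMgrHighWater,            // 400
		LowWater:    DefaultConnMgrLowWater,             // 100
		GracePeriod: DefaultConnMgrGracePeriod.String(), // "2m0s"
	}
}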
-// connMgrConfigJSON configures the libp2p host connection manager.
-type connMgrConfigJSON struct {
-	HighWater   int    `json:"high_water"`
-	LowWater    int    `json:"low_water"`
-	GracePeriod string `json:"grace_period"`
-}
-
-// ConfigKey returns a human-readable string to identify
-// a cluster Config.
-func (cfg *Config) ConfigKey() string {
-	return configKey
-}
-
-// Default fills in all the Config fields with
-// default working values. This means it will
-// generate a Secret.
-func (cfg *Config) Default() error {
-	cfg.setDefaults()
-
-	clusterSecret := make([]byte, 32)
-	n, err := rand.Read(clusterSecret)
-	if err != nil {
-		return err
-	}
-	if n != 32 {
-		return errors.New("did not generate 32-byte secret")
-	}
-
-	cfg.Secret = clusterSecret
-	return nil
-}
-
-// ApplyEnvVars fills in any Config fields found
-// as environment variables.
-func (cfg *Config) ApplyEnvVars() error {
-	jcfg, err := cfg.toConfigJSON()
-	if err != nil {
-		return err
-	}
-
-	err = envconfig.Process(cfg.ConfigKey(), jcfg)
-	if err != nil {
-		return err
-	}
-
-	return cfg.applyConfigJSON(jcfg)
-}
-
-// Validate will check that the values of this config
-// seem to be working ones.
-func (cfg *Config) Validate() error {
-	if cfg.ListenAddr == nil {
-		return errors.New("cluster.listen_multiaddress is undefined")
-	}
-
-	if len(cfg.ListenAddr) == 0 {
-		return errors.New("cluster.listen_multiaddress is empty")
-	}
-
-	if cfg.ConnMgr.LowWater <= 0 {
-		return errors.New("cluster.connection_manager.low_water is invalid")
-	}
-
-	if cfg.ConnMgr.HighWater <= 0 {
-		return errors.New("cluster.connection_manager.high_water is invalid")
-	}
-
-	if cfg.ConnMgr.LowWater > cfg.ConnMgr.HighWater {
-		return errors.New("cluster.connection_manager.low_water is greater than high_water")
-	}
-
-	if cfg.ConnMgr.GracePeriod == 0 {
-		return errors.New("cluster.connection_manager.grace_period is invalid")
-	}
-
-	if cfg.DialPeerTimeout <= 0 {
-		return errors.New("cluster.dial_peer_timeout is invalid")
-	}
-
-	if cfg.StateSyncInterval <= 0 {
-		return errors.New("cluster.state_sync_interval is invalid")
-	}
-
-	if cfg.PinRecoverInterval <= 0 {
-		return errors.New("cluster.pin_recover_interval is invalid")
-	}
-
-	if cfg.MonitorPingInterval <= 0 {
-		return errors.New("cluster.monitoring_interval is invalid")
-	}
-
-	if cfg.PeerWatchInterval <= 0 {
-		return errors.New("cluster.peer_watch_interval is invalid")
-	}
-
-	rfMax := cfg.ReplicationFactorMax
-	rfMin := cfg.ReplicationFactorMin
-
-	if err := isReplicationFactorValid(rfMin, rfMax); err != nil {
-		return err
-	}
-
-	return isRPCPolicyValid(cfg.RPCPolicy)
-}
-
-func isReplicationFactorValid(rplMin, rplMax int) error {
-	// check Max and Min are correct
-	if rplMin == 0 || rplMax == 0 {
-		return errors.New("cluster.replication_factor_min and max must be set")
-	}
-
-	if rplMin > rplMax {
-		return errors.New("cluster.replication_factor_min is larger than max")
-	}
-
-	if rplMin < -1 {
-		return errors.New("cluster.replication_factor_min is wrong")
-	}
-
-	if rplMax < -1 {
-		return errors.New("cluster.replication_factor_max is wrong")
-	}
-
-	if (rplMin == -1 && rplMax != -1) || (rplMin != -1 && rplMax == -1) {
-		return errors.New("cluster.replication_factor_min and max must be -1 when one of them is")
-	}
-	return nil
-}
-
-func isRPCPolicyValid(p map[string]RPCEndpointType) error {
-	rpcComponents := []interface{}{
-		&ClusterRPCAPI{},
-		&PinTrackerRPCAPI{},
-		&IPFSConnectorRPCAPI{},
-		&ConsensusRPCAPI{},
-		&PeerMonitorRPCAPI{},
-	}
-
-	total := 0
-	for _, c := range rpcComponents {
-		t := reflect.TypeOf(c)
-		for i := 0; i < t.NumMethod(); i++ {
-			total++
-			method := t.Method(i)
-			name := fmt.Sprintf("%s.%s", RPCServiceID(c), method.Name)
-			_, ok := p[name]
-			if !ok {
-				return fmt.Errorf("RPCPolicy is missing the %s method", name)
-			}
-		}
-	}
-	if len(p) != total {
-		logger.Warn("defined RPC policy has more entries than needed")
-	}
-	return nil
-}
-
-// this just sets non-generated defaults
-func (cfg *Config) setDefaults() {
-	hostname, err := os.Hostname()
-	if err != nil {
-		hostname = ""
-	}
-	cfg.Peername = hostname
-
-	listenAddrs := []ma.Multiaddr{}
-	for _, m := range DefaultListenAddrs {
-		addr, _ := ma.NewMultiaddr(m)
-		listenAddrs = append(listenAddrs, addr)
-	}
-	cfg.ListenAddr = listenAddrs
-	cfg.EnableRelayHop = DefaultEnableRelayHop
-	cfg.ConnMgr = ConnMgrConfig{
-		HighWater:   DefaultConnMgrHighWater,
-		LowWater:    DefaultConnMgrLowWater,
-		GracePeriod: DefaultConnMgrGracePeriod,
-	}
-	cfg.DialPeerTimeout = DefaultDialPeerTimeout
-	cfg.LeaveOnShutdown = DefaultLeaveOnShutdown
-	cfg.StateSyncInterval = DefaultStateSyncInterval
-	cfg.PinRecoverInterval = DefaultPinRecoverInterval
-	cfg.ReplicationFactorMin = DefaultReplicationFactor
-	cfg.ReplicationFactorMax = DefaultReplicationFactor
-	cfg.MonitorPingInterval = DefaultMonitorPingInterval
-	cfg.PeerWatchInterval = DefaultPeerWatchInterval
-	cfg.MDNSInterval = DefaultMDNSInterval
-	cfg.PinOnlyOnTrustedPeers = DefaultPinOnlyOnTrustedPeers
-	cfg.DisableRepinning = DefaultDisableRepinning
-	cfg.FollowerMode = DefaultFollowerMode
-	cfg.PeerstoreFile = "" // empty so it gets omitted.
-	cfg.PeerAddresses = []ma.Multiaddr{}
-	cfg.RPCPolicy = DefaultRPCPolicy
-}
-
-// LoadJSON receives a raw json-formatted configuration and
-// sets the Config fields from it. Note that it should be JSON
-// as generated by ToJSON().
-func (cfg *Config) LoadJSON(raw []byte) error {
-	jcfg := &configJSON{}
-	err := json.Unmarshal(raw, jcfg)
-	if err != nil {
-		logger.Error("Error unmarshaling cluster config")
-		return err
-	}
-
-	cfg.setDefaults()
-
-	return cfg.applyConfigJSON(jcfg)
-}
-
-func (cfg *Config) applyConfigJSON(jcfg *configJSON) error {
-	config.SetIfNotDefault(jcfg.PeerstoreFile, &cfg.PeerstoreFile)
-
-	config.SetIfNotDefault(jcfg.Peername, &cfg.Peername)
-
-	clusterSecret, err := DecodeClusterSecret(jcfg.Secret)
-	if err != nil {
-		err = fmt.Errorf("error loading cluster secret from config: %s", err)
-		return err
-	}
-	cfg.Secret = clusterSecret
-
-	var listenAddrs []ma.Multiaddr
-	for _, addr := range jcfg.ListenMultiaddress {
-		listenAddr, err := ma.NewMultiaddr(addr)
-		if err != nil {
-			err = fmt.Errorf("error parsing a listen_multiaddress: %s", err)
-			return err
-		}
-		listenAddrs = append(listenAddrs, listenAddr)
-	}
-
-	cfg.ListenAddr = listenAddrs
-	cfg.EnableRelayHop = jcfg.EnableRelayHop
-	if conman := jcfg.ConnectionManager; conman != nil {
-		cfg.ConnMgr = ConnMgrConfig{
-			HighWater: jcfg.ConnectionManager.HighWater,
-			LowWater:  jcfg.ConnectionManager.LowWater,
-		}
-		err = config.ParseDurations("cluster",
-			&config.DurationOpt{Duration: jcfg.ConnectionManager.GracePeriod, Dst: &cfg.ConnMgr.GracePeriod, Name: "connection_manager.grace_period"},
-		)
-		if err != nil {
-			return err
-		}
-	}
-
-	rplMin := jcfg.ReplicationFactorMin
-	rplMax := jcfg.ReplicationFactorMax
-	config.SetIfNotDefault(rplMin, &cfg.ReplicationFactorMin)
-	config.SetIfNotDefault(rplMax, &cfg.ReplicationFactorMax)
-
-	err = config.ParseDurations("cluster",
-		&config.DurationOpt{Duration: jcfg.DialPeerTimeout, Dst: &cfg.DialPeerTimeout, Name: "dial_peer_timeout"},
-		&config.DurationOpt{Duration: jcfg.StateSyncInterval, Dst: &cfg.StateSyncInterval, Name: "state_sync_interval"},
-		&config.DurationOpt{Duration: jcfg.PinRecoverInterval, Dst: &cfg.PinRecoverInterval, Name: "pin_recover_interval"},
-		&config.DurationOpt{Duration: jcfg.MonitorPingInterval, Dst: &cfg.MonitorPingInterval, Name: "monitor_ping_interval"},
-		&config.DurationOpt{Duration: jcfg.PeerWatchInterval, Dst: &cfg.PeerWatchInterval, Name: "peer_watch_interval"},
-		&config.DurationOpt{Duration: jcfg.MDNSInterval, Dst: &cfg.MDNSInterval, Name: "mdns_interval"},
-	)
-	if err != nil {
-		return err
-	}
-
-	// PeerAddresses
-	peerAddrs := []ma.Multiaddr{}
-	for _, addr := range jcfg.PeerAddresses {
-		peerAddr, err := ma.NewMultiaddr(addr)
-		if err != nil {
-			err = fmt.Errorf("error parsing peer_addresses: %s", err)
-			return err
-		}
-		peerAddrs = append(peerAddrs, peerAddr)
-	}
-	cfg.PeerAddresses = peerAddrs
-	cfg.LeaveOnShutdown = jcfg.LeaveOnShutdown
-	cfg.PinOnlyOnTrustedPeers = jcfg.PinOnlyOnTrustedPeers
-	cfg.DisableRepinning = jcfg.DisableRepinning
-	cfg.FollowerMode = jcfg.FollowerMode
-
-	return cfg.Validate()
-}
-
-// ToJSON generates a human-friendly version of Config.
-func (cfg *Config) ToJSON() (raw []byte, err error) {
-	jcfg, err := cfg.toConfigJSON()
-	if err != nil {
-		return
-	}
-
-	raw, err = json.MarshalIndent(jcfg, "", "    ")
-	return
-}
-
-func (cfg *Config) toConfigJSON() (jcfg *configJSON, err error) {
-	// Multiaddress String() may panic
-	defer func() {
-		if r := recover(); r != nil {
-			err = fmt.Errorf("%s", r)
-		}
-	}()
-
-	jcfg = &configJSON{}
-
-	// Set all configuration fields
-	jcfg.Peername = cfg.Peername
-	jcfg.Secret = EncodeProtectorKey(cfg.Secret)
-	jcfg.ReplicationFactorMin = cfg.ReplicationFactorMin
-	jcfg.ReplicationFactorMax = cfg.ReplicationFactorMax
-	jcfg.LeaveOnShutdown = cfg.LeaveOnShutdown
-	var listenAddrs config.Strings
-	for _, addr := range cfg.ListenAddr {
-		listenAddrs = append(listenAddrs, addr.String())
-	}
-	jcfg.ListenMultiaddress = config.Strings(listenAddrs)
-	jcfg.EnableRelayHop = cfg.EnableRelayHop
-	jcfg.ConnectionManager = &connMgrConfigJSON{
-		HighWater:   cfg.ConnMgr.HighWater,
-		LowWater:    cfg.ConnMgr.LowWater,
-		GracePeriod: cfg.ConnMgr.GracePeriod.String(),
-	}
-	jcfg.DialPeerTimeout = cfg.DialPeerTimeout.String()
-	jcfg.StateSyncInterval = cfg.StateSyncInterval.String()
-	jcfg.PinRecoverInterval = cfg.PinRecoverInterval.String()
-	jcfg.MonitorPingInterval = cfg.MonitorPingInterval.String()
-	jcfg.PeerWatchInterval = cfg.PeerWatchInterval.String()
-	jcfg.MDNSInterval = cfg.MDNSInterval.String()
-	jcfg.PinOnlyOnTrustedPeers = cfg.PinOnlyOnTrustedPeers
-	jcfg.DisableRepinning = cfg.DisableRepinning
-	jcfg.PeerstoreFile = cfg.PeerstoreFile
-	jcfg.PeerAddresses = []string{}
-	for _, addr := range cfg.PeerAddresses {
-		jcfg.PeerAddresses = append(jcfg.PeerAddresses, addr.String())
-	}
-	jcfg.FollowerMode = cfg.FollowerMode
-
-	return
-}
-
-// GetPeerstorePath returns the full path of the
-// PeerstoreFile, obtained by concatenating that value
-// with BaseDir of the configuration, if set.
-// An empty string is returned when BaseDir is not set.
-func (cfg *Config) GetPeerstorePath() string {
-	if cfg.BaseDir == "" {
-		return ""
-	}
-
-	filename := DefaultPeerstoreFile
-	if cfg.PeerstoreFile != "" {
-		filename = cfg.PeerstoreFile
-	}
-
-	return filepath.Join(cfg.BaseDir, filename)
-}
-
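// Illustrative sketch (an assumption, not part of the deleted file): loading a
// configuration from disk and applying environment overrides; the file name is
// hypothetical.
func loadConfigExample() (*Config, error) {
	raw, err := os.ReadFile("service.json")
	if err != nil {
		return nil, err
	}
	cfg := &Config{}
	if err := cfg.LoadJSON(raw); err != nil {
		return nil, err
	}
	// Environment variables such as CLUSTER_PEERNAME override file values.
	return cfg, cfg.ApplyEnvVars()
}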
-func (cfg *Config) ToDisplayJSON() ([]byte, error) { - jcfg, err := cfg.toConfigJSON() - if err != nil { - return nil, err - } - return config.DisplayJSON(jcfg) -} - -// DecodeClusterSecret parses a hex-encoded string, checks that it is exactly -// 32 bytes long and returns its value as a byte-slice. -func DecodeClusterSecret(hexSecret string) ([]byte, error) { - secret, err := hex.DecodeString(hexSecret) - if err != nil { - return nil, err - } - switch secretLen := len(secret); secretLen { - case 0: - logger.Warn("Cluster secret is empty, cluster will start on unprotected network.") - return nil, nil - case 32: - return secret, nil - default: - return nil, fmt.Errorf("input secret is %d bytes, cluster secret should be 32", secretLen) - } -} diff --git a/packages/networking/ipfs-cluster/cluster_config_test.go b/packages/networking/ipfs-cluster/cluster_config_test.go deleted file mode 100644 index a945a44..0000000 --- a/packages/networking/ipfs-cluster/cluster_config_test.go +++ /dev/null @@ -1,286 +0,0 @@ -package ipfscluster - -import ( - "encoding/json" - "os" - "testing" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/config" -) - -var ccfgTestJSON = []byte(` -{ - "peername": "testpeer", - "secret": "2588b80d5cb05374fa142aed6cbb047d1f4ef8ef15e37eba68c65b9d30df67ed", - "leave_on_shutdown": true, - "connection_manager": { - "high_water": 501, - "low_water": 500, - "grace_period": "100m0s" - }, - "listen_multiaddress": [ - "/ip4/127.0.0.1/tcp/10000", - "/ip4/127.0.0.1/udp/10000/quic" - ], - "state_sync_interval": "1m0s", - "pin_recover_interval": "1m", - "replication_factor_min": 5, - "replication_factor_max": 5, - "monitor_ping_interval": "2s", - "pin_only_on_trusted_peers": true, - "disable_repinning": true, - "peer_addresses": [ "/ip4/127.0.0.1/tcp/1234/p2p/QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabc" ] -} -`) - -func TestLoadJSON(t *testing.T) { - loadJSON := func(t *testing.T) *Config { - cfg := &Config{} - err := cfg.LoadJSON(ccfgTestJSON) - if err != nil { - t.Fatal(err) - } - return cfg - } - - t.Run("basic", func(t *testing.T) { - cfg := &Config{} - err := cfg.LoadJSON(ccfgTestJSON) - if err != nil { - t.Fatal(err) - } - }) - - t.Run("peername", func(t *testing.T) { - cfg := loadJSON(t) - if cfg.Peername != "testpeer" { - t.Error("expected peername 'testpeer'") - } - }) - - t.Run("expected replication factor", func(t *testing.T) { - cfg := loadJSON(t) - if cfg.ReplicationFactorMin != 5 { - t.Error("expected replication factor min == 5") - } - }) - - t.Run("expected disable_repinning", func(t *testing.T) { - cfg := loadJSON(t) - if !cfg.DisableRepinning { - t.Error("expected disable_repinning to be true") - } - }) - - t.Run("expected pin_only_on_trusted_peers", func(t *testing.T) { - cfg := loadJSON(t) - if !cfg.PinOnlyOnTrustedPeers { - t.Error("expected pin_only_on_trusted_peers to be true") - } - }) - - t.Run("expected pin_recover_interval", func(t *testing.T) { - cfg := loadJSON(t) - if cfg.PinRecoverInterval != time.Minute { - t.Error("expected pin_recover_interval of 1m") - } - }) - - t.Run("expected connection_manager", func(t *testing.T) { - cfg := loadJSON(t) - if cfg.ConnMgr.LowWater != 500 { - t.Error("expected low_water to be 500") - } - if cfg.ConnMgr.HighWater != 501 { - t.Error("expected high_water to be 501") - } - if cfg.ConnMgr.GracePeriod != 100*time.Minute { - t.Error("expected grace_period to be 100m") - } - }) - - t.Run("expected peer addresses", func(t *testing.T) { - cfg := loadJSON(t) - if len(cfg.PeerAddresses) != 1 { - t.Error("expected 1 peer
address") - } - }) - - loadJSON2 := func(t *testing.T, f func(j *configJSON)) (*Config, error) { - cfg := &Config{} - j := &configJSON{} - json.Unmarshal(ccfgTestJSON, j) - f(j) - tst, err := json.Marshal(j) - if err != nil { - return cfg, err - } - err = cfg.LoadJSON(tst) - if err != nil { - return cfg, err - } - return cfg, nil - } - - t.Run("empty default peername", func(t *testing.T) { - cfg, err := loadJSON2(t, func(j *configJSON) { j.Peername = "" }) - if err != nil { - t.Error(err) - } - if cfg.Peername == "" { - t.Error("expected default peername") - } - }) - - t.Run("bad listen multiaddress", func(t *testing.T) { - _, err := loadJSON2(t, func(j *configJSON) { j.ListenMultiaddress = config.Strings{"abc"} }) - if err == nil { - t.Error("expected error parsing listen_multiaddress") - } - }) - - t.Run("bad secret", func(t *testing.T) { - _, err := loadJSON2(t, func(j *configJSON) { j.Secret = "abc" }) - if err == nil { - t.Error("expected error decoding secret") - } - }) - - t.Run("default replication factors", func(t *testing.T) { - cfg, err := loadJSON2( - t, - func(j *configJSON) { - j.ReplicationFactorMin = 0 - j.ReplicationFactorMax = 0 - }, - ) - if err != nil { - t.Error(err) - } - if cfg.ReplicationFactorMin != -1 || cfg.ReplicationFactorMax != -1 { - t.Error("expected default replication factor") - } - }) - - t.Run("only replication factor min set to -1", func(t *testing.T) { - _, err := loadJSON2(t, func(j *configJSON) { j.ReplicationFactorMin = -1 }) - if err == nil { - t.Error("expected error when only one replication factor is -1") - } - }) - - t.Run("replication factor min > max", func(t *testing.T) { - _, err := loadJSON2( - t, - func(j *configJSON) { - j.ReplicationFactorMin = 5 - j.ReplicationFactorMax = 4 - }, - ) - if err == nil { - t.Error("expected error when only rplMin > rplMax") - } - }) - - t.Run("default replication factor", func(t *testing.T) { - cfg, err := loadJSON2( - t, - func(j *configJSON) { - j.ReplicationFactorMin = 0 - j.ReplicationFactorMax = 0 - }, - ) - if err != nil { - t.Error(err) - } - if cfg.ReplicationFactorMin != -1 || cfg.ReplicationFactorMax != -1 { - t.Error("expected default replication factors") - } - }) - - t.Run("conn manager default", func(t *testing.T) { - cfg, err := loadJSON2( - t, - func(j *configJSON) { - j.ConnectionManager = nil - }, - ) - if err != nil { - t.Fatal(err) - } - if cfg.ConnMgr.LowWater != DefaultConnMgrLowWater { - t.Error("default conn manager values not set") - } - }) -} - -func TestToJSON(t *testing.T) { - cfg := &Config{} - err := cfg.LoadJSON(ccfgTestJSON) - if err != nil { - t.Fatal(err) - } - newjson, err := cfg.ToJSON() - if err != nil { - t.Fatal(err) - } - cfg = &Config{} - err = cfg.LoadJSON(newjson) - if err != nil { - t.Fatal(err) - } -} - -func TestDefault(t *testing.T) { - cfg := &Config{} - cfg.Default() - if err := cfg.Validate(); err != nil { - t.Fatal(err) - } -} - -func TestApplyEnvVars(t *testing.T) { - os.Setenv("CLUSTER_PEERNAME", "envsetpeername") - cfg := &Config{} - cfg.Default() - cfg.ApplyEnvVars() - if cfg.Peername != "envsetpeername" { - t.Fatal("failed to override peername with env var") - } -} - -func TestValidate(t *testing.T) { - cfg := &Config{} - - cfg.Default() - cfg.MonitorPingInterval = 0 - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } - - cfg.Default() - cfg.ReplicationFactorMin = 10 - cfg.ReplicationFactorMax = 5 - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } - - cfg.Default() - cfg.ReplicationFactorMin = 0 - if 
cfg.Validate() == nil { - t.Fatal("expected error validating") - } - - cfg.Default() - cfg.ConnMgr.GracePeriod = 0 - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } - - cfg.Default() - cfg.PinRecoverInterval = 0 - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } -} diff --git a/packages/networking/ipfs-cluster/cluster_test.go b/packages/networking/ipfs-cluster/cluster_test.go deleted file mode 100644 index 957b3ae..0000000 --- a/packages/networking/ipfs-cluster/cluster_test.go +++ /dev/null @@ -1,1024 +0,0 @@ -package ipfscluster - -import ( - "context" - "errors" - "mime/multipart" - "os" - "path/filepath" - "sync" - "testing" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/adder/sharding" - "github.com/ipfs-cluster/ipfs-cluster/allocator/balanced" - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/config" - "github.com/ipfs-cluster/ipfs-cluster/informer/numpin" - "github.com/ipfs-cluster/ipfs-cluster/monitor/pubsubmon" - "github.com/ipfs-cluster/ipfs-cluster/pintracker/stateless" - "github.com/ipfs-cluster/ipfs-cluster/state" - "github.com/ipfs-cluster/ipfs-cluster/test" - "github.com/ipfs-cluster/ipfs-cluster/version" - - gopath "github.com/ipfs/go-path" - peer "github.com/libp2p/go-libp2p/core/peer" - rpc "github.com/libp2p/go-libp2p-gorpc" -) - -type mockComponent struct { - rpcClient *rpc.Client -} - -func (c *mockComponent) Shutdown(ctx context.Context) error { - return nil -} - -func (c *mockComponent) SetClient(client *rpc.Client) { - c.rpcClient = client -} - -type mockAPI struct { - mockComponent -} - -type mockProxy struct { - mockComponent -} - -type mockConnector struct { - mockComponent - - pins sync.Map - blocks sync.Map -} - -func (ipfs *mockConnector) ID(ctx context.Context) (api.IPFSID, error) { - return api.IPFSID{ - ID: test.PeerID1, - }, nil -} - -func (ipfs *mockConnector) Pin(ctx context.Context, pin api.Pin) error { - if pin.Cid == test.ErrorCid { - return errors.New("trying to pin ErrorCid") - } - ipfs.pins.Store(pin.Cid, pin.MaxDepth) - return nil -} - -func (ipfs *mockConnector) Unpin(ctx context.Context, c api.Cid) error { - ipfs.pins.Delete(c) - return nil -} - -func (ipfs *mockConnector) PinLsCid(ctx context.Context, pin api.Pin) (api.IPFSPinStatus, error) { - dI, ok := ipfs.pins.Load(pin.Cid) - if !ok { - return api.IPFSPinStatusUnpinned, nil - } - depth := dI.(api.PinDepth) - if depth == 0 { - return api.IPFSPinStatusDirect, nil - } - return api.IPFSPinStatusRecursive, nil -} - -func (ipfs *mockConnector) PinLs(ctx context.Context, in []string, out chan<- api.IPFSPinInfo) error { - defer close(out) - - var st api.IPFSPinStatus - ipfs.pins.Range(func(k, v interface{}) bool { - switch v.(api.PinDepth) { - case 0: - st = api.IPFSPinStatusDirect - default: - st = api.IPFSPinStatusRecursive - } - c := k.(api.Cid) - - out <- api.IPFSPinInfo{Cid: api.Cid(c), Type: st} - return true - }) - - return nil -} - -func (ipfs *mockConnector) SwarmPeers(ctx context.Context) ([]peer.ID, error) { - return []peer.ID{test.PeerID4, test.PeerID5}, nil -} - -func (ipfs *mockConnector) RepoStat(ctx context.Context) (api.IPFSRepoStat, error) { - return api.IPFSRepoStat{RepoSize: 100, StorageMax: 1000}, nil -} - -func (ipfs *mockConnector) RepoGC(ctx context.Context) (api.RepoGC, error) { - return api.RepoGC{ - Keys: []api.IPFSRepoGC{ - { - Key: test.Cid1, - }, - }, - }, nil -} - -func (ipfs *mockConnector) Resolve(ctx context.Context, path string) (api.Cid, error) { - _, err := gopath.ParsePath(path) - 
if err != nil { - return api.CidUndef, err - } - - return test.CidResolved, nil -} -func (ipfs *mockConnector) ConnectSwarms(ctx context.Context) error { return nil } -func (ipfs *mockConnector) ConfigKey(keypath string) (interface{}, error) { return nil, nil } - -func (ipfs *mockConnector) BlockStream(ctx context.Context, in <-chan api.NodeWithMeta) error { - for n := range in { - ipfs.blocks.Store(n.Cid.String(), n.Data) - } - return nil -} - -func (ipfs *mockConnector) BlockGet(ctx context.Context, c api.Cid) ([]byte, error) { - d, ok := ipfs.blocks.Load(c.String()) - if !ok { - return nil, errors.New("block not found") - } - return d.([]byte), nil -} - -type mockTracer struct { - mockComponent -} - -func testingCluster(t *testing.T) (*Cluster, *mockAPI, *mockConnector, PinTracker) { - ident, clusterCfg, _, _, _, badgerCfg, levelDBCfg, raftCfg, crdtCfg, statelesstrackerCfg, psmonCfg, _, _, _ := testingConfigs() - ctx := context.Background() - - host, pubsub, dht := createHost(t, ident.PrivateKey, clusterCfg.Secret, clusterCfg.ListenAddr) - - folder := filepath.Join(testsFolder, host.ID().Pretty()) - cleanState() - clusterCfg.SetBaseDir(folder) - raftCfg.DataFolder = folder - badgerCfg.Folder = filepath.Join(folder, "badger") - levelDBCfg.Folder = filepath.Join(folder, "leveldb") - - api := &mockAPI{} - proxy := &mockProxy{} - ipfs := &mockConnector{} - - tracer := &mockTracer{} - - store := makeStore(t, badgerCfg, levelDBCfg) - cons := makeConsensus(t, store, host, pubsub, dht, raftCfg, false, crdtCfg) - tracker := stateless.New(statelesstrackerCfg, ident.ID, clusterCfg.Peername, cons.State) - - var peersF func(context.Context) ([]peer.ID, error) - if consensus == "raft" { - peersF = cons.Peers - } - psmonCfg.CheckInterval = 2 * time.Second - mon, err := pubsubmon.New(ctx, psmonCfg, pubsub, peersF) - if err != nil { - t.Fatal(err) - } - - alloc, err := balanced.New(&balanced.Config{ - AllocateBy: []string{"numpin"}, - }) - if err != nil { - t.Fatal(err) - } - numpinCfg := &numpin.Config{} - numpinCfg.Default() - inf, _ := numpin.NewInformer(numpinCfg) - - ReadyTimeout = raftCfg.WaitForLeaderTimeout + 1*time.Second - - cl, err := NewCluster( - ctx, - host, - dht, - clusterCfg, - store, - cons, - []API{api, proxy}, - ipfs, - tracker, - mon, - alloc, - []Informer{inf}, - tracer, - ) - if err != nil { - t.Fatal("cannot create cluster:", err) - } - <-cl.Ready() - return cl, api, ipfs, tracker -} - -func cleanState() { - os.RemoveAll(testsFolder) -} - -func TestClusterShutdown(t *testing.T) { - ctx := context.Background() - cl, _, _, _ := testingCluster(t) - err := cl.Shutdown(ctx) - if err != nil { - t.Error("cluster shutdown failed:", err) - } - cl.Shutdown(ctx) - cl, _, _, _ = testingCluster(t) - err = cl.Shutdown(ctx) - if err != nil { - t.Error("cluster shutdown failed:", err) - } -} - -func TestClusterStateSync(t *testing.T) { - ctx := context.Background() - cleanState() - cl, _, _, _ := testingCluster(t) - defer cleanState() - defer cl.Shutdown(ctx) - - c := test.Cid1 - _, err := cl.Pin(ctx, c, api.PinOptions{}) - if err != nil { - t.Fatal("pin should have worked:", err) - } - - err = cl.StateSync(ctx) - if err != nil { - t.Fatal("sync after pinning should have worked:", err) - } - - // Modify state on the side so the sync does not - // happen on an empty slate - st, err := cl.consensus.State(ctx) - if err != nil { - t.Fatal(err) - } - st.(state.State).Rm(ctx, c) - err = cl.StateSync(ctx) - if err != nil { - t.Fatal("sync with recover should have worked:", err) - } -} - -func
TestClusterID(t *testing.T) { - ctx := context.Background() - cl, _, _, _ := testingCluster(t) - defer cleanState() - defer cl.Shutdown(ctx) - id := cl.ID(ctx) - if len(id.Addresses) == 0 { - t.Error("expected more addresses") - } - if id.ID == "" { - t.Error("expected a cluster ID") - } - if id.Version != version.Version.String() { - t.Error("version should match current version") - } - //if id.PublicKey == nil { - // t.Error("publicKey should not be empty") - //} -} - -func TestClusterPin(t *testing.T) { - ctx := context.Background() - cl, _, _, _ := testingCluster(t) - defer cleanState() - defer cl.Shutdown(ctx) - - c := test.Cid1 - res, err := cl.Pin(ctx, c, api.PinOptions{}) - if err != nil { - t.Fatal("pin should have worked:", err) - } - - if res.Type != api.DataType { - t.Error("unexpected pin type") - } - - switch consensus { - case "crdt": - return - case "raft": - // test an error case - cl.consensus.Shutdown(ctx) - opts := api.PinOptions{ - ReplicationFactorMax: 1, - ReplicationFactorMin: 1, - } - _, err = cl.Pin(ctx, c, opts) - if err == nil { - t.Error("expected an error but things worked") - } - } -} - -func TestPinExpired(t *testing.T) { - ctx := context.Background() - cl, _, _, _ := testingCluster(t) - defer cleanState() - defer cl.Shutdown(ctx) - - c := test.Cid1 - _, err := cl.Pin(ctx, c, api.PinOptions{ - ExpireAt: time.Now(), - }) - if err == nil { - t.Fatal("pin should have errored") - } -} - -func TestClusterPinPath(t *testing.T) { - ctx := context.Background() - cl, _, _, _ := testingCluster(t) - defer cleanState() - defer cl.Shutdown(ctx) - - pin, err := cl.PinPath(ctx, test.PathIPFS2, api.PinOptions{}) - if err != nil { - t.Fatal("pin should have worked:", err) - } - if !pin.Cid.Equals(test.CidResolved) { - t.Error("expected a different cid, found", pin.Cid.String()) - } - - // test an error case - _, err = cl.PinPath(ctx, test.InvalidPath1, api.PinOptions{}) - if err == nil { - t.Error("expected an error but things worked") - } -} - -func TestAddFile(t *testing.T) { - ctx := context.Background() - cl, _, _, _ := testingCluster(t) - defer cleanState() - defer cl.Shutdown(ctx) - sth := test.NewShardingTestHelper() - defer sth.Clean(t) - - t.Run("local", func(t *testing.T) { - params := api.DefaultAddParams() - params.Shard = false - params.Name = "testlocal" - mfr, closer := sth.GetTreeMultiReader(t) - defer closer.Close() - r := multipart.NewReader(mfr, mfr.Boundary()) - c, err := cl.AddFile(context.Background(), r, params) - if err != nil { - t.Fatal(err) - } - if c.String() != test.ShardingDirBalancedRootCID { - t.Fatal("unexpected root CID for local add") - } - - pinDelay() - - pin := cl.StatusLocal(ctx, c) - if pin.Error != "" { - t.Fatal(pin.Error) - } - if pin.Status != api.TrackerStatusPinned { - t.Error("cid should be pinned") - } - - cl.Unpin(ctx, c) // unpin so we can pin the shard in next test - pinDelay() - }) - - t.Run("shard", func(t *testing.T) { - params := api.DefaultAddParams() - params.Shard = true - params.Name = "testshard" - mfr, closer := sth.GetTreeMultiReader(t) - defer closer.Close() - r := multipart.NewReader(mfr, mfr.Boundary()) - c, err := cl.AddFile(context.Background(), r, params) - if err != nil { - t.Fatal(err) - } - - if c.String() != test.ShardingDirBalancedRootCID { - t.Fatal("unexpected root CID for local add") - } - - pinDelay() - - // We know that this produces 14 shards. 
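TestAddFile above drives cluster.AddFile with a multipart.Reader, the same wire format the REST API's /add endpoint speaks; the sharding.VerifyShards assertion that the comment announces follows right after this block. A standard-library sketch of building and consuming such a multipart body (file name and contents are made up):

```
package main

import (
	"bytes"
	"fmt"
	"io"
	"mime/multipart"
)

func main() {
	// Build a multipart body with one "file" part, as an /add request would.
	var buf bytes.Buffer
	w := multipart.NewWriter(&buf)
	part, err := w.CreateFormFile("file", "hello.txt")
	if err != nil {
		panic(err)
	}
	io.WriteString(part, "hello cluster")
	w.Close()

	// Read it back the way AddFile consumes it: part by part.
	r := multipart.NewReader(&buf, w.Boundary())
	for {
		p, err := r.NextPart()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		data, _ := io.ReadAll(p)
		fmt.Printf("part %q: %s\n", p.FileName(), data)
	}
}
```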
- sharding.VerifyShards(t, c, cl, cl.ipfs, 14) - }) -} - -func TestUnpinShard(t *testing.T) { - ctx := context.Background() - cl, _, _, _ := testingCluster(t) - defer cleanState() - defer cl.Shutdown(ctx) - sth := test.NewShardingTestHelper() - defer sth.Clean(t) - - params := api.DefaultAddParams() - params.Shard = true - params.Name = "testshard" - mfr, closer := sth.GetTreeMultiReader(t) - defer closer.Close() - r := multipart.NewReader(mfr, mfr.Boundary()) - root, err := cl.AddFile(context.Background(), r, params) - if err != nil { - t.Fatal(err) - } - - pinDelay() - - // We know that this produces 14 shards. - sharding.VerifyShards(t, root, cl, cl.ipfs, 14) - - // skipping errors, VerifyShards has checked - pinnedCids := []api.Cid{} - pinnedCids = append(pinnedCids, root) - metaPin, _ := cl.PinGet(ctx, root) - cDag, _ := cl.PinGet(ctx, *metaPin.Reference) - pinnedCids = append(pinnedCids, cDag.Cid) - cDagBlock, _ := cl.ipfs.BlockGet(ctx, cDag.Cid) - cDagNode, _ := sharding.CborDataToNode(cDagBlock, "cbor") - for _, l := range cDagNode.Links() { - pinnedCids = append(pinnedCids, api.NewCid(l.Cid)) - } - - t.Run("unpin clusterdag should fail", func(t *testing.T) { - _, err := cl.Unpin(ctx, cDag.Cid) - if err == nil { - t.Fatal("should not allow unpinning the cluster DAG directly") - } - t.Log(err) - }) - - t.Run("unpin shard should fail", func(t *testing.T) { - _, err := cl.Unpin(ctx, api.NewCid(cDagNode.Links()[0].Cid)) - if err == nil { - t.Fatal("should not allow unpinning shards directly") - } - t.Log(err) - }) - - t.Run("normal unpin", func(t *testing.T) { - res, err := cl.Unpin(ctx, root) - if err != nil { - t.Fatal(err) - } - - if res.Type != api.MetaType { - t.Fatal("unexpected root pin type") - } - - pinDelay() - - for _, c := range pinnedCids { - st := cl.StatusLocal(ctx, c) - if st.Status != api.TrackerStatusUnpinned { - t.Errorf("%s should have been unpinned but is %s", c, st.Status) - } - - st2, err := cl.ipfs.PinLsCid(context.Background(), api.PinCid(c)) - if err != nil { - t.Fatal(err) - } - if st2 != api.IPFSPinStatusUnpinned { - t.Errorf("%s should have been unpinned in ipfs but is %d", c, st2) - } - } - }) -} - -// func singleShardedPin(t *testing.T, cl *Cluster) { -// cShard, _ := cid.Decode(test.ShardCid) -// cCdag, _ := cid.Decode(test.CdagCid) -// cMeta, _ := cid.Decode(test.MetaRootCid) -// pinMeta(t, cl, []api.NewCid(cShard), cCdag, cMeta) -// } - -// func pinMeta(t *testing.T, cl *Cluster, shardCids []api.Cid, cCdag, cMeta api.Cid) { -// for _, cShard := range shardCids { -// shardPin := api.Pin{ -// Cid: cShard, -// Type: api.ShardType, -// MaxDepth: 1, -// PinOptions: api.PinOptions{ -// ReplicationFactorMin: -1, -// ReplicationFactorMax: -1, -// }, -// } -// err := cl.Pin(shardPin) -// if err != nil { -// t.Fatal("shard pin should have worked:", err) -// } -// } - -// parents := cid.NewSet() -// parents.Add(cMeta) -// cdagPin := api.Pin{ -// Cid: cCdag, -// Type: api.ClusterDAGType, -// MaxDepth: 0, -// PinOptions: api.PinOptions{ -// ReplicationFactorMin: -1, -// ReplicationFactorMax: -1, -// }, -// } -// err := cl.Pin(cdagPin) -// if err != nil { -// t.Fatal("pin should have worked:", err) -// } - -// metaPin := api.Pin{ -// Cid: cMeta, -// Type: api.MetaType, -// Clusterdag: cCdag, -// } -// err = cl.Pin(metaPin) -// if err != nil { -// t.Fatal("pin should have worked:", err) -// } -// } - -// func TestClusterPinMeta(t *testing.T) { -// cl, _, _, _ := testingCluster(t) -// defer cleanState() -// defer cl.Shutdown() - -// singleShardedPin(t, cl) -// } - -// 
func TestClusterUnpinShardFail(t *testing.T) { -// cl, _, _, _ := testingCluster(t) -// defer cleanState() -// defer cl.Shutdown() - -// singleShardedPin(t, cl) -// // verify pins -// if len(cl.Pins()) != 3 { -// t.Fatal("should have 3 pins") -// } -// // Unpinning metadata should fail -// cShard, _ := cid.Decode(test.ShardCid) -// cCdag, _ := cid.Decode(test.CdagCid) - -// err := cl.Unpin(cShard) -// if err == nil { -// t.Error("should error when unpinning shard") -// } -// err = cl.Unpin(cCdag) -// if err == nil { -// t.Error("should error when unpinning cluster dag") -// } -// } - -// func TestClusterUnpinMeta(t *testing.T) { -// cl, _, _, _ := testingCluster(t) -// defer cleanState() -// defer cl.Shutdown() - -// singleShardedPin(t, cl) -// // verify pins -// if len(cl.Pins()) != 3 { -// t.Fatal("should have 3 pins") -// } -// // Unpinning from root should work -// cMeta, _ := cid.Decode(test.MetaRootCid) - -// err := cl.Unpin(cMeta) -// if err != nil { -// t.Error(err) -// } -// } - -// func pinTwoParentsOneShard(t *testing.T, cl *Cluster) { -// singleShardedPin(t, cl) - -// cShard, _ := cid.Decode(test.ShardCid) -// cShard2, _ := cid.Decode(test.ShardCid2) -// cCdag2, _ := cid.Decode(test.CdagCid2) -// cMeta2, _ := cid.Decode(test.MetaRootCid2) -// pinMeta(t, cl, []api.Cid{cShard, cShard2}, cCdag2, cMeta2) - -// shardPin, err := cl.PinGet(cShard) -// if err != nil { -// t.Fatal("pin should be in state") -// } -// if shardPin.Parents.Len() != 2 { -// t.Fatal("unexpected parent set in shared shard") -// } - -// shardPin2, err := cl.PinGet(cShard2) -// if shardPin2.Parents.Len() != 1 { -// t.Fatal("unexpected parent set in unshared shard") -// } -// if err != nil { -// t.Fatal("pin should be in state") -// } -// } - -// func TestClusterPinShardTwoParents(t *testing.T) { -// cl, _, _, _ := testingCluster(t) -// defer cleanState() -// defer cl.Shutdown() - -// pinTwoParentsOneShard(t, cl) - -// cShard, _ := cid.Decode(test.ShardCid) -// shardPin, err := cl.PinGet(cShard) -// if err != nil { -// t.Fatal("double pinned shard should be pinned") -// } -// if shardPin.Parents == nil || shardPin.Parents.Len() != 2 { -// t.Fatal("double pinned shard should have two parents") -// } -// } - -// func TestClusterUnpinShardSecondParent(t *testing.T) { -// cl, _, _, _ := testingCluster(t) -// defer cleanState() -// defer cl.Shutdown() - -// pinTwoParentsOneShard(t, cl) -// if len(cl.Pins()) != 6 { -// t.Fatal("should have 6 pins") -// } -// cMeta2, _ := cid.Decode(test.MetaRootCid2) -// err := cl.Unpin(cMeta2) -// if err != nil { -// t.Error(err) -// } - -// pinDelay() - -// if len(cl.Pins()) != 3 { -// t.Fatal("should have 3 pins") -// } - -// cShard, _ := cid.Decode(test.ShardCid) -// cCdag, _ := cid.Decode(test.CdagCid) -// shardPin, err := cl.PinGet(cShard) -// if err != nil { -// t.Fatal("double pinned shard node should still be pinned") -// } -// if shardPin.Parents == nil || shardPin.Parents.Len() != 1 || -// !shardPin.Parents.Has(cCdag) { -// t.Fatalf("shard node should have single original parent %v", shardPin.Parents.Keys()) -// } -// } - -// func TestClusterUnpinShardFirstParent(t *testing.T) { -// cl, _, _, _ := testingCluster(t) -// defer cleanState() -// defer cl.Shutdown() - -// pinTwoParentsOneShard(t, cl) -// if len(cl.Pins()) != 6 { -// t.Fatal("should have 6 pins") -// } - -// cMeta, _ := cid.Decode(test.MetaRootCid) -// err := cl.Unpin(cMeta) -// if err != nil { -// t.Error(err) -// } -// if len(cl.Pins()) != 4 { -// t.Fatal("should have 4 pins") -// } - -// cShard, _ := 
cid.Decode(test.ShardCid) -// cShard2, _ := cid.Decode(test.ShardCid2) -// cCdag2, _ := cid.Decode(test.CdagCid2) -// shardPin, err := cl.PinGet(cShard) -// if err != nil { -// t.Fatal("double pinned shard node should still be pinned") -// } -// if shardPin.Parents == nil || shardPin.Parents.Len() != 1 || -// !shardPin.Parents.Has(cCdag2) { -// t.Fatal("shard node should have single original parent") -// } -// _, err = cl.PinGet(cShard2) -// if err != nil { -// t.Fatal("other shard shoud still be pinned too") -// } -// } - -// func TestClusterPinTwoMethodsFail(t *testing.T) { -// cl, _, _, _ := testingCluster(t) -// defer cleanState() -// defer cl.Shutdown() - -// // First pin normally then sharding pin fails -// c, _ := cid.Decode(test.MetaRootCid) -// err := cl.Pin(api.PinCid(c)) -// if err != nil { -// t.Fatal("pin should have worked:", err) -// } - -// cCdag, _ := cid.Decode(test.CdagCid) -// cMeta, _ := cid.Decode(test.MetaRootCid) -// metaPin := api.Pin{ -// Cid: cMeta, -// Type: api.MetaType, -// Clusterdag: cCdag, -// } -// err = cl.Pin(metaPin) -// if err == nil { -// t.Fatal("pin should have failed:", err) -// } - -// err = cl.Unpin(c) -// if err != nil { -// t.Fatal("unpin should have worked:", err) -// } - -// singleShardedPin(t, cl) -// err = cl.Pin(api.PinCid(c)) -// if err == nil { -// t.Fatal("pin should have failed:", err) -// } -// } - -// func TestClusterRePinShard(t *testing.T) { -// cl, _, _, _ := testingCluster(t) -// defer cleanState() -// defer cl.Shutdown() - -// cCdag, _ := cid.Decode(test.CdagCid) -// cShard, _ := cid.Decode(test.ShardCid) -// shardPin := api.Pin{ -// Cid: cShard, -// Type: api.ShardType, -// ReplicationFactorMin: -1, -// ReplicationFactorMax: -1, -// Recursive: true, -// } -// err := cl.Pin(shardPin) -// if err != nil { -// t.Fatal("shard pin should have worked:", err) -// } - -// parents := cid.NewSet() -// parents.Add(cCdag) -// shardPin.Parents = parents -// err = cl.Pin(shardPin) -// if err != nil { -// t.Fatal("repinning shard pin with different parents should have worked:", err) -// } - -// shardPin.ReplicationFactorMin = 3 -// shardPin.ReplicationFactorMax = 5 -// err = cl.Pin(shardPin) -// if err == nil { -// t.Fatal("repinning shard pin with different repl factors should have failed:", err) -// } -// } - -func TestClusterPins(t *testing.T) { - ctx := context.Background() - cl, _, _, _ := testingCluster(t) - defer cleanState() - defer cl.Shutdown(ctx) - - c := test.Cid1 - _, err := cl.Pin(ctx, c, api.PinOptions{}) - if err != nil { - t.Fatal("pin should have worked:", err) - } - - pinDelay() - - pins, err := cl.pinsSlice(ctx) - if err != nil { - t.Fatal(err) - } - if len(pins) != 1 { - t.Fatal("pin should be part of the state") - } - if !pins[0].Cid.Equals(c) || pins[0].ReplicationFactorMin != -1 || pins[0].ReplicationFactorMax != -1 { - t.Error("the Pin does not look as expected") - } -} - -func TestClusterPinGet(t *testing.T) { - ctx := context.Background() - cl, _, _, _ := testingCluster(t) - defer cleanState() - defer cl.Shutdown(ctx) - - c := test.Cid1 - _, err := cl.Pin(ctx, c, api.PinOptions{}) - if err != nil { - t.Fatal("pin should have worked:", err) - } - - pin, err := cl.PinGet(ctx, c) - if err != nil { - t.Fatal(err) - } - if !pin.Cid.Equals(c) || pin.ReplicationFactorMin != -1 || pin.ReplicationFactorMax != -1 { - t.Error("the Pin does not look as expected") - } - - _, err = cl.PinGet(ctx, test.Cid2) - if err == nil { - t.Fatal("expected an error") - } -} - -func TestClusterUnpin(t *testing.T) { - ctx := 
context.Background() - cl, _, _, _ := testingCluster(t) - defer cleanState() - defer cl.Shutdown(ctx) - - c := test.Cid1 - // Unpin should error without pin being committed to state - _, err := cl.Unpin(ctx, c) - if err == nil { - t.Error("unpin should have failed") - } - - // Unpin after pin should succeed - _, err = cl.Pin(ctx, c, api.PinOptions{}) - if err != nil { - t.Fatal("pin should have worked:", err) - } - res, err := cl.Unpin(ctx, c) - if err != nil { - t.Error("unpin should have worked:", err) - } - - if res.Type != api.DataType { - t.Error("unexpected pin type returned") - } - - // test another error case - cl.consensus.Shutdown(ctx) - _, err = cl.Unpin(ctx, c) - if err == nil { - t.Error("expected an error but things worked") - } -} - -func TestClusterUnpinPath(t *testing.T) { - ctx := context.Background() - cl, _, _, _ := testingCluster(t) - defer cleanState() - defer cl.Shutdown(ctx) - - // Unpin should error without pin being committed to state - _, err := cl.UnpinPath(ctx, test.PathIPFS2) - if err == nil { - t.Error("unpin with path should have failed") - } - - // Unpin after pin should succeed - pin, err := cl.PinPath(ctx, test.PathIPFS2, api.PinOptions{}) - if err != nil { - t.Fatal("pin with path should have worked:", err) - } - if !pin.Cid.Equals(test.CidResolved) { - t.Error("expected a different cid, found", pin.Cid.String()) - } - - pin, err = cl.UnpinPath(ctx, test.PathIPFS2) - if err != nil { - t.Error("unpin with path should have worked:", err) - } - if !pin.Cid.Equals(test.CidResolved) { - t.Error("expected a different cid, found", pin.Cid.String()) - } -} - -func TestClusterPeers(t *testing.T) { - ctx := context.Background() - cl, _, _, _ := testingCluster(t) - defer cleanState() - defer cl.Shutdown(ctx) - - out := make(chan api.ID, 10) - cl.Peers(ctx, out) - if len(out) != 1 { - t.Fatal("expected 1 peer") - } - - ident := &config.Identity{} - err := ident.LoadJSON(testingIdentity) - if err != nil { - t.Fatal(err) - } - - p := <-out - if p.ID != ident.ID { - t.Error("bad member") - } -} - -func TestVersion(t *testing.T) { - ctx := context.Background() - cl, _, _, _ := testingCluster(t) - defer cleanState() - defer cl.Shutdown(ctx) - if cl.Version() != version.Version.String() { - t.Error("bad Version()") - } -} - -func TestClusterRecoverAllLocal(t *testing.T) { - ctx := context.Background() - cl, _, _, _ := testingCluster(t) - defer cleanState() - defer cl.Shutdown(ctx) - - _, err := cl.Pin(ctx, test.ErrorCid, api.PinOptions{}) - if err != nil { - t.Fatal("pin should have worked:", err) - } - - pinDelay() - - out := make(chan api.PinInfo, 10) - go func() { - err := cl.RecoverAllLocal(ctx, out) - if err != nil { - t.Error("did not expect an error") - } - }() - - recov := collectPinInfos(t, out) - - if len(recov) != 1 { - t.Fatalf("there should be one pin recovered, got = %d", len(recov)) - } - // Recovery will fail, but the pin appearing in the response is good enough to know it was requeued. 
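TestClusterRecoverAllLocal above (its closing brace follows this block) shows a convention used throughout these deleted files: methods such as Peers and RecoverAllLocal stream results into a caller-supplied channel, the producer closes the channel when done, and the caller ranges until it drains. A generic sketch of that shape with a hypothetical Item type:

```
package main

import "fmt"

type Item struct{ ID int }

// recoverAll streams results and closes the channel when done,
// mirroring the RecoverAllLocal(ctx, out) convention above.
func recoverAll(out chan<- Item) error {
	defer close(out)
	for i := 1; i <= 3; i++ {
		out <- Item{ID: i}
	}
	return nil
}

func main() {
	out := make(chan Item, 10)
	go func() {
		if err := recoverAll(out); err != nil {
			fmt.Println("error:", err)
		}
	}()
	// Collect until the producer closes the channel.
	var collected []Item
	for it := range out {
		collected = append(collected, it)
	}
	fmt.Println("recovered:", len(collected))
}
```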
-} - -func TestClusterRepoGC(t *testing.T) { - ctx := context.Background() - cl, _, _, _ := testingCluster(t) - defer cleanState() - defer cl.Shutdown(ctx) - - gRepoGC, err := cl.RepoGC(ctx) - if err != nil { - t.Fatal("gc should have worked:", err) - } - - if gRepoGC.PeerMap == nil { - t.Fatal("expected a non-nil peer map") - } - - if len(gRepoGC.PeerMap) != 1 { - t.Error("expected repo gc information for one peer") - } - for _, repoGC := range gRepoGC.PeerMap { - testRepoGC(t, repoGC) - } - -} - -func TestClusterRepoGCLocal(t *testing.T) { - ctx := context.Background() - cl, _, _, _ := testingCluster(t) - defer cleanState() - defer cl.Shutdown(ctx) - - repoGC, err := cl.RepoGCLocal(ctx) - if err != nil { - t.Fatal("gc should have worked:", err) - } - - testRepoGC(t, repoGC) -} - -func testRepoGC(t *testing.T, repoGC api.RepoGC) { - if repoGC.Peer == "" { - t.Error("expected a cluster ID") - } - if repoGC.Error != "" { - t.Error("did not expect any error") - } - - if repoGC.Keys == nil { - t.Fatal("expected a non-nil array of IPFSRepoGC") - } - - if len(repoGC.Keys) == 0 { - t.Fatal("expected at least one key, but found none") - } - - if !repoGC.Keys[0].Key.Equals(test.Cid1) { - t.Errorf("expected a different cid, expected: %s, found: %s", test.Cid1, repoGC.Keys[0].Key) - } -} diff --git a/packages/networking/ipfs-cluster/clusterhost.go b/packages/networking/ipfs-cluster/clusterhost.go deleted file mode 100644 index 6bbf258..0000000 --- a/packages/networking/ipfs-cluster/clusterhost.go +++ /dev/null @@ -1,169 +0,0 @@ -package ipfscluster - -import ( - "context" - "encoding/hex" - - config "github.com/ipfs-cluster/ipfs-cluster/config" - ds "github.com/ipfs/go-datastore" - namespace "github.com/ipfs/go-datastore/namespace" - ipns "github.com/ipfs/go-ipns" - libp2p "github.com/libp2p/go-libp2p" - crypto "github.com/libp2p/go-libp2p/core/crypto" - host "github.com/libp2p/go-libp2p/core/host" - network "github.com/libp2p/go-libp2p/core/network" - corepnet "github.com/libp2p/go-libp2p/core/pnet" - routing "github.com/libp2p/go-libp2p/core/routing" - dht "github.com/libp2p/go-libp2p-kad-dht" - dual "github.com/libp2p/go-libp2p-kad-dht/dual" - pubsub "github.com/libp2p/go-libp2p-pubsub" - record "github.com/libp2p/go-libp2p-record" - connmgr "github.com/libp2p/go-libp2p/p2p/net/connmgr" - identify "github.com/libp2p/go-libp2p/p2p/protocol/identify" - noise "github.com/libp2p/go-libp2p/p2p/security/noise" - libp2ptls "github.com/libp2p/go-libp2p/p2p/security/tls" - libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic" - tcp "github.com/libp2p/go-libp2p/p2p/transport/tcp" - websocket "github.com/libp2p/go-libp2p/p2p/transport/websocket" -) - -const dhtNamespace = "dht" - -var _ = libp2pquic.NewTransport - -func init() { - // Cluster peers should advertise their public IPs as soon as they - // learn about them. Default for this is 4, which prevents clusters - // with fewer than 4 peers from advertising an external address they - // know of, so they cannot be remembered by other peers asap. This - // affects dockerized setups mostly. This may announce non-dialable - // NATed addresses too eagerly, but they should progressively be - // cleaned up. - identify.ActivationThresh = 1 -} - -// NewClusterHost creates a fully-featured libp2p Host with the options from -// the provided cluster configuration. Using that host, it creates pubsub and -// DHT instances (persisting to the given datastore), for shared use by all -// cluster components. The returned host uses the DHT for routing.
Relay and -// NATService are additionally set up for this host. -func NewClusterHost( - ctx context.Context, - ident *config.Identity, - cfg *Config, - ds ds.Datastore, -) (host.Host, *pubsub.PubSub, *dual.DHT, error) { - - // Set the default dial timeout for all libp2p connections. It is not - // very good to touch this global variable here, but the alternative - // is to use a modified context everywhere, even if the user supplies - // it. - network.DialPeerTimeout = cfg.DialPeerTimeout - - connman, err := connmgr.NewConnManager(cfg.ConnMgr.LowWater, cfg.ConnMgr.HighWater, connmgr.WithGracePeriod(cfg.ConnMgr.GracePeriod)) - if err != nil { - return nil, nil, nil, err - } - - var idht *dual.DHT - opts := []libp2p.Option{ - libp2p.ListenAddrs(cfg.ListenAddr...), - libp2p.NATPortMap(), - libp2p.ConnectionManager(connman), - libp2p.Routing(func(h host.Host) (routing.PeerRouting, error) { - idht, err = newDHT(ctx, h, ds) - return idht, err - }), - libp2p.EnableNATService(), - libp2p.EnableRelay(), - libp2p.EnableAutoRelay(), - libp2p.EnableHolePunching(), - } - - if cfg.EnableRelayHop { - opts = append(opts, libp2p.EnableRelayService()) - } - - h, err := newHost( - ctx, - cfg.Secret, - ident.PrivateKey, - opts..., - ) - if err != nil { - return nil, nil, nil, err - } - - psub, err := newPubSub(ctx, h) - if err != nil { - h.Close() - return nil, nil, nil, err - } - - return h, psub, idht, nil -} - -// newHost creates a base cluster host without dht, pubsub, relay or nat etc. -// mostly used for testing. -func newHost(ctx context.Context, psk corepnet.PSK, priv crypto.PrivKey, opts ...libp2p.Option) (host.Host, error) { - finalOpts := []libp2p.Option{ - libp2p.Identity(priv), - } - finalOpts = append(finalOpts, baseOpts(psk)...) - finalOpts = append(finalOpts, opts...) - - h, err := libp2p.New( - finalOpts..., - ) - if err != nil { - return nil, err - } - - return h, nil -} - -func baseOpts(psk corepnet.PSK) []libp2p.Option { - return []libp2p.Option{ - libp2p.PrivateNetwork(psk), - libp2p.EnableNATService(), - libp2p.Security(noise.ID, noise.New), - libp2p.Security(libp2ptls.ID, libp2ptls.New), - // TODO: quic does not support private networks - // libp2p.DefaultTransports, - libp2p.NoTransports, - libp2p.Transport(tcp.NewTCPTransport), - libp2p.Transport(websocket.New), - } -} - -func newDHT(ctx context.Context, h host.Host, store ds.Datastore, extraopts ...dual.Option) (*dual.DHT, error) { - opts := []dual.Option{ - dual.DHTOption(dht.NamespacedValidator("pk", record.PublicKeyValidator{})), - dual.DHTOption(dht.NamespacedValidator("ipns", ipns.Validator{KeyBook: h.Peerstore()})), - dual.DHTOption(dht.Concurrency(10)), - } - - opts = append(opts, extraopts...) - - if batchingDs, ok := store.(ds.Batching); ok { - dhtDatastore := namespace.Wrap(batchingDs, ds.NewKey(dhtNamespace)) - opts = append(opts, dual.DHTOption(dht.Datastore(dhtDatastore))) - logger.Debug("enabling DHT record persistence to datastore") - } - - return dual.New(ctx, h, opts...) -} - -func newPubSub(ctx context.Context, h host.Host) (*pubsub.PubSub, error) { - return pubsub.NewGossipSub( - ctx, - h, - pubsub.WithMessageSigning(true), - pubsub.WithStrictSignatureVerification(true), - ) -} - -// EncodeProtectorKey converts a byte slice to its hex string representation.
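The host construction deleted above hinges on the 32-byte cluster secret: baseOpts feeds it to libp2p.PrivateNetwork as a pre-shared key, and EncodeProtectorKey (whose one-line body follows) is the hex encoder paired with DecodeClusterSecret in cluster_config.go. A cut-down sketch of generating such a key and building a private-network host with a bounded connection manager; it assumes a go-libp2p version close to the one vendored here, and the water marks and listen address are arbitrary examples:

```
package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"

	"github.com/libp2p/go-libp2p"
	connmgr "github.com/libp2p/go-libp2p/p2p/net/connmgr"
)

func main() {
	// A cluster secret is 32 random bytes; peers share it hex-encoded.
	psk := make([]byte, 32)
	if _, err := rand.Read(psk); err != nil {
		panic(err)
	}
	fmt.Println("shared secret:", hex.EncodeToString(psk))

	// Bounded connection manager, as NewClusterHost sets up above.
	cm, err := connmgr.NewConnManager(100, 400)
	if err != nil {
		panic(err)
	}

	h, err := libp2p.New(
		libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"),
		libp2p.PrivateNetwork(psk), // only peers holding the same PSK can connect
		libp2p.ConnectionManager(cm),
	)
	if err != nil {
		panic(err)
	}
	defer h.Close()
	fmt.Println("host ID:", h.ID())
}
```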
-func EncodeProtectorKey(secretBytes []byte) string { - return hex.EncodeToString(secretBytes) -} diff --git a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-ctl/Makefile b/packages/networking/ipfs-cluster/cmd/ipfs-cluster-ctl/Makefile deleted file mode 100644 index f46d0f1..0000000 --- a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-ctl/Makefile +++ /dev/null @@ -1,19 +0,0 @@ -# go source files -SRC := $(shell find ../.. -type f -name '*.go') -GOPATH := $(shell go env GOPATH) -GOFLAGS := "-trimpath" - -all: ipfs-cluster-ctl - -ipfs-cluster-ctl: $(SRC) - go build $(GOFLAGS) -mod=readonly - -build: ipfs-cluster-ctl - -install: - go install $(GOFLAGS) - -clean: - rm -f ipfs-cluster-ctl - -.PHONY: clean install build diff --git a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-ctl/dist/LICENSE b/packages/networking/ipfs-cluster/cmd/ipfs-cluster-ctl/dist/LICENSE deleted file mode 100644 index 0020f2a..0000000 --- a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-ctl/dist/LICENSE +++ /dev/null @@ -1,5 +0,0 @@ -Dual-licensed under MIT and ASLv2, by way of the [Permissive License -Stack](https://protocol.ai/blog/announcing-the-permissive-license-stack/). - -Apache-2.0: https://www.apache.org/licenses/license-2.0 -MIT: https://www.opensource.org/licenses/mit diff --git a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-ctl/dist/LICENSE-APACHE b/packages/networking/ipfs-cluster/cmd/ipfs-cluster-ctl/dist/LICENSE-APACHE deleted file mode 100644 index 22608cf..0000000 --- a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-ctl/dist/LICENSE-APACHE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2020. Protocol Labs, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-ctl/dist/LICENSE-MIT b/packages/networking/ipfs-cluster/cmd/ipfs-cluster-ctl/dist/LICENSE-MIT deleted file mode 100644 index c6134ad..0000000 --- a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-ctl/dist/LICENSE-MIT +++ /dev/null @@ -1,19 +0,0 @@ -Copyright 2020. Protocol Labs, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-ctl/dist/README.md b/packages/networking/ipfs-cluster/cmd/ipfs-cluster-ctl/dist/README.md deleted file mode 100644 index c0b58c2..0000000 --- a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-ctl/dist/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# `ipfs-cluster-ctl` - -> IPFS cluster management tool - -`ipfs-cluster-ctl` is the client application to manage the cluster nodes and perform actions. `ipfs-cluster-ctl` uses the HTTP API provided by the nodes and it is completely separate from the cluster service. - -### Usage - -Usage information can be obtained by running: - -``` -$ ipfs-cluster-ctl --help -``` - -You can also obtain command-specific help with `ipfs-cluster-ctl help [cmd]`. The (`--host`) can be used to talk to any remote cluster peer (`localhost` is used by default). - -For more information, please check the [Documentation](https://ipfscluster.io/documentation), in particular the [`ipfs-cluster-ctl` section](https://ipfscluster.io/documentation/ipfs-cluster-ctl). diff --git a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-ctl/formatters.go b/packages/networking/ipfs-cluster/cmd/ipfs-cluster-ctl/formatters.go deleted file mode 100644 index 1ea9e27..0000000 --- a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-ctl/formatters.go +++ /dev/null @@ -1,329 +0,0 @@ -package main - -import ( - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" - "strings" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/api" - - humanize "github.com/dustin/go-humanize" -) - -type addedOutputQuiet struct { - api.AddedOutput - quiet bool -} - -func jsonFormatObject(resp interface{}) { - switch r := resp.(type) { - case nil: - return - case []addedOutputQuiet: - // print original objects as in JSON it makes - // no sense to have a human "quiet" output - var actual []api.AddedOutput - for _, s := range r { - actual = append(actual, s.AddedOutput) - } - jsonFormatPrint(actual) - default: - jsonFormatPrint(resp) - } -} - -func jsonFormatPrint(obj interface{}) { - print := func(o interface{}) { - j, err := json.MarshalIndent(o, "", " ") - checkErr("generating json output", err) - fmt.Printf("%s\n", j) - } - - switch r := obj.(type) { - case chan api.Pin: - for o := range r { - print(o) - } - case chan api.GlobalPinInfo: - for o := range r { - print(o) - } - case chan api.ID: - for o := range r { - print(o) - } - default: - print(obj) - } - -} - -func textFormatObject(resp interface{}) { - switch r := resp.(type) { - case nil: - return - case string: - fmt.Println(resp) - case api.ID: - textFormatPrintID(r) - case api.GlobalPinInfo: - textFormatPrintGPInfo(r) - case api.Pin: - textFormatPrintPin(r) - case api.AddedOutput: - textFormatPrintAddedOutput(r) - case addedOutputQuiet: - textFormatPrintAddedOutputQuiet(r) - case api.Version: - textFormatPrintVersion(r) - case api.Error: - textFormatPrintError(r) - case api.Metric: - textFormatPrintMetric(r) - case api.Alert: - textFormatPrintAlert(r) - case chan api.ID: - for item := range r { - textFormatObject(item) - } - case chan api.GlobalPinInfo: - for item := range r { - textFormatObject(item) - } - case chan api.Pin: - for item := range r { - textFormatObject(item) - } - case []api.AddedOutput: - for _, item := range r { - 
textFormatObject(item) - } - case []addedOutputQuiet: - for _, item := range r { - textFormatObject(item) - } - case []api.Metric: - for _, item := range r { - textFormatObject(item) - } - case api.GlobalRepoGC: - textFormatPrintGlobalRepoGC(r) - case []string: - for _, item := range r { - textFormatObject(item) - } - case []api.Alert: - for _, item := range r { - textFormatObject(item) - } - default: - checkErr("", errors.New("unsupported type returned"+reflect.TypeOf(r).String())) - } -} - -func textFormatPrintID(obj api.ID) { - if obj.Error != "" { - fmt.Printf("%s | ERROR: %s\n", obj.ID.Pretty(), obj.Error) - return - } - - fmt.Printf( - "%s | %s | Sees %d other peers\n", - obj.ID.Pretty(), - obj.Peername, - len(obj.ClusterPeers)-1, - ) - - addrs := make(sort.StringSlice, 0, len(obj.Addresses)) - for _, a := range obj.Addresses { - addrs = append(addrs, a.String()) - } - addrs.Sort() - fmt.Println(" > Addresses:") - for _, a := range addrs { - fmt.Printf(" - %s\n", a) - } - if obj.IPFS.Error != "" { - fmt.Printf(" > IPFS ERROR: %s\n", obj.IPFS.Error) - return - } - - ipfsAddrs := make(sort.StringSlice, 0, len(obj.Addresses)) - for _, a := range obj.IPFS.Addresses { - ipfsAddrs = append(ipfsAddrs, a.String()) - } - ipfsAddrs.Sort() - fmt.Printf(" > IPFS: %s\n", obj.IPFS.ID.Pretty()) - for _, a := range ipfsAddrs { - fmt.Printf(" - %s\n", a) - } -} - -func textFormatPrintGPInfo(obj api.GlobalPinInfo) { - var b strings.Builder - - peers := make([]string, 0, len(obj.PeerMap)) - for k := range obj.PeerMap { - peers = append(peers, k) - } - sort.Strings(peers) - - fmt.Fprintf(&b, "%s", obj.Cid) - if obj.Name != "" { - fmt.Fprintf(&b, " | %s", obj.Name) - } - - b.WriteString(":\n") - - for _, k := range peers { - v := obj.PeerMap[k] - if len(v.PeerName) > 0 { - fmt.Fprintf(&b, " > %-20s : %s", v.PeerName, strings.ToUpper(v.Status.String())) - } else { - fmt.Fprintf(&b, " > %-20s : %s", k, strings.ToUpper(v.Status.String())) - } - if v.Error != "" { - fmt.Fprintf(&b, ": %s", v.Error) - } - txt, _ := v.TS.MarshalText() - fmt.Fprintf(&b, " | %s", txt) - fmt.Fprintf(&b, " | Attempts: %d", v.AttemptCount) - fmt.Fprintf(&b, " | Priority: %t", v.PriorityPin) - fmt.Fprintf(&b, "\n") - } - fmt.Print(b.String()) -} - -func textFormatPrintVersion(obj api.Version) { - fmt.Println(obj.Version) -} - -func textFormatPrintPin(obj api.Pin) { - t := strings.ToUpper(obj.Type.String()) - if obj.Mode == api.PinModeDirect { - t = t + "-DIRECT" - } - - fmt.Printf("%s | %s | %s | ", obj.Cid, obj.Name, t) - - if obj.IsPinEverywhere() { - fmt.Printf("Repl. Factor: -1 | Allocations: [everywhere]") - } else { - sortAlloc := api.PeersToStrings(obj.Allocations) - sort.Strings(sortAlloc) - fmt.Printf("Repl. 
Factor: %d--%d | Allocations: %s", - obj.ReplicationFactorMin, obj.ReplicationFactorMax, - sortAlloc) - } - var recStr string - switch obj.MaxDepth { - case 0: - recStr = "Direct" - case -1: - recStr = "Recursive" - default: - recStr = fmt.Sprintf("Recursive-%d", obj.MaxDepth) - } - - fmt.Printf(" | %s", recStr) - - fmt.Printf(" | Metadata:") - if len(obj.Metadata) == 0 { - fmt.Printf(" no") - } else { - fmt.Printf(" yes") - } - expireAt := "∞" - if !obj.ExpireAt.IsZero() { - expireAt = obj.ExpireAt.Format("2006-01-02 15:04:05") - } - fmt.Printf(" | Exp: %s", expireAt) - - added := "unknown" - if !obj.Timestamp.IsZero() { - added = obj.Timestamp.Format("2006-01-02 15:04:05") - } - fmt.Printf(" | Added: %s\n", added) -} - -func textFormatPrintAddedOutput(obj api.AddedOutput) { - fmt.Printf("added %s %s\n", obj.Cid, obj.Name) -} - -func textFormatPrintAddedOutputQuiet(obj addedOutputQuiet) { - if obj.quiet { - fmt.Printf("%s\n", obj.AddedOutput.Cid) - } else { - textFormatPrintAddedOutput(obj.AddedOutput) - } -} - -func textFormatPrintMetric(obj api.Metric) { - v := obj.Value - if obj.Name == "freespace" && obj.Weight > 0 { - v = humanize.Bytes(uint64(obj.Weight)) - } - - fmt.Printf("%s | %s: %s | Expires in: %s\n", obj.Peer, obj.Name, v, humanize.Time(time.Unix(0, obj.Expire))) -} - -func textFormatPrintAlert(obj api.Alert) { - fmt.Printf("%s: %s. Expired at: %s. Triggered at: %s\n", - obj.Peer, - obj.Name, - humanize.Time(time.Unix(0, obj.Expire)), - humanize.Time(obj.TriggeredAt), - ) -} - -func textFormatPrintGlobalRepoGC(obj api.GlobalRepoGC) { - peers := make(sort.StringSlice, 0, len(obj.PeerMap)) - for peer := range obj.PeerMap { - peers = append(peers, peer) - } - peers.Sort() - - for _, peer := range peers { - item := obj.PeerMap[peer] - // If peer name is set, use it instead of peer ID. - if len(item.Peername) > 0 { - peer = item.Peername - } - if item.Error != "" { - fmt.Printf("%-15s | ERROR: %s\n", peer, item.Error) - } else { - fmt.Printf("%-15s\n", peer) - } - - fmt.Printf(" > CIDs:\n") - for _, key := range item.Keys { - if key.Error != "" { - // key.Key will be empty - fmt.Printf(" - ERROR: %s\n", key.Error) - continue - } - - fmt.Printf(" - %s\n", key.Key) - } - } -} - -func textFormatPrintError(obj api.Error) { - fmt.Printf("An error occurred:\n") - fmt.Printf(" Code: %d\n", obj.Code) - fmt.Printf(" Message: %s\n", obj.Message) -} - -func trackerStatusAllString() string { - var strs []string - for _, st := range api.TrackerStatusAll() { - strs = append(strs, " - "+st.String()) - } - - sort.Strings(strs) - return strings.Join(strs, "\n") -} diff --git a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-ctl/graph.go b/packages/networking/ipfs-cluster/cmd/ipfs-cluster-ctl/graph.go deleted file mode 100644 index 417370f..0000000 --- a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-ctl/graph.go +++ /dev/null @@ -1,265 +0,0 @@ -package main - -import ( - "errors" - "fmt" - "io" - "sort" - - dot "github.com/kishansagathiya/go-dot" - peer "github.com/libp2p/go-libp2p/core/peer" - - "github.com/ipfs-cluster/ipfs-cluster/api" -) - -/* - These functions are used to write an IPFS Cluster connectivity graph to a - graphviz-style dot file. Input an api.ConnectGraphSerial object, makeDot - does some preprocessing and then passes all 3 link maps to a - cluster-dotWriter which handles iterating over the link maps and writing - dot file node and edge statements to make a dot-file graph. Nodes are - labeled with the go-libp2p-peer shortened peer id. 
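The "shortened peer id" just mentioned (the doc comment resumes after this block) is produced by the shorten helper deleted further down: the first two characters of the ID, an asterisk, then its last six. A tiny runnable sketch using one of the sample peer IDs from the tests below:

```
package main

import "fmt"

// shorten mirrors the deleted helper in graph.go: the first two
// characters of the peer ID, a "*", then its last six characters.
func shorten(id string) string {
	if len(id) < 8 {
		return id // guard added for this sketch; the original assumes long IDs
	}
	return id[:2] + "*" + id[len(id)-6:]
}

func main() {
	id := "QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD"
	fmt.Println(shorten(id)) // Qm*eqhEhD
}
```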
IPFS nodes are rendered - with turquoise boundaries, Cluster nodes with orange. Currently preprocessing - consists of moving IPFS swarm peers not connected to any cluster peer to - the IPFSLinks map in the event that the function was invoked with the - allIpfs flag. This allows all IPFS peers connected to the cluster to be - rendered as nodes in the final graph. -*/ - -// nodeType specifies the type of node being represented in the dot file: -// either IPFS or Cluster -type nodeType int - -const ( - tSelfCluster nodeType = iota // cluster self node - tCluster // cluster node - tTrustedCluster // trusted cluster node - tIPFS // IPFS node - tIPFSMissing // Missing IPFS node -) - -var errUnknownNodeType = errors.New("unsupported node type. Expected cluster or ipfs") - -func makeDot(cg api.ConnectGraph, w io.Writer, allIpfs bool) error { - ipfsEdges := make(map[string][]peer.ID) - for k, v := range cg.IPFSLinks { - ipfsEdges[k] = make([]peer.ID, 0) - for _, id := range v { - strPid := id.String() - if _, ok := cg.IPFSLinks[strPid]; ok || allIpfs { - ipfsEdges[k] = append(ipfsEdges[k], id) - } - if allIpfs { // include all swarm peers in the graph - if _, ok := ipfsEdges[strPid]; !ok { - // if id in IPFSLinks this will be overwritten - // if id not in IPFSLinks this will stay blank - ipfsEdges[strPid] = make([]peer.ID, 0) - } - } - } - } - - dW := dotWriter{ - w: w, - dotGraph: dot.NewGraph("cluster"), - self: cg.ClusterID.String(), - trustMap: cg.ClusterTrustLinks, - idToPeername: cg.IDtoPeername, - ipfsEdges: ipfsEdges, - clusterEdges: cg.ClusterLinks, - clusterIpfsEdges: cg.ClustertoIPFS, - clusterNodes: make(map[string]*dot.VertexDescription), - ipfsNodes: make(map[string]*dot.VertexDescription), - } - return dW.print() -} - -type dotWriter struct { - clusterNodes map[string]*dot.VertexDescription - ipfsNodes map[string]*dot.VertexDescription - - w io.Writer - dotGraph dot.Graph - - self string - idToPeername map[string]string - trustMap map[string]bool - ipfsEdges map[string][]peer.ID - clusterEdges map[string][]peer.ID - clusterIpfsEdges map[string]peer.ID -} - -func (dW *dotWriter) addSubGraph(sGraph dot.Graph, rank string) { - sGraph.IsSubGraph = true - sGraph.Rank = rank - dW.dotGraph.AddSubGraph(&sGraph) -} - -// writes nodes to dot file output and creates and stores an ordering over nodes -func (dW *dotWriter) addNode(graph *dot.Graph, id string, nT nodeType) error { - node := dot.NewVertexDescription("") - node.Group = id - node.ColorScheme = "x11" - node.FontName = "Arial" - node.Style = "filled" - node.FontColor = "black" - switch nT { - case tSelfCluster: - node.ID = fmt.Sprintf("C%d", len(dW.clusterNodes)) - node.Shape = "box3d" - node.Label = label(dW.idToPeername[id], shorten(id)) - node.Color = "orange" - node.Peripheries = 2 - dW.clusterNodes[id] = &node - case tTrustedCluster: - node.ID = fmt.Sprintf("T%d", len(dW.clusterNodes)) - node.Shape = "box3d" - node.Label = label(dW.idToPeername[id], shorten(id)) - node.Color = "orange" - dW.clusterNodes[id] = &node - case tCluster: - node.Shape = "box3d" - node.Label = label(dW.idToPeername[id], shorten(id)) - node.ID = fmt.Sprintf("C%d", len(dW.clusterNodes)) - node.Color = "darkorange3" - dW.clusterNodes[id] = &node - case tIPFS: - node.ID = fmt.Sprintf("I%d", len(dW.ipfsNodes)) - node.Shape = "cylinder" - node.Label = label("IPFS", shorten(id)) - node.Color = "turquoise3" - dW.ipfsNodes[id] = &node - case tIPFSMissing: - node.ID = fmt.Sprintf("I%d", len(dW.ipfsNodes)) - node.Shape = "cylinder" - node.Label = label("IPFS", 
"Errored") - node.Color = "firebrick1" - dW.ipfsNodes[id] = &node - default: - return errUnknownNodeType - } - - graph.AddVertex(&node) - return nil -} - -func shorten(id string) string { - return id[:2] + "*" + id[len(id)-6:] -} - -func label(peername, id string) string { - return fmt.Sprintf("< %s
%s >", peername, id) -} - -func (dW *dotWriter) print() error { - dW.dotGraph.AddComment("The nodes of the connectivity graph") - dW.dotGraph.AddComment("The cluster-service peers") - // Write cluster nodes, use sorted order for consistent labels - sGraphCluster := dot.NewGraph("") - sGraphCluster.IsSubGraph = true - sortedClusterEdges := sortedKeys(dW.clusterEdges) - for _, k := range sortedClusterEdges { - var err error - if k == dW.self { - err = dW.addNode(&sGraphCluster, k, tSelfCluster) - } else if dW.trustMap[k] { - err = dW.addNode(&sGraphCluster, k, tTrustedCluster) - } else { - err = dW.addNode(&sGraphCluster, k, tCluster) - } - if err != nil { - return err - } - } - dW.addSubGraph(sGraphCluster, "min") - dW.dotGraph.AddNewLine() - - dW.dotGraph.AddComment("The ipfs peers") - sGraphIPFS := dot.NewGraph("") - sGraphIPFS.IsSubGraph = true - // Write ipfs nodes, use sorted order for consistent labels - for _, k := range sortedKeys(dW.ipfsEdges) { - err := dW.addNode(&sGraphIPFS, k, tIPFS) - if err != nil { - return err - } - } - - for _, k := range sortedClusterEdges { - if _, ok := dW.clusterIpfsEdges[k]; !ok { - err := dW.addNode(&sGraphIPFS, k, tIPFSMissing) - if err != nil { - return err - } - } - } - - dW.addSubGraph(sGraphIPFS, "max") - dW.dotGraph.AddNewLine() - - dW.dotGraph.AddComment("Edges representing active connections in the cluster") - dW.dotGraph.AddComment("The connections among cluster-service peers") - // Write cluster edges - for _, k := range sortedClusterEdges { - v := dW.clusterEdges[k] - for _, id := range v { - toNode := dW.clusterNodes[k] - fromNode := dW.clusterNodes[id.String()] - dW.dotGraph.AddEdge(toNode, fromNode, true, "") - } - } - dW.dotGraph.AddNewLine() - - dW.dotGraph.AddComment("The connections between cluster peers and their ipfs daemons") - // Write cluster to ipfs edges - for _, k := range sortedClusterEdges { - var fromNode *dot.VertexDescription - toNode := dW.clusterNodes[k] - ipfsID, ok := dW.clusterIpfsEdges[k] - if !ok { - fromNode, ok2 := dW.ipfsNodes[k] - if !ok2 { - logger.Error("expected a node at this id") - continue - } - dW.dotGraph.AddEdge(toNode, fromNode, true, "dotted") - continue - } - - fromNode, ok = dW.ipfsNodes[ipfsID.String()] - if !ok { - logger.Error("expected a node at this id") - continue - } - dW.dotGraph.AddEdge(toNode, fromNode, true, "") - } - dW.dotGraph.AddNewLine() - - dW.dotGraph.AddComment("The swarm peer connections among ipfs daemons in the cluster") - // Write ipfs edges - for _, k := range sortedKeys(dW.ipfsEdges) { - v := dW.ipfsEdges[k] - toNode := dW.ipfsNodes[k] - for _, id := range v { - idStr := id.String() - fromNode, ok := dW.ipfsNodes[idStr] - if !ok { - logger.Error("expected a node here") - continue - } - dW.dotGraph.AddEdge(toNode, fromNode, true, "") - } - } - return dW.dotGraph.Write(dW.w) -} - -func sortedKeys(dict map[string][]peer.ID) []string { - keys := make([]string, len(dict)) - i := 0 - for k := range dict { - keys[i] = k - i++ - } - sort.Strings(keys) - return keys -} diff --git a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-ctl/graph_test.go b/packages/networking/ipfs-cluster/cmd/ipfs-cluster-ctl/graph_test.go deleted file mode 100644 index b77ee56..0000000 --- a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-ctl/graph_test.go +++ /dev/null @@ -1,233 +0,0 @@ -package main - -import ( - "bytes" - "fmt" - "strings" - "testing" - - "github.com/ipfs-cluster/ipfs-cluster/api" - - peer "github.com/libp2p/go-libp2p/core/peer" -) - -func verifyOutput(t *testing.T, outStr string, 
trueStr string) { - outLines := strings.Split(outStr, "\n") - trueLines := strings.Split(trueStr, "\n") - if len(outLines) != len(trueLines) { - fmt.Printf("expected:\n-%s-\n\n\nactual:\n-%s-", trueStr, outStr) - t.Fatal("Number of output lines does not match expectation") - } - for i := range outLines { - if outLines[i] != trueLines[i] { - t.Errorf("Difference in sorted outputs (%d): %s vs %s", i, outLines[i], trueLines[i]) - } - } -} - -var simpleIpfs = `digraph cluster { -/* The nodes of the connectivity graph */ -/* The cluster-service peers */ -subgraph { -rank="min" -C0 [label=<
<B>  </B> <BR/> <B> Qm*eqhEhD </B> > group="QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD" color="orange" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="box3d" peripheries="2" ] -C1 [label=<
<B>  </B> <BR/> <B> Qm*cgHDQJ </B> > group="QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ" color="darkorange3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="box3d" ] -C2 [label=<
<B>  </B> <BR/> <B> Qm*6MQmJu </B> > group="QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu" color="darkorange3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="box3d" ] -} - -/* The ipfs peers */ -subgraph { -rank="max" -I0 [label=< <B> IPFS </B> <BR/>
<B> Qm*N5LSsq </B> > group="QmPFKAGZbUjdzt8BBx8VTWCe9UeUQVcoqHFehSPzN5LSsq" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="cylinder" ] -I1 [label=< <B> IPFS </B> <BR/>
<B> Qm*R3DZDV </B> > group="QmXbiVZd93SLiu9TAm21F2y9JwGiFLydbEVkPBaMR3DZDV" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="cylinder" ] -I2 [label=< IPFS
Qm*wbBsuL > group="QmbU7273zH6jxwDe2nqRmEm2rp5PpqP2xeQr2xCmwbBsuL" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="cylinder" ] -} - -/* Edges representing active connections in the cluster */ -/* The connections among cluster-service peers */ -C0 -> C1 -C0 -> C2 -C1 -> C0 -C1 -> C2 -C2 -> C0 -C2 -> C1 - -/* The connections between cluster peers and their ipfs daemons */ -C0 -> I1 -C1 -> I0 -C2 -> I2 - -/* The swarm peer connections among ipfs daemons in the cluster */ -I0 -> I1 -I0 -> I2 -I1 -> I0 -I1 -> I2 -I2 -> I1 -I2 -> I0 -}` - -var ( - pid1, _ = peer.Decode("QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD") - pid2, _ = peer.Decode("QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ") - pid3, _ = peer.Decode("QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu") - pid4, _ = peer.Decode("QmXbiVZd93SLiu9TAm21F2y9JwGiFLydbEVkPBaMR3DZDV") - pid5, _ = peer.Decode("QmPFKAGZbUjdzt8BBx8VTWCe9UeUQVcoqHFehSPzN5LSsq") - pid6, _ = peer.Decode("QmbU7273zH6jxwDe2nqRmEm2rp5PpqP2xeQr2xCmwbBsuL") - - pid7, _ = peer.Decode("QmQsdAdCHs4PRLi5tcoLfasYppryqQENxgAy4b2aS8xccb") - pid8, _ = peer.Decode("QmVV2enwXqqQf5esx4v36UeaFQvFehSPzNfi8aaaaaanM8") - pid9, _ = peer.Decode("QmfCHNQ2vbUmAuJZhE2hEpgiJq4sL1XScWEKnUrVtWZdeD") -) - -func TestSimpleIpfsGraphs(t *testing.T) { - cg := api.ConnectGraph{ - ClusterID: pid1, - ClusterLinks: map[string][]peer.ID{ - pid1.String(): { - pid2, - pid3, - }, - pid2.String(): { - pid1, - pid3, - }, - pid3.String(): { - pid1, - pid2, - }, - }, - IPFSLinks: map[string][]peer.ID{ - pid4.String(): { - pid5, - pid6, - }, - pid5.String(): { - pid4, - pid6, - }, - pid6.String(): { - pid4, - pid5, - }, - }, - ClustertoIPFS: map[string]peer.ID{ - pid1.String(): pid4, - pid2.String(): pid5, - pid3.String(): pid6, - }, - } - buf := new(bytes.Buffer) - err := makeDot(cg, buf, false) - if err != nil { - t.Fatal(err) - } - verifyOutput(t, buf.String(), simpleIpfs) -} - -var allIpfs = `digraph cluster { -/* The nodes of the connectivity graph */ -/* The cluster-service peers */ -subgraph { -rank="min" -C0 [label=<
<B>  </B> <BR/> <B> Qm*eqhEhD </B> > group="QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD" color="orange" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="box3d" peripheries="2" ] -C1 [label=<
<B>  </B> <BR/> <B> Qm*cgHDQJ </B> > group="QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ" color="darkorange3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="box3d" ] -C2 [label=<
<B>  </B> <BR/> <B> Qm*6MQmJu </B> > group="QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu" color="darkorange3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="box3d" ] -} - -/* The ipfs peers */ -subgraph { -rank="max" -I0 [label=< <B> IPFS </B> <BR/>
<B> Qm*N5LSsq </B> > group="QmPFKAGZbUjdzt8BBx8VTWCe9UeUQVcoqHFehSPzN5LSsq" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="cylinder" ] -I1 [label=< <B> IPFS </B> <BR/>
<B> Qm*S8xccb </B> > group="QmQsdAdCHs4PRLi5tcoLfasYppryqQENxgAy4b2aS8xccb" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="cylinder" ] -I2 [label=< <B> IPFS </B> <BR/>
<B> Qm*aaanM8 </B> > group="QmVV2enwXqqQf5esx4v36UeaFQvFehSPzNfi8aaaaaanM8" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="cylinder" ] -I3 [label=< <B> IPFS </B> <BR/>
<B> Qm*R3DZDV </B> > group="QmXbiVZd93SLiu9TAm21F2y9JwGiFLydbEVkPBaMR3DZDV" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="cylinder" ] -I4 [label=< <B> IPFS </B> <BR/>
<B> Qm*wbBsuL </B> > group="QmbU7273zH6jxwDe2nqRmEm2rp5PpqP2xeQr2xCmwbBsuL" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="cylinder" ] -I5 [label=< IPFS
Qm*tWZdeD > group="QmfCHNQ2vbUmAuJZhE2hEpgiJq4sL1XScWEKnUrVtWZdeD" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="cylinder" ] -} - -/* Edges representing active connections in the cluster */ -/* The connections among cluster-service peers */ -C0 -> C1 -C0 -> C2 -C1 -> C0 -C1 -> C2 -C2 -> C0 -C2 -> C1 - -/* The connections between cluster peers and their ipfs daemons */ -C0 -> I3 -C1 -> I0 -C2 -> I4 - -/* The swarm peer connections among ipfs daemons in the cluster */ -I0 -> I3 -I0 -> I4 -I0 -> I1 -I0 -> I2 -I0 -> I5 -I3 -> I0 -I3 -> I4 -I3 -> I1 -I3 -> I2 -I3 -> I5 -I4 -> I3 -I4 -> I0 -I4 -> I1 -I4 -> I2 -I4 -> I5 -}` - -func TestIpfsAllGraphs(t *testing.T) { - cg := api.ConnectGraph{ - ClusterID: pid1, - ClusterLinks: map[string][]peer.ID{ - pid1.String(): { - pid2, - pid3, - }, - pid2.String(): { - pid1, - pid3, - }, - pid3.String(): { - pid1, - pid2, - }, - }, - IPFSLinks: map[string][]peer.ID{ - pid4.String(): { - pid5, - pid6, - pid7, - pid8, - pid9, - }, - pid5.String(): { - pid4, - pid6, - pid7, - pid8, - pid9, - }, - pid6.String(): { - pid4, - pid5, - pid7, - pid8, - pid9, - }, - }, - ClustertoIPFS: map[string]peer.ID{ - pid1.String(): pid4, - pid2.String(): pid5, - pid3.String(): pid6, - }, - } - - buf := new(bytes.Buffer) - err := makeDot(cg, buf, true) - if err != nil { - t.Fatal(err) - } - verifyOutput(t, buf.String(), allIpfs) -} diff --git a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-ctl/main.go b/packages/networking/ipfs-cluster/cmd/ipfs-cluster-ctl/main.go deleted file mode 100644 index 6b5f9fa..0000000 --- a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-ctl/main.go +++ /dev/null @@ -1,1292 +0,0 @@ -// The ipfs-cluster-ctl application. -package main - -import ( - "context" - "encoding/hex" - "errors" - "fmt" - "io" - "os" - "strings" - "sync" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/api/rest/client" - - logging "github.com/ipfs/go-log/v2" - peer "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" - - uuid "github.com/google/uuid" - cli "github.com/urfave/cli" -) - -const programName = `ipfs-cluster-ctl` - -// Version is the cluster-ctl tool version. It should match -// the IPFS cluster's version -const Version = "1.0.2" - -var ( - defaultHost = "/ip4/127.0.0.1/tcp/9094" - defaultTimeout = 0 - defaultWaitCheckFreq = time.Second - defaultAddParams = api.DefaultAddParams() -) - -var logger = logging.Logger("cluster-ctl") - -var globalClient client.Client - -// Description provides a short summary of the functionality of this tool -var Description = fmt.Sprintf(` -%s is a tool to manage IPFS Cluster nodes. - -Use "%s help" to list all available commands and -"%s help " to get usage information for a -specific one. - -%s uses the IPFS Cluster API to perform requests and -display responses in a user-readable format. The location of the IPFS -Cluster server is assumed to be %s, but can be configured -with the --host option. If several multiaddresses are specified -(comma-separated), requests will be sent to the first one and fail-over -over to the others. This also works for dns-based addresses which resolve -to multiple values. - -To use the secure libp2p-http API endpoint, use "--host" with -the full cluster libp2p listener address, including the "/p2p/" -part, or a /dnsaddr that resolves to it. Provide the cluster secret with ---secret as needed. 
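-
-For example (all values below are placeholders, not a real cluster):
-
-    ipfs-cluster-ctl --host /dns4/cluster.example.org/tcp/9096/p2p/<peer-id> --secret <hex-secret> id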
- -For feedback, bug reports or any additional information, visit -https://github.com/ipfs-cluster/ipfs-cluster. -`, - programName, - programName, - programName, - programName, - defaultHost) - -var ( - waitFlagDesc = "Wait for the pin to reach the minimum replication factor before returning" - waitTimeoutFlagDesc = "How long to --wait (in seconds). Default: forever" -) - -// type peerAddBody struct { -// Addr string `json:"peer_multiaddress"` -// } - -func out(m string, a ...interface{}) { - fmt.Fprintf(os.Stderr, m, a...) -} - -func checkErr(doing string, err error) { - if err != nil { - out("error %s: %s\n", doing, err) - os.Exit(1) - } -} - -func main() { - ctx := context.Background() - - app := cli.NewApp() - app.Name = programName - app.Usage = "CLI for IPFS Cluster" - app.Description = Description - app.Version = Version - app.Flags = []cli.Flag{ - cli.StringFlag{ - Name: "host, l", - Value: defaultHost, - Usage: `API endpoint multiaddresses (comma-separated)`, - }, - cli.StringFlag{ - Name: "secret", - Value: "", - Usage: "cluster secret (32 byte pnet-key) as needed. Only when using the LibP2P endpoint", - }, - cli.BoolFlag{ - Name: "https, s", - Usage: "use https to connect to the API", - }, - cli.BoolFlag{ - Name: "no-check-certificate", - Usage: "do not verify server TLS certificate. only valid with --https flag", - }, - cli.StringFlag{ - Name: "encoding, enc", - Value: "text", - Usage: "output format encoding [text, json]", - }, - cli.IntFlag{ - Name: "timeout, t", - Value: defaultTimeout, - Usage: "number of seconds to wait before timing out a request", - }, - cli.BoolFlag{ - Name: "debug, d", - Usage: "set debug log level", - }, - cli.StringFlag{ - Name: "basic-auth", - Usage: `[:] specify BasicAuth credentials for server that -requires authorization. implies --https, which you can disable with --force-http`, - EnvVar: "CLUSTER_CREDENTIALS", - }, - cli.BoolFlag{ - Name: "force-http, f", - Usage: "force HTTP. only valid when using BasicAuth", - }, - } - - app.Before = func(c *cli.Context) error { - cfg := &client.Config{} - - if c.Bool("debug") { - logging.SetLogLevel("cluster-ctl", "debug") - logging.SetLogLevel("apitypes", "debug") - cfg.LogLevel = "debug" - logger.Debug("debug level enabled") - } - - if hexSecret := c.String("secret"); hexSecret != "" { - secret, err := hex.DecodeString(hexSecret) - checkErr("parsing secret", err) - cfg.ProtectorKey = secret - } - - cfg.Timeout = time.Duration(c.Int("timeout")) * time.Second - - cfg.SSL = c.Bool("https") - cfg.NoVerifyCert = c.Bool("no-check-certificate") - user, pass := parseCredentials(c.String("basic-auth")) - cfg.Username = user - cfg.Password = pass - if user != "" && !cfg.SSL && !c.Bool("force-http") { - logger.Warn("SSL automatically enabled with basic auth credentials. Set \"force-http\" to disable") - cfg.SSL = true - } - - enc := c.String("encoding") - if enc != "text" && enc != "json" { - checkErr("", errors.New("unsupported encoding")) - } - - var configs []*client.Config - var err error - for _, addr := range strings.Split(c.String("host"), ",") { - multiaddr, err := ma.NewMultiaddr(addr) - checkErr("parsing host multiaddress", err) - - if client.IsPeerAddress(multiaddr) && c.Bool("https") { - logger.Warn("Using libp2p-http for %s. The https flag will be ignored for this connection", addr) - } - - var cfgs []*client.Config - - // We can auto round-robin on DNS records when using - // libp2p-http or not using SSL. When using SSL we - // cannot use the resolve-IPs directly. 
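- // For example (hypothetical value): a --host of "/dns4/cluster.example.org/tcp/9094"
- // that resolves to three records expands into three configs here, and the
- // NewLBClient call below fails over across them in order.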
- if client.IsPeerAddress(multiaddr) || !cfg.SSL { - cfgs, err = cfg.AsTemplateForResolvedAddress(ctx, multiaddr) - } else { - cfgs = cfg.AsTemplateFor([]ma.Multiaddr{multiaddr}) - } - checkErr("creating configs", err) - configs = append(configs, cfgs...) - } - - retries := len(configs) - globalClient, err = client.NewLBClient(&client.Failover{}, configs, retries) - checkErr("creating API client", err) - - // TODO: need to figure out best way to configure tracing for ctl - // leaving the following as it is still useful for local debugging. - // tracingCfg := &observations.Config{} - // tracingCfg.Default() - // tracingCfg.EnableTracing = true - // tracingCfg.TracingServiceName = "cluster-ctl" - // tracingCfg.TracingSamplingProb = 1 - // tracer = observations.SetupTracing(tracingCfg) - return nil - } - app.After = func(c *cli.Context) error { - // TODO: need to figure out best way to configure tracing for ctl - // leaving the following as it is still useful for local debugging. - // tracer.Flush() - return nil - } - - app.Commands = []cli.Command{ - { - Name: "id", - Usage: "Retrieve peer information", - Description: ` -This command displays information about the peer that the tool is contacting -(usually running in localhost). -`, - Flags: []cli.Flag{}, - Action: func(c *cli.Context) error { - resp, cerr := globalClient.ID(ctx) - formatResponse(c, resp, cerr) - return nil - }, - }, - { - Name: "peers", - Usage: "List and manage IPFS Cluster peers", - Description: "List and manage IPFS Cluster peers", - Subcommands: []cli.Command{ - { - Name: "ls", - Usage: "list the nodes participating in the IPFS Cluster", - Description: ` -This command provides a list of the ID information of all the peers in the Cluster. -`, - Flags: []cli.Flag{}, - ArgsUsage: " ", - Action: func(c *cli.Context) error { - out := make(chan api.ID, 1024) - errCh := make(chan error, 1) - go func() { - defer close(errCh) - errCh <- globalClient.Peers(ctx, out) - }() - formatResponse(c, out, nil) - err := <-errCh - formatResponse(c, nil, err) - return nil - }, - }, - { - Name: "rm", - Usage: "remove a peer from the Cluster", - Description: ` -This command removes a peer from the cluster. If the peer is online, it will -automatically shut down. All other cluster peers should be online for the -operation to succeed, otherwise some nodes may be left with an outdated list of -cluster peers. -`, - ArgsUsage: "", - Flags: []cli.Flag{}, - Action: func(c *cli.Context) error { - pid := c.Args().First() - p, err := peer.Decode(pid) - checkErr("parsing peer ID", err) - cerr := globalClient.PeerRm(ctx, p) - formatResponse(c, nil, cerr) - return nil - }, - }, - }, - }, - { - Name: "add", - Usage: "Add a file or directory to ipfs and pin it in the cluster", - ArgsUsage: "", - Description: ` -Add allows to add and replicate content to several ipfs daemons, performing -a Cluster Pin operation on success. It takes elements from local paths as -well as from web URLs (accessed with a GET request). Providing several -arguments will automatically set --wrap-in-directory. - -Cluster "add" works, by default, just like "ipfs add" and has similar options -in terms of DAG layout, chunker, hash function etc. It also supports adding -CAR files directly (--format car), as long as they have a single root. When -adding CAR files, all the options related to dag-building are ignored. - -Added content will be allocated and sent block by block to the peers that -should pin it (among which may not necessarily be the local ipfs daemon). 
-Once all the blocks have arrived, they will be "cluster-pinned". This makes -cluster add slower than a local ipfs add, but the result is a fully replicated -on completion. If you prefer faster adding, use the --local flag to add -directly to the local IPFS node and pin in the destinations after that. -Note that the local IPFS node may not be among the destinations, which will -leave the unpinned content in it. - -Optional replication-min and replication-max factors can be provided: -1 means -"pin everywhere" and 0 means use cluster's default setting (i.e., replication -factor set in config). Positive values indicate how many peers should pin this -content. -`, - /* - Cluster Add supports handling huge files and sharding the resulting DAG among - several ipfs daemons (--shard). In this case, a single ipfs daemon will not - contain the full dag, but only parts of it (shards). Desired shard size can - be provided with the --shard-size flag. - - We recommend setting a --name for sharded pins. Otherwise, it will be - automatically generated. - */ - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "recursive, r", - Usage: "Add directory paths recursively", - }, - cli.BoolFlag{ - Name: "quiet, q", - Usage: "Write only hashes to output (one per line)", - }, - cli.BoolFlag{ - Name: "quieter, Q", - Usage: "Write only final hash to output", - }, - cli.BoolFlag{ - Name: "no-stream", - Usage: "Buffer output locally. Produces a valid JSON array with --enc=json.", - }, - cli.BoolFlag{ - Name: "local", - Usage: "Add to local peer but pin normally", - }, - cli.StringFlag{ - Name: "name, n", - Value: defaultAddParams.Name, - Usage: "Sets a name for this pin", - }, - cli.IntFlag{ - Name: "replication-min, rmin", - Value: defaultAddParams.ReplicationFactorMin, - Usage: "Sets the minimum replication factor for pinning this file", - }, - cli.IntFlag{ - Name: "replication-max, rmax", - Value: defaultAddParams.ReplicationFactorMax, - Usage: "Sets the maximum replication factor for pinning this file", - }, - cli.StringFlag{ - Name: "expire-in", - Usage: "Duration after which the pin should be unpinned automatically", - }, - cli.StringSliceFlag{ - Name: "metadata", - Usage: "Pin metadata: key=value. Can be added multiple times", - }, - cli.StringFlag{ - Name: "allocations, allocs", - Usage: "Optional comma-separated list of peer IDs", - }, - cli.BoolFlag{ - Name: "wait", - Usage: waitFlagDesc, - }, - cli.DurationFlag{ - Name: "wait-timeout, wt", - Value: 0, - Usage: waitTimeoutFlagDesc, - }, - - cli.BoolFlag{ - Name: "wrap-with-directory, w", - Usage: "Wrap a with a directory object", - }, - - cli.StringFlag{ - Name: "format", - Value: defaultAddParams.Format, - Usage: "'unixfs' (add as unixfs DAG), 'car' (import CAR file)", - }, - - cli.StringFlag{ - Name: "layout", - Value: defaultAddParams.Layout, - Usage: "Dag layout to use for dag generation: balanced or trickle", - }, - cli.BoolFlag{ - Name: "hidden, H", - Usage: "Include files that are hidden. Only takes effect on recursive add", - }, - cli.StringFlag{ - Name: "chunker, s", - Usage: "'size-' or 'rabin---'", - Value: defaultAddParams.Chunker, - }, - cli.BoolFlag{ - Name: "raw-leaves", - Usage: "Use raw blocks for leaves (experimental)", - }, - cli.IntFlag{ - Name: "cid-version", - Usage: "CID version. Non default implies raw-leaves", - Value: defaultAddParams.CidVersion, - }, - cli.StringFlag{ - Name: "hash", - Usage: "Hash function to use. 
Implies cid-version=1", - Value: defaultAddParams.HashFun, - }, - cli.BoolFlag{ - Name: "nocopy", - Usage: "Add the URL using filestore. Implies raw-leaves. (experimental)", - }, - - // TODO: Uncomment when sharding is supported. - // cli.BoolFlag{ - // Name: "shard", - // Usage: "Break the file into pieces (shards) and distributed among peers", - // }, - // cli.Uint64Flag{ - // Name: "shard-size", - // Value: defaultAddParams.ShardSize, - // Usage: "Sets the maximum replication factor for pinning this file", - // }, - // TODO: Figure progress over total bar. - // cli.BoolFlag{ - // Name: "progress, p", - // Usage: "Stream progress data", - // }, - - }, - Action: func(c *cli.Context) error { - shard := c.Bool("shard") - name := c.String("name") - if shard && name == "" { - randName, err := uuid.NewRandom() - if err != nil { - return err - } - // take only first letters - name = "sharded-" + strings.Split(randName.String(), "-")[0] - } - - // Read arguments (paths) - paths := make([]string, c.NArg()) - for i, path := range c.Args() { - paths[i] = path - } - - if len(paths) == 0 { - checkErr("", errors.New("need at least one path")) - } - - // Setup AddParams - p := api.DefaultAddParams() - p.ReplicationFactorMin = c.Int("replication-min") - p.ReplicationFactorMax = c.Int("replication-max") - if expireIn := c.String("expire-in"); expireIn != "" { - d, err := time.ParseDuration(expireIn) - checkErr("parsing expire-in", err) - p.ExpireAt = time.Now().Add(d) - } - - p.Metadata = parseMetadata(c.StringSlice("metadata")) - p.Name = name - if c.String("allocations") != "" { - p.UserAllocations = api.StringsToPeers(strings.Split(c.String("allocations"), ",")) - } - p.Format = c.String("format") - //p.Shard = shard - //p.ShardSize = c.Uint64("shard-size") - p.Shard = false - p.Recursive = c.Bool("recursive") - p.Local = c.Bool("local") - p.Layout = c.String("layout") - p.Chunker = c.String("chunker") - p.RawLeaves = c.Bool("raw-leaves") - p.Hidden = c.Bool("hidden") - p.Wrap = c.Bool("wrap-with-directory") || len(paths) > 1 - p.CidVersion = c.Int("cid-version") - p.HashFun = c.String("hash") - if p.HashFun != defaultAddParams.HashFun { - p.CidVersion = 1 - } - if p.CidVersion > 0 { - p.RawLeaves = true - } - p.NoCopy = c.Bool("nocopy") - if p.NoCopy { - p.RawLeaves = true - } - - // Prevent footgun - if p.Wrap && p.Format == "car" { - checkErr("", errors.New("only a single CAR file can be added and wrap-with-directory is not supported")) - } - - out := make(chan api.AddedOutput, 1) - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - - var buffered []addedOutputQuiet - var lastBuf addedOutputQuiet - var qq = c.Bool("quieter") - var q = c.Bool("quiet") || qq - var bufferResults = c.Bool("no-stream") - for v := range out { - added := addedOutputQuiet{ - AddedOutput: v, - quiet: q, - } - lastBuf = added - if bufferResults { - buffered = append(buffered, added) - continue - } - if !qq { // print things - formatResponse(c, added, nil) - } - } - if !lastBuf.AddedOutput.Cid.Defined() { - return // no elements at all - } - if bufferResults { // we buffered. - if qq { // [last elem] - formatResponse(c, []addedOutputQuiet{lastBuf}, nil) - } else { // [all elems] - formatResponse(c, buffered, nil) - } - } else if qq { // we already printed unless Quieter - formatResponse(c, lastBuf, nil) - } - if c.Bool("wait") { - // In order to wait we need to get the allocation's replication factor. - // If it errors, we use whatever we set on the request. 
If we set 0 or -1, then - // no limit applies so we will wait for all. - rplMin := p.ReplicationFactorMin - alloc, err := globalClient.Allocation(ctx, lastBuf.AddedOutput.Cid) - if err == nil { - rplMin = alloc.ReplicationFactorMin - } - - _, werr := waitFor(lastBuf.AddedOutput.Cid, api.TrackerStatusPinned, c.Duration("wait-timeout"), rplMin) - checkErr("waiting for pin status", werr) - } - }() - - cerr := globalClient.Add(ctx, paths, p, out) - wg.Wait() - formatResponse(c, nil, cerr) - return cerr - }, - }, - { - Name: "pin", - Usage: "Pin and unpin and list items in IPFS Cluster", - Description: "Pin and unpin and list items in IPFS Cluster", - Subcommands: []cli.Command{ - { - Name: "add", - Usage: "Pin an item in the cluster", - Description: ` -This command tells IPFS Cluster to start managing a CID. Depending on -the pinning strategy, this will trigger IPFS pin requests. The CID will -become part of the Cluster's state and will tracked from this point. - -When the request has succeeded, the command returns the status of the CID -in the cluster and should be part of the list offered by "pin ls". - -An optional replication factor can be provided: -1 means "pin everywhere" -and 0 means use cluster's default setting (i.e., replication factor set in -config). Positive values indicate how many peers should pin this content. - -An optional allocations argument can be provided, allocations should be a -comma-separated list of peer IDs on which we want to pin. Peers in allocations -are prioritized over automatically-determined ones, but replication factors -would still be respected. -`, - ArgsUsage: "", - Flags: []cli.Flag{ - cli.IntFlag{ - Name: "replication, r", - Value: 0, - Usage: "Sets a custom replication factor (overrides -rmax and -rmin)", - }, - cli.IntFlag{ - Name: "replication-min, rmin", - Value: 0, - Usage: "Sets the minimum replication factor for this pin", - }, - cli.IntFlag{ - Name: "replication-max, rmax", - Value: 0, - Usage: "Sets the maximum replication factor for this pin", - }, - cli.StringFlag{ - Name: "allocations, allocs", - Usage: "Optional comma-separated list of peer IDs", - }, - cli.StringFlag{ - Name: "name, n", - Value: "", - Usage: "Sets a name for this pin", - }, - cli.StringFlag{ - Name: "mode", - Value: "recursive", - Usage: "Select a way to pin: recursive or direct", - }, - cli.StringFlag{ - Name: "expire-in", - Usage: "Duration after which pin should be unpinned automatically", - }, - cli.StringSliceFlag{ - Name: "metadata", - Usage: "Pin metadata: key=value. 
Can be added multiple times", - }, - cli.BoolFlag{ - Name: "no-status, ns", - Usage: "Prevents fetching pin status after pinning (faster, quieter)", - }, - cli.BoolFlag{ - Name: "wait, w", - Usage: waitFlagDesc, - }, - cli.DurationFlag{ - Name: "wait-timeout, wt", - Value: 0, - Usage: waitTimeoutFlagDesc, - }, - }, - Action: func(c *cli.Context) error { - arg := c.Args().First() - rpl := c.Int("replication") - rplMin := c.Int("replication-min") - rplMax := c.Int("replication-max") - if rpl != 0 { - rplMin = rpl - rplMax = rpl - } - - var userAllocs []peer.ID - if c.String("allocations") != "" { - allocs := strings.Split(c.String("allocations"), ",") - for i := range allocs { - allocs[i] = strings.TrimSpace(allocs[i]) - } - userAllocs = api.StringsToPeers(allocs) - if len(userAllocs) != len(allocs) { - checkErr("decoding allocations", errors.New("some peer IDs could not be decoded")) - } - } - var expireAt time.Time - if expireIn := c.String("expire-in"); expireIn != "" { - d, err := time.ParseDuration(expireIn) - checkErr("parsing expire-in", err) - expireAt = time.Now().Add(d) - } - - opts := api.PinOptions{ - ReplicationFactorMin: rplMin, - ReplicationFactorMax: rplMax, - Name: c.String("name"), - Mode: api.PinModeFromString(c.String("mode")), - UserAllocations: userAllocs, - ExpireAt: expireAt, - Metadata: parseMetadata(c.StringSlice("metadata")), - } - - pin, cerr := globalClient.PinPath(ctx, arg, opts) - if cerr != nil { - formatResponse(c, nil, cerr) - return nil - } - handlePinResponseFormatFlags( - ctx, - c, - pin, - api.TrackerStatusPinned, - ) - return nil - }, - }, - { - Name: "rm", - Usage: "Unpin an item from the cluster", - Description: ` -This command tells IPFS Cluster to no longer manage a CID. This will -trigger unpinning operations in all the IPFS nodes holding the content. - -When the request has succeeded, the command returns the status of the CID -in the cluster. The CID should disappear from the list offered by "pin ls", -although unpinning operations in the cluster may take longer or fail. -`, - ArgsUsage: "", - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "no-status, ns", - Usage: "Prevents fetching pin status after unpinning (faster, quieter)", - }, - cli.BoolFlag{ - Name: "wait, w", - Usage: waitFlagDesc, - }, - cli.DurationFlag{ - Name: "wait-timeout, wt", - Value: 0, - Usage: waitTimeoutFlagDesc, - }, - }, - Action: func(c *cli.Context) error { - arg := c.Args().First() - pin, cerr := globalClient.UnpinPath(ctx, arg) - if cerr != nil { - formatResponse(c, nil, cerr) - return nil - } - handlePinResponseFormatFlags( - ctx, - c, - pin, - api.TrackerStatusUnpinned, - ) - return nil - }, - }, - { - Name: "update", - Usage: "Pin a new item based on an existing one", - Description: ` -This command will add a new pin to the cluster taking all the options from an -existing one, including name. This means that the new pin will bypass the -allocation process and will be allocated to the same peers as the existing -one. - -The cluster peers will try to Pin the new item on IPFS using the "pin update" -command. This is especially efficient when the content of two pins (their DAGs) -are similar. - -Unlike the "pin update" command in the ipfs daemon, this will not unpin the -existing item from the cluster. Please run "pin rm" for that. 
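-
-For example (both CIDs are placeholders):
-
-    ipfs-cluster-ctl pin update <existing-cid> <new-cid> --wait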
-`, - ArgsUsage: " ", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "name, n", - Value: "", - Usage: "Sets a name for this updated pin", - }, - cli.StringFlag{ - Name: "expire-in", - Usage: "Duration after which the pin should be unpinned automatically after updating", - }, - cli.BoolFlag{ - Name: "no-status, ns", - Usage: "Prevents fetching pin status after updating (faster, quieter)", - }, - cli.BoolFlag{ - Name: "wait, w", - Usage: waitFlagDesc, - }, - cli.DurationFlag{ - Name: "wait-timeout, wt", - Value: 0, - Usage: waitTimeoutFlagDesc, - }, - }, - Action: func(c *cli.Context) error { - from := c.Args().Get(0) - to := c.Args().Get(1) - - fromCid, err := api.DecodeCid(from) - checkErr("parsing from Cid", err) - - var expireAt time.Time - if expireIn := c.String("expire-in"); expireIn != "" { - d, err := time.ParseDuration(expireIn) - checkErr("parsing expire-in", err) - expireAt = time.Now().Add(d) - } - - opts := api.PinOptions{ - PinUpdate: fromCid, - Name: c.String("name"), - ExpireAt: expireAt, - } - - pin, cerr := globalClient.PinPath(ctx, to, opts) - if cerr != nil { - formatResponse(c, nil, cerr) - return nil - } - handlePinResponseFormatFlags( - ctx, - c, - pin, - api.TrackerStatusPinned, - ) - return nil - }, - }, - { - Name: "ls", - Usage: "List items in the cluster pinset", - Description: ` -This command will list the CIDs which are tracked by IPFS Cluster and to -which peers they are currently allocated. This list does not include -any monitoring information about the IPFS status of the CIDs, it -merely represents the list of pins which are part of the shared state of -the cluster. For IPFS-status information about the pins, use "status". - -The filter only takes effect when listing all pins. The possible values are: - - all (default) - - pin (normal pins, recursive or direct) - - meta-pin (sharded pins) - - clusterdag-pin (sharding-dag root pins) - - shard-pin (individual shard pins) -`, - ArgsUsage: "[CID]", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "filter", - Usage: "Comma separated list of pin types. See help above.", - Value: "all", - }, - }, - Action: func(c *cli.Context) error { - cidStr := c.Args().First() - if cidStr != "" { - ci, err := api.DecodeCid(cidStr) - checkErr("parsing cid", err) - resp, cerr := globalClient.Allocation(ctx, ci) - formatResponse(c, resp, cerr) - } else { - var filter api.PinType - strFilter := strings.Split(c.String("filter"), ",") - for _, f := range strFilter { - filter |= api.PinTypeFromString(f) - } - - allocs := make(chan api.Pin, 1024) - errCh := make(chan error, 1) - go func() { - defer close(errCh) - errCh <- globalClient.Allocations(ctx, filter, allocs) - }() - formatResponse(c, allocs, nil) - err := <-errCh - formatResponse(c, nil, err) - } - return nil - }, - }, - }, - }, - { - Name: "status", - Usage: "Retrieve the status of tracked items", - Description: ` -This command retrieves the status of the CIDs tracked by IPFS -Cluster, including which member is pinning them and any errors. -If one of several CIDs are provided, the status will be only fetched -for a single item. Metadata CIDs are included in the status response. - -When the --local flag is passed, it will only fetch the status from the -contacted cluster peer. By default, status will be fetched from all peers. - -When the --filter flag is passed, it will only fetch the peer information -where status of the pin matches at least one of the filter values (a comma -separated list). 
The following are valid status values: - -` + trackerStatusAllString(), - ArgsUsage: "[CID1] [CID2]...", - Flags: []cli.Flag{ - localFlag(), - cli.StringFlag{ - Name: "filter", - Usage: "comma-separated list of filters", - }, - }, - Action: func(c *cli.Context) error { - cidsStr := c.Args() - cids := make([]api.Cid, len(cidsStr)) - for i, cStr := range cidsStr { - ci, err := api.DecodeCid(cStr) - checkErr("parsing cid", err) - cids[i] = ci - } - out := make(chan api.GlobalPinInfo, 1024) - chErr := make(chan error, 1) - go func() { - defer close(chErr) - - if len(cids) == 1 { - resp, cerr := globalClient.Status(ctx, cids[0], c.Bool("local")) - out <- resp - chErr <- cerr - close(out) - } else if len(cids) > 1 { - chErr <- globalClient.StatusCids(ctx, cids, c.Bool("local"), out) - } else { - filterFlag := c.String("filter") - filter := api.TrackerStatusFromString(c.String("filter")) - if filter == api.TrackerStatusUndefined && filterFlag != "" { - checkErr("parsing filter flag", errors.New("invalid filter name")) - } - chErr <- globalClient.StatusAll(ctx, filter, c.Bool("local"), out) - } - }() - - formatResponse(c, out, nil) - err := <-chErr - formatResponse(c, nil, err) - return nil - }, - }, - { - Name: "recover", - Usage: "Recover tracked items in error state", - Description: ` -This command asks Cluster peers to re-track or re-forget CIDs in -error state, usually because the IPFS pin or unpin operation has failed. - -The command will wait for any operations to succeed and will return the status -of the item upon completion. Note that, when running on the full sets of tracked -CIDs (without argument), it may take a considerably long time. - -When the --local flag is passed, it will only trigger recover -operations on the contacted peer (as opposed to on every peer). -`, - ArgsUsage: "[CID]", - Flags: []cli.Flag{ - localFlag(), - }, - Action: func(c *cli.Context) error { - cidStr := c.Args().First() - if cidStr != "" { - ci, err := api.DecodeCid(cidStr) - checkErr("parsing cid", err) - resp, cerr := globalClient.Recover(ctx, ci, c.Bool("local")) - formatResponse(c, resp, cerr) - } else { - out := make(chan api.GlobalPinInfo, 1024) - errCh := make(chan error, 1) - go func() { - defer close(errCh) - errCh <- globalClient.RecoverAll(ctx, c.Bool("local"), out) - }() - formatResponse(c, out, nil) - err := <-errCh - formatResponse(c, nil, err) - } - return nil - }, - }, - - { - Name: "version", - Usage: "Retrieve cluster version", - Description: ` -This command retrieves the IPFS Cluster version and can be used -to check that it matches the CLI version (shown by -v). -`, - ArgsUsage: " ", - Flags: []cli.Flag{}, - Action: func(c *cli.Context) error { - resp, cerr := globalClient.Version(ctx) - formatResponse(c, resp, cerr) - return nil - }, - }, - { - Name: "health", - Usage: "Cluster monitoring information", - Description: "Cluster monitoring information", - Subcommands: []cli.Command{ - { - Name: "graph", - Usage: "create a graph displaying connectivity of cluster peers", - Description: ` -This command queries all connected cluster peers and their ipfs peers to generate a -graph of the connections. Output is a dot file encoding the cluster's connection state. 
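-
-For example, assuming graphviz is installed (file names are arbitrary):
-
-    ipfs-cluster-ctl health graph -f cluster.dot
-    dot -Tsvg cluster.dot -o cluster.svg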
-`, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "file, f", - Value: "", - Usage: "sets an output dot-file for the connectivity graph", - }, - cli.BoolFlag{ - Name: "all-ipfs-peers", - Usage: "causes the graph to mark nodes for ipfs peers not directly in the cluster", - }, - }, - Action: func(c *cli.Context) error { - resp, cerr := globalClient.GetConnectGraph(ctx) - if cerr != nil { - formatResponse(c, resp, cerr) - return nil - } - var w io.WriteCloser - var err error - outputPath := c.String("file") - if outputPath == "" { - w = os.Stdout - } else { - w, err = os.Create(outputPath) - checkErr("creating output file", err) - } - defer w.Close() - err = makeDot(resp, w, c.Bool("all-ipfs-peers")) - checkErr("printing graph", err) - - return nil - }, - }, - { - Name: "metrics", - Usage: "List latest metrics logged by this peer", - Description: ` -This commands displays the latest valid metrics of the given type logged -by this peer for all current cluster peers. - -If no argument is provided, the command retrieves all currently existing metric types. - -Currently supported metrics depend on the informer component used, -but usually are: - -- freespace -- ping -`, - ArgsUsage: "", - Action: func(c *cli.Context) error { - metric := c.Args().First() - if metric == "" { - resp, cerr := globalClient.MetricNames(ctx) - formatResponse(c, resp, cerr) - return nil - } - - resp, cerr := globalClient.Metrics(ctx, metric) - formatResponse(c, resp, cerr) - return nil - }, - }, - { - Name: "alerts", - Usage: "List the latest expired metric alerts", - Description: ` -This command provides a list of "alerts" that the cluster has seen. - -An alert is triggered when one of the metrics seen for a peer expires, and no -new metrics have been received. - -Different alerts may be handled in different ways. i.e. ping alerts may -trigger automatic repinnings if configured. -`, - Action: func(c *cli.Context) error { - resp, cerr := globalClient.Alerts(ctx) - formatResponse(c, resp, cerr) - return nil - }, - }, - }, - }, - { - Name: "ipfs", - Usage: "Manage IPFS daemon", - Description: "Manage IPFS daemon", - Subcommands: []cli.Command{ - { - Name: "gc", - Usage: "run garbage collection on IPFS repos of cluster peers", - Description: ` -This command will instruct current Cluster peers to run "repo gc" on their -respective IPFS daemons. - -When --local flag is passed, it will garbage collect only on the local IPFS -deamon, otherwise on all IPFS daemons. 
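-
-For example, to garbage-collect only on the IPFS daemon of the contacted peer:
-
-    ipfs-cluster-ctl ipfs gc --local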
-`, - Flags: []cli.Flag{ - localFlag(), - }, - Action: func(c *cli.Context) error { - resp, cerr := globalClient.RepoGC(ctx, c.Bool("local")) - formatResponse(c, resp, cerr) - return nil - }, - }, - }, - }, - { - Name: "commands", - Usage: "List all commands", - ArgsUsage: " ", - Hidden: true, - Action: func(c *cli.Context) error { - walkCommands(c.App.Commands, "ipfs-cluster-ctl") - return nil - }, - }, - } - - err := app.Run(os.Args) - if err != nil { - os.Exit(1) - } -} - -func localFlag() cli.BoolFlag { - return cli.BoolFlag{ - Name: "local", - Usage: "run operation only on the contacted peer", - } -} - -func walkCommands(cmds []cli.Command, parentHelpName string) { - for _, c := range cmds { - h := c.HelpName - // Sometimes HelpName is empty - if h == "" { - h = fmt.Sprintf("%s %s", parentHelpName, c.FullName()) - } - fmt.Println(h) - walkCommands(c.Subcommands, h) - } -} - -func formatResponse(c *cli.Context, resp interface{}, err error) { - enc := c.GlobalString("encoding") - if resp == nil && err == nil { - return - } - - if err != nil { - cerr, ok := err.(api.Error) - if !ok { - checkErr("", err) - } - switch enc { - case "text": - textFormatPrintError(cerr) - case "json": - jsonFormatPrint(cerr) - default: - checkErr("", errors.New("unsupported encoding selected")) - } - if cerr.Code == 0 { - os.Exit(1) // problem with the call - } else { - os.Exit(2) // call went fine, response has an error - } - } - - switch enc { - case "text": - textFormatObject(resp) - case "json": - jsonFormatObject(resp) - default: - checkErr("", errors.New("unsupported encoding selected")) - } -} - -func parseCredentials(userInput string) (string, string) { - credentials := strings.SplitN(userInput, ":", 2) - switch len(credentials) { - case 1: - // only username passed in (with no trailing `:`), return empty password - return credentials[0], "" - case 2: - return credentials[0], credentials[1] - default: - err := fmt.Errorf("invalid [:] input") - checkErr("parsing credentials", err) - return "", "" - } -} - -func handlePinResponseFormatFlags( - ctx context.Context, - c *cli.Context, - pin api.Pin, - target api.TrackerStatus, -) { - - var status api.GlobalPinInfo - var cerr error - - if c.Bool("wait") { - limit := 0 - if target == api.TrackerStatusPinned { - limit = pin.ReplicationFactorMin - } - status, cerr = waitFor(pin.Cid, target, c.Duration("wait-timeout"), limit) - checkErr("waiting for pin status", cerr) - } - - if c.Bool("no-status") { - formatResponse(c, pin, nil) - return - } - - if !status.Defined() { // no status from "wait" - time.Sleep(time.Second) - status, cerr = globalClient.Status(ctx, pin.Cid, false) - } - formatResponse(c, status, cerr) -} - -func waitFor( - ci api.Cid, - target api.TrackerStatus, - timeout time.Duration, - limit int, -) (api.GlobalPinInfo, error) { - - ctx := context.Background() - - if timeout > defaultWaitCheckFreq { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, timeout) - defer cancel() - } - - fp := client.StatusFilterParams{ - Cid: ci, - Local: false, - Target: target, - CheckFreq: defaultWaitCheckFreq, - Limit: limit, - } - - return client.WaitFor(ctx, globalClient, fp) -} - -func parseMetadata(metadata []string) map[string]string { - metadataMap := make(map[string]string) - for _, str := range metadata { - parts := strings.SplitN(str, "=", 2) - if len(parts) != 2 { - checkErr("parsing metadata", errors.New("metadata were not in the format key=value")) - } - metadataMap[parts[0]] = parts[1] - } - - return metadataMap -} - -// func 
setupTracing(config tracingConfig) { -// if !config.Enable { -// return -// } - -// agentEndpointURI := "0.0.0.0:6831" -// collectorEndpointURI := "http://0.0.0.0:14268" - -// if config.JaegerAgentEndpoint != "" { -// agentEndpointURI = config.JaegerAgentEndpoint -// } -// if config.JaegerCollectorEndpoint != "" { -// collectorEndpointURI = config.JaegerCollectorEndpoint -// } - -// je, err := jaeger.NewExporter(jaeger.Options{ -// AgentEndpoint: agentEndpointURI, -// CollectorEndpoint: collectorEndpointURI, -// ServiceName: "ipfs-cluster-ctl", -// }) -// if err != nil { -// log.Fatalf("Failed to create the Jaeger exporter: %v", err) -// } -// // Register/enable the trace exporter -// trace.RegisterExporter(je) - -// // For demo purposes, set the trace sampling probability to be high -// trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(1.0)}) -// } diff --git a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-follow/Makefile b/packages/networking/ipfs-cluster/cmd/ipfs-cluster-follow/Makefile deleted file mode 100644 index d54dac9..0000000 --- a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-follow/Makefile +++ /dev/null @@ -1,19 +0,0 @@ -# go source files -SRC := $(shell find ../.. -type f -name '*.go') -GOPATH := $(shell go env GOPATH) -GOFLAGS := "-trimpath" - -all: ipfs-cluster-follow - -ipfs-cluster-follow: $(SRC) - go build $(GOFLAGS) -mod=readonly -ldflags "-X main.commit=$(shell git rev-parse HEAD)" - -build: ipfs-cluster-follow - -install: - go install $(GOFLAGS) -ldflags "-X main.commit=$(shell git rev-parse HEAD)" - -clean: - rm -f ipfs-cluster-follow - -.PHONY: clean install build diff --git a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-follow/commands.go b/packages/networking/ipfs-cluster/cmd/ipfs-cluster-follow/commands.go deleted file mode 100644 index ee07ee7..0000000 --- a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-follow/commands.go +++ /dev/null @@ -1,555 +0,0 @@ -package main - -import ( - "context" - "fmt" - "os" - "os/signal" - "path/filepath" - "strings" - "time" - - ipfscluster "github.com/ipfs-cluster/ipfs-cluster" - "github.com/ipfs-cluster/ipfs-cluster/allocator/balanced" - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/api/rest" - "github.com/ipfs-cluster/ipfs-cluster/cmdutils" - "github.com/ipfs-cluster/ipfs-cluster/config" - "github.com/ipfs-cluster/ipfs-cluster/consensus/crdt" - "github.com/ipfs-cluster/ipfs-cluster/datastore/badger" - "github.com/ipfs-cluster/ipfs-cluster/datastore/leveldb" - "github.com/ipfs-cluster/ipfs-cluster/informer/disk" - "github.com/ipfs-cluster/ipfs-cluster/ipfsconn/ipfshttp" - "github.com/ipfs-cluster/ipfs-cluster/monitor/pubsubmon" - "github.com/ipfs-cluster/ipfs-cluster/observations" - "github.com/ipfs-cluster/ipfs-cluster/pintracker/stateless" - "github.com/multiformats/go-multiaddr" - "github.com/pkg/errors" - cli "github.com/urfave/cli/v2" -) - -func printFirstStart() { - fmt.Printf(` -No clusters configured yet! - -If this is the first time you are running %s, -be sure to check out the usage documentation. Here are some -examples to get you going: - -$ %s --help - general description and usage help -$ %s --help - Help and subcommands for the 's follower peer -$ %s info --help - Help for the "info" subcommand (same for others). -`, programName, programName, programName, programName) -} - -func printNotInitialized(clusterName string) { - fmt.Printf(` -This cluster peer has not been initialized. - -Try running "%s %s init " first. 
-`, programName, clusterName) -} - -func setLogLevels(lvl string) { - for f := range ipfscluster.LoggingFacilities { - ipfscluster.SetFacilityLogLevel(f, lvl) - } - - for f := range ipfscluster.LoggingFacilitiesExtra { - ipfscluster.SetFacilityLogLevel(f, lvl) - } -} - -// returns whether the config folder exists -func isInitialized(absPath string) bool { - _, err := os.Stat(absPath) - return err == nil -} - -func listClustersCmd(c *cli.Context) error { - absPath, _, _ := buildPaths(c, "") - f, err := os.Open(absPath) - if os.IsNotExist(err) { - printFirstStart() - return nil - } - if err != nil { - return cli.Exit(err, 1) - } - - dirs, err := f.Readdir(-1) - if err != nil { - return cli.Exit(errors.Wrapf(err, "reading %s", absPath), 1) - } - - var filteredDirs []string - for _, d := range dirs { - if d.IsDir() { - configPath := filepath.Join(absPath, d.Name(), DefaultConfigFile) - if _, err := os.Stat(configPath); err == nil { - filteredDirs = append(filteredDirs, d.Name()) - } - } - } - - if len(filteredDirs) == 0 { - printFirstStart() - return nil - } - - fmt.Printf("Configurations found for %d follower peers. For info and help, try running:\n\n", len(filteredDirs)) - for _, d := range filteredDirs { - fmt.Printf("%s \"%s\"\n", programName, d) - } - fmt.Printf("\nTip: \"%s --help\" for help and examples.\n", programName) - - return nil -} - -func infoCmd(c *cli.Context) error { - clusterName := c.String("clusterName") - - // Avoid pollution of the screen - setLogLevels("critical") - - absPath, configPath, identityPath := buildPaths(c, clusterName) - - if !isInitialized(absPath) { - printNotInitialized(clusterName) - return cli.Exit("", 1) - } - - cfgHelper, err := cmdutils.NewLoadedConfigHelper(configPath, identityPath) - var url string - if err != nil { - if config.IsErrFetchingSource(err) { - url = fmt.Sprintf( - "failed retrieving configuration source (%s)", - cfgHelper.Manager().Source, - ) - ipfsCfg := ipfshttp.Config{} - ipfsCfg.Default() - cfgHelper.Configs().Ipfshttp = &ipfsCfg - } else { - return cli.Exit(errors.Wrapf(err, "reading the configurations in %s", absPath), 1) - } - } else { - url = fmt.Sprintf("Available (%s)", cfgHelper.Manager().Source) - } - cfgHelper.Manager().Shutdown() - - fmt.Printf("Information about follower peer for Cluster \"%s\":\n\n", clusterName) - fmt.Printf("Config folder: %s\n", absPath) - fmt.Printf("Config source URL: %s\n", url) - - ctx := context.Background() - client, err := getClient(absPath, clusterName) - if err != nil { - return cli.Exit(errors.Wrap(err, "error creating client"), 1) - } - _, err = client.Version(ctx) - fmt.Printf("Cluster Peer online: %t\n", err == nil) - - // Either we loaded a valid config, or we are using a default. Worth - // applying env vars in the second case. 
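- // (For example, a CLUSTER_IPFSHTTP_NODEMULTIADDRESS variable, assuming the
- // usual CLUSTER_<SECTION>_<KEY> naming, can still point the connector at the
- // right IPFS daemon even when the remote config source could not be fetched.)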
- if err := cfgHelper.Configs().Ipfshttp.ApplyEnvVars(); err != nil { - return cli.Exit(errors.Wrap(err, "applying environment variables to ipfshttp config"), 1) - } - - cfgHelper.Configs().Ipfshttp.ConnectSwarmsDelay = 0 - connector, err := ipfshttp.NewConnector(cfgHelper.Configs().Ipfshttp) - if err == nil { - _, err = connector.ID(ctx) - } - fmt.Printf("IPFS peer online: %t\n", err == nil) - - if c.Command.Name == "" { - fmt.Printf("Additional help:\n\n") - fmt.Printf("-------------------------------------------------\n\n") - return cli.ShowAppHelp(c) - } - return nil -} - -func initCmd(c *cli.Context) error { - if !c.Args().Present() { - return cli.Exit("configuration URL not provided", 1) - } - cfgURL := c.Args().First() - - return initCluster(c, false, cfgURL) -} - -func initCluster(c *cli.Context, ignoreReinit bool, cfgURL string) error { - clusterName := c.String(clusterNameFlag) - - absPath, configPath, identityPath := buildPaths(c, clusterName) - - if isInitialized(absPath) { - if ignoreReinit { - fmt.Println("Configuration for this cluster already exists. Skipping initialization.") - fmt.Printf("If you wish to re-initialize, simply delete %s\n\n", absPath) - return nil - } - cmdutils.ErrorOut("Configuration for this cluster already exists.\n") - cmdutils.ErrorOut("Please delete %s if you wish to re-initialize.", absPath) - return cli.Exit("", 1) - } - - gw := c.String("gateway") - - if !strings.HasPrefix(cfgURL, "http://") && !strings.HasPrefix(cfgURL, "https://") { - fmt.Printf("%s will be assumed to be an DNSLink-powered address: /ipns/%s.\n", cfgURL, cfgURL) - fmt.Printf("It will be resolved using the local IPFS daemon's gateway (%s).\n", gw) - fmt.Println("If this is not the case, specify the full url starting with http:// or https://.") - fmt.Println("(You can override the gateway URL by setting IPFS_GATEWAY)") - fmt.Println() - cfgURL = fmt.Sprintf("http://%s/ipns/%s", gw, cfgURL) - } - - // Setting the datastore here is useless, as we initialize with remote - // config and we will have an empty service.json with the source only. - // That source will decide which datastore is actually used. 
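- // (The "datastore" section of the fetched service.json makes the final
- // choice; listCmd below re-detects that choice from the on-disk badger or
- // leveldb folders when running offline.)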
- cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath, "crdt", "") - cfgHelper.Manager().Shutdown() - cfgHelper.Manager().Source = cfgURL - err := cfgHelper.Manager().Default() - if err != nil { - return cli.Exit(errors.Wrap(err, "error generating default config"), 1) - } - - ident := cfgHelper.Identity() - err = ident.Default() - if err != nil { - return cli.Exit(errors.Wrap(err, "error generating identity"), 1) - } - - err = ident.ApplyEnvVars() - if err != nil { - return cli.Exit(errors.Wrap(err, "error applying environment variables to the identity"), 1) - } - - err = cfgHelper.SaveIdentityToDisk() - if err != nil { - return cli.Exit(errors.Wrapf(err, "error saving %s", identityPath), 1) - } - fmt.Printf("Identity written to %s.\n", identityPath) - - err = cfgHelper.SaveConfigToDisk() - if err != nil { - return cli.Exit(errors.Wrapf(err, "saving %s", configPath), 1) - } - - fmt.Printf("Configuration written to %s.\n", configPath) - fmt.Printf("Cluster \"%s\" follower peer initialized.\n\n", clusterName) - fmt.Printf( - "You can now use \"%s %s run\" to start a follower peer for this cluster.\n", - programName, - clusterName, - ) - fmt.Println("(Remember to start your IPFS daemon before)") - return nil -} - -func runCmd(c *cli.Context) error { - clusterName := c.String(clusterNameFlag) - - if cfgURL := c.String("init"); cfgURL != "" { - err := initCluster(c, true, cfgURL) - if err != nil { - return err - } - } - - absPath, configPath, identityPath := buildPaths(c, clusterName) - - if !isInitialized(absPath) { - printNotInitialized(clusterName) - return cli.Exit("", 1) - } - - fmt.Printf("Starting the IPFS Cluster follower peer for \"%s\".\nCTRL-C to stop it.\n", clusterName) - fmt.Println("Checking if IPFS is online (will wait for 2 minutes)...") - ctxIpfs, cancelIpfs := context.WithTimeout(context.Background(), 2*time.Minute) - defer cancelIpfs() - err := cmdutils.WaitForIPFS(ctxIpfs) - if err != nil { - return cli.Exit("timed out waiting for IPFS to be available", 1) - } - - setLogLevels(logLevel) // set to "info" by default. - // Avoid API logs polluting the screen everytime we - // run some "list" command. - ipfscluster.SetFacilityLogLevel("restapilog", "error") - - cfgHelper, err := cmdutils.NewLoadedConfigHelper(configPath, identityPath) - if err != nil { - return cli.Exit(errors.Wrapf(err, "reading the configurations in %s", absPath), 1) - } - cfgHelper.Manager().Shutdown() - cfgs := cfgHelper.Configs() - - stmgr, err := cmdutils.NewStateManager(cfgHelper.GetConsensus(), cfgHelper.GetDatastore(), cfgHelper.Identity(), cfgs) - if err != nil { - return cli.Exit(errors.Wrap(err, "creating state manager"), 1) - } - - store, err := stmgr.GetStore() - if err != nil { - return cli.Exit(errors.Wrap(err, "creating datastore"), 1) - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - host, pubsub, dht, err := ipfscluster.NewClusterHost(ctx, cfgHelper.Identity(), cfgs.Cluster, store) - if err != nil { - return cli.Exit(errors.Wrap(err, "error creating libp2p components"), 1) - } - - // Always run followers in follower mode. - cfgs.Cluster.FollowerMode = true - // Do not let trusted peers GC this peer - // Defaults to Trusted otherwise. 
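- // (RPCClosed rejects this endpoint for every caller; with the trusted
- // default, the cluster's trusted peers could still trigger a repo GC on this
- // follower.)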
- cfgs.Cluster.RPCPolicy["Cluster.RepoGCLocal"] = ipfscluster.RPCClosed - - // Discard API configurations and create our own - apiCfg := rest.NewConfig() - cfgs.Restapi = apiCfg - _ = apiCfg.Default() - listenSocket, err := socketAddress(absPath, clusterName) - if err != nil { - return cli.Exit(err, 1) - } - apiCfg.HTTPListenAddr = []multiaddr.Multiaddr{listenSocket} - // Allow customization via env vars - err = apiCfg.ApplyEnvVars() - if err != nil { - return cli.Exit(errors.Wrap(err, "error applying environmental variables to restapi configuration"), 1) - } - - rest, err := rest.NewAPI(ctx, apiCfg) - if err != nil { - return cli.Exit(errors.Wrap(err, "creating REST API component"), 1) - } - - connector, err := ipfshttp.NewConnector(cfgs.Ipfshttp) - if err != nil { - return cli.Exit(errors.Wrap(err, "creating IPFS Connector component"), 1) - } - - informer, err := disk.NewInformer(cfgs.DiskInf) - if err != nil { - return cli.Exit(errors.Wrap(err, "creating disk informer"), 1) - } - alloc, err := balanced.New(cfgs.BalancedAlloc) - if err != nil { - return cli.Exit(errors.Wrap(err, "creating metrics allocator"), 1) - } - - crdtcons, err := crdt.New( - host, - dht, - pubsub, - cfgs.Crdt, - store, - ) - if err != nil { - store.Close() - return cli.Exit(errors.Wrap(err, "creating CRDT component"), 1) - } - - tracker := stateless.New(cfgs.Statelesstracker, host.ID(), cfgs.Cluster.Peername, crdtcons.State) - - mon, err := pubsubmon.New(ctx, cfgs.Pubsubmon, pubsub, nil) - if err != nil { - store.Close() - return cli.Exit(errors.Wrap(err, "setting up PeerMonitor"), 1) - } - - // Hardcode disabled tracing and metrics to avoid mistakenly - // exposing any user data. - tracerCfg := observations.TracingConfig{} - _ = tracerCfg.Default() - tracerCfg.EnableTracing = false - cfgs.Tracing = &tracerCfg - tracer, err := observations.SetupTracing(&tracerCfg) - if err != nil { - return cli.Exit(errors.Wrap(err, "error setting up tracer"), 1) - } - - // This does nothing since we are not calling SetupMetrics anyways - // But stays just to be explicit. - metricsCfg := observations.MetricsConfig{} - _ = metricsCfg.Default() - metricsCfg.EnableStats = false - cfgs.Metrics = &metricsCfg - - // We are going to run a cluster peer and should do an - // oderly shutdown if we are interrupted: cancel default - // signal handling and leave things to HandleSignals. - signal.Stop(signalChan) - close(signalChan) - - cluster, err := ipfscluster.NewCluster( - ctx, - host, - dht, - cfgs.Cluster, - store, - crdtcons, - []ipfscluster.API{rest}, - connector, - tracker, - mon, - alloc, - []ipfscluster.Informer{informer}, - tracer, - ) - if err != nil { - store.Close() - return cli.Exit(errors.Wrap(err, "error creating cluster peer"), 1) - } - - return cmdutils.HandleSignals(ctx, cancel, cluster, host, dht, store) -} - -// List -func listCmd(c *cli.Context) error { - clusterName := c.String("clusterName") - - absPath, configPath, identityPath := buildPaths(c, clusterName) - if !isInitialized(absPath) { - printNotInitialized(clusterName) - return cli.Exit("", 1) - } - - err := printStatusOnline(absPath, clusterName) - if err == nil { - return nil - } - - // There was an error. 
Try offline status - apiErr, ok := err.(*api.Error) - if ok && apiErr.Code != 0 { - return cli.Exit( - errors.Wrapf( - err, - "The Peer API seems to be running but returned with code %d", - apiErr.Code, - ), 1) - } - - // We are on offline mode so we cannot rely on IPFS being - // running and most probably our configuration is remote and - // to be loaded from IPFS. Thus we need to find a different - // way to decide whether to load badger/leveldb, and once we - // know, do it with the default settings. - hasLevelDB := false - lDBCfg := &leveldb.Config{} - lDBCfg.SetBaseDir(absPath) - lDBCfg.Default() - levelDBInfo, err := os.Stat(lDBCfg.GetFolder()) - if err == nil && levelDBInfo.IsDir() { - hasLevelDB = true - } - - hasBadger := false - badgerCfg := &badger.Config{} - badgerCfg.SetBaseDir(absPath) - badgerCfg.Default() - badgerInfo, err := os.Stat(badgerCfg.GetFolder()) - if err == nil && badgerInfo.IsDir() { - hasBadger = true - } - - if hasLevelDB && hasBadger { - return cli.Exit(errors.Wrapf(err, "found both leveldb (%s) and badger (%s) folders: cannot determine which to use in offline mode", lDBCfg.GetFolder(), badgerCfg.GetFolder()), 1) - } - - // Since things were initialized, assume there is one at least. - dstoreType := "leveldb" - if hasBadger { - dstoreType = "badger" - } - cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath, "crdt", dstoreType) - cfgHelper.Manager().Shutdown() // not needed - cfgHelper.Configs().Badger.SetBaseDir(absPath) - cfgHelper.Configs().LevelDB.SetBaseDir(absPath) - cfgHelper.Manager().Default() // we have a default crdt config with either leveldb or badger registered. - cfgHelper.Manager().ApplyEnvVars() - - err = printStatusOffline(cfgHelper) - if err != nil { - return cli.Exit(errors.Wrap(err, "error obtaining the pinset"), 1) - } - - return nil -} - -func printStatusOnline(absPath, clusterName string) error { - ctx := context.Background() - client, err := getClient(absPath, clusterName) - if err != nil { - return cli.Exit(errors.Wrap(err, "error creating client"), 1) - } - - out := make(chan api.GlobalPinInfo, 1024) - errCh := make(chan error, 1) - - go func() { - defer close(errCh) - errCh <- client.StatusAll(ctx, 0, true, out) - }() - - var pid string - for gpi := range out { - if pid == "" { // do this once - // PeerMap will only have one key - for k := range gpi.PeerMap { - pid = k - break - } - } - pinInfo := gpi.PeerMap[pid] - printPin(gpi.Cid, pinInfo.Status.String(), gpi.Name, pinInfo.Error) - } - err = <-errCh - return err -} - -func printStatusOffline(cfgHelper *cmdutils.ConfigHelper) error { - mgr, err := cmdutils.NewStateManagerWithHelper(cfgHelper) - if err != nil { - return err - } - store, err := mgr.GetStore() - if err != nil { - return err - } - defer store.Close() - st, err := mgr.GetOfflineState(store) - if err != nil { - return err - } - - out := make(chan api.Pin, 1024) - errCh := make(chan error, 1) - go func() { - defer close(errCh) - errCh <- st.List(context.Background(), out) - }() - - for pin := range out { - printPin(pin.Cid, "offline", pin.Name, "") - } - - err = <-errCh - return err -} - -func printPin(c api.Cid, status, name, err string) { - if err != "" { - name = name + " (" + err + ")" - } - fmt.Printf("%-20s %s %s\n", status, c, name) -} diff --git a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-follow/dist/LICENSE b/packages/networking/ipfs-cluster/cmd/ipfs-cluster-follow/dist/LICENSE deleted file mode 100644 index 0020f2a..0000000 --- 
a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-follow/dist/LICENSE +++ /dev/null @@ -1,5 +0,0 @@ -Dual-licensed under MIT and ASLv2, by way of the [Permissive License -Stack](https://protocol.ai/blog/announcing-the-permissive-license-stack/). - -Apache-2.0: https://www.apache.org/licenses/license-2.0 -MIT: https://www.opensource.org/licenses/mit diff --git a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-follow/dist/LICENSE-APACHE b/packages/networking/ipfs-cluster/cmd/ipfs-cluster-follow/dist/LICENSE-APACHE deleted file mode 100644 index 22608cf..0000000 --- a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-follow/dist/LICENSE-APACHE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2020. Protocol Labs, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-follow/dist/LICENSE-MIT b/packages/networking/ipfs-cluster/cmd/ipfs-cluster-follow/dist/LICENSE-MIT deleted file mode 100644 index c6134ad..0000000 --- a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-follow/dist/LICENSE-MIT +++ /dev/null @@ -1,19 +0,0 @@ -Copyright 2020. Protocol Labs, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-follow/dist/README.md b/packages/networking/ipfs-cluster/cmd/ipfs-cluster-follow/dist/README.md deleted file mode 100644 index f7a6e54..0000000 --- a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-follow/dist/README.md +++ /dev/null @@ -1,33 +0,0 @@ -# `ipfs-cluster-follow` - -> A tool to run IPFS Cluster follower peers - -`ipfs-cluster-follow` allows to setup and run IPFS Cluster follower peers. - -Follower peers can join collaborative clusters to track content in the -cluster. Follower peers do not have permissions to modify the cluster pinset -or access endpoints from other follower peers. - -`ipfs-cluster-follow` allows to run several peers at the same time (each -joining a different cluster) and it is intended to be a very easy to use -application with a minimal feature set. 
In order to run a fully-featured peer -(follower or not), use `ipfs-cluster-service`. - -### Usage - -The `ipfs-cluster-follow` command is always followed by the cluster name -that we wish to work with. Full usage information can be obtained by running: - -``` -$ ipfs-cluster-follow --help -$ ipfs-cluster-follow <clusterName> --help -$ ipfs-cluster-follow <clusterName> info --help -$ ipfs-cluster-follow <clusterName> init --help -$ ipfs-cluster-follow <clusterName> run --help -$ ipfs-cluster-follow <clusterName> list --help -``` - -For more information, please check the [Documentation](https://ipfscluster.io/documentation), in particular the [`ipfs-cluster-follow` section](https://ipfscluster.io/documentation/ipfs-cluster-follow). - - diff --git a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-follow/main.go b/packages/networking/ipfs-cluster/cmd/ipfs-cluster-follow/main.go deleted file mode 100644 index d182104..0000000 --- a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-follow/main.go +++ /dev/null @@ -1,331 +0,0 @@ -// The ipfs-cluster-follow application. -package main - -import ( - "fmt" - "os" - "os/signal" - "os/user" - "path/filepath" - "syscall" - - "github.com/ipfs-cluster/ipfs-cluster/api/rest/client" - "github.com/ipfs-cluster/ipfs-cluster/cmdutils" - "github.com/ipfs-cluster/ipfs-cluster/version" - "github.com/multiformats/go-multiaddr" - "github.com/pkg/errors" - - semver "github.com/blang/semver" - cli "github.com/urfave/cli/v2" -) - -const ( - // ProgramName of this application - programName = "ipfs-cluster-follow" - clusterNameFlag = "clusterName" - logLevel = "info" -) - -// Default location for the configurations and data -var ( - // DefaultFolder is the name of the cluster folder - DefaultFolder = ".ipfs-cluster-follow" - // DefaultPath is set on init() to $HOME/DefaultFolder - // and holds all the ipfs-cluster data - DefaultPath string - // The name of the configuration file inside DefaultPath - DefaultConfigFile = "service.json" - // The name of the identity file inside DefaultPath - DefaultIdentityFile = "identity.json" - DefaultGateway = "127.0.0.1:8080" -) - -var ( - commit string - configPath string - identityPath string - signalChan = make(chan os.Signal, 20) -) - -// Description provides a short summary of the functionality of this tool -var Description = fmt.Sprintf(` -%s helps running IPFS Cluster follower peers. - -Follower peers subscribe to a Cluster controlled by a set of "trusted -peers". They collaborate in pinning items as dictated by the trusted peers and -do not have the power to make Cluster-wide modifications to the pinset. - -Follower peers cannot access information nor trigger actions in other peers. - -%s can be used to follow different clusters by launching it -with different options. Each Cluster has an identity, a configuration -and a datastore associated to it, which are kept under -"~/%s/<clusterName>". - -For feedback, bug reports or any additional information, visit -https://github.com/ipfs-cluster/ipfs-cluster.
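For concreteness, the per-cluster layout that the Description above refers to (and that the buildPaths helper further down in this file assembles) can be sketched as follows. This is an illustrative snippet with a hypothetical cluster name, not part of the deleted source:

```
// Illustration only: mirrors the per-cluster layout described above.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	home, _ := os.UserHomeDir() // the deleted code prefers $HOME, then user.Current()
	base := filepath.Join(home, ".ipfs-cluster-follow", "mycluster") // hypothetical name
	fmt.Println(filepath.Join(base, "service.json"))  // per-cluster configuration
	fmt.Println(filepath.Join(base, "identity.json")) // per-cluster identity
}
```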
- - -EXAMPLES: - -List configured follower peers: - -$ %s - -Display information for a follower peer: - -$ %s <clusterName> info - -Initialize a follower peer: - -$ %s <clusterName> init - -Launch a follower peer (will stay running): - -$ %s <clusterName> run - -List items in the pinset for a given cluster: - -$ %s <clusterName> list - -Getting help and usage info: - -$ %s --help -$ %s <clusterName> --help -$ %s <clusterName> info --help -$ %s <clusterName> init --help -$ %s <clusterName> run --help -$ %s <clusterName> list --help - -`, - programName, - programName, - DefaultFolder, - programName, - programName, - programName, - programName, - programName, - programName, - programName, - programName, - programName, - programName, - programName, -) - -func init() { - // Set build information. - if build, err := semver.NewBuildVersion(commit); err == nil { - version.Version.Build = []string{"git" + build} - } - - // We try guessing user's home from the HOME variable. This - // allows HOME hacks for things like Snapcraft builds. HOME - // should be set in all UNIX by the OS. Alternatively, we fall back to - // usr.HomeDir (which should work on Windows etc.). - home := os.Getenv("HOME") - if home == "" { - usr, err := user.Current() - if err != nil { - panic(fmt.Sprintf("cannot get current user: %s", err)) - } - home = usr.HomeDir - } - - DefaultPath = filepath.Join(home, DefaultFolder) - - // This will abort the program on signal. We close the signal channel - // when launching the peer so that we can do an orderly shutdown in - // that case though. - go func() { - signal.Notify( - signalChan, - syscall.SIGINT, - syscall.SIGTERM, - syscall.SIGHUP, - ) - _, ok := <-signalChan // channel closed. - if !ok { - return - } - os.Exit(1) - }() -} - -func main() { - app := cli.NewApp() - app.Name = programName - app.Usage = "IPFS Cluster Follower" - app.UsageText = fmt.Sprintf("%s [global options] [subcommand]...", programName) - app.Description = Description - //app.Copyright = "© Protocol Labs, Inc." - app.Version = version.Version.String() - app.Flags = []cli.Flag{ - &cli.StringFlag{ - Name: "config, c", - Value: DefaultPath, - Usage: "path to the follower's configuration and data `FOLDER`", - EnvVars: []string{"IPFS_CLUSTER_PATH"}, - }, - } - - app.Action = func(c *cli.Context) error { - if !c.Args().Present() { - return listClustersCmd(c) - } - - clusterName := c.Args().Get(0) - clusterApp := cli.NewApp() - clusterApp.Name = fmt.Sprintf("%s %s", programName, clusterName) - clusterApp.HelpName = clusterApp.Name - clusterApp.Usage = fmt.Sprintf("Follower peer management for \"%s\"", clusterName) - clusterApp.UsageText = fmt.Sprintf("%s %s [subcommand]", programName, clusterName) - clusterApp.Action = infoCmd - clusterApp.HideVersion = true - clusterApp.Flags = []cli.Flag{ - &cli.StringFlag{ // pass clusterName to subcommands - Name: clusterNameFlag, - Value: clusterName, - Hidden: true, - }, - } - clusterApp.Commands = []*cli.Command{ - { - Name: "info", - Usage: "displays information for this peer", - ArgsUsage: "", - Description: fmt.Sprintf(` -This command displays useful information for "%s"'s follower peer. -`, clusterName), - Action: infoCmd, - }, - { - Name: "init", - Usage: "initializes the follower peer", - ArgsUsage: "", - Description: fmt.Sprintf(` -This command initializes a follower peer for the cluster named "%s". You -will need to pass the peer configuration URL. The command will generate a new -peer identity and leave things ready to run "%s %s run". - -An error will be returned if a configuration folder for a cluster peer with -this name already exists.
If you wish to re-initialize from scratch, delete -this folder first. -`, clusterName, programName, clusterName), - Action: initCmd, - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "gateway", - Value: DefaultGateway, - Usage: "gateway URL", - EnvVars: []string{"IPFS_GATEWAY"}, - Hidden: true, - }, - }, - }, - { - Name: "run", - Usage: "runs the follower peer", - ArgsUsage: "", - Description: fmt.Sprintf(` - -This command runs a "%s" cluster follower peer. The peer should have already -been initialized with "init"; alternatively, the --init flag needs to be -passed. - -Before running, ensure that you have connectivity and that the IPFS daemon is -running. - -You can obtain more information about this follower peer by running -"%s %s" (without any arguments). - -The peer will stay running in the foreground until manually stopped. -`, clusterName, programName, clusterName), - Action: runCmd, - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "init", - Usage: "initialize cluster peer with the given URL before running", - }, - &cli.StringFlag{ - Name: "gateway", - Value: DefaultGateway, - Usage: "gateway URL", - EnvVars: []string{"IPFS_GATEWAY"}, - Hidden: true, - }, - }, - }, - { - Name: "list", - Usage: "list items in the peers' pinset", - ArgsUsage: "", - Description: ` - -This command lists all the items pinned by this follower cluster peer on IPFS. - -If the peer is currently running, it will display status information for each -pin (such as PINNING). If not, it will just display the current list of pins -as obtained from the internal state on disk. -`, - Action: listCmd, - }, - } - return clusterApp.RunAsSubcommand(c) - } - - app.Run(os.Args) -} - -// buildPaths returns the path to the configuration folder, -// the identity.json and the service.json files. -func buildPaths(c *cli.Context, clusterName string) (string, string, string) { - absPath, err := filepath.Abs(c.String("config")) - if err != nil { - cmdutils.ErrorOut("error getting absolute path for %s: %s", err, clusterName) - os.Exit(1) - } - - // ~/.ipfs-cluster-follow/clusterName - absPath = filepath.Join(absPath, clusterName) - // ~/.ipfs-cluster-follow/clusterName/service.json - configPath = filepath.Join(absPath, DefaultConfigFile) - // ~/.ipfs-cluster-follow/clusterName/identity.json - identityPath = filepath.Join(absPath, DefaultIdentityFile) - - return absPath, configPath, identityPath -} - -func socketAddress(absPath, clusterName string) (multiaddr.Multiaddr, error) { - socket := fmt.Sprintf("/unix/%s", filepath.Join(absPath, "api-socket")) - ma, err := multiaddr.NewMultiaddr(socket) - if err != nil { - return nil, errors.Wrapf(err, "error parsing socket: %s", socket) - } - return ma, nil -} - -// returns a REST API client. Points to the socket address unless -// CLUSTER_RESTAPI_HTTPLISTENMULTIADDRESS is set, in which case it uses it.
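The getClient helper that follows implements exactly this lookup. As a hedged usage sketch, assuming the same packages this file imports and a hypothetical cluster folder, a caller could reach the follower's REST API and stream pin statuses the way printStatusOnline does further up:

```
// Usage sketch (not part of the deleted source): connect over the follower's
// unix socket, honoring the same environment override as getClient below.
package main

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	"github.com/ipfs-cluster/ipfs-cluster/api"
	client "github.com/ipfs-cluster/ipfs-cluster/api/rest/client"
	"github.com/multiformats/go-multiaddr"
)

func main() {
	absPath := filepath.Join(os.Getenv("HOME"), ".ipfs-cluster-follow", "mycluster") // hypothetical
	endpStr := os.Getenv("CLUSTER_RESTAPI_HTTPLISTENMULTIADDRESS")
	if endpStr == "" {
		endpStr = "/unix/" + filepath.Join(absPath, "api-socket")
	}
	endp, err := multiaddr.NewMultiaddr(endpStr)
	if err != nil {
		panic(err)
	}
	c, err := client.NewDefaultClient(&client.Config{APIAddr: endp})
	if err != nil {
		panic(err)
	}
	out := make(chan api.GlobalPinInfo, 1024)
	go func() { _ = c.StatusAll(context.Background(), 0, true, out) }() // closes out when done
	for gpi := range out {
		fmt.Println(gpi.Cid, gpi.Name) // one line per tracked pin
	}
}
```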
-func getClient(absPath, clusterName string) (client.Client, error) { - var endp multiaddr.Multiaddr - var err error - if endpStr := os.Getenv("CLUSTER_RESTAPI_HTTPLISTENMULTIADDRESS"); endpStr != "" { - endp, err = multiaddr.NewMultiaddr(endpStr) - if err != nil { - return nil, errors.Wrapf(err, "error parsing the value of CLUSTER_RESTAPI_HTTPLISTENMULTIADDRESS: %s", endpStr) - } - } else { - endp, err = socketAddress(absPath, clusterName) - } - - if err != nil { - return nil, err - } - - cfg := client.Config{ - APIAddr: endp, - } - return client.NewDefaultClient(&cfg) -} diff --git a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/Makefile b/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/Makefile deleted file mode 100644 index a803a0d..0000000 --- a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/Makefile +++ /dev/null @@ -1,19 +0,0 @@ -# go source files -SRC := $(shell find ../.. -type f -name '*.go') -GOPATH := $(shell go env GOPATH) -GOFLAGS := "-trimpath" - -all: ipfs-cluster-service - -ipfs-cluster-service: $(SRC) - go build $(GOFLAGS) -mod=readonly -ldflags "-X main.commit=$(shell git rev-parse HEAD)" - -build: ipfs-cluster-service - -install: - go install $(GOFLAGS) -ldflags "-X main.commit=$(shell git rev-parse HEAD)" - -clean: - rm -f ipfs-cluster-service - -.PHONY: clean install build diff --git a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/daemon.go b/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/daemon.go deleted file mode 100644 index e351e81..0000000 --- a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/daemon.go +++ /dev/null @@ -1,309 +0,0 @@ -package main - -import ( - "context" - "strings" - "time" - - ipfscluster "github.com/ipfs-cluster/ipfs-cluster" - "github.com/ipfs-cluster/ipfs-cluster/allocator/balanced" - "github.com/ipfs-cluster/ipfs-cluster/api/ipfsproxy" - "github.com/ipfs-cluster/ipfs-cluster/api/pinsvcapi" - "github.com/ipfs-cluster/ipfs-cluster/api/rest" - "github.com/ipfs-cluster/ipfs-cluster/cmdutils" - "github.com/ipfs-cluster/ipfs-cluster/config" - "github.com/ipfs-cluster/ipfs-cluster/consensus/crdt" - "github.com/ipfs-cluster/ipfs-cluster/consensus/raft" - "github.com/ipfs-cluster/ipfs-cluster/informer/disk" - "github.com/ipfs-cluster/ipfs-cluster/informer/pinqueue" - "github.com/ipfs-cluster/ipfs-cluster/informer/tags" - "github.com/ipfs-cluster/ipfs-cluster/ipfsconn/ipfshttp" - "github.com/ipfs-cluster/ipfs-cluster/monitor/pubsubmon" - "github.com/ipfs-cluster/ipfs-cluster/observations" - "github.com/ipfs-cluster/ipfs-cluster/pintracker/stateless" - "go.opencensus.io/tag" - - ds "github.com/ipfs/go-datastore" - host "github.com/libp2p/go-libp2p/core/host" - peer "github.com/libp2p/go-libp2p/core/peer" - dual "github.com/libp2p/go-libp2p-kad-dht/dual" - pubsub "github.com/libp2p/go-libp2p-pubsub" - - ma "github.com/multiformats/go-multiaddr" - - errors "github.com/pkg/errors" - cli "github.com/urfave/cli" -) - -func parseBootstraps(flagVal []string) (bootstraps []ma.Multiaddr) { - for _, a := range flagVal { - bAddr, err := ma.NewMultiaddr(strings.TrimSpace(a)) - checkErr("error parsing bootstrap multiaddress (%s)", err, a) - bootstraps = append(bootstraps, bAddr) - } - return -} - -// Runs the cluster peer -func daemon(c *cli.Context) error { - logger.Info("Initializing. For verbose output run with \"-l debug\". 
Please wait...") - - ctx, cancel := context.WithCancel(context.Background()) - var bootstraps []ma.Multiaddr - if bootStr := c.String("bootstrap"); bootStr != "" { - bootstraps = parseBootstraps(strings.Split(bootStr, ",")) - } - - // Execution lock - locker.lock() - defer locker.tryUnlock() - - // Load all the configurations and identity - cfgHelper, err := cmdutils.NewLoadedConfigHelper(configPath, identityPath) - checkErr("loading configurations", err) - defer cfgHelper.Manager().Shutdown() - - cfgs := cfgHelper.Configs() - - if c.Bool("stats") { - cfgs.Metrics.EnableStats = true - } - cfgHelper.SetupTracing(c.Bool("tracing")) - - // Setup bootstrapping - raftStaging := false - switch cfgHelper.GetConsensus() { - case cfgs.Raft.ConfigKey(): - if len(bootstraps) > 0 { - // Cleanup state if bootstrapping - raft.CleanupRaft(cfgs.Raft) - raftStaging = true - } - case cfgs.Crdt.ConfigKey(): - if !c.Bool("no-trust") { - crdtCfg := cfgs.Crdt - crdtCfg.TrustedPeers = append(crdtCfg.TrustedPeers, ipfscluster.PeersFromMultiaddrs(bootstraps)...) - } - } - - if c.Bool("leave") { - cfgs.Cluster.LeaveOnShutdown = true - } - - store := setupDatastore(cfgHelper) - - host, pubsub, dht, err := ipfscluster.NewClusterHost(ctx, cfgHelper.Identity(), cfgs.Cluster, store) - checkErr("creating libp2p host", err) - - cluster, err := createCluster(ctx, c, cfgHelper, host, pubsub, dht, store, raftStaging) - checkErr("starting cluster", err) - - // noop if no bootstraps - // if bootstrapping fails, consensus will never be ready - // and timeout. So this can happen in background and we - // avoid worrying about error handling here (since Cluster - // will realize). - go bootstrap(ctx, cluster, bootstraps) - - return cmdutils.HandleSignals(ctx, cancel, cluster, host, dht, store) -} - -// createCluster creates all the necessary things to produce the cluster -// object and returns it along the datastore so the lifecycle can be handled -// (the datastore needs to be Closed after shutting down the Cluster). -func createCluster( - ctx context.Context, - c *cli.Context, - cfgHelper *cmdutils.ConfigHelper, - host host.Host, - pubsub *pubsub.PubSub, - dht *dual.DHT, - store ds.Datastore, - raftStaging bool, -) (*ipfscluster.Cluster, error) { - - cfgs := cfgHelper.Configs() - cfgMgr := cfgHelper.Manager() - cfgBytes, err := cfgMgr.ToDisplayJSON() - checkErr("getting configuration string", err) - logger.Debugf("Configuration:\n%s\n", cfgBytes) - - ctx, err = tag.New(ctx, tag.Upsert(observations.HostKey, host.ID().Pretty())) - checkErr("tag context with host id", err) - - err = observations.SetupMetrics(cfgs.Metrics) - checkErr("setting up Metrics", err) - - tracer, err := observations.SetupTracing(cfgs.Tracing) - checkErr("setting up Tracing", err) - - var apis []ipfscluster.API - if cfgMgr.IsLoadedFromJSON(config.API, cfgs.Restapi.ConfigKey()) { - var api *rest.API - // Do NOT enable default Libp2p API endpoint on CRDT - // clusters. Collaborative clusters are likely to share the - // secret with untrusted peers, thus the API would be open for - // anyone. 
- if cfgHelper.GetConsensus() == cfgs.Raft.ConfigKey() { - api, err = rest.NewAPIWithHost(ctx, cfgs.Restapi, host) - } else { - api, err = rest.NewAPI(ctx, cfgs.Restapi) - } - checkErr("creating REST API component", err) - apis = append(apis, api) - - } - - if cfgMgr.IsLoadedFromJSON(config.API, cfgs.Pinsvcapi.ConfigKey()) { - pinsvcapi, err := pinsvcapi.NewAPI(ctx, cfgs.Pinsvcapi) - checkErr("creating Pinning Service API component", err) - - apis = append(apis, pinsvcapi) - } - - if cfgMgr.IsLoadedFromJSON(config.API, cfgs.Ipfsproxy.ConfigKey()) { - proxy, err := ipfsproxy.New(cfgs.Ipfsproxy) - checkErr("creating IPFS Proxy component", err) - - apis = append(apis, proxy) - } - - connector, err := ipfshttp.NewConnector(cfgs.Ipfshttp) - checkErr("creating IPFS Connector component", err) - - var informers []ipfscluster.Informer - if cfgMgr.IsLoadedFromJSON(config.Informer, cfgs.DiskInf.ConfigKey()) { - diskInf, err := disk.NewInformer(cfgs.DiskInf) - checkErr("creating disk informer", err) - informers = append(informers, diskInf) - } - if cfgMgr.IsLoadedFromJSON(config.Informer, cfgs.TagsInf.ConfigKey()) { - tagsInf, err := tags.New(cfgs.TagsInf) - checkErr("creating numpin informer", err) - informers = append(informers, tagsInf) - } - - if cfgMgr.IsLoadedFromJSON(config.Informer, cfgs.PinQueueInf.ConfigKey()) { - pinQueueInf, err := pinqueue.New(cfgs.PinQueueInf) - checkErr("creating pinqueue informer", err) - informers = append(informers, pinQueueInf) - } - - // For legacy compatibility we need to make the allocator - // automatically compatible with informers that have been loaded. For - // simplicity we assume that anyone that does not specify an allocator - // configuration (legacy configs), will be using "freespace" - if !cfgMgr.IsLoadedFromJSON(config.Allocator, cfgs.BalancedAlloc.ConfigKey()) { - cfgs.BalancedAlloc.AllocateBy = []string{"freespace"} - } - alloc, err := balanced.New(cfgs.BalancedAlloc) - checkErr("creating allocator", err) - - ipfscluster.ReadyTimeout = cfgs.Raft.WaitForLeaderTimeout + 5*time.Second - - cons, err := setupConsensus( - cfgHelper, - host, - dht, - pubsub, - store, - raftStaging, - ) - if err != nil { - store.Close() - checkErr("setting up Consensus", err) - } - - var peersF func(context.Context) ([]peer.ID, error) - if cfgHelper.GetConsensus() == cfgs.Raft.ConfigKey() { - peersF = cons.Peers - } - - tracker := stateless.New(cfgs.Statelesstracker, host.ID(), cfgs.Cluster.Peername, cons.State) - logger.Debug("stateless pintracker loaded") - - mon, err := pubsubmon.New(ctx, cfgs.Pubsubmon, pubsub, peersF) - if err != nil { - store.Close() - checkErr("setting up PeerMonitor", err) - } - - return ipfscluster.NewCluster( - ctx, - host, - dht, - cfgs.Cluster, - store, - cons, - apis, - connector, - tracker, - mon, - alloc, - informers, - tracer, - ) -} - -// bootstrap will bootstrap this peer to one of the bootstrap addresses -// if there are any. 
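The bootstrap function below simply walks this list and calls cluster.Join on each entry. For reference, a bootstrap address is a peer-qualified multiaddr; a minimal parsing sketch follows (illustration only, using the example address from this package's help text):

```
// Illustration: the shape of a --bootstrap address handled by
// parseBootstraps above and consumed by the bootstrap function below.
package main

import (
	"fmt"
	"log"

	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	addr, err := ma.NewMultiaddr("/ip4/192.168.1.2/tcp/9096/p2p/QmPSoSaPXpyunaBwHs1rZBKYSqRV4bLRk32VGYLuvdrypL")
	if err != nil {
		log.Fatal(err) // a malformed address aborts startup via checkErr
	}
	fmt.Println(addr) // would be passed to cluster.Join at runtime
}
```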
-func bootstrap(ctx context.Context, cluster *ipfscluster.Cluster, bootstraps []ma.Multiaddr) { - for _, bstrap := range bootstraps { - logger.Infof("Bootstrapping to %s", bstrap) - err := cluster.Join(ctx, bstrap) - if err != nil { - logger.Errorf("bootstrap to %s failed: %s", bstrap, err) - } - } -} - -func setupDatastore(cfgHelper *cmdutils.ConfigHelper) ds.Datastore { - dsName := cfgHelper.GetDatastore() - stmgr, err := cmdutils.NewStateManager(cfgHelper.GetConsensus(), dsName, cfgHelper.Identity(), cfgHelper.Configs()) - checkErr("creating state manager", err) - store, err := stmgr.GetStore() - checkErr("creating datastore", err) - if dsName != "" { - logger.Infof("Datastore backend: %s", dsName) - } - return store -} - -func setupConsensus( - cfgHelper *cmdutils.ConfigHelper, - h host.Host, - dht *dual.DHT, - pubsub *pubsub.PubSub, - store ds.Datastore, - raftStaging bool, -) (ipfscluster.Consensus, error) { - - cfgs := cfgHelper.Configs() - switch cfgHelper.GetConsensus() { - case cfgs.Raft.ConfigKey(): - rft, err := raft.NewConsensus( - h, - cfgHelper.Configs().Raft, - store, - raftStaging, - ) - if err != nil { - return nil, errors.Wrap(err, "creating Raft component") - } - return rft, nil - case cfgs.Crdt.ConfigKey(): - convrdt, err := crdt.New( - h, - dht, - pubsub, - cfgHelper.Configs().Crdt, - store, - ) - if err != nil { - return nil, errors.Wrap(err, "creating CRDT component") - } - return convrdt, nil - default: - return nil, errors.New("unknown consensus component") - } -} diff --git a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/dist/LICENSE b/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/dist/LICENSE deleted file mode 100644 index 0020f2a..0000000 --- a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/dist/LICENSE +++ /dev/null @@ -1,5 +0,0 @@ -Dual-licensed under MIT and ASLv2, by way of the [Permissive License -Stack](https://protocol.ai/blog/announcing-the-permissive-license-stack/). - -Apache-2.0: https://www.apache.org/licenses/license-2.0 -MIT: https://www.opensource.org/licenses/mit diff --git a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/dist/LICENSE-APACHE b/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/dist/LICENSE-APACHE deleted file mode 100644 index 22608cf..0000000 --- a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/dist/LICENSE-APACHE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2020. Protocol Labs, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/dist/LICENSE-MIT b/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/dist/LICENSE-MIT deleted file mode 100644 index c6134ad..0000000 --- a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/dist/LICENSE-MIT +++ /dev/null @@ -1,19 +0,0 @@ -Copyright 2020. Protocol Labs, Inc. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/dist/README.md b/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/dist/README.md deleted file mode 100644 index af380c1..0000000 --- a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/dist/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# `ipfs-cluster-service` - -> The IPFS cluster peer daemon - -`ipfs-cluster-service` runs a full IPFS Cluster peer. - -### Usage - -Usage information can be obtained with: - -``` -$ ipfs-cluster-service --help -``` - -For more information, please check the [Documentation](https://ipfscluster.io/documentation), in particular the [`ipfs-cluster-service` section](https://ipfscluster.io/documentation/ipfs-cluster-service). 
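The daemon assembly removed in daemon.go above reduces to a fixed wiring order. The following condensed sketch mirrors the deleted daemon/createCluster flow (consensus chosen by the removed setupConsensus helper, per-step error handling collapsed into checkErr as in the original); it is a summary of the removed code, not a drop-in replacement:

```
// Condensed from the deleted daemon.go: the order in which a peer is assembled.
cfgHelper, err := cmdutils.NewLoadedConfigHelper(configPath, identityPath) // configs + identity
checkErr("loading configurations", err)
defer cfgHelper.Manager().Shutdown()
cfgs := cfgHelper.Configs()

store := setupDatastore(cfgHelper) // badger or leveldb, via the removed helper
host, pubsub, dht, err := ipfscluster.NewClusterHost(ctx, cfgHelper.Identity(), cfgs.Cluster, store)
checkErr("creating libp2p host", err)

api, err := rest.NewAPI(ctx, cfgs.Restapi)             // management API
connector, err := ipfshttp.NewConnector(cfgs.Ipfshttp) // talks to the IPFS daemon
informer, err := disk.NewInformer(cfgs.DiskInf)        // free-space metrics
alloc, err := balanced.New(cfgs.BalancedAlloc)         // pin allocator
cons, err := setupConsensus(cfgHelper, host, dht, pubsub, store, raftStaging) // raft or crdt
tracker := stateless.New(cfgs.Statelesstracker, host.ID(), cfgs.Cluster.Peername, cons.State)
mon, err := pubsubmon.New(ctx, cfgs.Pubsubmon, pubsub, nil)
tracer, err := observations.SetupTracing(cfgs.Tracing)

cluster, err := ipfscluster.NewCluster(ctx, host, dht, cfgs.Cluster, store, cons,
	[]ipfscluster.API{api}, connector, tracker, mon, alloc,
	[]ipfscluster.Informer{informer}, tracer)
checkErr("error creating cluster peer", err)
go bootstrap(ctx, cluster, bootstraps) // join any --bootstrap peers in the background
return cmdutils.HandleSignals(ctx, cancel, cluster, host, dht, store)
```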
diff --git a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/export.json b/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/export.json deleted file mode 100644 index 04b5ba4..0000000 --- a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/export.json +++ /dev/null @@ -1 +0,0 @@ -{"replication_factor_min":-1,"replication_factor_max":-1,"name":"","mode":"direct","shard_size":0,"user_allocations":null,"expire_at":"0001-01-01T00:00:00Z","metadata":null,"pin_update":null,"cid":{"/":"QmUaFyXjZUNaUwYF8rBtbJc7fEJ46aJXvgV8z2HHs6jvmJ"},"type":2,"allocations":[],"max_depth":0,"reference":null} diff --git a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/lock.go b/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/lock.go deleted file mode 100644 index c488354..0000000 --- a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/lock.go +++ /dev/null @@ -1,71 +0,0 @@ -package main - -import ( - "errors" - "fmt" - "io" - "path" - - fslock "github.com/ipfs/go-fs-lock" - "github.com/ipfs-cluster/ipfs-cluster/cmdutils" -) - -// lock logic heavily inspired by go-ipfs/repo/fsrepo/lock/lock.go - -// The name of the file used for locking -const lockFileName = "cluster.lock" - -var locker *lock - -// lock helps to coordinate proceeds via a lock file -type lock struct { - lockCloser io.Closer - path string -} - -func (l *lock) lock() { - if l.lockCloser != nil { - checkErr("", errors.New("cannot acquire lock twice")) - } - - // we should have a config folder whenever we try to lock - cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath, "", "") - cfgHelper.MakeConfigFolder() - - // set the lock file within this function - logger.Debug("checking lock") - lk, err := fslock.Lock(l.path, lockFileName) - if err != nil { - logger.Debug(err) - l.lockCloser = nil - errStr := "%s. If no other " - errStr += "%s process is running, remove %s, or make sure " - errStr += "that the config folder is writable for the user " - errStr += "running %s." - errStr = fmt.Sprintf( - errStr, - err, - programName, - path.Join(l.path, lockFileName), - programName, - ) - checkErr("obtaining execution lock", errors.New(errStr)) - } - logger.Debugf("%s execution lock acquired", programName) - l.lockCloser = lk -} - -func (l *lock) tryUnlock() error { - // Noop in the uninitialized case - if l.lockCloser == nil { - logger.Debug("locking not initialized, unlock is noop") - return nil - } - err := l.lockCloser.Close() - if err != nil { - return err - } - logger.Debug("successfully released execution lock") - l.lockCloser = nil - return nil -} diff --git a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/main.go b/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/main.go deleted file mode 100644 index ce65911..0000000 --- a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/main.go +++ /dev/null @@ -1,749 +0,0 @@ -// The ipfs-cluster-service application. 
-package main - -import ( - "bufio" - "context" - "errors" - "fmt" - "io" - "os" - "os/user" - "path/filepath" - "strings" - - ipfscluster "github.com/ipfs-cluster/ipfs-cluster" - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/cmdutils" - "github.com/ipfs-cluster/ipfs-cluster/pstoremgr" - "github.com/ipfs-cluster/ipfs-cluster/version" - peer "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" - - semver "github.com/blang/semver" - logging "github.com/ipfs/go-log/v2" - cli "github.com/urfave/cli" -) - -// ProgramName of this application -const programName = "ipfs-cluster-service" - -// flag defaults -const ( - defaultLogLevel = "info" - defaultConsensus = "crdt" - defaultDatastore = "badger" -) - -const ( - stateCleanupPrompt = "The peer state will be removed. Existing pins may be lost." - configurationOverwritePrompt = "The configuration file will be overwritten." -) - -// We store a commit id here -var commit string - -// Description provides a short summary of the functionality of this tool -var Description = fmt.Sprintf(` -%s runs an IPFS Cluster peer. - -A peer participates in the cluster consensus, follows a distributed log -of pinning and unpinning requests and manages pinning operations to a -configured IPFS daemon. - -This peer also provides an API for cluster management, an IPFS Proxy API which -forwards requests to IPFS and a number of components for internal communication -using LibP2P. This is a simplified view of the components: - - +------------------+ - | ipfs-cluster-ctl | - +---------+--------+ - | - | HTTP(s) -ipfs-cluster-service | HTTP -+----------+--------+--v--+----------------------+ +-------------+ -| RPC | Peer 1 | API | IPFS Connector/Proxy +------> IPFS daemon | -+----^-----+--------+-----+----------------------+ +-------------+ - | libp2p - | -+----v-----+--------+-----+----------------------+ +-------------+ -| RPC | Peer 2 | API | IPFS Connector/Proxy +------> IPFS daemon | -+----^-----+--------+-----+----------------------+ +-------------+ - | - | -+----v-----+--------+-----+----------------------+ +-------------+ -| RPC | Peer 3 | API | IPFS Connector/Proxy +------> IPFS daemon | -+----------+--------+-----+----------------------+ +-------------+ - - -%s needs valid configuration and identity files to run. -These are independent from IPFS. The identity includes its own -libp2p key-pair. They can be initialized with "init" and their -default locations are ~/%s/%s -and ~/%s/%s. - -For feedback, bug reports or any additional information, visit -https://github.com/ipfs-cluster/ipfs-cluster. - - -EXAMPLES: - -Initial configuration: - -$ ipfs-cluster-service init - -Launch a cluster: - -$ ipfs-cluster-service daemon - -Launch a peer and join existing cluster: - -$ ipfs-cluster-service daemon --bootstrap /ip4/192.168.1.2/tcp/9096/p2p/QmPSoSaPXpyunaBwHs1rZBKYSqRV4bLRk32VGYLuvdrypL - -Customize logs using the --loglevel flag. To customize component-level -logging pass a comma-separated list of component-identifier:log-level -pairs, or a plain level without identifier for the overall loglevel. Valid loglevels -are critical, error, warning, notice, info and debug.
- -$ ipfs-cluster-service --loglevel info,cluster:debug,pintracker:debug daemon -`, - programName, - programName, - DefaultFolder, - DefaultConfigFile, - DefaultFolder, - DefaultIdentityFile, -) - -var logger = logging.Logger("service") - -// Default location for the configurations and data -var ( - // DefaultFolder is the name of the cluster folder - DefaultFolder = ".ipfs-cluster" - // DefaultPath is set on init() to $HOME/DefaultFolder - // and holds all the ipfs-cluster data - DefaultPath string - // The name of the configuration file inside DefaultPath - DefaultConfigFile = "service.json" - // The name of the identity file inside DefaultPath - DefaultIdentityFile = "identity.json" -) - -var ( - configPath string - identityPath string -) - -func init() { - // Set build information. - if build, err := semver.NewBuildVersion(commit); err == nil { - version.Version.Build = []string{"git" + build} - } - - // We try guessing user's home from the HOME variable. This - // allows HOME hacks for things like Snapcraft builds. HOME - // should be set in all UNIX by the OS. Alternatively, we fall back to - // usr.HomeDir (which should work on Windows etc.). - home := os.Getenv("HOME") - if home == "" { - usr, err := user.Current() - if err != nil { - panic(fmt.Sprintf("cannot get current user: %s", err)) - } - home = usr.HomeDir - } - - DefaultPath = filepath.Join(home, DefaultFolder) -} - -func out(m string, a ...interface{}) { - fmt.Fprintf(os.Stderr, m, a...) -} - -func checkErr(doing string, err error, args ...interface{}) { - if err != nil { - if len(args) > 0 { - doing = fmt.Sprintf(doing, args...) - } - out("error %s: %s\n", doing, err) - err = locker.tryUnlock() - if err != nil { - out("error releasing execution lock: %s\n", err) - } - os.Exit(1) - } -} - -func main() { - app := cli.NewApp() - app.Name = programName - app.Usage = "IPFS Cluster peer" - app.Description = Description - //app.Copyright = "© Protocol Labs, Inc." - app.Version = version.Version.String() - app.Flags = []cli.Flag{ - cli.StringFlag{ - Name: "config, c", - Value: DefaultPath, - Usage: "path to the configuration and data `FOLDER`", - EnvVar: "IPFS_CLUSTER_PATH", - }, - cli.BoolFlag{ - Name: "force, f", - Usage: "forcefully proceed with some actions. i.e. overwriting configuration", - }, - cli.BoolFlag{ - Name: "debug, d", - Usage: "enable full debug logging (very verbose)", - }, - cli.StringFlag{ - Name: "loglevel, l", - EnvVar: "IPFS_CLUSTER_LOG_LEVEL", - Usage: "set overall and component-wise log levels", - }, - } - - app.Before = func(c *cli.Context) error { - absPath, err := filepath.Abs(c.String("config")) - if err != nil { - return err - } - - configPath = filepath.Join(absPath, DefaultConfigFile) - identityPath = filepath.Join(absPath, DefaultIdentityFile) - - err = setupLogLevel(c.Bool("debug"), c.String("loglevel")) - if err != nil { - return err - } - locker = &lock{path: absPath} - - return nil - } - - app.Commands = []cli.Command{ - { - Name: "init", - Usage: "Creates a configuration and generates an identity", - Description: fmt.Sprintf(` -This command will initialize a new %s configuration file and, if it -does already exist, generate a new %s for %s. - -If the optional [source-url] is given, the generated configuration file -will refer to it. The source configuration will be fetched from its source -URL during the launch of the daemon. If not, a default standard configuration -file will be created. - -In the latter case, a cluster secret will be generated as required -by %s. 
Alternatively, this secret can be manually -provided with --custom-secret (in which case it will be prompted), or -by setting the CLUSTER_SECRET environment variable. - -The --consensus flag allows selecting an alternative consensus component -in the newly-generated configuration. - -Note that the --force flag allows overwriting an existing -configuration with default values. To generate a new identity, please -remove the %s file first and clean any Raft state. - -By default, an empty peerstore file will be created too. Initial contents can -be provided with the --peers flag. Depending on the chosen consensus, the -"trusted_peers" list in the "crdt" configuration section and the -"init_peerset" list in the "raft" configuration section will be prefilled to -the peer IDs in the given multiaddresses. -`, - - DefaultConfigFile, - DefaultIdentityFile, - programName, - programName, - DefaultIdentityFile, - ), - ArgsUsage: "[http-source-url]", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "consensus", - Usage: "select consensus component: 'crdt' or 'raft'", - Value: defaultConsensus, - }, - cli.StringFlag{ - Name: "datastore", - Usage: "select datastore component: 'badger' or 'leveldb'", - Value: defaultDatastore, - }, - cli.BoolFlag{ - Name: "custom-secret, s", - Usage: "prompt for the cluster secret (when no source specified)", - }, - cli.StringFlag{ - Name: "peers", - Usage: "comma-separated list of multiaddresses to init with (see help)", - }, - cli.BoolFlag{ - Name: "force, f", - Usage: "overwrite configuration without prompting", - }, - cli.BoolFlag{ - Name: "randomports", - Usage: "configure random ports to listen on instead of defaults", - }, - }, - Action: func(c *cli.Context) error { - consensus := c.String("consensus") - switch consensus { - case "raft", "crdt": - default: - checkErr("choosing consensus", errors.New("flag value must be set to 'raft' or 'crdt'")) - } - - datastore := c.String("datastore") - switch datastore { - case "leveldb", "badger": - default: - checkErr("choosing datastore", errors.New("flag value must be set to 'leveldb' or 'badger'")) - } - - cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath, consensus, datastore) - defer cfgHelper.Manager().Shutdown() // wait for saves - - configExists := false - if _, err := os.Stat(configPath); !os.IsNotExist(err) { - configExists = true - } - - identityExists := false - if _, err := os.Stat(identityPath); !os.IsNotExist(err) { - identityExists = true - } - - if configExists || identityExists { - // cluster might be running - // acquire lock for config folder - locker.lock() - defer locker.tryUnlock() - } - - if configExists { - confirm := fmt.Sprintf( - "%s Continue? [y/n]:", - configurationOverwritePrompt, - ) - - // --force allows override of the prompt - if !c.Bool("force") { - if !yesNoPrompt(confirm) { - return nil - } - } - } - - // Set source URL. If present, it will be the only thing saved.
- cfgHelper.Manager().Source = c.Args().First() - - // Generate defaults for all registered components - err := cfgHelper.Manager().Default() - checkErr("generating default configuration", err) - - if c.Bool("randomports") { - cfgs := cfgHelper.Configs() - - cfgs.Cluster.ListenAddr, err = cmdutils.RandomizePorts(cfgs.Cluster.ListenAddr) - checkErr("randomizing ports", err) - cfgs.Restapi.HTTPListenAddr, err = cmdutils.RandomizePorts(cfgs.Restapi.HTTPListenAddr) - checkErr("randomizing ports", err) - cfgs.Ipfsproxy.ListenAddr, err = cmdutils.RandomizePorts(cfgs.Ipfsproxy.ListenAddr) - checkErr("randomizing ports", err) - cfgs.Pinsvcapi.HTTPListenAddr, err = cmdutils.RandomizePorts(cfgs.Pinsvcapi.HTTPListenAddr) - checkErr("randomizing ports", err) - } - err = cfgHelper.Manager().ApplyEnvVars() - checkErr("applying environment variables to configuration", err) - - userSecret, userSecretDefined := userProvidedSecret(c.Bool("custom-secret") && !c.Args().Present()) - // Set user secret - if userSecretDefined { - cfgHelper.Configs().Cluster.Secret = userSecret - } - - peersOpt := c.String("peers") - var multiAddrs []ma.Multiaddr - if peersOpt != "" { - addrs := strings.Split(peersOpt, ",") - - for _, addr := range addrs { - addr = strings.TrimSpace(addr) - multiAddr, err := ma.NewMultiaddr(addr) - checkErr("parsing peer multiaddress: "+addr, err) - multiAddrs = append(multiAddrs, multiAddr) - } - - peers := ipfscluster.PeersFromMultiaddrs(multiAddrs) - cfgHelper.Configs().Crdt.TrustAll = false - cfgHelper.Configs().Crdt.TrustedPeers = peers - cfgHelper.Configs().Raft.InitPeerset = peers - } - - // Save config. Creates the folder. - // Sets BaseDir in components. - checkErr("saving default configuration", cfgHelper.SaveConfigToDisk()) - out("configuration written to %s.\n", configPath) - - if !identityExists { - ident := cfgHelper.Identity() - err := ident.Default() - checkErr("generating an identity", err) - - err = ident.ApplyEnvVars() - checkErr("applying environment variables to the identity", err) - - err = cfgHelper.SaveIdentityToDisk() - checkErr("saving "+DefaultIdentityFile, err) - out("new identity written to %s\n", identityPath) - } - - // Initialize peerstore file - even if empty - peerstorePath := cfgHelper.Configs().Cluster.GetPeerstorePath() - peerManager := pstoremgr.New(context.Background(), nil, peerstorePath) - addrInfos, err := peer.AddrInfosFromP2pAddrs(multiAddrs...) - checkErr("getting AddrInfos from peer multiaddresses", err) - err = peerManager.SavePeerstore(addrInfos) - checkErr("saving peers to peerstore", err) - if l := len(multiAddrs); l > 0 { - out("peerstore written to %s with %d entries.\n", peerstorePath, len(multiAddrs)) - } else { - out("new empty peerstore written to %s.\n", peerstorePath) - } - - return nil - }, - }, - { - Name: "daemon", - Usage: "Runs the IPFS Cluster peer (default)", - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "upgrade, u", - Usage: "run state migrations before starting (deprecated/unused)", - }, - cli.StringFlag{ - Name: "bootstrap, j", - Usage: "join a cluster providing a comma-separated list of existing peers multiaddress(es)", - }, - cli.BoolFlag{ - Name: "leave, x", - Usage: "remove peer from cluster on exit. 
Overrides \"leave_on_shutdown\"", - Hidden: true, - }, - cli.BoolFlag{ - Name: "stats", - Usage: "enable stats collection", - }, - cli.BoolFlag{ - Name: "tracing", - Usage: "enable tracing collection", - }, - cli.BoolFlag{ - Name: "no-trust", - Usage: "do not trust bootstrap peers (only for \"crdt\" consensus)", - }, - }, - Action: daemon, - }, - { - Name: "state", - Usage: "Manages the peer's consensus state (pinset)", - Subcommands: []cli.Command{ - { - Name: "export", - Usage: "save the state to a JSON file", - Description: ` -This command dumps the current cluster pinset (state) as a JSON file. The -resulting file can be used to migrate, restore or backup a Cluster peer. -By default, the state will be printed to stdout. -`, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "file, f", - Value: "", - Usage: "writes to an output file", - }, - }, - Action: func(c *cli.Context) error { - locker.lock() - defer locker.tryUnlock() - - mgr := getStateManager() - - var w io.WriteCloser - var err error - outputPath := c.String("file") - if outputPath == "" { - // Output to stdout - w = os.Stdout - } else { - // Create the export file - w, err = os.Create(outputPath) - checkErr("creating output file", err) - } - - buf := bufio.NewWriter(w) - defer func() { - buf.Flush() - w.Close() - }() - checkErr("exporting state", mgr.ExportState(buf)) - logger.Info("state successfully exported") - return nil - }, - }, - { - Name: "import", - Usage: "load the state from a file produced by 'export'", - Description: ` -This command reads in an exported pinset (state) file and replaces the -existing one. This can be used, for example, to restore a Cluster peer from a -backup. - -If an argument is provided, it will be treated it as the path of the file -to import. If no argument is provided, stdin will be used. -`, - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "force, f", - Usage: "skips confirmation prompt", - }, - cli.IntFlag{ - Name: "replication-min, rmin", - Value: 0, - Usage: "Overwrite replication-factor-min for all pins on import", - }, - cli.IntFlag{ - Name: "replication-max, rmax", - Value: 0, - Usage: "Overwrite replication-factor-max for all pins on import", - }, - cli.StringFlag{ - Name: "allocations, allocs", - Usage: "Overwrite allocations for all pins on import. Comma-separated list of peer IDs", - }, - }, - Action: func(c *cli.Context) error { - locker.lock() - defer locker.tryUnlock() - - confirm := "The pinset (state) of this peer " - confirm += "will be replaced. Continue? [y/n]:" - if !c.Bool("force") && !yesNoPrompt(confirm) { - return nil - } - - // importState allows overwriting of some options on import - opts := api.PinOptions{ - ReplicationFactorMin: c.Int("replication-min"), - ReplicationFactorMax: c.Int("replication-max"), - UserAllocations: api.StringsToPeers(strings.Split(c.String("allocations"), ",")), - } - - mgr := getStateManager() - - // Get the importing file path - importFile := c.Args().First() - var r io.ReadCloser - var err error - if importFile == "" { - r = os.Stdin - fmt.Println("reading from stdin, Ctrl-D to finish") - } else { - r, err = os.Open(importFile) - checkErr("reading import file", err) - } - defer r.Close() - - buf := bufio.NewReader(r) - - checkErr("importing state", mgr.ImportState(buf, opts)) - logger.Info("state successfully imported. 
Make sure all peers have consistent states") - return nil - }, - }, - { - Name: "cleanup", - Usage: "remove persistent data", - Description: ` -This command removes any persisted consensus data in this peer, including the -current pinset (state). The next start of the peer will be like the first start -to all effects. Peers may need to bootstrap and sync from scratch after this. -`, - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "force, f", - Usage: "skip confirmation prompt", - }, - }, - Action: func(c *cli.Context) error { - locker.lock() - defer locker.tryUnlock() - - confirm := fmt.Sprintf( - "%s Continue? [y/n]:", - stateCleanupPrompt, - ) - if !c.Bool("force") && !yesNoPrompt(confirm) { - return nil - } - - mgr := getStateManager() - checkErr("cleaning state", mgr.Clean()) - logger.Info("data correctly cleaned up") - return nil - }, - }, - }, - }, - { - Name: "version", - Usage: "Prints the ipfs-cluster version", - Action: func(c *cli.Context) error { - fmt.Printf("%s\n", version.Version) - return nil - }, - }, - } - - app.Action = run - - app.Run(os.Args) -} - -// run daemon() by default, or error. -func run(c *cli.Context) error { - cli.ShowAppHelp(c) - os.Exit(1) - return nil -} - -func setupLogLevel(debug bool, l string) error { - // if debug is set to true, log everything in debug level - if debug { - ipfscluster.SetFacilityLogLevel("*", "DEBUG") - return nil - } - - compLogLevel := strings.Split(l, ",") - var logLevel string - compLogFacs := make(map[string]string) - // get overall log level and component-wise log levels from arguments - for _, cll := range compLogLevel { - if cll == "" { - continue - } - identifierToLevel := strings.Split(cll, ":") - var lvl string - var comp string - switch len(identifierToLevel) { - case 1: - lvl = identifierToLevel[0] - comp = "all" - case 2: - lvl = identifierToLevel[1] - comp = identifierToLevel[0] - default: - return errors.New("log level not in expected format \"identifier:loglevel\" or \"loglevel\"") - } - - _, ok := compLogFacs[comp] - if ok { - fmt.Printf("overwriting existing %s log level\n", comp) - } - compLogFacs[comp] = lvl - } - - logLevel, ok := compLogFacs["all"] - if !ok { - logLevel = defaultLogLevel - } else { - delete(compLogFacs, "all") - } - - // log service with logLevel - ipfscluster.SetFacilityLogLevel("service", logLevel) - - logfacs := make(map[string]string) - - // fill component-wise log levels - for identifier, level := range compLogFacs { - logfacs[identifier] = level - } - - // Set the values for things not set by the user or for - // things set by "all". 
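Concretely, the splitting logic above decomposes a --loglevel value into an overall level plus per-component overrides; the loop that follows fills in defaults for everything left unset. A small self-contained illustration (not from the source) that mirrors the parsing:

```
// Illustration: how a --loglevel value decomposes, mirroring setupLogLevel above.
package main

import (
	"fmt"
	"strings"
)

func main() {
	levels := map[string]string{}
	for _, cll := range strings.Split("info,cluster:debug,pintracker:debug", ",") {
		parts := strings.Split(cll, ":")
		if len(parts) == 1 {
			levels["all"] = parts[0] // bare level applies to everything
		} else {
			levels[parts[0]] = parts[1] // component-specific level
		}
	}
	fmt.Println(levels) // map[all:info cluster:debug pintracker:debug]
}
```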
- for key := range ipfscluster.LoggingFacilities { - if _, ok := logfacs[key]; !ok { - logfacs[key] = logLevel - } - } - - // For Extra facilities, set the defaults per logging.go unless - // manually set - for key, defaultLvl := range ipfscluster.LoggingFacilitiesExtra { - if _, ok := logfacs[key]; !ok { - logfacs[key] = defaultLvl - } - } - - for identifier, level := range logfacs { - ipfscluster.SetFacilityLogLevel(identifier, level) - } - - return nil -} - -func userProvidedSecret(enterSecret bool) ([]byte, bool) { - if enterSecret { - secret := promptUser("Enter cluster secret (32-byte hex string): ") - decodedSecret, err := ipfscluster.DecodeClusterSecret(secret) - checkErr("parsing user-provided secret", err) - return decodedSecret, true - } - - return nil, false -} - -func promptUser(msg string) string { - scanner := bufio.NewScanner(os.Stdin) - fmt.Print(msg) - scanner.Scan() - return scanner.Text() -} - -// Lifted from go-ipfs/cmd/ipfs/daemon.go -func yesNoPrompt(prompt string) bool { - var s string - for i := 0; i < 3; i++ { - fmt.Printf("%s ", prompt) - fmt.Scanf("%s", &s) - switch s { - case "y", "Y": - return true - case "n", "N": - return false - case "": - return false - } - fmt.Println("Please press either 'y' or 'n'") - } - return false -} - -func getStateManager() cmdutils.StateManager { - cfgHelper, err := cmdutils.NewLoadedConfigHelper( - configPath, - identityPath, - ) - checkErr("loading configurations", err) - cfgHelper.Manager().Shutdown() - mgr, err := cmdutils.NewStateManagerWithHelper(cfgHelper) - checkErr("creating state manager", err) - return mgr -} diff --git a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/main_test.go b/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/main_test.go deleted file mode 100644 index 22f8a1d..0000000 --- a/packages/networking/ipfs-cluster/cmd/ipfs-cluster-service/main_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package main - -import ( - "testing" - - "github.com/ipfs-cluster/ipfs-cluster/cmdutils" - - ma "github.com/multiformats/go-multiaddr" -) - -func TestRandomPorts(t *testing.T) { - port := "9096" - m1, _ := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/9096") - m2, _ := ma.NewMultiaddr("/ip6/::/udp/9096") - - addresses, err := cmdutils.RandomizePorts([]ma.Multiaddr{m1, m2}) - if err != nil { - t.Fatal(err) - } - - v1, err := addresses[0].ValueForProtocol(ma.P_TCP) - if err != nil { - t.Fatal(err) - } - - v2, err := addresses[1].ValueForProtocol(ma.P_UDP) - if err != nil { - t.Fatal(err) - } - - if v1 == port { - t.Error("expected different ipv4 ports") - } - - if v2 == port { - t.Error("expected different ipv6 ports") - } -} diff --git a/packages/networking/ipfs-cluster/cmdutils/cmdutils.go b/packages/networking/ipfs-cluster/cmdutils/cmdutils.go deleted file mode 100644 index 4ce27a1..0000000 --- a/packages/networking/ipfs-cluster/cmdutils/cmdutils.go +++ /dev/null @@ -1,207 +0,0 @@ -// Package cmdutils contains utilities to facilitate building of command line -// applications launching cluster peers. 
-package cmdutils - -import ( - "context" - "fmt" - "io" - "net" - "os" - "os/signal" - "strings" - "syscall" - "time" - - "github.com/ipfs/go-datastore" - ipfscluster "github.com/ipfs-cluster/ipfs-cluster" - ipfshttp "github.com/ipfs-cluster/ipfs-cluster/ipfsconn/ipfshttp" - host "github.com/libp2p/go-libp2p/core/host" - dual "github.com/libp2p/go-libp2p-kad-dht/dual" - ma "github.com/multiformats/go-multiaddr" - "github.com/pkg/errors" - "go.uber.org/multierr" -) - -// RandomizePorts replaces TCP and UDP ports with random, but valid port -// values, on the given multiaddresses -func RandomizePorts(addrs []ma.Multiaddr) ([]ma.Multiaddr, error) { - results := make([]ma.Multiaddr, 0, len(addrs)) - - for _, m := range addrs { - var prev string - var err error - components := []ma.Multiaddr{} - ma.ForEach(m, func(c ma.Component) bool { - code := c.Protocol().Code - - if code != ma.P_TCP && code != ma.P_UDP { - components = append(components, &c) - prev = c.Value() - return true - } - - var ln io.Closer - var port int - - ip := prev - if strings.Contains(ip, ":") { // ipv6 needs bracketing - ip = "[" + ip + "]" - } - - if c.Protocol().Code == ma.P_UDP { - ln, port, err = listenUDP(c.Protocol().Name, ip) - } else { - ln, port, err = listenTCP(c.Protocol().Name, ip) - } - if err != nil { - return false - } - defer ln.Close() - - var c1 *ma.Component - c1, err = ma.NewComponent(c.Protocol().Name, fmt.Sprintf("%d", port)) - if err != nil { - return false - } - - components = append(components, c1) - prev = c.Value() - - return true - }) - if err != nil { - return results, err - } - results = append(results, ma.Join(components...)) - } - - return results, nil -} - -// returns the listener so it can be closed later, and the port -func listenTCP(name, ip string) (io.Closer, int, error) { - ln, err := net.Listen(name, ip+":0") - if err != nil { - return nil, 0, err - } - - return ln, ln.Addr().(*net.TCPAddr).Port, nil -} - -// returns the listener so it can be closed later, and the port -func listenUDP(name, ip string) (io.Closer, int, error) { - ln, err := net.ListenPacket(name, ip+":0") - if err != nil { - return nil, 0, err - } - - return ln, ln.LocalAddr().(*net.UDPAddr).Port, nil -} - -// HandleSignals shuts down an IPFS Cluster peer in an orderly way -// on SIGINT, SIGTERM, SIGHUP. It forces command termination -// on the 3rd-signal count. -func HandleSignals( - ctx context.Context, - cancel context.CancelFunc, - cluster *ipfscluster.Cluster, - host host.Host, - dht *dual.DHT, - store datastore.Datastore, -) error { - signalChan := make(chan os.Signal, 20) - signal.Notify( - signalChan, - syscall.SIGINT, - syscall.SIGTERM, - syscall.SIGHUP, - ) - - var ctrlcCount int - for { - select { - case <-signalChan: - ctrlcCount++ - handleCtrlC(ctx, cluster, ctrlcCount) - case <-cluster.Done(): - cancel() - return multierr.Combine( - dht.Close(), - host.Close(), - store.Close(), - ) - } - } -} - -func handleCtrlC(ctx context.Context, cluster *ipfscluster.Cluster, ctrlcCount int) { - switch ctrlcCount { - case 1: - go func() { - if err := cluster.Shutdown(ctx); err != nil { - ErrorOut("error shutting down cluster: %s", err) - os.Exit(1) - } - }() - case 2: - ErrorOut(` - - -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -Shutdown is taking too long! Press Ctrl-c again to manually kill cluster. -Note that this may corrupt the local cluster state. -!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- - -`) - case 3: - ErrorOut("exiting cluster NOW") - os.Exit(1) - } -} - -// ErrorOut formats something and prints it to stderr. -func ErrorOut(m string, a ...interface{}) { - fmt.Fprintf(os.Stderr, m, a...) -} - -// WaitForIPFS hangs until the IPFS API becomes available or the given context is -// canceled. The IPFS API location is determined by the default ipfshttp -// component configuration and can be overridden using environment variables -// that affect that configuration. Note that we have to do this in the blind, -// since we want to wait for IPFS before we even fetch the IPFS component -// configuration (because the configuration might be hosted on IPFS itself). -func WaitForIPFS(ctx context.Context) error { - ipfshttpCfg := ipfshttp.Config{} - ipfshttpCfg.Default() - ipfshttpCfg.ApplyEnvVars() - ipfshttpCfg.ConnectSwarmsDelay = 0 - ipfshttpCfg.Tracing = false - ipfscluster.SetFacilityLogLevel("ipfshttp", "critical") - defer ipfscluster.SetFacilityLogLevel("ipfshttp", "info") - ipfs, err := ipfshttp.NewConnector(&ipfshttpCfg) - if err != nil { - return errors.Wrap(err, "error creating an ipfshttp instance to wait for IPFS") - } - - i := 0 - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - if i%10 == 0 { - fmt.Printf("waiting for IPFS to become available on %s...\n", ipfshttpCfg.NodeAddr) - } - i++ - time.Sleep(time.Second) - _, err := ipfs.ID(ctx) - if err == nil { - // sleep an extra second and quit - time.Sleep(time.Second) - return nil - } - } - } -} diff --git a/packages/networking/ipfs-cluster/cmdutils/configs.go b/packages/networking/ipfs-cluster/cmdutils/configs.go deleted file mode 100644 index 46c02df..0000000 --- a/packages/networking/ipfs-cluster/cmdutils/configs.go +++ /dev/null @@ -1,336 +0,0 @@ -package cmdutils - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/pkg/errors" - - ipfscluster "github.com/ipfs-cluster/ipfs-cluster" - "github.com/ipfs-cluster/ipfs-cluster/allocator/balanced" - "github.com/ipfs-cluster/ipfs-cluster/api/ipfsproxy" - "github.com/ipfs-cluster/ipfs-cluster/api/pinsvcapi" - "github.com/ipfs-cluster/ipfs-cluster/api/rest" - "github.com/ipfs-cluster/ipfs-cluster/config" - "github.com/ipfs-cluster/ipfs-cluster/consensus/crdt" - "github.com/ipfs-cluster/ipfs-cluster/consensus/raft" - "github.com/ipfs-cluster/ipfs-cluster/datastore/badger" - "github.com/ipfs-cluster/ipfs-cluster/datastore/leveldb" - "github.com/ipfs-cluster/ipfs-cluster/informer/disk" - "github.com/ipfs-cluster/ipfs-cluster/informer/numpin" - "github.com/ipfs-cluster/ipfs-cluster/informer/pinqueue" - "github.com/ipfs-cluster/ipfs-cluster/informer/tags" - "github.com/ipfs-cluster/ipfs-cluster/ipfsconn/ipfshttp" - "github.com/ipfs-cluster/ipfs-cluster/monitor/pubsubmon" - "github.com/ipfs-cluster/ipfs-cluster/observations" - "github.com/ipfs-cluster/ipfs-cluster/pintracker/stateless" -) - -// Configs carries config types used by a Cluster Peer.
-type Configs struct {
-    Cluster          *ipfscluster.Config
-    Restapi          *rest.Config
-    Pinsvcapi        *pinsvcapi.Config
-    Ipfsproxy        *ipfsproxy.Config
-    Ipfshttp         *ipfshttp.Config
-    Raft             *raft.Config
-    Crdt             *crdt.Config
-    Statelesstracker *stateless.Config
-    Pubsubmon        *pubsubmon.Config
-    BalancedAlloc    *balanced.Config
-    DiskInf          *disk.Config
-    NumpinInf        *numpin.Config
-    TagsInf          *tags.Config
-    PinQueueInf      *pinqueue.Config
-    Metrics          *observations.MetricsConfig
-    Tracing          *observations.TracingConfig
-    Badger           *badger.Config
-    LevelDB          *leveldb.Config
-}
-
-// ConfigHelper helps manage the configuration and identity files with the
-// standard set of cluster components.
-type ConfigHelper struct {
-    identity *config.Identity
-    manager  *config.Manager
-    configs  *Configs
-
-    configPath   string
-    identityPath string
-    consensus    string
-    datastore    string
-}
-
-// NewConfigHelper creates a config helper given the paths to the
-// configuration and identity files.
-// Remember to Shutdown() the ConfigHelper.Manager() after use.
-func NewConfigHelper(configPath, identityPath, consensus, datastore string) *ConfigHelper {
-    ch := &ConfigHelper{
-        configPath:   configPath,
-        identityPath: identityPath,
-        consensus:    consensus,
-        datastore:    datastore,
-    }
-    ch.init()
-    return ch
-}
-
-// NewLoadedConfigHelper creates a config helper given the paths to the
-// configuration and identity files and loads the configurations from disk.
-// Remember to Shutdown() the ConfigHelper.Manager() after use.
-func NewLoadedConfigHelper(configPath, identityPath string) (*ConfigHelper, error) {
-    cfgHelper := NewConfigHelper(configPath, identityPath, "", "")
-    err := cfgHelper.LoadFromDisk()
-    return cfgHelper, err
-}
-
-// LoadConfigFromDisk parses the configuration from disk.
-func (ch *ConfigHelper) LoadConfigFromDisk() error {
-    return ch.manager.LoadJSONFileAndEnv(ch.configPath)
-}
-
-// LoadIdentityFromDisk parses the identity from disk.
-func (ch *ConfigHelper) LoadIdentityFromDisk() error {
-    // load identity with hack for 0.11.0 - identity separation.
-    _, err := os.Stat(ch.identityPath)
-    ident := &config.Identity{}
-    // temporary hack to convert identity
-    if os.IsNotExist(err) {
-        clusterConfig, err := config.GetClusterConfig(ch.configPath)
-        if err != nil {
-            return err
-        }
-        err = ident.LoadJSON(clusterConfig)
-        if err != nil {
-            return errors.Wrap(err, "error loading identity")
-        }
-
-        err = ident.SaveJSON(ch.identityPath)
-        if err != nil {
-            return errors.Wrap(err, "error saving identity")
-        }
-
-        fmt.Fprintf(
-            os.Stderr,
-            "\nNOTICE: identity information extracted from %s and saved as %s.\n\n",
-            ch.configPath,
-            ch.identityPath,
-        )
-    } else { // leave this part when the hack is removed.
-        err = ident.LoadJSONFromFile(ch.identityPath)
-        if err != nil {
-            return fmt.Errorf("error loading identity from %s: %s", ch.identityPath, err)
-        }
-    }
-
-    err = ident.ApplyEnvVars()
-    if err != nil {
-        return errors.Wrap(err, "error applying environment variables to the identity")
-    }
-    ch.identity = ident
-    return nil
-}
-
-// LoadFromDisk loads both configuration and identity from disk.
-func (ch *ConfigHelper) LoadFromDisk() error {
-    err := ch.LoadConfigFromDisk()
-    if err != nil {
-        return err
-    }
-    return ch.LoadIdentityFromDisk()
-}
-
-// Identity returns the Identity object. It returns an empty identity
-// if not loaded yet.
-func (ch *ConfigHelper) Identity() *config.Identity {
-    return ch.identity
-}
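Typical use of the helper above looks like this. A hedged sketch: the import path is the upstream module these vendored files came from (the deleted files import it themselves), and the file paths are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/ipfs-cluster/ipfs-cluster/cmdutils"
)

func main() {
	cfgHelper, err := cmdutils.NewLoadedConfigHelper(
		"/var/lib/ipfs-cluster/service.json",  // placeholder path
		"/var/lib/ipfs-cluster/identity.json", // placeholder path
	)
	if err != nil {
		log.Fatal(err)
	}
	// Remember to Shutdown() the manager, per the comments above.
	defer cfgHelper.Manager().Shutdown()

	// With empty consensus/datastore hints, both are inferred from
	// whichever sections the loaded JSON actually defines.
	fmt.Println("consensus:", cfgHelper.GetConsensus())
	fmt.Println("datastore:", cfgHelper.GetDatastore())
}
```

-// Manager returns the config manager with all the
-// cluster configurations registered.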
-func (ch *ConfigHelper) Manager() *config.Manager { - return ch.manager -} - -// Configs returns the Configs object which holds all the cluster -// configurations. Configurations are empty if they have not been loaded from -// disk. -func (ch *ConfigHelper) Configs() *Configs { - return ch.configs -} - -// GetConsensus attempts to return the configured consensus. -// If the ConfigHelper was initialized with a consensus string -// then it returns that. -// -// Otherwise it checks whether one of the consensus configurations -// has been loaded. If both or none have been loaded, it returns -// an empty string. -func (ch *ConfigHelper) GetConsensus() string { - if ch.consensus != "" { - return ch.consensus - } - crdtLoaded := ch.manager.IsLoadedFromJSON(config.Consensus, ch.configs.Crdt.ConfigKey()) - raftLoaded := ch.manager.IsLoadedFromJSON(config.Consensus, ch.configs.Raft.ConfigKey()) - if crdtLoaded == raftLoaded { //both loaded or none - return "" - } - - if crdtLoaded { - return ch.configs.Crdt.ConfigKey() - } - return ch.configs.Raft.ConfigKey() -} - -// GetDatastore attempts to return the configured datastore. If the -// ConfigHelper was initialized with a datastore string, then it returns that. -// -// Otherwise it checks whether one of the datastore configurations has been -// loaded. If none or more than one have been loaded, it returns an empty -// string. Otherwise it returns the key of the loaded configuration. -func (ch *ConfigHelper) GetDatastore() string { - if ch.datastore != "" { - return ch.datastore - } - - badgerLoaded := ch.manager.IsLoadedFromJSON(config.Datastore, ch.configs.Badger.ConfigKey()) - levelDBLoaded := ch.manager.IsLoadedFromJSON(config.Datastore, ch.configs.LevelDB.ConfigKey()) - - nLoaded := 0 - for _, v := range []bool{badgerLoaded, levelDBLoaded} { - if v { - nLoaded++ - } - } - if nLoaded == 0 || nLoaded > 1 { - return "" - } - switch { - case badgerLoaded: - return ch.configs.Badger.ConfigKey() - case levelDBLoaded: - return ch.configs.LevelDB.ConfigKey() - default: - return "" - } -} - -// register all current cluster components -func (ch *ConfigHelper) init() { - man := config.NewManager() - cfgs := &Configs{ - Cluster: &ipfscluster.Config{}, - Restapi: rest.NewConfig(), - Pinsvcapi: pinsvcapi.NewConfig(), - Ipfsproxy: &ipfsproxy.Config{}, - Ipfshttp: &ipfshttp.Config{}, - Raft: &raft.Config{}, - Crdt: &crdt.Config{}, - Statelesstracker: &stateless.Config{}, - Pubsubmon: &pubsubmon.Config{}, - BalancedAlloc: &balanced.Config{}, - DiskInf: &disk.Config{}, - NumpinInf: &numpin.Config{}, - TagsInf: &tags.Config{}, - PinQueueInf: &pinqueue.Config{}, - Metrics: &observations.MetricsConfig{}, - Tracing: &observations.TracingConfig{}, - Badger: &badger.Config{}, - LevelDB: &leveldb.Config{}, - } - man.RegisterComponent(config.Cluster, cfgs.Cluster) - man.RegisterComponent(config.API, cfgs.Restapi) - man.RegisterComponent(config.API, cfgs.Pinsvcapi) - man.RegisterComponent(config.API, cfgs.Ipfsproxy) - man.RegisterComponent(config.IPFSConn, cfgs.Ipfshttp) - man.RegisterComponent(config.PinTracker, cfgs.Statelesstracker) - man.RegisterComponent(config.Monitor, cfgs.Pubsubmon) - man.RegisterComponent(config.Allocator, cfgs.BalancedAlloc) - man.RegisterComponent(config.Informer, cfgs.DiskInf) - // man.RegisterComponent(config.Informer, cfgs.Numpininf) - man.RegisterComponent(config.Informer, cfgs.TagsInf) - man.RegisterComponent(config.Informer, cfgs.PinQueueInf) - man.RegisterComponent(config.Observations, cfgs.Metrics) - 
man.RegisterComponent(config.Observations, cfgs.Tracing) - - registerDatastores := false - - switch ch.consensus { - case cfgs.Raft.ConfigKey(): - man.RegisterComponent(config.Consensus, cfgs.Raft) - case cfgs.Crdt.ConfigKey(): - man.RegisterComponent(config.Consensus, cfgs.Crdt) - registerDatastores = true - default: - man.RegisterComponent(config.Consensus, cfgs.Raft) - man.RegisterComponent(config.Consensus, cfgs.Crdt) - registerDatastores = true - } - - if registerDatastores { - switch ch.datastore { - case cfgs.Badger.ConfigKey(): - man.RegisterComponent(config.Datastore, cfgs.Badger) - case cfgs.LevelDB.ConfigKey(): - man.RegisterComponent(config.Datastore, cfgs.LevelDB) - - default: - man.RegisterComponent(config.Datastore, cfgs.LevelDB) - man.RegisterComponent(config.Datastore, cfgs.Badger) - } - } - - ch.identity = &config.Identity{} - ch.manager = man - ch.configs = cfgs -} - -// MakeConfigFolder creates the folder to hold -// configuration and identity files. -func (ch *ConfigHelper) MakeConfigFolder() error { - f := filepath.Dir(ch.configPath) - if _, err := os.Stat(f); os.IsNotExist(err) { - err := os.MkdirAll(f, 0700) - if err != nil { - return err - } - } - return nil -} - -// SaveConfigToDisk saves the configuration file to disk. -func (ch *ConfigHelper) SaveConfigToDisk() error { - err := ch.MakeConfigFolder() - if err != nil { - return err - } - return ch.manager.SaveJSON(ch.configPath) -} - -// SaveIdentityToDisk saves the identity file to disk. -func (ch *ConfigHelper) SaveIdentityToDisk() error { - err := ch.MakeConfigFolder() - if err != nil { - return err - } - return ch.Identity().SaveJSON(ch.identityPath) -} - -// SetupTracing propagates tracingCfg.EnableTracing to all other -// configurations. Use only when identity has been loaded or generated. The -// forceEnabled parameter allows to override the EnableTracing value. -func (ch *ConfigHelper) SetupTracing(forceEnabled bool) { - enabled := forceEnabled || ch.configs.Tracing.EnableTracing - - ch.configs.Tracing.ClusterID = ch.Identity().ID.Pretty() - ch.configs.Tracing.ClusterPeername = ch.configs.Cluster.Peername - ch.configs.Tracing.EnableTracing = enabled - ch.configs.Cluster.Tracing = enabled - ch.configs.Raft.Tracing = enabled - ch.configs.Crdt.Tracing = enabled - ch.configs.Restapi.Tracing = enabled - ch.configs.Pinsvcapi.Tracing = enabled - ch.configs.Ipfshttp.Tracing = enabled - ch.configs.Ipfsproxy.Tracing = enabled -} diff --git a/packages/networking/ipfs-cluster/cmdutils/state.go b/packages/networking/ipfs-cluster/cmdutils/state.go deleted file mode 100644 index fdfa5ec..0000000 --- a/packages/networking/ipfs-cluster/cmdutils/state.go +++ /dev/null @@ -1,243 +0,0 @@ -package cmdutils - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - - ipfscluster "github.com/ipfs-cluster/ipfs-cluster" - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/config" - "github.com/ipfs-cluster/ipfs-cluster/consensus/crdt" - "github.com/ipfs-cluster/ipfs-cluster/consensus/raft" - "github.com/ipfs-cluster/ipfs-cluster/datastore/badger" - "github.com/ipfs-cluster/ipfs-cluster/datastore/inmem" - "github.com/ipfs-cluster/ipfs-cluster/datastore/leveldb" - "github.com/ipfs-cluster/ipfs-cluster/pstoremgr" - "github.com/ipfs-cluster/ipfs-cluster/state" - - ds "github.com/ipfs/go-datastore" -) - -// StateManager is the interface that allows to import, export and clean -// different cluster states depending on the consensus component used. 
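Together with init() and SaveConfigToDisk above, first-run initialization composes as in this hedged sketch. The literal keys "crdt" and "leveldb" are assumptions: they are expected to match the components' ConfigKey() values, and the paths are placeholders.

```go
package main

import (
	"log"

	"github.com/ipfs-cluster/ipfs-cluster/cmdutils"
)

func main() {
	// Pinning the helper to one consensus and one datastore registers
	// only those sections, so the generated file stays minimal.
	cfgHelper := cmdutils.NewConfigHelper(
		"/var/lib/ipfs-cluster/service.json",  // placeholder path
		"/var/lib/ipfs-cluster/identity.json", // placeholder path
		"crdt",    // assumed consensus ConfigKey
		"leveldb", // assumed datastore ConfigKey
	)
	defer cfgHelper.Manager().Shutdown()

	// Generate defaults for every registered component...
	if err := cfgHelper.Manager().Default(); err != nil {
		log.Fatal(err)
	}
	// ...and persist them; this also creates the config folder.
	if err := cfgHelper.SaveConfigToDisk(); err != nil {
		log.Fatal(err)
	}
}
```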
-type StateManager interface {
-    ImportState(io.Reader, api.PinOptions) error
-    ExportState(io.Writer) error
-    GetStore() (ds.Datastore, error)
-    GetOfflineState(ds.Datastore) (state.State, error)
-    Clean() error
-}
-
-// NewStateManager returns a state manager implementation for the given
-// consensus ("raft" or "crdt"). It will need initialized configs.
-func NewStateManager(consensus string, datastore string, ident *config.Identity, cfgs *Configs) (StateManager, error) {
-    switch consensus {
-    case cfgs.Raft.ConfigKey():
-        return &raftStateManager{ident, cfgs}, nil
-    case cfgs.Crdt.ConfigKey():
-        return &crdtStateManager{
-            cfgs:      cfgs,
-            datastore: datastore,
-        }, nil
-    case "":
-        return nil, errors.New("could not determine the consensus component")
-    default:
-        return nil, fmt.Errorf("unknown consensus component '%s'", consensus)
-    }
-}
-
-// NewStateManagerWithHelper returns a state manager initialized using the
-// configuration and identity provided by the given config helper.
-func NewStateManagerWithHelper(cfgHelper *ConfigHelper) (StateManager, error) {
-    return NewStateManager(
-        cfgHelper.GetConsensus(),
-        cfgHelper.GetDatastore(),
-        cfgHelper.Identity(),
-        cfgHelper.Configs(),
-    )
-}
-
-type raftStateManager struct {
-    ident *config.Identity
-    cfgs  *Configs
-}
-
-func (raftsm *raftStateManager) GetStore() (ds.Datastore, error) {
-    return inmem.New(), nil
-}
-
-func (raftsm *raftStateManager) GetOfflineState(store ds.Datastore) (state.State, error) {
-    return raft.OfflineState(raftsm.cfgs.Raft, store)
-}
-
-func (raftsm *raftStateManager) ImportState(r io.Reader, opts api.PinOptions) error {
-    err := raftsm.Clean()
-    if err != nil {
-        return err
-    }
-
-    store, err := raftsm.GetStore()
-    if err != nil {
-        return err
-    }
-    defer store.Close()
-    st, err := raftsm.GetOfflineState(store)
-    if err != nil {
-        return err
-    }
-    err = importState(r, st, opts)
-    if err != nil {
-        return err
-    }
-    pm := pstoremgr.New(context.Background(), nil, raftsm.cfgs.Cluster.GetPeerstorePath())
-    raftPeers := append(
-        ipfscluster.PeersFromMultiaddrs(pm.LoadPeerstore()),
-        raftsm.ident.ID,
-    )
-    return raft.SnapshotSave(raftsm.cfgs.Raft, st, raftPeers)
-}
-
-func (raftsm *raftStateManager) ExportState(w io.Writer) error {
-    store, err := raftsm.GetStore()
-    if err != nil {
-        return err
-    }
-    defer store.Close()
-    st, err := raftsm.GetOfflineState(store)
-    if err != nil {
-        return err
-    }
-    return exportState(w, st)
-}
-
-func (raftsm *raftStateManager) Clean() error {
-    return raft.CleanupRaft(raftsm.cfgs.Raft)
-}
-
-type crdtStateManager struct {
-    cfgs      *Configs
-    datastore string
-}
-
-func (crdtsm *crdtStateManager) GetStore() (ds.Datastore, error) {
-    switch crdtsm.datastore {
-    case crdtsm.cfgs.Badger.ConfigKey():
-        return badger.New(crdtsm.cfgs.Badger)
-    case crdtsm.cfgs.LevelDB.ConfigKey():
-        return leveldb.New(crdtsm.cfgs.LevelDB)
-    default:
-        return nil, errors.New("unknown datastore")
-    }
-}
-
-func (crdtsm *crdtStateManager) GetOfflineState(store ds.Datastore) (state.State, error) {
-    return crdt.OfflineState(crdtsm.cfgs.Crdt, store)
-}
-
-func (crdtsm *crdtStateManager) ImportState(r io.Reader, opts api.PinOptions) error {
-    err := crdtsm.Clean()
-    if err != nil {
-        return err
-    }
-
-    store, err := crdtsm.GetStore()
-    if err != nil {
-        return err
-    }
-    defer store.Close()
-    st, err := crdtsm.GetOfflineState(store)
-    if err != nil {
-        return err
-    }
-    batchingSt := st.(state.BatchingState)
-
-    err = importState(r, batchingSt, opts)
-    if err != nil {
-        return err
-    }
-
-    return
batchingSt.Commit(context.Background()) -} - -func (crdtsm *crdtStateManager) ExportState(w io.Writer) error { - store, err := crdtsm.GetStore() - if err != nil { - return err - } - defer store.Close() - st, err := crdtsm.GetOfflineState(store) - if err != nil { - return err - } - return exportState(w, st) -} - -func (crdtsm *crdtStateManager) Clean() error { - store, err := crdtsm.GetStore() - if err != nil { - return err - } - defer store.Close() - return crdt.Clean(context.Background(), crdtsm.cfgs.Crdt, store) -} - -func importState(r io.Reader, st state.State, opts api.PinOptions) error { - ctx := context.Background() - dec := json.NewDecoder(r) - for { - var pin api.Pin - err := dec.Decode(&pin) - if err == io.EOF { - return nil - } - if err != nil { - return err - } - - if opts.ReplicationFactorMax > 0 { - pin.ReplicationFactorMax = opts.ReplicationFactorMax - } - - if opts.ReplicationFactorMin > 0 { - pin.ReplicationFactorMin = opts.ReplicationFactorMin - } - - if len(opts.UserAllocations) > 0 { - // We are injecting directly to the state. - // UserAllocation option is not stored in the state. - // We need to set Allocations directly. - pin.Allocations = opts.UserAllocations - } - - err = st.Add(ctx, pin) - if err != nil { - return err - } - } -} - -// ExportState saves a json representation of a state -func exportState(w io.Writer, st state.State) error { - out := make(chan api.Pin, 10000) - errCh := make(chan error, 1) - go func() { - defer close(errCh) - errCh <- st.List(context.Background(), out) - }() - var err error - enc := json.NewEncoder(w) - for pin := range out { - if err == nil { - err = enc.Encode(pin) - } - } - if err != nil { - return err - } - err = <-errCh - return err -} diff --git a/packages/networking/ipfs-cluster/config/config.go b/packages/networking/ipfs-cluster/config/config.go deleted file mode 100644 index afd4937..0000000 --- a/packages/networking/ipfs-cluster/config/config.go +++ /dev/null @@ -1,629 +0,0 @@ -// Package config provides interfaces and utilities for different Cluster -// components to register, read, write and validate configuration sections -// stored in a central configuration file. -package config - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "os" - "path/filepath" - "sync" - "time" - - logging "github.com/ipfs/go-log/v2" -) - -var logger = logging.Logger("config") - -var ( - // Error when downloading a Source-based configuration - errFetchingSource = errors.New("could not fetch configuration from source") - // Error when remote source points to another remote-source - errSourceRedirect = errors.New("a sourced configuration cannot point to another source") -) - -// IsErrFetchingSource reports whether this error happened when trying to -// fetch a remote configuration source (as opposed to an error parsing the -// config). -func IsErrFetchingSource(err error) bool { - return errors.Is(err, errFetchingSource) -} - -// ConfigSaveInterval specifies how often to save the configuration file if -// it needs saving. -var ConfigSaveInterval = time.Second - -// The ComponentConfig interface allows components to define configurations -// which can be managed as part of the ipfs-cluster configuration file by the -// Manager. 
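Back in state.go above, importState and exportState use a bare stream of JSON objects, one pin per object with no array wrapper, decoded until io.EOF. A self-contained, stdlib-only sketch of that framing, with a stand-in pin type (the real one is api.Pin):

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

// pin stands in for api.Pin; only the stream framing matters here.
type pin struct {
	Cid string `json:"cid"`
}

func main() {
	// Two concatenated objects: no brackets, no separators.
	r := strings.NewReader(`{"cid":"Qm111"} {"cid":"Qm222"}`)
	dec := json.NewDecoder(r)
	for {
		var p pin
		err := dec.Decode(&p)
		if err == io.EOF {
			return // clean end of stream, as in importState above
		}
		if err != nil {
			panic(err)
		}
		fmt.Println("imported", p.Cid)
	}
}
```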
-type ComponentConfig interface { - // Returns a string identifying the section name for this configuration - ConfigKey() string - // Parses a JSON representation of this configuration - LoadJSON([]byte) error - // Provides a JSON representation of this configuration - ToJSON() ([]byte, error) - // Sets default working values - Default() error - // Sets values from environment variables - ApplyEnvVars() error - // Allows this component to work under a subfolder - SetBaseDir(string) - // Checks that the configuration is valid - Validate() error - // Provides a channel to signal the Manager that the configuration - // should be persisted. - SaveCh() <-chan struct{} - // ToDisplayJSON returns a string representing the config excluding hidden fields. - ToDisplayJSON() ([]byte, error) -} - -// These are the component configuration types -// supported by the Manager. -const ( - Cluster SectionType = iota - Consensus - API - IPFSConn - State - PinTracker - Monitor - Allocator - Informer - Observations - Datastore - endTypes // keep this at the end -) - -// SectionType specifies to which section a component configuration belongs. -type SectionType int - -// SectionTypes returns the list of supported SectionTypes -func SectionTypes() []SectionType { - var l []SectionType - for i := Cluster; i < endTypes; i++ { - l = append(l, i) - } - return l -} - -// Section is a section of which stores -// component-specific configurations. -type Section map[string]ComponentConfig - -// jsonSection stores component specific -// configurations. Component configurations depend on -// components themselves. -type jsonSection map[string]*json.RawMessage - -// Manager represents an ipfs-cluster configuration which bundles -// different ComponentConfigs object together. -// Use RegisterComponent() to add a component configurations to the -// object. Once registered, configurations will be parsed from the -// central configuration file when doing LoadJSON(), and saved to it -// when doing SaveJSON(). -type Manager struct { - ctx context.Context - cancel func() - wg sync.WaitGroup - - // The Cluster configuration has a top-level - // special section. - clusterConfig ComponentConfig - - // Holds configuration objects for components. - sections map[SectionType]Section - - // store originally parsed jsonConfig - jsonCfg *jsonConfig - // stores original source if any - Source string - - sourceRedirs int // used avoid recursive source load - - // map of components which has empty configuration - // in JSON file - undefinedComps map[SectionType]map[string]bool - - // if a config has been loaded from disk, track the path - // so it can be saved to the same place. - path string - saveMux sync.Mutex -} - -// NewManager returns a correctly initialized Manager -// which is ready to accept component configurations. -func NewManager() *Manager { - ctx, cancel := context.WithCancel(context.Background()) - return &Manager{ - ctx: ctx, - cancel: cancel, - undefinedComps: make(map[SectionType]map[string]bool), - sections: make(map[SectionType]Section), - } - -} - -// Shutdown makes sure all configuration save operations are finished -// before returning. -func (cfg *Manager) Shutdown() { - cfg.cancel() - cfg.wg.Wait() -} - -// this watches a save channel which is used to signal that -// we need to store changes in the configuration. -// because saving can be called too much, we will only -// save at intervals of 1 save/second at most. 
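Before the save loop described in the comment above, here is what a component looks like from the other side of the ComponentConfig interface. A hedged sketch of a hypothetical "mycomp" component, modeled on the interface above and the test mock further below: embedding config.Saver (defined in util.go later in this diff) supplies SaveCh, SetBaseDir and NotifySave, leaving only the JSON, default and validation methods.

```go
package main

import (
	"encoding/json"
	"errors"
	"time"

	"github.com/ipfs-cluster/ipfs-cluster/config"
)

// myCfg is a hypothetical component configuration.
type myCfg struct {
	config.Saver
	Interval time.Duration
}

// myCfgJSON is its on-disk shape, durations as strings.
type myCfgJSON struct {
	Interval string `json:"interval"`
}

func (c *myCfg) ConfigKey() string   { return "mycomp" }
func (c *myCfg) Default() error      { c.Interval = time.Minute; return nil }
func (c *myCfg) ApplyEnvVars() error { return nil }

func (c *myCfg) Validate() error {
	if c.Interval <= 0 {
		return errors.New("mycomp.interval must be positive")
	}
	return nil
}

func (c *myCfg) LoadJSON(raw []byte) error {
	jcfg := &myCfgJSON{}
	if err := json.Unmarshal(raw, jcfg); err != nil {
		return err
	}
	return config.ParseDurations("mycomp",
		&config.DurationOpt{Duration: jcfg.Interval, Dst: &c.Interval, Name: "interval"})
}

func (c *myCfg) ToJSON() ([]byte, error) {
	return config.DefaultJSONMarshal(&myCfgJSON{Interval: c.Interval.String()})
}

func (c *myCfg) ToDisplayJSON() ([]byte, error) { return c.ToJSON() }

func main() {
	var _ config.ComponentConfig = &myCfg{} // compile-time interface check
}
```

At runtime, such a component calls NotifySave() whenever it mutates itself; the manager's save loop below coalesces those signals into at most one disk write per ConfigSaveInterval.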
-func (cfg *Manager) watchSave(save <-chan struct{}) { - defer cfg.wg.Done() - - // Save once per second mostly - ticker := time.NewTicker(ConfigSaveInterval) - defer ticker.Stop() - - thingsToSave := false - - for { - select { - case <-save: - thingsToSave = true - case <-ticker.C: - if thingsToSave { - err := cfg.SaveJSON("") - if err != nil { - logger.Error(err) - } - thingsToSave = false - } - - // Exit if we have to - select { - case <-cfg.ctx.Done(): - return - default: - } - } - } -} - -// jsonConfig represents a Cluster configuration as it will look when it is -// saved using json. Most configuration keys are converted into simple types -// like strings, and key names aim to be self-explanatory for the user. -type jsonConfig struct { - Source string `json:"source,omitempty"` - Cluster *json.RawMessage `json:"cluster,omitempty"` - Consensus jsonSection `json:"consensus,omitempty"` - API jsonSection `json:"api,omitempty"` - IPFSConn jsonSection `json:"ipfs_connector,omitempty"` - State jsonSection `json:"state,omitempty"` - PinTracker jsonSection `json:"pin_tracker,omitempty"` - Monitor jsonSection `json:"monitor,omitempty"` - Allocator jsonSection `json:"allocator,omitempty"` - Informer jsonSection `json:"informer,omitempty"` - Observations jsonSection `json:"observations,omitempty"` - Datastore jsonSection `json:"datastore,omitempty"` -} - -func (jcfg *jsonConfig) getSection(i SectionType) *jsonSection { - switch i { - case Consensus: - return &jcfg.Consensus - case API: - return &jcfg.API - case IPFSConn: - return &jcfg.IPFSConn - case State: - return &jcfg.State - case PinTracker: - return &jcfg.PinTracker - case Monitor: - return &jcfg.Monitor - case Allocator: - return &jcfg.Allocator - case Informer: - return &jcfg.Informer - case Observations: - return &jcfg.Observations - case Datastore: - return &jcfg.Datastore - default: - return nil - } -} - -// Default generates a default configuration by generating defaults for all -// registered components. -func (cfg *Manager) Default() error { - for _, section := range cfg.sections { - for k, compcfg := range section { - logger.Debugf("generating default conf for %s", k) - err := compcfg.Default() - if err != nil { - return err - } - } - } - if cfg.clusterConfig != nil { - logger.Debug("generating default conf for cluster") - err := cfg.clusterConfig.Default() - if err != nil { - return err - } - } - return nil -} - -// ApplyEnvVars overrides configuration fields with any values found -// in environment variables. 
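In practice that means any component field can be overridden at deploy time without touching the JSON file. A hedged sketch: the variable name below follows envconfig conventions and the "cluster_crdt" env key shown near the end of this diff, but the exact field-to-variable mapping is an assumption.

```go
package main

import (
	"log"
	"os"

	"github.com/ipfs-cluster/ipfs-cluster/config"
	"github.com/ipfs-cluster/ipfs-cluster/consensus/crdt"
)

func main() {
	// Assumed mapping: env key "cluster_crdt" + field ClusterName.
	os.Setenv("CLUSTER_CRDT_CLUSTERNAME", "prod-cluster")

	mgr := config.NewManager()
	defer mgr.Shutdown()

	crdtCfg := &crdt.Config{}
	mgr.RegisterComponent(config.Consensus, crdtCfg)

	if err := mgr.Default(); err != nil {
		log.Fatal(err)
	}
	// Environment values override the defaults just generated.
	if err := mgr.ApplyEnvVars(); err != nil {
		log.Fatal(err)
	}
	log.Println(crdtCfg.ClusterName) // "prod-cluster" if the mapping holds
}
```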
-func (cfg *Manager) ApplyEnvVars() error { - for _, section := range cfg.sections { - for k, compcfg := range section { - logger.Debugf("applying environment variables conf for %s", k) - err := compcfg.ApplyEnvVars() - if err != nil { - return err - } - } - } - - if cfg.clusterConfig != nil { - logger.Debugf("applying environment variables conf for cluster") - err := cfg.clusterConfig.ApplyEnvVars() - if err != nil { - return err - } - } - return nil -} - -// RegisterComponent lets the Manager load and save component configurations -func (cfg *Manager) RegisterComponent(t SectionType, ccfg ComponentConfig) { - cfg.wg.Add(1) - go cfg.watchSave(ccfg.SaveCh()) - - if t == Cluster { - cfg.clusterConfig = ccfg - return - } - - if cfg.sections == nil { - cfg.sections = make(map[SectionType]Section) - } - - _, ok := cfg.sections[t] - if !ok { - cfg.sections[t] = make(Section) - } - - cfg.sections[t][ccfg.ConfigKey()] = ccfg - - _, ok = cfg.undefinedComps[t] - if !ok { - cfg.undefinedComps[t] = make(map[string]bool) - } -} - -// Validate checks that all the registered components in this -// Manager have valid configurations. It also makes sure that -// the main Cluster compoenent exists. -func (cfg *Manager) Validate() error { - if cfg.clusterConfig == nil { - return errors.New("no registered cluster section") - } - - if cfg.sections == nil { - return errors.New("no registered components") - } - - err := cfg.clusterConfig.Validate() - if err != nil { - return fmt.Errorf("cluster section failed to validate: %s", err) - } - - for t, section := range cfg.sections { - if section == nil { - return fmt.Errorf("section %d is nil", t) - } - for k, compCfg := range section { - if compCfg == nil { - return fmt.Errorf("%s entry for section %d is nil", k, t) - } - err := compCfg.Validate() - if err != nil { - return fmt.Errorf("%s failed to validate: %s", k, err) - } - } - } - return nil -} - -// LoadJSONFromFile reads a Configuration file from disk and parses -// it. See LoadJSON too. -func (cfg *Manager) LoadJSONFromFile(path string) error { - cfg.path = path - - file, err := os.ReadFile(path) - if err != nil { - logger.Error("error reading the configuration file: ", err) - return err - } - - return cfg.LoadJSON(file) -} - -// LoadJSONFromHTTPSource reads a Configuration file from a URL and parses it. -func (cfg *Manager) LoadJSONFromHTTPSource(url string) error { - logger.Infof("loading configuration from %s", url) - cfg.Source = url - resp, err := http.Get(url) - if err != nil { - return fmt.Errorf("%w: %s", errFetchingSource, url) - } - defer resp.Body.Close() - body, err := io.ReadAll(resp.Body) - if err != nil { - return err - } - - if resp.StatusCode >= 300 { - return fmt.Errorf("unsuccessful request (%d): %s", resp.StatusCode, body) - } - - // Avoid recursively loading remote sources - if cfg.sourceRedirs > 0 { - return errSourceRedirect - } - cfg.sourceRedirs++ - // make sure the counter is always reset when function done - defer func() { cfg.sourceRedirs = 0 }() - - err = cfg.LoadJSON(body) - if err != nil { - return err - } - return nil -} - -// LoadJSONFileAndEnv calls LoadJSONFromFile followed by ApplyEnvVars, -// reading and parsing a Configuration file and then overriding fields -// with any values found in environment variables. 
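The remote-source path above allows a configuration file that is nothing but a pointer; LoadJSON resolves it over HTTP, and a sourced configuration that itself names another source is rejected with errSourceRedirect (one hop only). A hedged sketch with a placeholder URL:

```go
package main

import (
	"log"

	"github.com/ipfs-cluster/ipfs-cluster/config"
)

func main() {
	// A config that only points at a remote source (placeholder URL).
	pointer := []byte(`{"source": "https://cluster.example.com/service.json"}`)

	mgr := config.NewManager()
	defer mgr.Shutdown()
	if err := mgr.LoadJSON(pointer); err != nil {
		log.Fatal(err) // fails here unless the URL serves a real config
	}
}
```

Note that saving such a manager writes back only the source pointer, not the fetched contents, as the source-only test further below demonstrates.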
-func (cfg *Manager) LoadJSONFileAndEnv(path string) error { - if err := cfg.LoadJSONFromFile(path); err != nil { - return err - } - - return cfg.ApplyEnvVars() -} - -// LoadJSON parses configurations for all registered components, -// In order to work, component configurations must have been registered -// beforehand with RegisterComponent. -func (cfg *Manager) LoadJSON(bs []byte) error { - dir := filepath.Dir(cfg.path) - - jcfg := &jsonConfig{} - err := json.Unmarshal(bs, jcfg) - if err != nil { - logger.Error("error parsing JSON: ", err) - return err - } - - cfg.jsonCfg = jcfg - // Handle remote source - if jcfg.Source != "" { - return cfg.LoadJSONFromHTTPSource(jcfg.Source) - } - - // Load Cluster section. Needs to have been registered - if cfg.clusterConfig != nil && jcfg.Cluster != nil { - cfg.clusterConfig.SetBaseDir(dir) - err = cfg.clusterConfig.LoadJSON([]byte(*jcfg.Cluster)) - if err != nil { - return err - } - } - - loadCompJSON := func(name string, component ComponentConfig, jsonSection jsonSection, t SectionType) error { - component.SetBaseDir(dir) - raw, ok := jsonSection[name] - if ok { - err := component.LoadJSON([]byte(*raw)) - if err != nil { - return err - } - logger.Debugf("%s component configuration loaded", name) - } else { - cfg.undefinedComps[t][name] = true - logger.Debugf("%s component is empty, generating default", name) - component.Default() - } - - return nil - } - // Helper function to load json from each section in the json config - loadSectionJSON := func(section Section, jsonSection jsonSection, t SectionType) error { - for name, component := range section { - err := loadCompJSON(name, component, jsonSection, t) - if err != nil { - logger.Error(err) - return err - } - } - return nil - - } - - sections := cfg.sections - - for _, t := range SectionTypes() { - if t == Cluster { - continue - } - err := loadSectionJSON(sections[t], *jcfg.getSection(t), t) - if err != nil { - return err - } - } - return cfg.Validate() -} - -// SaveJSON saves the JSON representation of the Config to -// the given path. -func (cfg *Manager) SaveJSON(path string) error { - cfg.saveMux.Lock() - defer cfg.saveMux.Unlock() - - logger.Info("Saving configuration") - - if path != "" { - cfg.path = path - } - - bs, err := cfg.ToJSON() - if err != nil { - return err - } - - return os.WriteFile(cfg.path, bs, 0600) -} - -// ToJSON provides a JSON representation of the configuration by -// generating JSON for all componenents registered. -func (cfg *Manager) ToJSON() ([]byte, error) { - dir := filepath.Dir(cfg.path) - - err := cfg.Validate() - if err != nil { - return nil, err - } - - if cfg.Source != "" { - return DefaultJSONMarshal(&jsonConfig{Source: cfg.Source}) - } - - jcfg := cfg.jsonCfg - if jcfg == nil { - jcfg = &jsonConfig{} - } - - if cfg.clusterConfig != nil { - cfg.clusterConfig.SetBaseDir(dir) - raw, err := cfg.clusterConfig.ToJSON() - if err != nil { - return nil, err - } - jcfg.Cluster = new(json.RawMessage) - *jcfg.Cluster = raw - logger.Debug("writing changes for cluster section") - } - - // Given a Section and a *jsonSection, it updates the - // component-configurations in the latter. 
- updateJSONConfigs := func(section Section, dest *jsonSection) error { - for k, v := range section { - v.SetBaseDir(dir) - logger.Debugf("writing changes for %s section", k) - j, err := v.ToJSON() - if err != nil { - return err - } - if *dest == nil { - *dest = make(jsonSection) - } - jsonSection := *dest - jsonSection[k] = new(json.RawMessage) - *jsonSection[k] = j - } - return nil - } - - err = cfg.applyUpdateJSONConfigs(jcfg, updateJSONConfigs) - if err != nil { - return nil, err - } - - return DefaultJSONMarshal(jcfg) -} - -// ToDisplayJSON returns a printable cluster configuration. -func (cfg *Manager) ToDisplayJSON() ([]byte, error) { - jcfg := &jsonConfig{} - - if cfg.clusterConfig != nil { - raw, err := cfg.clusterConfig.ToDisplayJSON() - if err != nil { - return nil, err - } - jcfg.Cluster = new(json.RawMessage) - *jcfg.Cluster = raw - } - - updateJSONConfigs := func(section Section, dest *jsonSection) error { - for k, v := range section { - j, err := v.ToDisplayJSON() - if err != nil { - return err - } - if *dest == nil { - *dest = make(jsonSection) - } - jsonSection := *dest - jsonSection[k] = new(json.RawMessage) - *jsonSection[k] = j - } - return nil - } - - err := cfg.applyUpdateJSONConfigs(jcfg, updateJSONConfigs) - if err != nil { - return nil, err - } - - return DefaultJSONMarshal(jcfg) -} - -func (cfg *Manager) applyUpdateJSONConfigs(jcfg *jsonConfig, updateJSONConfigs func(section Section, dest *jsonSection) error) error { - for _, t := range SectionTypes() { - if t == Cluster { - continue - } - jsection := jcfg.getSection(t) - err := updateJSONConfigs(cfg.sections[t], jsection) - if err != nil { - return err - } - } - - return nil -} - -// IsLoadedFromJSON tells whether the given component belonging to -// the given section type is present in the cluster JSON -// config or not. 
-func (cfg *Manager) IsLoadedFromJSON(t SectionType, name string) bool { - return !cfg.undefinedComps[t][name] -} - -// GetClusterConfig extracts cluster config from the configuration file -// and returns bytes of it -func GetClusterConfig(configPath string) ([]byte, error) { - file, err := os.ReadFile(configPath) - if err != nil { - logger.Error("error reading the configuration file: ", err) - return nil, err - } - - jcfg := &jsonConfig{} - err = json.Unmarshal(file, jcfg) - if err != nil { - logger.Error("error parsing JSON: ", err) - return nil, err - } - return []byte(*jcfg.Cluster), nil -} diff --git a/packages/networking/ipfs-cluster/config/config_test.go b/packages/networking/ipfs-cluster/config/config_test.go deleted file mode 100644 index 6a456a4..0000000 --- a/packages/networking/ipfs-cluster/config/config_test.go +++ /dev/null @@ -1,212 +0,0 @@ -package config - -import ( - "bytes" - "fmt" - "net/http" - "net/http/httptest" - "testing" -) - -var mockJSON = []byte(`{ - "cluster": { - "a": "b" - }, - "consensus": { - "mock": { - "a": "b" - } - }, - "api": { - "mock": { - "a": "b" - } - }, - "ipfs_connector": { - "mock": { - "a": "b" - } - }, - "state": { - "mock": { - "a": "b" - } - }, - "pin_tracker": { - "mock": { - "a": "b" - } - }, - "monitor": { - "mock": { - "a": "b" - } - }, - "allocator": { - "mock": { - "a": "b" - } - }, - "informer": { - "mock": { - "a": "b" - } - }, - "observations": { - "mock": { - "a": "b" - } - }, - "datastore": { - "mock": { - "a": "b" - } - } -}`) - -type mockCfg struct { - Saver -} - -func (m *mockCfg) ConfigKey() string { - return "mock" -} - -func (m *mockCfg) LoadJSON([]byte) error { - return nil -} - -func (m *mockCfg) ToJSON() ([]byte, error) { - return []byte(`{"a":"b"}`), nil -} - -func (m *mockCfg) Default() error { - return nil -} - -func (m *mockCfg) ApplyEnvVars() error { - return nil -} - -func (m *mockCfg) Validate() error { - return nil -} - -func (m *mockCfg) ToDisplayJSON() ([]byte, error) { - return []byte(` - { - "a":"b" - } - `), nil -} - -func setupConfigManager() *Manager { - cfg := NewManager() - mockCfg := &mockCfg{} - cfg.RegisterComponent(Cluster, mockCfg) - for _, sect := range SectionTypes() { - cfg.RegisterComponent(sect, mockCfg) - } - return cfg -} - -func TestManager_ToJSON(t *testing.T) { - cfgMgr := setupConfigManager() - err := cfgMgr.Default() - if err != nil { - t.Fatal(err) - } - got, err := cfgMgr.ToJSON() - if err != nil { - t.Error(err) - } - - if !bytes.Equal(got, mockJSON) { - t.Errorf("mismatch between got: %s and want: %s", got, mockJSON) - } -} - -func TestLoadFromHTTPSourceRedirect(t *testing.T) { - mux := http.NewServeMux() - mux.HandleFunc("/config", func(w http.ResponseWriter, r *http.Request) { - json := fmt.Sprintf(`{ "source" : "http://%s/config" }`, r.Host) - w.Write([]byte(json)) - }) - s := httptest.NewServer(mux) - defer s.Close() - - cfgMgr := NewManager() - err := cfgMgr.LoadJSONFromHTTPSource(s.URL + "/config") - if err != errSourceRedirect { - t.Fatal("expected errSourceRedirect") - } -} - -func TestLoadFromHTTPSource(t *testing.T) { - mux := http.NewServeMux() - mux.HandleFunc("/config", func(w http.ResponseWriter, r *http.Request) { - w.Write(mockJSON) - }) - s := httptest.NewServer(mux) - defer s.Close() - - cfgMgr := setupConfigManager() - err := cfgMgr.LoadJSONFromHTTPSource(s.URL + "/config") - if err != nil { - t.Fatal("unexpected error") - } - - cfgMgr.Source = "" - newJSON, err := cfgMgr.ToJSON() - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(newJSON, mockJSON) { - 
t.Error("generated json different than loaded") - } -} - -func TestSaveWithSource(t *testing.T) { - cfgMgr := setupConfigManager() - cfgMgr.Default() - cfgMgr.Source = "http://a.b.c" - newJSON, err := cfgMgr.ToJSON() - if err != nil { - t.Fatal(err) - } - - expected := []byte(`{ - "source": "http://a.b.c" -}`) - - if !bytes.Equal(newJSON, expected) { - t.Error("should have generated a source-only json") - } -} - -func TestDefaultJSONMarshalWithoutHiddenFields(t *testing.T) { - type s struct { - A string `json:"a_key"` - B string `json:"b_key" hidden:"true"` - } - cfg := s{ - A: "hi", - B: "there", - } - - expected := `{ - "a_key": "hi", - "b_key": "XXX_hidden_XXX" -}` - - res, err := DisplayJSON(&cfg) - if err != nil { - t.Fatal(err) - } - - if string(res) != expected { - t.Error("result does not match expected") - t.Error(string(res)) - } -} diff --git a/packages/networking/ipfs-cluster/config/identity.go b/packages/networking/ipfs-cluster/config/identity.go deleted file mode 100644 index cf5a09b..0000000 --- a/packages/networking/ipfs-cluster/config/identity.go +++ /dev/null @@ -1,193 +0,0 @@ -package config - -import ( - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "os" - - crypto "github.com/libp2p/go-libp2p/core/crypto" - peer "github.com/libp2p/go-libp2p/core/peer" - - "github.com/kelseyhightower/envconfig" -) - -const configKey = "cluster" - -// Identity defaults -const ( - DefaultConfigCrypto = crypto.Ed25519 - DefaultConfigKeyLength = -1 -) - -// Identity represents identity of a cluster peer for communication, -// including the Consensus component. -type Identity struct { - ID peer.ID - PrivateKey crypto.PrivKey -} - -// identityJSON represents a Cluster peer identity as it will look when it is -// saved using JSON. -type identityJSON struct { - ID string `json:"id"` - PrivateKey string `json:"private_key"` -} - -// NewIdentity returns a new random identity. -func NewIdentity() (*Identity, error) { - ident := &Identity{} - err := ident.Default() - return ident, err -} - -// Default generates a random keypair for this identity. -func (ident *Identity) Default() error { - // pid and private key generation - priv, pub, err := crypto.GenerateKeyPair( - DefaultConfigCrypto, - DefaultConfigKeyLength, - ) - if err != nil { - return err - } - pid, err := peer.IDFromPublicKey(pub) - if err != nil { - return err - } - ident.ID = pid - ident.PrivateKey = priv - return nil -} - -// ConfigKey returns a human-readable string to identify -// a cluster Identity. -func (ident *Identity) ConfigKey() string { - return configKey -} - -// SaveJSON saves the JSON representation of the Identity to -// the given path. -func (ident *Identity) SaveJSON(path string) error { - logger.Info("Saving identity") - - bs, err := ident.ToJSON() - if err != nil { - return err - } - - return os.WriteFile(path, bs, 0600) -} - -// ToJSON generates a human-friendly version of Identity. 
-func (ident *Identity) ToJSON() (raw []byte, err error) {
-    jID, err := ident.toIdentityJSON()
-    if err != nil {
-        return
-    }
-
-    raw, err = json.MarshalIndent(jID, "", "    ")
-    return
-}
-
-func (ident *Identity) toIdentityJSON() (jID *identityJSON, err error) {
-    jID = &identityJSON{}
-
-    // Private Key
-    pkeyBytes, err := crypto.MarshalPrivateKey(ident.PrivateKey)
-    if err != nil {
-        return
-    }
-    pKey := base64.StdEncoding.EncodeToString(pkeyBytes)
-
-    // Set all identity fields
-    jID.ID = ident.ID.Pretty()
-    jID.PrivateKey = pKey
-    return
-}
-
-// LoadJSON receives a raw json-formatted identity and
-// sets the Identity fields from it. Note that it should be JSON
-// as generated by ToJSON().
-func (ident *Identity) LoadJSON(raw []byte) error {
-    jID := &identityJSON{}
-    err := json.Unmarshal(raw, jID)
-    if err != nil {
-        logger.Error("Error unmarshaling cluster config")
-        return err
-    }
-
-    return ident.applyIdentityJSON(jID)
-}
-
-func (ident *Identity) applyIdentityJSON(jID *identityJSON) error {
-    pid, err := peer.Decode(jID.ID)
-    if err != nil {
-        err = fmt.Errorf("error decoding cluster ID: %s", err)
-        return err
-    }
-    ident.ID = pid
-
-    pkb, err := base64.StdEncoding.DecodeString(jID.PrivateKey)
-    if err != nil {
-        err = fmt.Errorf("error decoding private_key: %s", err)
-        return err
-    }
-    pKey, err := crypto.UnmarshalPrivateKey(pkb)
-    if err != nil {
-        err = fmt.Errorf("error parsing private_key: %s", err)
-        return err
-    }
-    ident.PrivateKey = pKey
-
-    return ident.Validate()
-}
-
-// Validate will check that the values of this identity
-// are valid and consistent.
-func (ident *Identity) Validate() error {
-    if ident.ID == "" {
-        return errors.New("identity ID not set")
-    }
-
-    if ident.PrivateKey == nil {
-        return errors.New("no identity private_key set")
-    }
-
-    if !ident.ID.MatchesPrivateKey(ident.PrivateKey) {
-        return errors.New("identity ID does not match the private_key")
-    }
-    return nil
-}
-
-// LoadJSONFromFile reads an Identity file from disk and parses it.
-func (ident *Identity) LoadJSONFromFile(path string) error {
-    file, err := os.ReadFile(path)
-    if err != nil {
-        logger.Error("error reading the configuration file: ", err)
-        return err
-    }
-
-    return ident.LoadJSON(file)
-}
-
-// ApplyEnvVars fills in any Config fields found
-// as environment variables.
-func (ident *Identity) ApplyEnvVars() error {
-    jID, err := ident.toIdentityJSON()
-    if err != nil {
-        return err
-    }
-    err = envconfig.Process(ident.ConfigKey(), jID)
-    if err != nil {
-        return err
-    }
-    return ident.applyIdentityJSON(jID)
-}
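The identity lifecycle above (generate, persist, override from the environment) composes as in this hedged sketch; the upstream module path is assumed and the file path is a placeholder.

```go
package main

import (
	"fmt"
	"log"

	"github.com/ipfs-cluster/ipfs-cluster/config"
)

func main() {
	// Generate a fresh random identity (Ed25519 by default, per the
	// identity defaults above) and persist it.
	ident, err := config.NewIdentity()
	if err != nil {
		log.Fatal(err)
	}
	if err := ident.SaveJSON("/var/lib/ipfs-cluster/identity.json"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("new peer ID:", ident.ID.Pretty())

	// Deployment-time overrides: setting CLUSTER_ID and
	// CLUSTER_PRIVATEKEY (the variable names exercised by the tests
	// further below) and calling ident.ApplyEnvVars() replaces both
	// fields, provided the pair decodes and matches each other, since
	// ApplyEnvVars ends in Validate().
}
```

-// Equals returns true if equal to provided identity.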
-func (ident *Identity) Equals(i *Identity) bool { - return ident.ID == i.ID && ident.PrivateKey.Equals(i.PrivateKey) -} diff --git a/packages/networking/ipfs-cluster/config/identity_test.go b/packages/networking/ipfs-cluster/config/identity_test.go deleted file mode 100644 index 5fe3c42..0000000 --- a/packages/networking/ipfs-cluster/config/identity_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package config - -import ( - "encoding/json" - "os" - "testing" -) - -var identityTestJSON = []byte(`{ - "id": "QmUfSFm12eYCaRdypg48m8RqkXfLW7A2ZeGZb2skeHHDGA", - "private_key": "CAASqAkwggSkAgEAAoIBAQDpT16IRF6bb9tHsCbQ7M+nb2aI8sz8xyt8PoAWM42ki+SNoESIxKb4UhFxixKvtEdGxNE6aUUVc8kFk6wTStJ/X3IGiMetwkXiFiUxabUF/8A6SyvnSVDm+wFuavugpVrZikjLcfrf2xOVgnG3deQQvd/qbAv14jTwMFl+T+8d/cXBo8Mn/leLZCQun/EJEnkXP5MjgNI8XcWUE4NnH3E0ESSm6Pkm8MhMDZ2fmzNgqEyJ0GVinNgSml3Pyha3PBSj5LRczLip/ie4QkKx5OHvX2L3sNv/JIUHse5HSbjZ1c/4oGCYMVTYCykWiczrxBUOlcr8RwnZLOm4n2bCt5ZhAgMBAAECggEAVkePwfzmr7zR7tTpxeGNeXHtDUAdJm3RWwUSASPXgb5qKyXVsm5nAPX4lXDE3E1i/nzSkzNS5PgIoxNVU10cMxZs6JW0okFx7oYaAwgAddN6lxQtjD7EuGaixN6zZ1k/G6vT98iS6i3uNCAlRZ9HVBmjsOF8GtYolZqLvfZ5izEVFlLVq/BCs7Y5OrDrbGmn3XupfitVWYExV0BrHpobDjsx2fYdTZkmPpSSvXNcm4Iq2AXVQzoqAfGo7+qsuLCZtVlyTfVKQjMvE2ffzN1dQunxixOvev/fz4WSjGnRpC6QLn6Oqps9+VxQKqKuXXqUJC+U45DuvA94Of9MvZfAAQKBgQD7xmXueXRBMr2+0WftybAV024ap0cXFrCAu+KWC1SUddCfkiV7e5w+kRJx6RH1cg4cyyCL8yhHZ99Z5V0Mxa/b/usuHMadXPyX5szVI7dOGgIC9q8IijN7B7GMFAXc8+qC7kivehJzjQghpRRAqvRzjDls4gmbNPhbH1jUiU124QKBgQDtOaW5/fOEtOq0yWbDLkLdjImct6oKMLhENL6yeIKjMYgifzHb2adk7rWG3qcMrdgaFtDVfqv8UmMEkzk7bSkovMVj3SkLzMz84ii1SkSfyaCXgt/UOzDkqAUYB0cXMppYA7jxHa2OY8oEHdBgmyJXdLdzJxCp851AoTlRUSePgQKBgQCQgKgUHOUaXnMEx88sbOuBO14gMg3dNIqM+Ejt8QbURmI8k3arzqA4UK8Tbb9+7b0nzXWanS5q/TT1tWyYXgW28DIuvxlHTA01aaP6WItmagrphIelERzG6f1+9ib/T4czKmvROvDIHROjq8lZ7ERs5Pg4g+sbh2VbdzxWj49EQQKBgFEna36ZVfmMOs7mJ3WWGeHY9ira2hzqVd9fe+1qNKbHhx7mDJR9fTqWPxuIh/Vac5dZPtAKqaOEO8OQ6f9edLou+ggT3LrgsS/B3tNGOPvA6mNqrk/Yf/15TWTO+I8DDLIXc+lokbsogC+wU1z5NWJd13RZZOX/JUi63vTmonYBAoGBAIpglLCH2sPXfmguO6p8QcQcv4RjAU1c0GP4P5PNN3Wzo0ItydVd2LHJb6MdmL6ypeiwNklzPFwTeRlKTPmVxJ+QPg1ct/3tAURN/D40GYw9ojDhqmdSl4HW4d6gHS2lYzSFeU5jkG49y5nirOOoEgHy95wghkh6BfpwHujYJGw4" -}`) - -var ( - ID = "QmUfSFm12eYCaRdypg48m8RqkXfLW7A2ZeGZb2skeHHDGA" - PrivateKey = 
"CAASqAkwggSkAgEAAoIBAQDpT16IRF6bb9tHsCbQ7M+nb2aI8sz8xyt8PoAWM42ki+SNoESIxKb4UhFxixKvtEdGxNE6aUUVc8kFk6wTStJ/X3IGiMetwkXiFiUxabUF/8A6SyvnSVDm+wFuavugpVrZikjLcfrf2xOVgnG3deQQvd/qbAv14jTwMFl+T+8d/cXBo8Mn/leLZCQun/EJEnkXP5MjgNI8XcWUE4NnH3E0ESSm6Pkm8MhMDZ2fmzNgqEyJ0GVinNgSml3Pyha3PBSj5LRczLip/ie4QkKx5OHvX2L3sNv/JIUHse5HSbjZ1c/4oGCYMVTYCykWiczrxBUOlcr8RwnZLOm4n2bCt5ZhAgMBAAECggEAVkePwfzmr7zR7tTpxeGNeXHtDUAdJm3RWwUSASPXgb5qKyXVsm5nAPX4lXDE3E1i/nzSkzNS5PgIoxNVU10cMxZs6JW0okFx7oYaAwgAddN6lxQtjD7EuGaixN6zZ1k/G6vT98iS6i3uNCAlRZ9HVBmjsOF8GtYolZqLvfZ5izEVFlLVq/BCs7Y5OrDrbGmn3XupfitVWYExV0BrHpobDjsx2fYdTZkmPpSSvXNcm4Iq2AXVQzoqAfGo7+qsuLCZtVlyTfVKQjMvE2ffzN1dQunxixOvev/fz4WSjGnRpC6QLn6Oqps9+VxQKqKuXXqUJC+U45DuvA94Of9MvZfAAQKBgQD7xmXueXRBMr2+0WftybAV024ap0cXFrCAu+KWC1SUddCfkiV7e5w+kRJx6RH1cg4cyyCL8yhHZ99Z5V0Mxa/b/usuHMadXPyX5szVI7dOGgIC9q8IijN7B7GMFAXc8+qC7kivehJzjQghpRRAqvRzjDls4gmbNPhbH1jUiU124QKBgQDtOaW5/fOEtOq0yWbDLkLdjImct6oKMLhENL6yeIKjMYgifzHb2adk7rWG3qcMrdgaFtDVfqv8UmMEkzk7bSkovMVj3SkLzMz84ii1SkSfyaCXgt/UOzDkqAUYB0cXMppYA7jxHa2OY8oEHdBgmyJXdLdzJxCp851AoTlRUSePgQKBgQCQgKgUHOUaXnMEx88sbOuBO14gMg3dNIqM+Ejt8QbURmI8k3arzqA4UK8Tbb9+7b0nzXWanS5q/TT1tWyYXgW28DIuvxlHTA01aaP6WItmagrphIelERzG6f1+9ib/T4czKmvROvDIHROjq8lZ7ERs5Pg4g+sbh2VbdzxWj49EQQKBgFEna36ZVfmMOs7mJ3WWGeHY9ira2hzqVd9fe+1qNKbHhx7mDJR9fTqWPxuIh/Vac5dZPtAKqaOEO8OQ6f9edLou+ggT3LrgsS/B3tNGOPvA6mNqrk/Yf/15TWTO+I8DDLIXc+lokbsogC+wU1z5NWJd13RZZOX/JUi63vTmonYBAoGBAIpglLCH2sPXfmguO6p8QcQcv4RjAU1c0GP4P5PNN3Wzo0ItydVd2LHJb6MdmL6ypeiwNklzPFwTeRlKTPmVxJ+QPg1ct/3tAURN/D40GYw9ojDhqmdSl4HW4d6gHS2lYzSFeU5jkG49y5nirOOoEgHy95wghkh6BfpwHujYJGw4" -) - -func TestLoadJSON(t *testing.T) { - t.Run("basic", func(t *testing.T) { - ident := &Identity{} - err := ident.LoadJSON(identityTestJSON) - if err != nil { - t.Fatal(err) - } - }) - - loadJSON := func(t *testing.T, f func(j *identityJSON)) (*Identity, error) { - ident := &Identity{} - j := &identityJSON{} - json.Unmarshal(identityTestJSON, j) - f(j) - tst, err := json.Marshal(j) - if err != nil { - return ident, err - } - err = ident.LoadJSON(tst) - if err != nil { - return ident, err - } - return ident, nil - } - - t.Run("bad id", func(t *testing.T) { - _, err := loadJSON(t, func(j *identityJSON) { j.ID = "abc" }) - if err == nil { - t.Error("expected error decoding ID") - } - }) - - t.Run("bad private key", func(t *testing.T) { - _, err := loadJSON(t, func(j *identityJSON) { j.PrivateKey = "abc" }) - if err == nil { - t.Error("expected error parsing private key") - } - }) -} - -func TestToJSON(t *testing.T) { - ident := &Identity{} - err := ident.LoadJSON(identityTestJSON) - if err != nil { - t.Fatal(err) - } - newjson, err := ident.ToJSON() - if err != nil { - t.Fatal(err) - } - ident2 := &Identity{} - err = ident2.LoadJSON(newjson) - if err != nil { - t.Fatal(err) - } - - if !ident.Equals(ident2) { - t.Error("did not load to the same identity") - } -} - -func TestApplyEnvVars(t *testing.T) { - os.Setenv("CLUSTER_ID", ID) - os.Setenv("CLUSTER_PRIVATEKEY", PrivateKey) - - ident, err := NewIdentity() - if err != nil { - t.Fatal(err) - } - err = ident.ApplyEnvVars() - if err != nil { - t.Fatal(err) - } - - ident2 := &Identity{} - err = ident2.LoadJSON(identityTestJSON) - if err != nil { - t.Fatal(err) - } - - if !ident.Equals(ident2) { - t.Error("failed to override identity with env var") - } -} - -func TestValidate(t *testing.T) { - ident := &Identity{} - - if ident.Validate() == nil { - t.Fatal("expected error validating") - } - - ident, err := NewIdentity() - if err != nil { - t.Fatal(err) - } - - if 
ident.Validate() != nil { - t.Error("expected to validate without error") - } - - ident.ID = "" - if ident.Validate() == nil { - t.Fatal("expected error validating") - } - -} diff --git a/packages/networking/ipfs-cluster/config/util.go b/packages/networking/ipfs-cluster/config/util.go deleted file mode 100644 index f91b0dc..0000000 --- a/packages/networking/ipfs-cluster/config/util.go +++ /dev/null @@ -1,220 +0,0 @@ -package config - -import ( - "encoding/json" - "fmt" - "reflect" - "strings" - "time" -) - -// Saver implements common functionality useful for ComponentConfigs -type Saver struct { - save chan struct{} - BaseDir string -} - -// NotifySave signals the SaveCh() channel in a non-blocking fashion. -func (sv *Saver) NotifySave() { - if sv.save == nil { - sv.save = make(chan struct{}, 10) - } - - // Non blocking, in case no one's listening - select { - case sv.save <- struct{}{}: - default: - logger.Warn("configuration save channel full") - } -} - -// SaveCh returns a channel which is signaled when a component wants -// to persist its configuration -func (sv *Saver) SaveCh() <-chan struct{} { - if sv.save == nil { - sv.save = make(chan struct{}) - } - return sv.save -} - -// SetBaseDir is a setter for BaseDir and implements -// part of the ComponentConfig interface. -func (sv *Saver) SetBaseDir(dir string) { - sv.BaseDir = dir -} - -// DefaultJSONMarshal produces pretty JSON with 2-space indentation -func DefaultJSONMarshal(v interface{}) ([]byte, error) { - bs, err := json.MarshalIndent(v, "", " ") - if err != nil { - return nil, err - } - return bs, nil -} - -// SetIfNotDefault sets dest to the value of src if src is not the default -// value of the type. -// dest must be a pointer. -func SetIfNotDefault(src interface{}, dest interface{}) { - switch src.(type) { - case time.Duration: - t := src.(time.Duration) - if t != 0 { - *dest.(*time.Duration) = t - } - case string: - str := src.(string) - if str != "" { - *dest.(*string) = str - } - case uint64: - n := src.(uint64) - if n != 0 { - *dest.(*uint64) = n - } - case int: - n := src.(int) - if n != 0 { - *dest.(*int) = n - } - case float64: - n := src.(float64) - if n != 0 { - *dest.(*float64) = n - } - case bool: - b := src.(bool) - if b { - *dest.(*bool) = b - } - } -} - -// DurationOpt provides a datatype to use with ParseDurations -type DurationOpt struct { - // The duration we need to parse - Duration string - // Where to store the result - Dst *time.Duration - // A variable name associated to it for helpful errors. - Name string -} - -// ParseDurations takes a time.Duration src and saves it to the given dst. -func ParseDurations(component string, args ...*DurationOpt) error { - for _, arg := range args { - if arg.Duration == "" { - // don't do anything. Let the destination field - // stay at its default. - continue - } - t, err := time.ParseDuration(arg.Duration) - if err != nil { - return fmt.Errorf( - "error parsing %s.%s: %s", - component, - arg.Name, - err, - ) - } - *arg.Dst = t - } - return nil -} - -type hiddenField struct{} - -func (hf hiddenField) MarshalJSON() ([]byte, error) { - return []byte(`"XXX_hidden_XXX"`), nil -} -func (hf hiddenField) UnmarshalJSON(b []byte) error { return nil } - -// DisplayJSON takes pointer to a JSON-friendly configuration struct and -// returns the JSON-encoded representation of it filtering out any struct -// fields marked with the tag `hidden:"true"`, but keeping fields marked -// with `"json:omitempty"`. 
-func DisplayJSON(cfg interface{}) ([]byte, error) { - cfg = reflect.Indirect(reflect.ValueOf(cfg)).Interface() - origStructT := reflect.TypeOf(cfg) - if origStructT.Kind() != reflect.Struct { - panic("the given argument should be a struct") - } - - hiddenFieldT := reflect.TypeOf(hiddenField{}) - - // create a new struct type with same fields - // but setting hidden fields as hidden. - finalStructFields := []reflect.StructField{} - for i := 0; i < origStructT.NumField(); i++ { - f := origStructT.Field(i) - hidden := f.Tag.Get("hidden") == "true" - if f.PkgPath != "" { // skip unexported - continue - } - if hidden { - f.Type = hiddenFieldT - } - - // remove omitempty from tag, ignore other tags except json - var jsonTags []string - for _, s := range strings.Split(f.Tag.Get("json"), ",") { - if s != "omitempty" { - jsonTags = append(jsonTags, s) - } - } - f.Tag = reflect.StructTag(fmt.Sprintf("json:\"%s\"", strings.Join(jsonTags, ","))) - - finalStructFields = append(finalStructFields, f) - } - - // Parse the original JSON into the new - // struct and re-convert it to JSON. - finalStructT := reflect.StructOf(finalStructFields) - finalValue := reflect.New(finalStructT) - data := finalValue.Interface() - origJSON, err := json.Marshal(cfg) - if err != nil { - return nil, err - } - err = json.Unmarshal(origJSON, data) - if err != nil { - return nil, err - } - return DefaultJSONMarshal(data) -} - -// Strings is a helper type that (un)marshals a single string to/from a single -// JSON string and a slice of strings to/from a JSON array of strings. -type Strings []string - -// UnmarshalJSON conforms to the json.Unmarshaler interface. -func (o *Strings) UnmarshalJSON(data []byte) error { - if data[0] == '[' { - return json.Unmarshal(data, (*[]string)(o)) - } - var value string - if err := json.Unmarshal(data, &value); err != nil { - return err - } - if len(value) == 0 { - *o = []string{} - } else { - *o = []string{value} - } - return nil -} - -// MarshalJSON conforms to the json.Marshaler interface. 
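The Strings helper above is what lets a config field accept either a single JSON string or an array of strings. A hedged demo against the upstream module path:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ipfs-cluster/ipfs-cluster/config"
)

func main() {
	var one, many config.Strings
	_ = json.Unmarshal([]byte(`"GET"`), &one)           // -> ["GET"]
	_ = json.Unmarshal([]byte(`["GET","POST"]`), &many) // -> ["GET","POST"]

	// A single element marshals back to a bare string, not an array,
	// per MarshalJSON below.
	out, _ := json.Marshal(one)
	fmt.Println(string(out), many) // "GET" [GET POST]
}
```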
-func (o Strings) MarshalJSON() ([]byte, error) { - switch len(o) { - case 0: - return json.Marshal(nil) - case 1: - return json.Marshal(o[0]) - default: - return json.Marshal([]string(o)) - } -} - -var _ json.Unmarshaler = (*Strings)(nil) -var _ json.Marshaler = (*Strings)(nil) diff --git a/packages/networking/ipfs-cluster/config_test.go b/packages/networking/ipfs-cluster/config_test.go deleted file mode 100644 index c39871b..0000000 --- a/packages/networking/ipfs-cluster/config_test.go +++ /dev/null @@ -1,210 +0,0 @@ -package ipfscluster - -import ( - "github.com/ipfs-cluster/ipfs-cluster/allocator/balanced" - "github.com/ipfs-cluster/ipfs-cluster/api/ipfsproxy" - "github.com/ipfs-cluster/ipfs-cluster/api/rest" - "github.com/ipfs-cluster/ipfs-cluster/config" - "github.com/ipfs-cluster/ipfs-cluster/consensus/crdt" - "github.com/ipfs-cluster/ipfs-cluster/consensus/raft" - "github.com/ipfs-cluster/ipfs-cluster/datastore/badger" - "github.com/ipfs-cluster/ipfs-cluster/datastore/leveldb" - "github.com/ipfs-cluster/ipfs-cluster/informer/disk" - "github.com/ipfs-cluster/ipfs-cluster/ipfsconn/ipfshttp" - "github.com/ipfs-cluster/ipfs-cluster/monitor/pubsubmon" - "github.com/ipfs-cluster/ipfs-cluster/observations" - "github.com/ipfs-cluster/ipfs-cluster/pintracker/stateless" -) - -var testingClusterSecret, _ = DecodeClusterSecret("2588b80d5cb05374fa142aed6cbb047d1f4ef8ef15e37eba68c65b9d30df67ed") - -var testingIdentity = []byte(`{ - "id": "12D3KooWQiK1sYbGNnD9XtWF1sP95cawwwNy3d2WUwtP71McwUfZ", - "private_key": "CAESQJZ0wHQyoWGizG7eSATrDtTVlyyr99O8726jIu1lf2D+3VJBBAu6HXPRkbdNINBWlPMn+PK3bO6EgGGuaou8bKg=" -}`) - -var testingClusterCfg = []byte(`{ - "secret": "2588b80d5cb05374fa142aed6cbb047d1f4ef8ef15e37eba68c65b9d30df67ed", - "leave_on_shutdown": false, - "listen_multiaddress": "/ip4/127.0.0.1/tcp/10000", - "connection_manager": { - "high_water": 400, - "low_water": 200, - "grace_period": "2m0s" - }, - "state_sync_interval": "1m0s", - "pin_recover_interval": "1m0s", - "replication_factor": -1, - "monitor_ping_interval": "250ms", - "peer_watch_interval": "1s", - "pin_only_on_trusted_peers": true, - "disable_repinning": false, - "mdns_interval": "0s" -}`) - -var testingRaftCfg = []byte(`{ - "data_folder": "raftFolderFromTests", - "wait_for_leader_timeout": "5s", - "commit_retries": 2, - "commit_retry_delay": "50ms", - "backups_rotate": 2, - "network_timeout": "5s", - "heartbeat_timeout": "700ms", - "election_timeout": "1s", - "commit_timeout": "250ms", - "max_append_entries": 256, - "trailing_logs": 10240, - "snapshot_interval": "2m0s", - "snapshot_threshold": 8192, - "leader_lease_timeout": "500ms" -}`) - -var testingCrdtCfg = []byte(`{ - "cluster_name": "crdt-test", - "trusted_peers": ["*"], - "rebroadcast_interval": "250ms" -}`) - -var testingBadgerCfg = []byte(`{ - "folder": "badgerFromTests", - "gc_interval": "0m", - "gc_sleep": "0m", - "badger_options": { - "max_table_size": 1048576 - } -}`) - -var testingLevelDBCfg = []byte(`{ - "folder": "leveldbFromTests", - "leveldb_options": { - } -}`) - -var testingAPICfg = []byte(`{ - "http_listen_multiaddress": "/ip4/127.0.0.1/tcp/10002", - "read_timeout": "0", - "read_header_timeout": "5s", - "write_timeout": "0", - "idle_timeout": "2m0s", - "headers": { - "Access-Control-Allow-Headers": [ - "X-Requested-With", - "Range" - ], - "Access-Control-Allow-Methods": [ - "GET" - ], - "Access-Control-Allow-Origin": [ - "*" - ] - } -}`) - -var testingProxyCfg = []byte(`{ - "listen_multiaddress": "/ip4/127.0.0.1/tcp/10001", - "node_multiaddress": 
"/ip4/127.0.0.1/tcp/5001", - "read_timeout": "0", - "read_header_timeout": "10m0s", - "write_timeout": "0", - "idle_timeout": "1m0s" -}`) - -var testingIpfsCfg = []byte(`{ - "node_multiaddress": "/ip4/127.0.0.1/tcp/5001", - "connect_swarms_delay": "7s", - "pin_timeout": "30s", - "unpin_timeout": "15s", - "informer_trigger_interval": 10 -}`) - -var testingTrackerCfg = []byte(` -{ - "max_pin_queue_size": 4092, - "concurrent_pins": 1 -} -`) - -var testingMonCfg = []byte(`{ - "check_interval": "800ms", - "failure_threshold": 6 -}`) - -var testingAllocBalancedCfg = []byte(`{ - "allocate_by": ["freespace"] -}`) - -var testingDiskInfCfg = []byte(`{ - "metric_ttl": "900ms" -}`) - -var testingTracerCfg = []byte(`{ - "enable_tracing": false, - "jaeger_agent_endpoint": "/ip4/0.0.0.0/udp/6831", - "sampling_prob": 1, - "service_name": "cluster-daemon" -}`) - -func testingConfigs() (*config.Identity, *Config, *rest.Config, *ipfsproxy.Config, *ipfshttp.Config, *badger.Config, *leveldb.Config, *raft.Config, *crdt.Config, *stateless.Config, *pubsubmon.Config, *balanced.Config, *disk.Config, *observations.TracingConfig) { - identity, clusterCfg, apiCfg, proxyCfg, ipfsCfg, badgerCfg, levelDBCfg, raftCfg, crdtCfg, statelesstrkrCfg, pubsubmonCfg, allocBalancedCfg, diskInfCfg, tracingCfg := testingEmptyConfigs() - identity.LoadJSON(testingIdentity) - clusterCfg.LoadJSON(testingClusterCfg) - apiCfg.LoadJSON(testingAPICfg) - proxyCfg.LoadJSON(testingProxyCfg) - ipfsCfg.LoadJSON(testingIpfsCfg) - badgerCfg.LoadJSON(testingBadgerCfg) - raftCfg.LoadJSON(testingRaftCfg) - levelDBCfg.LoadJSON(testingLevelDBCfg) - crdtCfg.LoadJSON(testingCrdtCfg) - statelesstrkrCfg.LoadJSON(testingTrackerCfg) - pubsubmonCfg.LoadJSON(testingMonCfg) - allocBalancedCfg.LoadJSON(testingAllocBalancedCfg) - diskInfCfg.LoadJSON(testingDiskInfCfg) - tracingCfg.LoadJSON(testingTracerCfg) - - return identity, clusterCfg, apiCfg, proxyCfg, ipfsCfg, badgerCfg, levelDBCfg, raftCfg, crdtCfg, statelesstrkrCfg, pubsubmonCfg, allocBalancedCfg, diskInfCfg, tracingCfg -} - -func testingEmptyConfigs() (*config.Identity, *Config, *rest.Config, *ipfsproxy.Config, *ipfshttp.Config, *badger.Config, *leveldb.Config, *raft.Config, *crdt.Config, *stateless.Config, *pubsubmon.Config, *balanced.Config, *disk.Config, *observations.TracingConfig) { - identity := &config.Identity{} - clusterCfg := &Config{} - apiCfg := rest.NewConfig() - proxyCfg := &ipfsproxy.Config{} - ipfshttpCfg := &ipfshttp.Config{} - badgerCfg := &badger.Config{} - raftCfg := &raft.Config{} - levelDBCfg := &leveldb.Config{} - crdtCfg := &crdt.Config{} - statelessCfg := &stateless.Config{} - pubsubmonCfg := &pubsubmon.Config{} - allocBalancedCfg := &balanced.Config{} - diskInfCfg := &disk.Config{} - tracingCfg := &observations.TracingConfig{} - return identity, clusterCfg, apiCfg, proxyCfg, ipfshttpCfg, badgerCfg, levelDBCfg, raftCfg, crdtCfg, statelessCfg, pubsubmonCfg, allocBalancedCfg, diskInfCfg, tracingCfg -} - -// func TestConfigDefault(t *testing.T) { -// cfg := testingEmptyConfig() -// cfg.Default() -// err := cfg.Validate() -// if err != nil { -// t.Fatal(err) -// } -// } - -// func TestConfigToJSON(t *testing.T) { -// cfg := testingConfig() -// _, err := cfg.ToJSON() -// if err != nil { -// t.Error(err) -// } -// } - -// func TestConfigToConfig(t *testing.T) { -// cfg := testingConfig() -// j, _ := cfg.ToJSON() -// cfg2 := testingEmptyConfig() -// err := cfg2.LoadJSON(j) -// if err != nil { -// t.Error(err) -// } -// } diff --git a/packages/networking/ipfs-cluster/connect_graph.go 
b/packages/networking/ipfs-cluster/connect_graph.go deleted file mode 100644 index 29ffcaa..0000000 --- a/packages/networking/ipfs-cluster/connect_graph.go +++ /dev/null @@ -1,129 +0,0 @@ -package ipfscluster - -import ( - "github.com/ipfs-cluster/ipfs-cluster/api" - - peer "github.com/libp2p/go-libp2p/core/peer" - - "go.opencensus.io/trace" -) - -// ConnectGraph returns a description of which cluster peers and ipfs -// daemons are connected to each other. -func (c *Cluster) ConnectGraph() (api.ConnectGraph, error) { - ctx, span := trace.StartSpan(c.ctx, "cluster/ConnectGraph") - defer span.End() - - cg := api.ConnectGraph{ - ClusterID: c.host.ID(), - IDtoPeername: make(map[string]string), - IPFSLinks: make(map[string][]peer.ID), - ClusterLinks: make(map[string][]peer.ID), - ClusterTrustLinks: make(map[string]bool), - ClustertoIPFS: make(map[string]peer.ID), - } - members, err := c.consensus.Peers(ctx) - if err != nil { - return cg, err - } - - for _, member := range members { - // one of the entries is for itself, but that shouldn't hurt - cg.ClusterTrustLinks[member.String()] = c.consensus.IsTrustedPeer(ctx, member) - } - - peers := make([][]api.ID, len(members)) - errs := make([]error, len(members)) - - for i, member := range members { - in := make(chan struct{}) - close(in) - out := make(chan api.ID, 1024) - errCh := make(chan error, 1) - go func(i int) { - defer close(errCh) - - errCh <- c.rpcClient.Stream( - ctx, - member, - "Cluster", - "Peers", - in, - out, - ) - }(i) - var ids []api.ID - for id := range out { - ids = append(ids, id) - } - peers[i] = ids - errs[i] = <-errCh - } - - for i, err := range errs { - p := members[i].String() - cg.ClusterLinks[p] = make([]peer.ID, 0) - if err != nil { // Only setting cluster connections when no error occurs - logger.Debugf("RPC error reaching cluster peer %s: %s", p, err.Error()) - continue - } - - selfConnection, pID := c.recordClusterLinks(&cg, p, peers[i]) - cg.IDtoPeername[p] = pID.Peername - // IPFS connections - if !selfConnection { - logger.Warnf("cluster peer %s not its own peer. No ipfs info ", p) - continue - } - c.recordIPFSLinks(&cg, pID) - } - - return cg, nil -} - -func (c *Cluster) recordClusterLinks(cg *api.ConnectGraph, p string, peers []api.ID) (bool, api.ID) { - selfConnection := false - var pID api.ID - for _, id := range peers { - if id.Error != "" { - logger.Debugf("Peer %s errored connecting to its peer %s", p, id.ID.Pretty()) - continue - } - if id.ID.String() == p { - selfConnection = true - pID = id - } else { - cg.ClusterLinks[p] = append(cg.ClusterLinks[p], id.ID) - } - } - return selfConnection, pID -} - -func (c *Cluster) recordIPFSLinks(cg *api.ConnectGraph, pID api.ID) { - ipfsID := pID.IPFS.ID - if pID.IPFS.Error != "" { // Only setting ipfs connections when no error occurs - logger.Warnf("ipfs id: %s has error: %s. 
Skipping swarm connections", ipfsID.Pretty(), pID.IPFS.Error) - return - } - - pid := pID.ID.String() - ipfsPid := ipfsID.String() - - if _, ok := cg.IPFSLinks[pid]; ok { - logger.Warnf("ipfs id: %s already recorded, one ipfs daemon in use by multiple cluster peers", ipfsID.Pretty()) - } - cg.ClustertoIPFS[pid] = ipfsID - cg.IPFSLinks[ipfsPid] = make([]peer.ID, 0) - var swarmPeers []peer.ID - err := c.rpcClient.Call( - pID.ID, - "IPFSConnector", - "SwarmPeers", - struct{}{}, - &swarmPeers, - ) - if err != nil { - return - } - cg.IPFSLinks[ipfsPid] = swarmPeers -} diff --git a/packages/networking/ipfs-cluster/consensus/crdt/config.go b/packages/networking/ipfs-cluster/consensus/crdt/config.go deleted file mode 100644 index 7742fe0..0000000 --- a/packages/networking/ipfs-cluster/consensus/crdt/config.go +++ /dev/null @@ -1,267 +0,0 @@ -package crdt - -import ( - "encoding/json" - "errors" - "fmt" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/config" - - peer "github.com/libp2p/go-libp2p/core/peer" - - "github.com/kelseyhightower/envconfig" -) - -var configKey = "crdt" -var envConfigKey = "cluster_crdt" - -// Default configuration values -var ( - DefaultClusterName = "ipfs-cluster" - DefaultPeersetMetric = "ping" - DefaultDatastoreNamespace = "/c" // from "/crdt" - DefaultRebroadcastInterval = time.Minute - DefaultTrustedPeers = []peer.ID{} - DefaultTrustAll = true - DefaultBatchingMaxQueueSize = 50000 - DefaultRepairInterval = time.Hour -) - -// BatchingConfig configures parameters for batching multiple pins in a single -// CRDT-put operation. -// -// MaxBatchSize will trigger a commit whenever the number of pins in the batch -// reaches the limit. -// -// MaxBatchAge will trigger a commit when the oldest update in the batch -// reaches it. Setting both values to 0 means batching is disabled. -// -// MaxQueueSize specifies how many items can be waiting to be batched before -// the LogPin/Unpin operations block. -type BatchingConfig struct { - MaxBatchSize int - MaxBatchAge time.Duration - MaxQueueSize int -} - -// Config is the configuration object for Consensus. -type Config struct { - config.Saver - - hostShutdown bool - - // The topic we wish to subscribe to - ClusterName string - - // TrustAll specifies whether we should trust all peers regardless of - // the TrustedPeers contents. - TrustAll bool - - // Any update received from a peer outside this set is ignored and not - // forwarded. Trusted peers can also access additional RPC endpoints - // for this peer that are forbidden for other peers. - TrustedPeers []peer.ID - - // Specifies whether to batch CRDT updates for increased - // performance. - Batching BatchingConfig - - // The interval before re-announcing the current state - // to the network when no activity is observed. - RebroadcastInterval time.Duration - - // The name of the metric we use to obtain the peerset (every peer - // with valid metric of this type is part of it). - PeersetMetric string - - // All keys written to the datastore will be namespaced with this prefix - DatastoreNamespace string - - // How often the underlying crdt store triggers a repair when the - // datastore is marked dirty. - RepairInterval time.Duration - - // Tracing enables propagation of contexts across binary boundaries. 
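- // When enabled, the trace.StartSpan calls throughout this package emit
- // spans that tracing backends can stitch together across RPC hops.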
- Tracing bool -} - -type batchingConfigJSON struct { - MaxBatchSize int `json:"max_batch_size"` - MaxBatchAge string `json:"max_batch_age"` - MaxQueueSize int `json:"max_queue_size,omitempty"` -} - -type jsonConfig struct { - ClusterName string `json:"cluster_name"` - TrustedPeers []string `json:"trusted_peers"` - Batching batchingConfigJSON `json:"batching"` - RepairInterval string `json:"repair_interval"` - RebroadcastInterval string `json:"rebroadcast_interval,omitempty"` - - PeersetMetric string `json:"peerset_metric,omitempty"` - DatastoreNamespace string `json:"datastore_namespace,omitempty"` -} - -// ConfigKey returns the section name for this type of configuration. -func (cfg *Config) ConfigKey() string { - return configKey -} - -// Validate returns an error if the configuration has invalid values. -func (cfg *Config) Validate() error { - if cfg.ClusterName == "" { - return errors.New("crdt.cluster_name cannot be empty") - } - - if cfg.PeersetMetric == "" { - return errors.New("crdt.peerset_metric needs a name") - } - - if cfg.RebroadcastInterval <= 0 { - return errors.New("crdt.rebroadcast_interval is invalid") - } - - if cfg.Batching.MaxQueueSize <= 0 { - return errors.New("crdt.batching.max_queue_size is invalid") - } - - if cfg.RepairInterval < 0 { - return errors.New("crdt.repair_interval is invalid") - } - return nil -} - -// LoadJSON takes a raw JSON slice and sets all the configuration fields. -func (cfg *Config) LoadJSON(raw []byte) error { - jcfg := &jsonConfig{} - err := json.Unmarshal(raw, jcfg) - if err != nil { - return fmt.Errorf("error unmarshaling %s config", configKey) - } - - cfg.Default() - - return cfg.applyJSONConfig(jcfg) -} - -func (cfg *Config) applyJSONConfig(jcfg *jsonConfig) error { - config.SetIfNotDefault(jcfg.ClusterName, &cfg.ClusterName) - - // Whenever we parse JSON, TrustAll is false unless an '*' peer exists - cfg.TrustAll = false - cfg.TrustedPeers = []peer.ID{} - - for _, p := range jcfg.TrustedPeers { - if p == "*" { - cfg.TrustAll = true - cfg.TrustedPeers = []peer.ID{} - break - } - pid, err := peer.Decode(p) - if err != nil { - return fmt.Errorf("error parsing trusted peers: %s", err) - } - cfg.TrustedPeers = append(cfg.TrustedPeers, pid) - } - - cfg.Batching.MaxBatchSize = jcfg.Batching.MaxBatchSize - - config.SetIfNotDefault(jcfg.Batching.MaxQueueSize, &cfg.Batching.MaxQueueSize) - config.SetIfNotDefault(jcfg.PeersetMetric, &cfg.PeersetMetric) - config.SetIfNotDefault(jcfg.DatastoreNamespace, &cfg.DatastoreNamespace) - config.ParseDurations( - "crdt", - &config.DurationOpt{Duration: jcfg.RebroadcastInterval, Dst: &cfg.RebroadcastInterval, Name: "rebroadcast_interval"}, - &config.DurationOpt{Duration: jcfg.Batching.MaxBatchAge, Dst: &cfg.Batching.MaxBatchAge, Name: "max_batch_age"}, - &config.DurationOpt{Duration: jcfg.RepairInterval, Dst: &cfg.RepairInterval, Name: "repair_interval"}, - ) - return cfg.Validate() -} - -// ToJSON returns the JSON representation of this configuration. 
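-// For illustration, a freshly-defaulted configuration marshals to roughly:
-//
-//    {
-//      "cluster_name": "ipfs-cluster",
-//      "trusted_peers": ["*"],
-//      "batching": {"max_batch_size": 0, "max_batch_age": "0s"},
-//      "repair_interval": "1h0m0s"
-//    }
-//
-// (exact layout is up to config.DefaultJSONMarshal; optional fields still at
-// their defaults are hidden by toJSONConfig below).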
-func (cfg *Config) ToJSON() ([]byte, error) { - jcfg := cfg.toJSONConfig() - - return config.DefaultJSONMarshal(jcfg) -} - -func (cfg *Config) toJSONConfig() *jsonConfig { - jcfg := &jsonConfig{ - ClusterName: cfg.ClusterName, - PeersetMetric: "", - RebroadcastInterval: "", - } - - if cfg.TrustAll { - jcfg.TrustedPeers = []string{"*"} - } else { - jcfg.TrustedPeers = api.PeersToStrings(cfg.TrustedPeers) - } - - jcfg.Batching.MaxBatchSize = cfg.Batching.MaxBatchSize - jcfg.Batching.MaxBatchAge = cfg.Batching.MaxBatchAge.String() - if cfg.Batching.MaxQueueSize != DefaultBatchingMaxQueueSize { - jcfg.Batching.MaxQueueSize = cfg.Batching.MaxQueueSize - // otherwise leave as 0/hidden - } - - if cfg.PeersetMetric != DefaultPeersetMetric { - jcfg.PeersetMetric = cfg.PeersetMetric - // otherwise leave empty/hidden - } - - if cfg.DatastoreNamespace != DefaultDatastoreNamespace { - jcfg.DatastoreNamespace = cfg.DatastoreNamespace - // otherwise leave empty/hidden - } - - if cfg.RebroadcastInterval != DefaultRebroadcastInterval { - jcfg.RebroadcastInterval = cfg.RebroadcastInterval.String() - } - - jcfg.RepairInterval = cfg.RepairInterval.String() - - return jcfg -} - -// Default sets the configuration fields to their default values. -func (cfg *Config) Default() error { - cfg.ClusterName = DefaultClusterName - cfg.RebroadcastInterval = DefaultRebroadcastInterval - cfg.PeersetMetric = DefaultPeersetMetric - cfg.DatastoreNamespace = DefaultDatastoreNamespace - cfg.TrustedPeers = DefaultTrustedPeers - cfg.TrustAll = DefaultTrustAll - cfg.Batching = BatchingConfig{ - MaxBatchSize: 0, - MaxBatchAge: 0, - MaxQueueSize: DefaultBatchingMaxQueueSize, - } - cfg.RepairInterval = DefaultRepairInterval - return nil -} - -// ApplyEnvVars fills in any Config fields found -// as environment variables. -func (cfg *Config) ApplyEnvVars() error { - jcfg := cfg.toJSONConfig() - - err := envconfig.Process(envConfigKey, jcfg) - if err != nil { - return err - } - - return cfg.applyJSONConfig(jcfg) -} - -// ToDisplayJSON returns JSON config as a string. 
-func (cfg *Config) ToDisplayJSON() ([]byte, error) { - return config.DisplayJSON(cfg.toJSONConfig()) -} - -func (cfg *Config) batchingEnabled() bool { - return cfg.Batching.MaxBatchSize > 0 && - cfg.Batching.MaxBatchAge > 0 -} diff --git a/packages/networking/ipfs-cluster/consensus/crdt/config_test.go b/packages/networking/ipfs-cluster/consensus/crdt/config_test.go deleted file mode 100644 index 932b999..0000000 --- a/packages/networking/ipfs-cluster/consensus/crdt/config_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package crdt - -import ( - "os" - "testing" - "time" -) - -var cfgJSON = []byte(` -{ - "cluster_name": "test", - "trusted_peers": ["QmUZ13osndQ5uL4tPWHXe3iBgBgq9gfewcBMSCAuMBsDJ6"], - "batching": { - "max_batch_size": 30, - "max_batch_age": "5s", - "max_queue_size": 150 - }, - "repair_interval": "1m" -} -`) - -func TestLoadJSON(t *testing.T) { - cfg := &Config{} - err := cfg.LoadJSON(cfgJSON) - if err != nil { - t.Fatal(err) - } - if cfg.TrustAll { - t.Error("TrustAll should not be enabled when peers in trusted peers") - } - - if cfg.Batching.MaxBatchSize != 30 || - cfg.Batching.MaxBatchAge != 5*time.Second || - cfg.Batching.MaxQueueSize != 150 { - t.Error("Batching options were not parsed correctly") - } - if cfg.RepairInterval != time.Minute { - t.Error("repair interval not set") - } - - cfg = &Config{} - err = cfg.LoadJSON([]byte(` -{ - "cluster_name": "test", - "trusted_peers": ["abc"] -}`)) - - if err == nil { - t.Fatal("expected error parsing trusted_peers") - } - - cfg = &Config{} - err = cfg.LoadJSON([]byte(` -{ - "cluster_name": "test", - "trusted_peers": [] -}`)) - if err != nil { - t.Fatal(err) - } - - if cfg.TrustAll { - t.Error("TrustAll is only enabled with '*'") - } - - cfg = &Config{} - err = cfg.LoadJSON([]byte(` -{ - "cluster_name": "test", - "trusted_peers": ["QmUZ13osndQ5uL4tPWHXe3iBgBgq9gfewcBMSCAuMBsDJ6", "*"] -}`)) - if err != nil { - t.Fatal(err) - } - if !cfg.TrustAll { - t.Error("expected TrustAll to be true") - } - - if cfg.Batching.MaxQueueSize != DefaultBatchingMaxQueueSize { - t.Error("MaxQueueSize should be default when unset") - } -} - -func TestToJSON(t *testing.T) { - cfg := &Config{} - cfg.LoadJSON(cfgJSON) - newjson, err := cfg.ToJSON() - if err != nil { - t.Fatal(err) - } - - cfg = &Config{} - err = cfg.LoadJSON(newjson) - if err != nil { - t.Fatal(err) - } -} - -func TestDefault(t *testing.T) { - cfg := &Config{} - cfg.Default() - if cfg.Validate() != nil { - t.Fatal("error validating") - } - - cfg.ClusterName = "" - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } - - cfg.Default() - cfg.PeersetMetric = "" - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } - - cfg.Default() - cfg.RebroadcastInterval = 0 - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } - - cfg.Default() - cfg.Batching.MaxQueueSize = -3 - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } - - cfg.Default() - cfg.RepairInterval = -3 - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } -} - -func TestApplyEnvVars(t *testing.T) { - os.Setenv("CLUSTER_CRDT_CLUSTERNAME", "test2") - os.Setenv("CLUSTER_CRDT_BATCHING_MAXBATCHSIZE", "5") - os.Setenv("CLUSTER_CRDT_BATCHING_MAXBATCHAGE", "10s") - - cfg := &Config{} - cfg.Default() - cfg.ApplyEnvVars() - - if cfg.ClusterName != "test2" { - t.Error("failed to override cluster_name with env var") - } - - if cfg.Batching.MaxBatchSize != 5 { - t.Error("MaxBatchSize as env var does not work") - } - - if cfg.Batching.MaxBatchAge != 
10*time.Second { - t.Error("MaxBatchAge as env var does not work") - } -} diff --git a/packages/networking/ipfs-cluster/consensus/crdt/consensus.go b/packages/networking/ipfs-cluster/consensus/crdt/consensus.go deleted file mode 100644 index 5d00f69..0000000 --- a/packages/networking/ipfs-cluster/consensus/crdt/consensus.go +++ /dev/null @@ -1,737 +0,0 @@ -// Package crdt implements the IPFS Cluster consensus interface using -// CRDT-datastore to replicate the cluster global state to every peer. -package crdt - -import ( - "context" - "errors" - "fmt" - "sort" - "sync" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/pstoremgr" - "github.com/ipfs-cluster/ipfs-cluster/state" - "github.com/ipfs-cluster/ipfs-cluster/state/dsstate" - - ds "github.com/ipfs/go-datastore" - namespace "github.com/ipfs/go-datastore/namespace" - query "github.com/ipfs/go-datastore/query" - crdt "github.com/ipfs/go-ds-crdt" - dshelp "github.com/ipfs/go-ipfs-ds-help" - logging "github.com/ipfs/go-log/v2" - host "github.com/libp2p/go-libp2p/core/host" - peer "github.com/libp2p/go-libp2p/core/peer" - peerstore "github.com/libp2p/go-libp2p/core/peerstore" - "github.com/libp2p/go-libp2p/core/routing" - rpc "github.com/libp2p/go-libp2p-gorpc" - pubsub "github.com/libp2p/go-libp2p-pubsub" - multihash "github.com/multiformats/go-multihash" - - ipfslite "github.com/hsanjuan/ipfs-lite" - trace "go.opencensus.io/trace" -) - -var logger = logging.Logger("crdt") - -var ( - blocksNs = "b" // blockstore namespace - connMgrTag = "crdt" -) - -// Common variables for the module. -var ( - ErrNoLeader = errors.New("crdt consensus component does not provide a leader") - ErrRmPeer = errors.New("crdt consensus component cannot remove peers") - ErrMaxQueueSizeReached = errors.New("batching max_queue_size reached. Too many operations are waiting to be batched. Try increasing the max_queue_size or adjusting the batching options") -) - -// wraps pins so that they can be batched. -type batchItem struct { - ctx context.Context - isPin bool // pin or unpin - pin api.Pin - batched chan error // notify if item was sent for batching -} - -// Consensus implement ipfscluster.Consensus and provides the facility to add -// and remove pins from the Cluster shared state. It uses a CRDT-backed -// implementation of go-datastore (go-ds-crdt). -type Consensus struct { - ctx context.Context - cancel context.CancelFunc - batchingCtx context.Context - batchingCancel context.CancelFunc - - config *Config - - trustedPeers sync.Map - - host host.Host - peerManager *pstoremgr.Manager - - store ds.Datastore - namespace ds.Key - - state state.State - batchingState state.BatchingState - crdt *crdt.Datastore - ipfs *ipfslite.Peer - - dht routing.Routing - pubsub *pubsub.PubSub - - rpcClient *rpc.Client - rpcReady chan struct{} - stateReady chan struct{} - readyCh chan struct{} - - sendToBatchCh chan batchItem - batchItemCh chan batchItem - batchingDone chan struct{} - - shutdownLock sync.RWMutex - shutdown bool -} - -// New creates a new crdt Consensus component. The given PubSub will be used to -// broadcast new heads. The given thread-safe datastore will be used to persist -// data and all will be prefixed with cfg.DatastoreNamespace. 
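-// A minimal construction sketch, mirroring the test helpers further down in
-// this diff (host, dht, psub, store and rpcClient are assumed to exist):
-//
-//    cfg := &Config{}
-//    cfg.Default()
-//    css, err := New(host, dht, psub, cfg, store)
-//    if err != nil { ... }
-//    css.SetClient(rpcClient) // unblocks setup()
-//    <-css.Ready(ctx)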
-func New(
- host host.Host,
- dht routing.Routing,
- pubsub *pubsub.PubSub,
- cfg *Config,
- store ds.Datastore,
-) (*Consensus, error) {
- err := cfg.Validate()
- if err != nil {
- return nil, err
- }
-
- ctx, cancel := context.WithCancel(context.Background())
- batchingCtx, batchingCancel := context.WithCancel(ctx)
-
- var blocksDatastore ds.Batching
- ns := ds.NewKey(cfg.DatastoreNamespace)
- blocksDatastore = namespace.Wrap(store, ns.ChildString(blocksNs))
-
- ipfs, err := ipfslite.New(
- ctx,
- blocksDatastore,
- host,
- dht,
- &ipfslite.Config{
- Offline: false,
- },
- )
- if err != nil {
- logger.Errorf("error creating ipfs-lite: %s", err)
- cancel()
- batchingCancel()
- return nil, err
- }
-
- css := &Consensus{
- ctx: ctx,
- cancel: cancel,
- batchingCtx: batchingCtx,
- batchingCancel: batchingCancel,
- config: cfg,
- host: host,
- peerManager: pstoremgr.New(ctx, host, ""),
- dht: dht,
- store: store,
- ipfs: ipfs,
- namespace: ns,
- pubsub: pubsub,
- rpcReady: make(chan struct{}, 1),
- readyCh: make(chan struct{}, 1),
- stateReady: make(chan struct{}, 1),
- sendToBatchCh: make(chan batchItem),
- batchItemCh: make(chan batchItem, cfg.Batching.MaxQueueSize),
- batchingDone: make(chan struct{}),
- }
-
- go css.setup()
- return css, nil
-}
-
-func (css *Consensus) setup() {
- select {
- case <-css.ctx.Done():
- return
- case <-css.rpcReady:
- }
-
- // Set up a fast-lookup trusted peers cache.
- // Protect these peers in the ConnMgr
- for _, p := range css.config.TrustedPeers {
- css.Trust(css.ctx, p)
- }
-
- // Hash the cluster name and produce the topic name from there
- // as a way to avoid pubsub topic collisions with other
- // pubsub applications when both potentially use
- // simple names like "test".
- topicName := css.config.ClusterName
- topicHash, err := multihash.Sum([]byte(css.config.ClusterName), multihash.MD5, -1)
- if err != nil {
- logger.Errorf("error hashing topic: %s", err)
- } else {
- topicName = topicHash.B58String()
- }
-
- // Validate pubsub messages for our topic (only accept
- // from trusted sources)
- err = css.pubsub.RegisterTopicValidator(
- topicName,
- func(ctx context.Context, _ peer.ID, msg *pubsub.Message) bool {
- signer := msg.GetFrom()
- trusted := css.IsTrustedPeer(ctx, signer)
- if !trusted {
- logger.Debugf("discarded pubsub message from non-trusted source %s", signer)
- }
- return trusted
- },
- )
- if err != nil {
- logger.Errorf("error registering topic validator: %s", err)
- }
-
- broadcaster, err := crdt.NewPubSubBroadcaster(
- css.ctx,
- css.pubsub,
- topicName, // subscription name
- )
- if err != nil {
- logger.Errorf("error creating broadcaster: %s", err)
- return
- }
-
- opts := crdt.DefaultOptions()
- opts.RebroadcastInterval = css.config.RebroadcastInterval
- opts.DAGSyncerTimeout = 2 * time.Minute
- opts.Logger = logger
- opts.RepairInterval = css.config.RepairInterval
- opts.MultiHeadProcessing = false
- opts.NumWorkers = 50
- opts.PutHook = func(k ds.Key, v []byte) {
- ctx, span := trace.StartSpan(css.ctx, "crdt/PutHook")
- defer span.End()
-
- pin := api.Pin{}
- err := pin.ProtoUnmarshal(v)
- if err != nil {
- logger.Error(err)
- return
- }
-
- // TODO: tracing for this context
- err = css.rpcClient.CallContext(
- ctx,
- "",
- "PinTracker",
- "Track",
- pin,
- &struct{}{},
- )
- if err != nil {
- logger.Error(err)
- }
- logger.Infof("new pin added: %s", pin.Cid)
- }
- opts.DeleteHook = func(k ds.Key) {
- ctx, span := trace.StartSpan(css.ctx, "crdt/DeleteHook")
- defer span.End()
-
- kb, err := dshelp.BinaryFromDsKey(k)
- if err != nil {
- logger.Error(err, k)
- return
- }
- c, err := api.CastCid(kb)
- if err != nil {
- logger.Error(err, k)
- return
- }
-
- pin := api.PinCid(c)
-
- err = css.rpcClient.CallContext(
- ctx,
- "",
- "PinTracker",
- "Untrack",
- pin,
- &struct{}{},
- )
- if err != nil {
- logger.Error(err)
- }
- logger.Infof("pin removed: %s", c)
- }
-
- crdt, err := crdt.New(
- css.store,
- css.namespace,
- css.ipfs,
- broadcaster,
- opts,
- )
- if err != nil {
- logger.Error(err)
- return
- }
-
- css.crdt = crdt
-
- clusterState, err := dsstate.New(
- css.ctx,
- css.crdt,
- // unsure if we should set something else but crdt is already
- // namespaced and this would only namespace the keys, which only
- // complicates things.
- "",
- dsstate.DefaultHandle(),
- )
- if err != nil {
- logger.Errorf("error creating cluster state datastore: %s", err)
- return
- }
- css.state = clusterState
-
- batchingState, err := dsstate.NewBatching(
- css.ctx,
- css.crdt,
- "",
- dsstate.DefaultHandle(),
- )
- if err != nil {
- logger.Errorf("error creating cluster state batching datastore: %s", err)
- return
- }
- css.batchingState = batchingState
-
- if css.config.TrustAll {
- logger.Info("'trust all' mode enabled. Any peer in the cluster can modify the pinset.")
- }
-
- // launch batching workers
- if css.config.batchingEnabled() {
- logger.Infof("'crdt batching' enabled: %d items / %s",
- css.config.Batching.MaxBatchSize,
- css.config.Batching.MaxBatchAge.String(),
- )
- go css.sendToBatchWorker()
- go css.batchWorker()
- }
-
- // notifies State() it is safe to return
- close(css.stateReady)
- css.readyCh <- struct{}{}
-}
-
-// Shutdown closes this component, canceling the pubsub subscription and
-// closing the datastore.
-func (css *Consensus) Shutdown(ctx context.Context) error {
- css.shutdownLock.Lock()
- defer css.shutdownLock.Unlock()
-
- if css.shutdown {
- logger.Debug("already shutdown")
- return nil
- }
- css.shutdown = true
-
- logger.Info("stopping Consensus component")
-
- // Cancel the batching code
- css.batchingCancel()
- if css.config.batchingEnabled() {
- <-css.batchingDone
- }
-
- css.cancel()
-
- // Only close crdt after canceling the context, otherwise
- // the pubsub broadcaster stays on and locks it.
- if crdt := css.crdt; crdt != nil {
- crdt.Close()
- }
-
- if css.config.hostShutdown {
- css.host.Close()
- }
-
- css.shutdown = true
- close(css.rpcReady)
- return nil
-}
-
-// SetClient gives the component the ability to communicate and
-// leaves it ready to use.
-func (css *Consensus) SetClient(c *rpc.Client) {
- css.rpcClient = c
- css.rpcReady <- struct{}{}
-}
-
-// Ready returns a channel which is signaled when the component
-// is ready to use.
-func (css *Consensus) Ready(ctx context.Context) <-chan struct{} {
- return css.readyCh
-}
-
-// IsTrustedPeer returns whether the given peer is taken into account
-// when submitting updates to the consensus state.
-func (css *Consensus) IsTrustedPeer(ctx context.Context, pid peer.ID) bool {
- _, span := trace.StartSpan(ctx, "consensus/IsTrustedPeer")
- defer span.End()
-
- if css.config.TrustAll {
- return true
- }
-
- if pid == css.host.ID() {
- return true
- }
-
- _, ok := css.trustedPeers.Load(pid)
- return ok
-}
-
-// Trust marks a peer as "trusted". It makes sure it is trusted as issuer
-// for pubsub updates, it is protected in the connection manager, it
-// has the highest priority when the peerstore is saved, and its addresses
-// are always remembered.
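-// The call is idempotent and safe for concurrent use: the trusted set is a
-// sync.Map, so repeated calls simply overwrite the same entry.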
-func (css *Consensus) Trust(ctx context.Context, pid peer.ID) error {
- _, span := trace.StartSpan(ctx, "consensus/Trust")
- defer span.End()
-
- css.trustedPeers.Store(pid, struct{}{})
- if conman := css.host.ConnManager(); conman != nil {
- conman.Protect(pid, connMgrTag)
- }
- css.peerManager.SetPriority(pid, 0)
- addrs := css.host.Peerstore().Addrs(pid)
- css.host.Peerstore().SetAddrs(pid, addrs, peerstore.PermanentAddrTTL)
- return nil
-}
-
-// Distrust removes a peer from the "trusted" set.
-func (css *Consensus) Distrust(ctx context.Context, pid peer.ID) error {
- _, span := trace.StartSpan(ctx, "consensus/Distrust")
- defer span.End()
-
- css.trustedPeers.Delete(pid)
- return nil
-}
-
-// LogPin adds a new pin to the shared state.
-func (css *Consensus) LogPin(ctx context.Context, pin api.Pin) error {
- ctx, span := trace.StartSpan(ctx, "consensus/LogPin")
- defer span.End()
-
- if css.config.batchingEnabled() {
- batched := make(chan error)
- css.sendToBatchCh <- batchItem{
- ctx: ctx,
- isPin: true,
- pin: pin,
- batched: batched,
- }
- return <-batched
- }
-
- return css.state.Add(ctx, pin)
-}
-
-// LogUnpin removes a pin from the shared state.
-func (css *Consensus) LogUnpin(ctx context.Context, pin api.Pin) error {
- ctx, span := trace.StartSpan(ctx, "consensus/LogUnpin")
- defer span.End()
-
- if css.config.batchingEnabled() {
- batched := make(chan error)
- css.sendToBatchCh <- batchItem{
- ctx: ctx,
- isPin: false,
- pin: pin,
- batched: batched,
- }
- return <-batched
- }
-
- return css.state.Rm(ctx, pin.Cid)
-}
-
-func (css *Consensus) sendToBatchWorker() {
- for {
- select {
- case <-css.batchingCtx.Done():
- close(css.batchItemCh)
- // This will stay here forever to catch any pins sent
- // while shutting down.
- for bi := range css.sendToBatchCh {
- bi.batched <- errors.New("shutting down. Pin could not be batched")
- close(bi.batched)
- }
-
- return
- case bi := <-css.sendToBatchCh:
- select {
- case css.batchItemCh <- bi:
- close(bi.batched) // no error
- default: // queue is full
- err := fmt.Errorf("error batching item: %w", ErrMaxQueueSizeReached)
- logger.Error(err)
- bi.batched <- err
- close(bi.batched)
- }
- }
- }
-}
-
-// Launched in setup as a goroutine.
-func (css *Consensus) batchWorker() {
- defer close(css.batchingDone)
-
- maxSize := css.config.Batching.MaxBatchSize
- maxAge := css.config.Batching.MaxBatchAge
- batchCurSize := 0
- // Create the timer but stop it. It will reset when
- // items start arriving.
- batchTimer := time.NewTimer(maxAge)
- if !batchTimer.Stop() {
- <-batchTimer.C
- }
-
- // Add/Rm from state
- addToBatch := func(bi batchItem) error {
- var err error
- if bi.isPin {
- err = css.batchingState.Add(bi.ctx, bi.pin)
- } else {
- err = css.batchingState.Rm(bi.ctx, bi.pin.Cid)
- }
- if err != nil {
- logger.Errorf("error batching: %s (%s, isPin: %t)", err, bi.pin.Cid, bi.isPin)
- }
- return err
- }
-
- for {
- select {
- case <-css.batchingCtx.Done():
- // Drain batchItemCh of any items still waiting to be batched
- for batchItem := range css.batchItemCh {
- err := addToBatch(batchItem)
- if err != nil {
- continue
- }
- batchCurSize++
- }
- if err := css.batchingState.Commit(css.ctx); err != nil {
- logger.Errorf("error committing batch during shutdown: %s", err)
- }
- logger.Infof("batch commit (shutdown): %d items", batchCurSize)
-
- return
- case batchItem := <-css.batchItemCh:
- // First item in batch. Start the timer.
- if batchCurSize == 0 {
- batchTimer.Reset(maxAge)
- }
-
- err := addToBatch(batchItem)
- if err != nil {
- continue
- }
-
- batchCurSize++
-
- if batchCurSize < maxSize {
- continue
- }
-
- if err := css.batchingState.Commit(css.ctx); err != nil {
- logger.Errorf("error committing batch after reaching max size: %s", err)
- continue
- }
- logger.Infof("batch commit (size): %d items", maxSize)
-
- // Stop the timer and leave it ready to reset on the
- // next item.
- if !batchTimer.Stop() {
- <-batchTimer.C
- }
- batchCurSize = 0
-
- case <-batchTimer.C:
- // Commit
- if err := css.batchingState.Commit(css.ctx); err != nil {
- logger.Errorf("error committing batch after reaching max age: %s", err)
- continue
- }
- logger.Infof("batch commit (max age): %d items", batchCurSize)
- // timer is expired at this point, it will have to be
- // reset.
- batchCurSize = 0
- }
- }
-}
-
-// Peers returns the current known peerset. It uses
-// the monitor component and considers every peer with
-// valid known metrics a member.
-func (css *Consensus) Peers(ctx context.Context) ([]peer.ID, error) {
- ctx, span := trace.StartSpan(ctx, "consensus/Peers")
- defer span.End()
-
- var metrics []api.Metric
-
- err := css.rpcClient.CallContext(
- ctx,
- "",
- "PeerMonitor",
- "LatestMetrics",
- css.config.PeersetMetric,
- &metrics,
- )
- if err != nil {
- return nil, err
- }
-
- var peers []peer.ID
-
- selfIncluded := false
- for _, m := range metrics {
- peers = append(peers, m.Peer)
- if m.Peer == css.host.ID() {
- selfIncluded = true
- }
- }
-
- // Always include self
- if !selfIncluded {
- peers = append(peers, css.host.ID())
- }
-
- sort.Sort(peer.IDSlice(peers))
-
- return peers, nil
-}
-
-// WaitForSync is a no-op as it is not necessary to be fully synced for the
-// component to be usable.
-func (css *Consensus) WaitForSync(ctx context.Context) error { return nil }
-
-// AddPeer is a no-op as we do not need to do peerset management with
-// Merkle-CRDTs. Therefore adding a peer to the peerset means doing nothing.
-func (css *Consensus) AddPeer(ctx context.Context, pid peer.ID) error {
- return nil
-}
-
-// RmPeer is a no-op which always errors, as, since we do not do peerset
-// management, we also have no ability to remove a peer from it.
-func (css *Consensus) RmPeer(ctx context.Context, pid peer.ID) error {
- return ErrRmPeer
-}
-
-// State returns the cluster shared state. It will block until the consensus
-// component is ready, shutdown or the given context has been canceled.
-func (css *Consensus) State(ctx context.Context) (state.ReadOnly, error) {
- select {
- case <-ctx.Done():
- return nil, ctx.Err()
- case <-css.ctx.Done():
- return nil, css.ctx.Err()
- case <-css.stateReady:
- if css.config.batchingEnabled() {
- return css.batchingState, nil
- }
- return css.state, nil
- }
-}
-
-// Clean deletes all crdt-consensus data from the datastore.
-func (css *Consensus) Clean(ctx context.Context) error {
- return Clean(ctx, css.config, css.store)
-}
-
-// Clean deletes all crdt-consensus data from the given datastore.
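-// It deletes every key under cfg.DatastoreNamespace and takes no locks, so
-// it should only run while no Consensus component is using the same store
-// (an assumption; the code below simply iterates and deletes).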
-func Clean(ctx context.Context, cfg *Config, store ds.Datastore) error {
- logger.Info("cleaning all CRDT data from datastore")
- q := query.Query{
- Prefix: cfg.DatastoreNamespace,
- KeysOnly: true,
- }
-
- results, err := store.Query(ctx, q)
- if err != nil {
- return err
- }
- defer results.Close()
-
- for r := range results.Next() {
- if r.Error != nil {
- return r.Error
- }
- k := ds.NewKey(r.Key)
- err := store.Delete(ctx, k)
- if err != nil {
- // do not die, continue cleaning
- logger.Error(err)
- }
- }
- return nil
-}
-
-// Leader returns ErrNoLeader.
-func (css *Consensus) Leader(ctx context.Context) (peer.ID, error) {
- return "", ErrNoLeader
-}
-
-// OfflineState returns an offline, batching state using the given
-// datastore. This allows inspecting and modifying the shared state in
-// offline mode.
-func OfflineState(cfg *Config, store ds.Datastore) (state.BatchingState, error) {
- batching, ok := store.(ds.Batching)
- if !ok {
- return nil, errors.New("must provide a Batching datastore")
- }
- opts := crdt.DefaultOptions()
- opts.Logger = logger
-
- var blocksDatastore ds.Batching = namespace.Wrap(
- batching,
- ds.NewKey(cfg.DatastoreNamespace).ChildString(blocksNs),
- )
-
- ipfs, err := ipfslite.New(
- context.Background(),
- blocksDatastore,
- nil,
- nil,
- &ipfslite.Config{
- Offline: true,
- },
- )
-
- if err != nil {
- return nil, err
- }
-
- crdt, err := crdt.New(
- batching,
- ds.NewKey(cfg.DatastoreNamespace),
- ipfs,
- nil,
- opts,
- )
- if err != nil {
- return nil, err
- }
- return dsstate.NewBatching(context.Background(), crdt, "", dsstate.DefaultHandle())
-}
diff --git a/packages/networking/ipfs-cluster/consensus/crdt/consensus_test.go b/packages/networking/ipfs-cluster/consensus/crdt/consensus_test.go
deleted file mode 100644
index 389b9a8..0000000
--- a/packages/networking/ipfs-cluster/consensus/crdt/consensus_test.go
+++ /dev/null
@@ -1,496 +0,0 @@
-package crdt
-
-import (
- "context"
- "fmt"
- "testing"
- "time"
-
- "github.com/ipfs-cluster/ipfs-cluster/api"
- "github.com/ipfs-cluster/ipfs-cluster/datastore/inmem"
- "github.com/ipfs-cluster/ipfs-cluster/test"
-
- ipns "github.com/ipfs/go-ipns"
- libp2p "github.com/libp2p/go-libp2p"
- host "github.com/libp2p/go-libp2p/core/host"
- peerstore "github.com/libp2p/go-libp2p/core/peerstore"
- dht "github.com/libp2p/go-libp2p-kad-dht"
- dual "github.com/libp2p/go-libp2p-kad-dht/dual"
- pubsub "github.com/libp2p/go-libp2p-pubsub"
- record "github.com/libp2p/go-libp2p-record"
- routedhost "github.com/libp2p/go-libp2p/p2p/host/routed"
-)
-
-func makeTestingHost(t *testing.T) (host.Host, *pubsub.PubSub, *dual.DHT) {
- ctx := context.Background()
- h, err := libp2p.New(
- libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"),
- )
- if err != nil {
- t.Fatal(err)
- }
-
- psub, err := pubsub.NewGossipSub(
- ctx,
- h,
- pubsub.WithMessageSigning(true),
- pubsub.WithStrictSignatureVerification(true),
- )
- if err != nil {
- h.Close()
- t.Fatal(err)
- }
-
- idht, err := dual.New(ctx, h,
- dual.DHTOption(dht.NamespacedValidator("pk", record.PublicKeyValidator{})),
- dual.DHTOption(dht.NamespacedValidator("ipns", ipns.Validator{KeyBook: h.Peerstore()})),
- dual.DHTOption(dht.Concurrency(10)),
- dual.DHTOption(dht.RoutingTableRefreshPeriod(200*time.Millisecond)),
- dual.DHTOption(dht.RoutingTableRefreshQueryTimeout(100*time.Millisecond)),
- )
- if err != nil {
- h.Close()
- t.Fatal(err)
- }
-
- rHost := routedhost.Wrap(h, idht)
- return rHost, psub, idht
-}
-
-func testingConsensus(t *testing.T, idn int) *Consensus {
- cfg := &Config{}
- cfg.Default()
-
- return testingConsensusWithCfg(t, idn, cfg)
-}
-
-func testingConsensusWithCfg(t *testing.T, idn int, cfg *Config) *Consensus {
- h, psub, dht := makeTestingHost(t)
-
- cfg.DatastoreNamespace = fmt.Sprintf("crdttest-%d", idn)
- cfg.hostShutdown = true
-
- cc, err := New(h, dht, psub, cfg, inmem.New())
- if err != nil {
- t.Fatal("cannot create Consensus:", err)
- }
- cc.SetClient(test.NewMockRPCClientWithHost(t, h))
- <-cc.Ready(context.Background())
- return cc
-}
-
-func clean(t *testing.T, cc *Consensus) {
- err := cc.Clean(context.Background())
- if err != nil {
- t.Error(err)
- }
-}
-
-func testPin(c api.Cid) api.Pin {
- p := api.PinCid(c)
- p.ReplicationFactorMin = -1
- p.ReplicationFactorMax = -1
- return p
-}
-
-func TestShutdownConsensus(t *testing.T) {
- ctx := context.Background()
- cc := testingConsensus(t, 1)
- defer clean(t, cc)
- err := cc.Shutdown(ctx)
- if err != nil {
- t.Fatal("Consensus cannot shutdown:", err)
- }
- err = cc.Shutdown(ctx) // should be fine to shutdown twice
- if err != nil {
- t.Fatal("Consensus should be able to shutdown several times")
- }
-}
-
-func TestConsensusPin(t *testing.T) {
- ctx := context.Background()
- cc := testingConsensus(t, 1)
- defer clean(t, cc)
- defer cc.Shutdown(ctx)
-
- err := cc.LogPin(ctx, testPin(test.Cid1))
- if err != nil {
- t.Error(err)
- }
-
- time.Sleep(250 * time.Millisecond)
- st, err := cc.State(ctx)
- if err != nil {
- t.Fatal("error getting state:", err)
- }
-
- out := make(chan api.Pin, 10)
- err = st.List(ctx, out)
- if err != nil {
- t.Fatal(err)
- }
-
- var pins []api.Pin
- for p := range out {
- pins = append(pins, p)
- }
-
- if len(pins) != 1 || !pins[0].Cid.Equals(test.Cid1) {
- t.Error("the added pin should be in the state")
- }
-}
-
-func TestConsensusUnpin(t *testing.T) {
- ctx := context.Background()
- cc := testingConsensus(t, 1)
- defer clean(t, cc)
- defer cc.Shutdown(ctx)
-
- err := cc.LogPin(ctx, testPin(test.Cid1))
- if err != nil {
- t.Error(err)
- }
-
- err = cc.LogUnpin(ctx, api.PinCid(test.Cid1))
- if err != nil {
- t.Error(err)
- }
-}
-
-func TestConsensusUpdate(t *testing.T) {
- ctx := context.Background()
- cc := testingConsensus(t, 1)
- defer clean(t, cc)
- defer cc.Shutdown(ctx)
-
- // Pin first
- pin := testPin(test.Cid1)
- pin.Type = api.ShardType
- err := cc.LogPin(ctx, pin)
- if err != nil {
- t.Fatal(err)
- }
- time.Sleep(500 * time.Millisecond)
-
- // Update pin
- pin.Reference = &test.Cid2
- err = cc.LogPin(ctx, pin)
- if err != nil {
- t.Error(err)
- }
-
- time.Sleep(500 * time.Millisecond)
- st, err := cc.State(ctx)
- if err != nil {
- t.Fatal("error getting state:", err)
- }
-
- // Channel will not block sending because plenty of space
- out := make(chan api.Pin, 100)
- err = st.List(ctx, out)
- if err != nil {
- t.Fatal(err)
- }
-
- var pins []api.Pin
-
- for p := range out {
- pins = append(pins, p)
- }
-
- if len(pins) != 1 || !pins[0].Cid.Equals(test.Cid1) {
- t.Fatal("the added pin should be in the state")
- }
- if !pins[0].Reference.Equals(test.Cid2) {
- t.Error("pin updated incorrectly")
- }
-}
-
-func TestConsensusAddRmPeer(t *testing.T) {
- ctx := context.Background()
- cc := testingConsensus(t, 1)
- cc2 := testingConsensus(t, 2)
- defer clean(t, cc)
- defer clean(t, cc2)
- defer cc.Shutdown(ctx)
- defer cc2.Shutdown(ctx)
-
- cc.host.Peerstore().AddAddrs(cc2.host.ID(), cc2.host.Addrs(), peerstore.PermanentAddrTTL)
- _, err := cc.host.Network().DialPeer(ctx, cc2.host.ID())
- if err != nil {
- t.Fatal(err)
- }
- time.Sleep(100 * time.Millisecond)
-
- err = cc.AddPeer(ctx, cc2.host.ID())
- if err != nil {
- t.Error("could not add peer:", err)
- }
-
- err = cc2.Trust(ctx, cc.host.ID())
- if err != nil {
- t.Error("could not trust peer:", err)
- }
-
- // Make a pin on peer1 and check it arrived to peer2
- err = cc.LogPin(ctx, testPin(test.Cid1))
- if err != nil {
- t.Error(err)
- }
-
- time.Sleep(500 * time.Millisecond)
- st, err := cc2.State(ctx)
- if err != nil {
- t.Fatal("error getting state:", err)
- }
-
- out := make(chan api.Pin, 100)
- err = st.List(ctx, out)
- if err != nil {
- t.Fatal(err)
- }
-
- var pins []api.Pin
-
- for p := range out {
- pins = append(pins, p)
- }
-
- if len(pins) != 1 || !pins[0].Cid.Equals(test.Cid1) {
- t.Error("the added pin should be in the state")
- }
-
- err = cc2.RmPeer(ctx, cc.host.ID())
- if err == nil {
- t.Error("crdt consensus should not remove pins")
- }
-}
-
-func TestConsensusDistrustPeer(t *testing.T) {
- ctx := context.Background()
- cc := testingConsensus(t, 1)
- cc2 := testingConsensus(t, 2)
- defer clean(t, cc)
- defer clean(t, cc2)
- defer cc.Shutdown(ctx)
- defer cc2.Shutdown(ctx)
-
- cc.host.Peerstore().AddAddrs(cc2.host.ID(), cc2.host.Addrs(), peerstore.PermanentAddrTTL)
- _, err := cc.host.Network().DialPeer(ctx, cc2.host.ID())
- if err != nil {
- t.Fatal(err)
- }
- time.Sleep(100 * time.Millisecond)
-
- err = cc2.Trust(ctx, cc.host.ID())
- if err != nil {
- t.Error("could not trust peer:", err)
- }
-
- // Make a pin on peer1 and check it arrived to peer2
- err = cc.LogPin(ctx, testPin(test.Cid1))
- if err != nil {
- t.Error(err)
- }
-
- time.Sleep(250 * time.Millisecond)
-
- err = cc2.Distrust(ctx, cc.host.ID())
- if err != nil {
- t.Error("could not distrust peer:", err)
- }
-
- // Another pin should never get to peer2
- err = cc.LogPin(ctx, testPin(test.Cid2))
- if err != nil {
- t.Error(err)
- }
-
- // Verify we only got the first pin
- st, err := cc2.State(ctx)
- if err != nil {
- t.Fatal("error getting state:", err)
- }
-
- out := make(chan api.Pin, 10)
- err = st.List(ctx, out)
- if err != nil {
- t.Fatal(err)
- }
-
- var pins []api.Pin
-
- for p := range out {
- pins = append(pins, p)
- }
-
- if len(pins) != 1 || !pins[0].Cid.Equals(test.Cid1) {
- t.Error("only first pin should be in the state")
- }
-}
-
-func TestPeers(t *testing.T) {
- ctx := context.Background()
- cc := testingConsensus(t, 1)
- defer clean(t, cc)
- defer cc.Shutdown(ctx)
-
- peers, err := cc.Peers(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- // 1 is ourselves and the other comes from rpc
- // mock PeerMonitorLatestMetrics
- if len(peers) != 2 {
- t.Error("unexpected number of peers")
- }
-}
-
-func TestOfflineState(t *testing.T) {
- ctx := context.Background()
- cc := testingConsensus(t, 1)
- defer clean(t, cc)
- defer cc.Shutdown(ctx)
-
- // Make pin 1
- err := cc.LogPin(ctx, testPin(test.Cid1))
- if err != nil {
- t.Error(err)
- }
-
- // Make pin 2
- err = cc.LogPin(ctx, testPin(test.Cid2))
- if err != nil {
- t.Error(err)
- }
-
- err = cc.Shutdown(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- offlineState, err := OfflineState(cc.config, cc.store)
- if err != nil {
- t.Fatal(err)
- }
-
- out := make(chan api.Pin, 100)
- err = offlineState.List(ctx, out)
- if err != nil {
- t.Fatal(err)
- }
-
- var pins []api.Pin
-
- for p := range out {
- pins = append(pins, p)
- }
-
- if len(pins) != 2 {
- t.Error("there should be two pins in the state")
- }
-}
-
-func TestBatching(t *testing.T) {
- ctx := context.Background()
- cfg := &Config{}
- cfg.Default()
- cfg.Batching.MaxBatchSize = 3
- cfg.Batching.MaxBatchAge =
1 * time.Second - - cc := testingConsensusWithCfg(t, 1, cfg) - defer clean(t, cc) - defer cc.Shutdown(ctx) - - st, err := cc.State(ctx) - if err != nil { - t.Fatal("error getting state:", err) - } - - // Pin something - err = cc.LogPin(ctx, testPin(test.Cid1)) - if err != nil { - t.Error(err) - } - - time.Sleep(250 * time.Millisecond) - - out := make(chan api.Pin, 100) - err = st.List(ctx, out) - if err != nil { - t.Fatal(err) - } - - var pins []api.Pin - - for p := range out { - pins = append(pins, p) - } - - if len(pins) != 0 { - t.Error("pin should not be pinned yet as it is being batched") - } - - // Trigger batch auto-commit by time - time.Sleep(time.Second) - - out = make(chan api.Pin, 100) - err = st.List(ctx, out) - if err != nil { - t.Fatal(err) - } - - pins = nil - - for p := range out { - pins = append(pins, p) - } - - if len(pins) != 1 || !pins[0].Cid.Equals(test.Cid1) { - t.Error("the added pin should be in the state") - } - - // Pin 4 things, and check that 3 are committed - for _, c := range []api.Cid{test.Cid2, test.Cid3, test.Cid4, test.Cid5} { - err = cc.LogPin(ctx, testPin(c)) - if err != nil { - t.Error(err) - } - } - - // Give a chance for things to persist - time.Sleep(250 * time.Millisecond) - - out = make(chan api.Pin, 100) - err = st.List(ctx, out) - if err != nil { - t.Fatal(err) - } - - pins = nil - for p := range out { - pins = append(pins, p) - } - - if len(pins) != 4 { - t.Error("expected 4 items pinned") - } - - // wait for the last pin - time.Sleep(time.Second) - - out = make(chan api.Pin, 100) - err = st.List(ctx, out) - if err != nil { - t.Fatal(err) - } - pins = nil - for p := range out { - pins = append(pins, p) - } - - if len(pins) != 5 { - t.Error("expected 5 items pinned") - } -} diff --git a/packages/networking/ipfs-cluster/consensus/raft/config.go b/packages/networking/ipfs-cluster/consensus/raft/config.go deleted file mode 100644 index 2263a16..0000000 --- a/packages/networking/ipfs-cluster/consensus/raft/config.go +++ /dev/null @@ -1,320 +0,0 @@ -package raft - -import ( - "encoding/json" - "errors" - "io" - "path/filepath" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/config" - - peer "github.com/libp2p/go-libp2p/core/peer" - - hraft "github.com/hashicorp/raft" - "github.com/kelseyhightower/envconfig" -) - -// ConfigKey is the default configuration key for holding this component's -// configuration section. -var configKey = "raft" -var envConfigKey = "cluster_raft" - -// Configuration defaults -var ( - DefaultDataSubFolder = "raft" - DefaultWaitForLeaderTimeout = 15 * time.Second - DefaultCommitRetries = 1 - DefaultNetworkTimeout = 10 * time.Second - DefaultCommitRetryDelay = 200 * time.Millisecond - DefaultBackupsRotate = 6 - DefaultDatastoreNamespace = "/r" // from "/raft" -) - -// Config allows to configure the Raft Consensus component for ipfs-cluster. -// The component's configuration section is represented by ConfigJSON. -// Config implements the ComponentConfig interface. -type Config struct { - config.Saver - - // will shutdown libp2p host on shutdown. Useful for testing - hostShutdown bool - - // A folder to store Raft's data. - DataFolder string - - // InitPeerset provides the list of initial cluster peers for new Raft - // peers (with no prior state). It is ignored when Raft was already - // initialized or when starting in staging mode. - InitPeerset []peer.ID - // LeaderTimeout specifies how long to wait for a leader before - // failing an operation. 
- WaitForLeaderTimeout time.Duration - // NetworkTimeout specifies how long before a Raft network - // operation is timed out - NetworkTimeout time.Duration - // CommitRetries specifies how many times we retry a failed commit until - // we give up. - CommitRetries int - // How long to wait between retries - CommitRetryDelay time.Duration - // BackupsRotate specifies the maximum number of Raft's DataFolder - // copies that we keep as backups (renaming) after cleanup. - BackupsRotate int - // Namespace to use when writing keys to the datastore - DatastoreNamespace string - - // A Hashicorp Raft's configuration object. - RaftConfig *hraft.Config - - // Tracing enables propagation of contexts across binary boundaries. - Tracing bool -} - -// ConfigJSON represents a human-friendly Config -// object which can be saved to JSON. Most configuration keys are converted -// into simple types like strings, and key names aim to be self-explanatory -// for the user. -// Check https://godoc.org/github.com/hashicorp/raft#Config for extended -// description on all Raft-specific keys. -type jsonConfig struct { - // Storage folder for snapshots, log store etc. Used by - // the Raft. - DataFolder string `json:"data_folder,omitempty"` - - // InitPeerset provides the list of initial cluster peers for new Raft - // peers (with no prior state). It is ignored when Raft was already - // initialized or when starting in staging mode. - InitPeerset []string `json:"init_peerset"` - - // How long to wait for a leader before failing - WaitForLeaderTimeout string `json:"wait_for_leader_timeout"` - - // How long to wait before timing out network operations - NetworkTimeout string `json:"network_timeout"` - - // How many retries to make upon a failed commit - CommitRetries int `json:"commit_retries"` - - // How long to wait between commit retries - CommitRetryDelay string `json:"commit_retry_delay"` - - // BackupsRotate specifies the maximum number of Raft's DataFolder - // copies that we keep as backups (renaming) after cleanup. - BackupsRotate int `json:"backups_rotate"` - - DatastoreNamespace string `json:"datastore_namespace,omitempty"` - - // HeartbeatTimeout specifies the time in follower state without - // a leader before we attempt an election. - HeartbeatTimeout string `json:"heartbeat_timeout,omitempty"` - - // ElectionTimeout specifies the time in candidate state without - // a leader before we attempt an election. - ElectionTimeout string `json:"election_timeout,omitempty"` - - // CommitTimeout controls the time without an Apply() operation - // before we heartbeat to ensure a timely commit. - CommitTimeout string `json:"commit_timeout,omitempty"` - - // MaxAppendEntries controls the maximum number of append entries - // to send at once. - MaxAppendEntries int `json:"max_append_entries,omitempty"` - - // TrailingLogs controls how many logs we leave after a snapshot. - TrailingLogs uint64 `json:"trailing_logs,omitempty"` - - // SnapshotInterval controls how often we check if we should perform - // a snapshot. - SnapshotInterval string `json:"snapshot_interval,omitempty"` - - // SnapshotThreshold controls how many outstanding logs there must be - // before we perform a snapshot. - SnapshotThreshold uint64 `json:"snapshot_threshold,omitempty"` - - // LeaderLeaseTimeout is used to control how long the "lease" lasts - // for being the leader without being able to contact a quorum - // of nodes. If we reach this interval without contact, we will - // step down as leader. 
- LeaderLeaseTimeout string `json:"leader_lease_timeout,omitempty"`
-
- // The unique ID for this server across all time. When running with
- // ProtocolVersion < 3, you must set this to be the same as the network
- // address of your transport.
- // LocalID string `json:local_id`
-}
-
-// ConfigKey returns a human-friendly identifier for this Config.
-func (cfg *Config) ConfigKey() string {
- return configKey
-}
-
-// Validate checks that this configuration has working values,
-// at least in appearance.
-func (cfg *Config) Validate() error {
- if cfg.RaftConfig == nil {
- return errors.New("no hashicorp/raft.Config")
- }
- if cfg.WaitForLeaderTimeout <= 0 {
- return errors.New("wait_for_leader_timeout <= 0")
- }
-
- if cfg.NetworkTimeout <= 0 {
- return errors.New("network_timeout <= 0")
- }
-
- if cfg.CommitRetries < 0 {
- return errors.New("commit_retries is invalid")
- }
-
- if cfg.CommitRetryDelay <= 0 {
- return errors.New("commit_retry_delay is invalid")
- }
-
- if cfg.BackupsRotate <= 0 {
- return errors.New("backups_rotate should be larger than 0")
- }
-
- return hraft.ValidateConfig(cfg.RaftConfig)
-}
-
-// LoadJSON parses a json-encoded configuration (see jsonConfig).
-// The Config will have default values for all fields not explicitly set
-// in the given JSON object.
-func (cfg *Config) LoadJSON(raw []byte) error {
- jcfg := &jsonConfig{}
- err := json.Unmarshal(raw, jcfg)
- if err != nil {
- logger.Error("Error unmarshaling raft config")
- return err
- }
-
- cfg.Default()
-
- return cfg.applyJSONConfig(jcfg)
-}
-
-func (cfg *Config) applyJSONConfig(jcfg *jsonConfig) error {
- parseDuration := func(txt string) time.Duration {
- d, _ := time.ParseDuration(txt)
- if txt != "" && d == 0 {
- logger.Warnf("%s is not a valid duration. Default will be used", txt)
- }
- return d
- }
-
- // Parse durations. We ignore errors as 0 will take Default values.
- waitForLeaderTimeout := parseDuration(jcfg.WaitForLeaderTimeout)
- networkTimeout := parseDuration(jcfg.NetworkTimeout)
- commitRetryDelay := parseDuration(jcfg.CommitRetryDelay)
- heartbeatTimeout := parseDuration(jcfg.HeartbeatTimeout)
- electionTimeout := parseDuration(jcfg.ElectionTimeout)
- commitTimeout := parseDuration(jcfg.CommitTimeout)
- snapshotInterval := parseDuration(jcfg.SnapshotInterval)
- leaderLeaseTimeout := parseDuration(jcfg.LeaderLeaseTimeout)
-
- // Set all values in config. For some, take defaults if they are 0.
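- // (config.SetIfNotDefault only overwrites its destination when the parsed
- // value is non-zero, so keys absent from the JSON keep the defaults from
- // cfg.Default().)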
- // Set values from jcfg if they are not 0 values - - // Own values - config.SetIfNotDefault(jcfg.DataFolder, &cfg.DataFolder) - config.SetIfNotDefault(waitForLeaderTimeout, &cfg.WaitForLeaderTimeout) - config.SetIfNotDefault(networkTimeout, &cfg.NetworkTimeout) - cfg.CommitRetries = jcfg.CommitRetries - config.SetIfNotDefault(commitRetryDelay, &cfg.CommitRetryDelay) - config.SetIfNotDefault(jcfg.BackupsRotate, &cfg.BackupsRotate) - - // Raft values - config.SetIfNotDefault(heartbeatTimeout, &cfg.RaftConfig.HeartbeatTimeout) - config.SetIfNotDefault(electionTimeout, &cfg.RaftConfig.ElectionTimeout) - config.SetIfNotDefault(commitTimeout, &cfg.RaftConfig.CommitTimeout) - config.SetIfNotDefault(jcfg.MaxAppendEntries, &cfg.RaftConfig.MaxAppendEntries) - config.SetIfNotDefault(jcfg.TrailingLogs, &cfg.RaftConfig.TrailingLogs) - config.SetIfNotDefault(snapshotInterval, &cfg.RaftConfig.SnapshotInterval) - config.SetIfNotDefault(jcfg.SnapshotThreshold, &cfg.RaftConfig.SnapshotThreshold) - config.SetIfNotDefault(leaderLeaseTimeout, &cfg.RaftConfig.LeaderLeaseTimeout) - - cfg.InitPeerset = api.StringsToPeers(jcfg.InitPeerset) - return cfg.Validate() -} - -// ToJSON returns the pretty JSON representation of a Config. -func (cfg *Config) ToJSON() ([]byte, error) { - jcfg := cfg.toJSONConfig() - - return config.DefaultJSONMarshal(jcfg) -} - -func (cfg *Config) toJSONConfig() *jsonConfig { - jcfg := &jsonConfig{ - DataFolder: cfg.DataFolder, - InitPeerset: api.PeersToStrings(cfg.InitPeerset), - WaitForLeaderTimeout: cfg.WaitForLeaderTimeout.String(), - NetworkTimeout: cfg.NetworkTimeout.String(), - CommitRetries: cfg.CommitRetries, - CommitRetryDelay: cfg.CommitRetryDelay.String(), - BackupsRotate: cfg.BackupsRotate, - HeartbeatTimeout: cfg.RaftConfig.HeartbeatTimeout.String(), - ElectionTimeout: cfg.RaftConfig.ElectionTimeout.String(), - CommitTimeout: cfg.RaftConfig.CommitTimeout.String(), - MaxAppendEntries: cfg.RaftConfig.MaxAppendEntries, - TrailingLogs: cfg.RaftConfig.TrailingLogs, - SnapshotInterval: cfg.RaftConfig.SnapshotInterval.String(), - SnapshotThreshold: cfg.RaftConfig.SnapshotThreshold, - LeaderLeaseTimeout: cfg.RaftConfig.LeaderLeaseTimeout.String(), - } - if cfg.DatastoreNamespace != DefaultDatastoreNamespace { - jcfg.DatastoreNamespace = cfg.DatastoreNamespace - // otherwise leave empty so it gets omitted. - } - return jcfg -} - -// Default initializes this configuration with working defaults. -func (cfg *Config) Default() error { - cfg.DataFolder = "" // empty so it gets omitted - cfg.InitPeerset = []peer.ID{} - cfg.WaitForLeaderTimeout = DefaultWaitForLeaderTimeout - cfg.NetworkTimeout = DefaultNetworkTimeout - cfg.CommitRetries = DefaultCommitRetries - cfg.CommitRetryDelay = DefaultCommitRetryDelay - cfg.BackupsRotate = DefaultBackupsRotate - cfg.DatastoreNamespace = DefaultDatastoreNamespace - cfg.RaftConfig = hraft.DefaultConfig() - - // These options are imposed over any Default Raft Config. - cfg.RaftConfig.ShutdownOnRemove = false - cfg.RaftConfig.LocalID = "will_be_set_automatically" - - // Set up logging - cfg.RaftConfig.LogOutput = io.Discard - cfg.RaftConfig.Logger = &hcLogToLogger{} - return nil -} - -// ApplyEnvVars fills in any Config fields found -// as environment variables. -func (cfg *Config) ApplyEnvVars() error { - jcfg := cfg.toJSONConfig() - - err := envconfig.Process(envConfigKey, jcfg) - if err != nil { - return err - } - - return cfg.applyJSONConfig(jcfg) -} - -// GetDataFolder returns the Raft data folder that we are using. 
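-// For example, with data_folder unset and a base directory of
-// "/var/lib/ipfs-cluster" (an illustrative path), this returns
-// "/var/lib/ipfs-cluster/raft".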
-func (cfg *Config) GetDataFolder() string { - if cfg.DataFolder == "" { - return filepath.Join(cfg.BaseDir, DefaultDataSubFolder) - } - return cfg.DataFolder -} - -// ToDisplayJSON returns JSON config as a string. -func (cfg *Config) ToDisplayJSON() ([]byte, error) { - return config.DisplayJSON(cfg.toJSONConfig()) -} diff --git a/packages/networking/ipfs-cluster/consensus/raft/config_test.go b/packages/networking/ipfs-cluster/consensus/raft/config_test.go deleted file mode 100644 index eb46f29..0000000 --- a/packages/networking/ipfs-cluster/consensus/raft/config_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package raft - -import ( - "encoding/json" - "os" - "testing" - - hraft "github.com/hashicorp/raft" -) - -var cfgJSON = []byte(` -{ - "init_peerset": [], - "wait_for_leader_timeout": "15s", - "network_timeout": "1s", - "commit_retries": 1, - "commit_retry_delay": "200ms", - "backups_rotate": 5, - "heartbeat_timeout": "1s", - "election_timeout": "1s", - "commit_timeout": "50ms", - "max_append_entries": 64, - "trailing_logs": 10240, - "snapshot_interval": "2m0s", - "snapshot_threshold": 8192, - "leader_lease_timeout": "500ms" -} -`) - -func TestLoadJSON(t *testing.T) { - cfg := &Config{} - err := cfg.LoadJSON(cfgJSON) - if err != nil { - t.Fatal(err) - } - - j := &jsonConfig{} - json.Unmarshal(cfgJSON, j) - j.HeartbeatTimeout = "1us" - tst, _ := json.Marshal(j) - err = cfg.LoadJSON(tst) - if err == nil { - t.Error("expected error decoding heartbeat_timeout") - } - - json.Unmarshal(cfgJSON, j) - j.LeaderLeaseTimeout = "abc" - tst, _ = json.Marshal(j) - err = cfg.LoadJSON(tst) - if err != nil { - t.Fatal(err) - } - def := hraft.DefaultConfig() - if cfg.RaftConfig.LeaderLeaseTimeout != def.LeaderLeaseTimeout { - t.Error("expected default leader lease") - } -} - -func TestToJSON(t *testing.T) { - cfg := &Config{} - cfg.LoadJSON(cfgJSON) - newjson, err := cfg.ToJSON() - if err != nil { - t.Fatal(err) - } - cfg = &Config{} - err = cfg.LoadJSON(newjson) - if err != nil { - t.Fatal(err) - } -} - -func TestDefault(t *testing.T) { - cfg := &Config{} - cfg.Default() - if cfg.Validate() != nil { - t.Fatal("error validating") - } - - cfg.RaftConfig.HeartbeatTimeout = 0 - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } - - cfg.Default() - cfg.RaftConfig = nil - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } - - cfg.Default() - cfg.CommitRetries = -1 - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } - - cfg.Default() - cfg.WaitForLeaderTimeout = 0 - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } - - cfg.Default() - cfg.BackupsRotate = 0 - - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } -} - -func TestApplyEnvVars(t *testing.T) { - os.Setenv("CLUSTER_RAFT_COMMITRETRIES", "300") - cfg := &Config{} - cfg.Default() - cfg.ApplyEnvVars() - - if cfg.CommitRetries != 300 { - t.Fatal("failed to override commit_retries with env var") - } -} diff --git a/packages/networking/ipfs-cluster/consensus/raft/consensus.go b/packages/networking/ipfs-cluster/consensus/raft/consensus.go deleted file mode 100644 index bea88dc..0000000 --- a/packages/networking/ipfs-cluster/consensus/raft/consensus.go +++ /dev/null @@ -1,568 +0,0 @@ -// Package raft implements a Consensus component for IPFS Cluster which uses -// Raft (go-libp2p-raft). 
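-// The heavy lifting is delegated to hashicorp/raft through go-libp2p-raft's
-// OpLog and Actor interfaces (see NewConsensus below).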
-package raft - -import ( - "context" - "errors" - "fmt" - "sort" - "sync" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/state" - "github.com/ipfs-cluster/ipfs-cluster/state/dsstate" - - ds "github.com/ipfs/go-datastore" - logging "github.com/ipfs/go-log/v2" - consensus "github.com/libp2p/go-libp2p-consensus" - rpc "github.com/libp2p/go-libp2p-gorpc" - libp2praft "github.com/libp2p/go-libp2p-raft" - host "github.com/libp2p/go-libp2p/core/host" - peer "github.com/libp2p/go-libp2p/core/peer" - - "go.opencensus.io/tag" - "go.opencensus.io/trace" -) - -var logger = logging.Logger("raft") - -// Consensus handles the work of keeping a shared-state between -// the peers of an IPFS Cluster, as well as modifying that state and -// applying any updates in a thread-safe manner. -type Consensus struct { - ctx context.Context - cancel func() - config *Config - - host host.Host - - consensus consensus.OpLogConsensus - actor consensus.Actor - baseOp *LogOp - raft *raftWrapper - - rpcClient *rpc.Client - rpcReady chan struct{} - readyCh chan struct{} - - shutdownLock sync.RWMutex - shutdown bool -} - -// NewConsensus builds a new ClusterConsensus component using Raft. -// -// Raft saves state snapshots regularly and persists log data in a bolt -// datastore. Therefore, unless memory usage is a concern, it is recommended -// to use an in-memory go-datastore as store parameter. -// -// The staging parameter controls if the Raft peer should start in -// staging mode (used when joining a new Raft peerset with other peers). -// -// The store parameter should be a thread-safe datastore. -func NewConsensus( - host host.Host, - cfg *Config, - store ds.Datastore, - staging bool, // this peer must not be bootstrapped if no state exists -) (*Consensus, error) { - err := cfg.Validate() - if err != nil { - return nil, err - } - ctx, cancel := context.WithCancel(context.Background()) - - logger.Debug("starting Consensus and waiting for a leader...") - baseOp := &LogOp{tracing: cfg.Tracing} - state, err := dsstate.New( - ctx, - store, - cfg.DatastoreNamespace, - dsstate.DefaultHandle(), - ) - if err != nil { - cancel() - return nil, err - } - consensus := libp2praft.NewOpLog(state, baseOp) - raft, err := newRaftWrapper(host, cfg, consensus.FSM(), staging) - if err != nil { - logger.Error("error creating raft: ", err) - cancel() - return nil, err - } - actor := libp2praft.NewActor(raft.raft) - consensus.SetActor(actor) - - cc := &Consensus{ - ctx: ctx, - cancel: cancel, - config: cfg, - host: host, - consensus: consensus, - actor: actor, - baseOp: baseOp, - raft: raft, - rpcReady: make(chan struct{}, 1), - readyCh: make(chan struct{}, 1), - } - - baseOp.consensus = cc - - go cc.finishBootstrap() - return cc, nil -} - -// WaitForSync waits for a leader and for the state to be up to date, then returns. -func (cc *Consensus) WaitForSync(ctx context.Context) error { - ctx, span := trace.StartSpan(ctx, "consensus/WaitForSync") - defer span.End() - - leaderCtx, cancel := context.WithTimeout( - ctx, - cc.config.WaitForLeaderTimeout) - defer cancel() - - // 1 - wait for leader - // 2 - wait until we are a Voter - // 3 - wait until last index is applied - - // From raft docs: - - // once a staging server receives enough log entries to be sufficiently - // caught up to the leader's log, the leader will invoke a membership - // change to change the Staging server to a Voter - - // Thus, waiting to be a Voter is a guarantee that we have a reasonable - // up to date state. 
Otherwise, we might return too early (see - https://github.com/ipfs-cluster/ipfs-cluster/issues/378) - - _, err := cc.raft.WaitForLeader(leaderCtx) - if err != nil { - return errors.New("error waiting for leader: " + err.Error()) - } - - err = cc.raft.WaitForVoter(ctx) - if err != nil { - return errors.New("error waiting to become a Voter: " + err.Error()) - } - - err = cc.raft.WaitForUpdates(ctx) - if err != nil { - return errors.New("error waiting for consensus updates: " + err.Error()) - } - return nil -} - -// waits until there is a consensus leader and syncs the state -// to the tracker. If errors happen, this will return and never -// signal the component as Ready. -func (cc *Consensus) finishBootstrap() { - // wait until we have RPC to perform any actions. - select { - case <-cc.ctx.Done(): - return - case <-cc.rpcReady: - } - - // Sometimes bootstrap is a no-op. It only applies when - // no state exists and staging=false. - _, err := cc.raft.Bootstrap() - if err != nil { - return - } - - err = cc.WaitForSync(cc.ctx) - if err != nil { - return - } - logger.Debug("Raft state is now up to date") - logger.Debug("consensus ready") - cc.readyCh <- struct{}{} -} - -// Shutdown stops the component so it will not process any -// more updates. The underlying consensus is permanently -// shutdown, along with the libp2p transport. -func (cc *Consensus) Shutdown(ctx context.Context) error { - ctx, span := trace.StartSpan(ctx, "consensus/Shutdown") - defer span.End() - - cc.shutdownLock.Lock() - defer cc.shutdownLock.Unlock() - - if cc.shutdown { - logger.Debug("already shutdown") - return nil - } - - logger.Info("stopping Consensus component") - - // Raft Shutdown - err := cc.raft.Shutdown(ctx) - if err != nil { - logger.Error(err) - } - - if cc.config.hostShutdown { - cc.host.Close() - } - - cc.shutdown = true - cc.cancel() - close(cc.rpcReady) - return nil -} - -// SetClient makes the component ready to perform RPC requests -func (cc *Consensus) SetClient(c *rpc.Client) { - cc.rpcClient = c - cc.rpcReady <- struct{}{} -} - -// Ready returns a channel which is signaled when the Consensus -// algorithm has finished bootstrapping and is ready to use -func (cc *Consensus) Ready(ctx context.Context) <-chan struct{} { - _, span := trace.StartSpan(ctx, "consensus/Ready") - defer span.End() - - return cc.readyCh -} - -// IsTrustedPeer returns true. In Raft we trust all peers. -func (cc *Consensus) IsTrustedPeer(ctx context.Context, p peer.ID) bool { - return true -} - -// Trust is a no-op. -func (cc *Consensus) Trust(ctx context.Context, pid peer.ID) error { return nil } - -// Distrust is a no-op. -func (cc *Consensus) Distrust(ctx context.Context, pid peer.ID) error { return nil } - -func (cc *Consensus) op(ctx context.Context, pin api.Pin, t LogOpType) *LogOp { - return &LogOp{ - Cid: pin, - Type: t, - } -} - -// returns true if the operation was redirected to the leader -// note that if the leader just disappeared, the RPC call will -// fail because we haven't heard that it's gone. -func (cc *Consensus) redirectToLeader(method string, arg interface{}) (bool, error) { - ctx, span := trace.StartSpan(cc.ctx, "consensus/redirectToLeader") - defer span.End() - - var finalErr error - - // Retry redirects - for i := 0; i <= cc.config.CommitRetries; i++ { - logger.Debugf("redirect try %d", i) - leader, err := cc.Leader(ctx) - - // No leader, wait for one - if err != nil { - logger.Warn("there seems to be no leader. Waiting for one") - rctx, cancel := context.WithTimeout( - ctx, - cc.config.WaitForLeaderTimeout, - ) - defer cancel() - pidstr, err := cc.raft.WaitForLeader(rctx) - - // means we timed out waiting for a leader - // we don't retry in this case - if err != nil { - return false, fmt.Errorf("timed out waiting for leader: %s", err) - } - leader, err = peer.Decode(pidstr) - if err != nil { - return false, err - } - } - - // We are the leader. Do not redirect - if leader == cc.host.ID() { - return false, nil - } - - logger.Debugf("redirecting %s to leader: %s", method, leader.Pretty()) - finalErr = cc.rpcClient.CallContext( - ctx, - leader, - "Consensus", - method, - arg, - &struct{}{}, - ) - if finalErr != nil { - logger.Errorf("retrying to redirect request to leader: %s", finalErr) - time.Sleep(2 * cc.config.RaftConfig.HeartbeatTimeout) - continue - } - break - } - - // We tried to redirect, but something happened - return true, finalErr -} - -// commit submits a cc.consensus commit. It retries upon failures. -func (cc *Consensus) commit(ctx context.Context, op *LogOp, rpcOp string, redirectArg interface{}) error { - ctx, span := trace.StartSpan(ctx, "consensus/commit") - defer span.End() - - if cc.config.Tracing { - // required to cross the serialized boundary - op.SpanCtx = span.SpanContext() - tagmap := tag.FromContext(ctx) - if tagmap != nil { - op.TagCtx = tag.Encode(tagmap) - } - } - - var finalErr error - for i := 0; i <= cc.config.CommitRetries; i++ { - logger.Debugf("attempt #%d: committing %+v", i, op) - - // this means we are retrying - if finalErr != nil { - logger.Errorf("retrying upon failed commit (retry %d): %s ", - i, finalErr) - } - - // try to send it to the leader - // redirectToLeader has its own retry loop. If this fails - // we're done here. - ok, err := cc.redirectToLeader(rpcOp, redirectArg) - if err != nil || ok { - return err - } - - // Being here means we are the LEADER. We can commit. - - // now commit the changes to our state - cc.shutdownLock.RLock() // do not shut down while committing - _, finalErr = cc.consensus.CommitOp(op) - cc.shutdownLock.RUnlock() - if finalErr != nil { - goto RETRY - } - - switch op.Type { - case LogOpPin: - logger.Infof("pin committed to global state: %s", op.Cid.Cid) - case LogOpUnpin: - logger.Infof("unpin committed to global state: %s", op.Cid.Cid) - } - break - - RETRY: - time.Sleep(cc.config.CommitRetryDelay) - } - return finalErr -} - -// LogPin submits a Cid to the shared state of the cluster. It will forward -// the operation to the leader if this is not it. -func (cc *Consensus) LogPin(ctx context.Context, pin api.Pin) error { - ctx, span := trace.StartSpan(ctx, "consensus/LogPin") - defer span.End() - - op := cc.op(ctx, pin, LogOpPin) - err := cc.commit(ctx, op, "LogPin", pin) - if err != nil { - return err - } - return nil -} - -// LogUnpin removes a Cid from the shared state of the cluster. -func (cc *Consensus) LogUnpin(ctx context.Context, pin api.Pin) error { - ctx, span := trace.StartSpan(ctx, "consensus/LogUnpin") - defer span.End() - - op := cc.op(ctx, pin, LogOpUnpin) - err := cc.commit(ctx, op, "LogUnpin", pin) - if err != nil { - return err - } - return nil -} - -// AddPeer adds a new peer to participate in this consensus. It will -// forward the operation to the leader if this is not it.
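The redirect-then-commit loop in commit above reappears in AddPeer and RmPeer below: try to redirect to the leader; if we are the leader ourselves, apply locally and retry after CommitRetryDelay on failure. A condensed, self-contained sketch of that shared skeleton; the function names here are illustrative, not part of the package:

package main

import (
	"errors"
	"fmt"
	"time"
)

// commitWithRetries captures the shared shape of commit/AddPeer/RmPeer:
// redirect() returns true when the leader handled the request for us,
// apply() performs the local commit when this peer is the leader.
func commitWithRetries(retries int, delay time.Duration, redirect func() (bool, error), apply func() error) error {
	var finalErr error
	for i := 0; i <= retries; i++ {
		redirected, err := redirect()
		if err != nil || redirected {
			return err // leader handled it, or redirect failed for good
		}
		if finalErr = apply(); finalErr == nil {
			return nil
		}
		time.Sleep(delay) // retry the local commit after a delay
	}
	return finalErr
}

func main() {
	attempts := 0
	err := commitWithRetries(2, 10*time.Millisecond,
		func() (bool, error) { return false, nil }, // pretend we are the leader
		func() error {
			attempts++
			if attempts < 2 {
				return errors.New("transient failure")
			}
			return nil
		})
	fmt.Println(err, attempts) // <nil> 2
}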
-func (cc *Consensus) AddPeer(ctx context.Context, pid peer.ID) error { - ctx, span := trace.StartSpan(ctx, "consensus/AddPeer") - defer span.End() - - var finalErr error - for i := 0; i <= cc.config.CommitRetries; i++ { - logger.Debugf("attempt #%d: AddPeer %s", i, pid.Pretty()) - if finalErr != nil { - logger.Errorf("retrying to add peer. Attempt #%d failed: %s", i, finalErr) - } - ok, err := cc.redirectToLeader("AddPeer", pid) - if err != nil || ok { - return err - } - // Being here means we are the leader and can commit - cc.shutdownLock.RLock() // do not shutdown while committing - finalErr = cc.raft.AddPeer(ctx, pid.String()) - - cc.shutdownLock.RUnlock() - if finalErr != nil { - time.Sleep(cc.config.CommitRetryDelay) - continue - } - logger.Infof("peer added to Raft: %s", pid.Pretty()) - break - } - return finalErr -} - -// RmPeer removes a peer from this consensus. It will -// forward the operation to the leader if this is not it. -func (cc *Consensus) RmPeer(ctx context.Context, pid peer.ID) error { - ctx, span := trace.StartSpan(ctx, "consensus/RmPeer") - defer span.End() - - var finalErr error - for i := 0; i <= cc.config.CommitRetries; i++ { - logger.Debugf("attempt #%d: RmPeer %s", i, pid.Pretty()) - if finalErr != nil { - logger.Errorf("retrying to remove peer. Attempt #%d failed: %s", i, finalErr) - } - ok, err := cc.redirectToLeader("RmPeer", pid) - if err != nil || ok { - return err - } - // Being here means we are the leader and can commit - cc.shutdownLock.RLock() // do not shutdown while committing - finalErr = cc.raft.RemovePeer(ctx, pid.String()) - cc.shutdownLock.RUnlock() - if finalErr != nil { - time.Sleep(cc.config.CommitRetryDelay) - continue - } - logger.Infof("peer removed from Raft: %s", pid.Pretty()) - break - } - return finalErr -} - -// State retrieves the current consensus State. It may error if no State has -// been agreed upon or the state is not consistent. The returned State is the -// last agreed-upon State known by this node. No writes are allowed, as all -// writes to the shared state should happen through the Consensus component -// methods. -func (cc *Consensus) State(ctx context.Context) (state.ReadOnly, error) { - _, span := trace.StartSpan(ctx, "consensus/State") - defer span.End() - - st, err := cc.consensus.GetLogHead() - if err == libp2praft.ErrNoState { - return state.Empty(), nil - } - - if err != nil { - return nil, err - } - state, ok := st.(state.State) - if !ok { - return nil, errors.New("wrong state type") - } - return state, nil -} - -// Leader returns the peerID of the Leader of the -// cluster. It returns an error when there is no leader. -func (cc *Consensus) Leader(ctx context.Context) (peer.ID, error) { - _, span := trace.StartSpan(ctx, "consensus/Leader") - defer span.End() - - // Note the hard-dependency on raft here... - raftactor := cc.actor.(*libp2praft.Actor) - return raftactor.Leader() -} - -// Clean removes the Raft persisted state. -func (cc *Consensus) Clean(ctx context.Context) error { - _, span := trace.StartSpan(ctx, "consensus/Clean") - defer span.End() - - cc.shutdownLock.RLock() - defer cc.shutdownLock.RUnlock() - if !cc.shutdown { - return errors.New("consensus component is not shutdown") - } - - return CleanupRaft(cc.config) -} - -// Rollback replaces the current agreed-upon -// state with the state provided. Only the consensus leader -// can perform this operation. -func (cc *Consensus) Rollback(state state.State) error { - // This is unused. It *might* be used for upgrades. 
- // There is rather untested magic in libp2p-raft's FSM() - // to make this possible. - return cc.consensus.Rollback(state) -} - -// Peers returns the current list of peers in the consensus. -// The list will be sorted alphabetically. -func (cc *Consensus) Peers(ctx context.Context) ([]peer.ID, error) { - ctx, span := trace.StartSpan(ctx, "consensus/Peers") - defer span.End() - - cc.shutdownLock.RLock() // prevent shutdown while here - defer cc.shutdownLock.RUnlock() - - if cc.shutdown { // things hang a lot in this case - return nil, errors.New("consensus is shutdown") - } - peers := []peer.ID{} - raftPeers, err := cc.raft.Peers(ctx) - if err != nil { - return nil, fmt.Errorf("cannot retrieve list of peers: %s", err) - } - - sort.Strings(raftPeers) - - for _, p := range raftPeers { - id, err := peer.Decode(p) - if err != nil { - panic("could not decode peer") - } - peers = append(peers, id) - } - return peers, nil -} - -// OfflineState returns a cluster state by reading the Raft data and -// writing it to the given datastore which is then wrapped as a state.State. -// Usually an in-memory datastore suffices. The given datastore should be -// thread-safe. -func OfflineState(cfg *Config, store ds.Datastore) (state.State, error) { - r, snapExists, err := LastStateRaw(cfg) - if err != nil { - return nil, err - } - - st, err := dsstate.New(context.Background(), store, cfg.DatastoreNamespace, dsstate.DefaultHandle()) - if err != nil { - return nil, err - } - if !snapExists { - return st, nil - } - - err = st.Unmarshal(r) - if err != nil { - return nil, err - } - return st, nil -} diff --git a/packages/networking/ipfs-cluster/consensus/raft/consensus_test.go b/packages/networking/ipfs-cluster/consensus/raft/consensus_test.go deleted file mode 100644 index d0c12e3..0000000 --- a/packages/networking/ipfs-cluster/consensus/raft/consensus_test.go +++ /dev/null @@ -1,349 +0,0 @@ -package raft - -import ( - "context" - "fmt" - "os" - "testing" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/datastore/inmem" - "github.com/ipfs-cluster/ipfs-cluster/state/dsstate" - "github.com/ipfs-cluster/ipfs-cluster/test" - - libp2p "github.com/libp2p/go-libp2p" - host "github.com/libp2p/go-libp2p/core/host" - peerstore "github.com/libp2p/go-libp2p/core/peerstore" -) - -func cleanRaft(idn int) { - os.RemoveAll(fmt.Sprintf("raftFolderFromTests-%d", idn)) -} - -func testPin(c api.Cid) api.Pin { - p := api.PinCid(c) - p.ReplicationFactorMin = -1 - p.ReplicationFactorMax = -1 - return p -} - -func makeTestingHost(t *testing.T) host.Host { - h, err := libp2p.New( - libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"), - ) - if err != nil { - t.Fatal(err) - } - return h -} - -func testingConsensus(t *testing.T, idn int) *Consensus { - ctx := context.Background() - cleanRaft(idn) - h := makeTestingHost(t) - - cfg := &Config{} - cfg.Default() - cfg.DataFolder = fmt.Sprintf("raftFolderFromTests-%d", idn) - cfg.hostShutdown = true - - cc, err := NewConsensus(h, cfg, inmem.New(), false) - if err != nil { - t.Fatal("cannot create Consensus:", err) - } - cc.SetClient(test.NewMockRPCClientWithHost(t, h)) - <-cc.Ready(ctx) - return cc -} - -func TestShutdownConsensus(t *testing.T) { - ctx := context.Background() - // Bring it up twice to make sure shutdown cleans up properly - // but also to make sure raft comes up ok when re-initialized - cc := testingConsensus(t, 1) - defer cleanRaft(1) - err := cc.Shutdown(ctx) - if err != nil { - t.Fatal("Consensus cannot shutdown:", err) - } - 
err = cc.Shutdown(ctx) // should be fine to shutdown twice - if err != nil { - t.Fatal("Consensus should be able to shutdown several times") - } - cleanRaft(1) - - cc = testingConsensus(t, 1) - err = cc.Shutdown(ctx) - if err != nil { - t.Fatal("Consensus cannot shutdown:", err) - } - cleanRaft(1) -} - -func TestConsensusPin(t *testing.T) { - ctx := context.Background() - cc := testingConsensus(t, 1) - defer cleanRaft(1) // Remember defer runs in LIFO order - defer cc.Shutdown(ctx) - - err := cc.LogPin(ctx, testPin(test.Cid1)) - if err != nil { - t.Error("the operation did not make it to the log:", err) - } - - time.Sleep(250 * time.Millisecond) - st, err := cc.State(ctx) - if err != nil { - t.Fatal("error getting state:", err) - } - - out := make(chan api.Pin, 10) - err = st.List(ctx, out) - if err != nil { - t.Fatal(err) - } - - var pins []api.Pin - for p := range out { - pins = append(pins, p) - } - - if len(pins) != 1 || !pins[0].Cid.Equals(test.Cid1) { - t.Error("the added pin should be in the state") - } -} - -func TestConsensusUnpin(t *testing.T) { - ctx := context.Background() - cc := testingConsensus(t, 1) - defer cleanRaft(1) - defer cc.Shutdown(ctx) - - err := cc.LogUnpin(ctx, api.PinCid(test.Cid1)) - if err != nil { - t.Error("the operation did not make it to the log:", err) - } -} - -func TestConsensusUpdate(t *testing.T) { - ctx := context.Background() - cc := testingConsensus(t, 1) - defer cleanRaft(1) - defer cc.Shutdown(ctx) - - // Pin first - pin := testPin(test.Cid1) - pin.Type = api.ShardType - err := cc.LogPin(ctx, pin) - if err != nil { - t.Fatal("the initial operation did not make it to the log:", err) - } - time.Sleep(250 * time.Millisecond) - - // Update pin - pin.Reference = &test.Cid2 - err = cc.LogPin(ctx, pin) - if err != nil { - t.Error("the update op did not make it to the log:", err) - } - - time.Sleep(250 * time.Millisecond) - st, err := cc.State(ctx) - if err != nil { - t.Fatal("error getting state:", err) - } - - out := make(chan api.Pin, 10) - err = st.List(ctx, out) - if err != nil { - t.Fatal(err) - } - - var pins []api.Pin - for p := range out { - pins = append(pins, p) - } - - if len(pins) != 1 || !pins[0].Cid.Equals(test.Cid1) { - t.Error("the added pin should be in the state") - } - if !pins[0].Reference.Equals(test.Cid2) { - t.Error("pin updated incorrectly") - } -} - -func TestConsensusAddPeer(t *testing.T) { - ctx := context.Background() - cc := testingConsensus(t, 1) - cc2 := testingConsensus(t, 2) - t.Log(cc.host.ID().Pretty()) - t.Log(cc2.host.ID().Pretty()) - defer cleanRaft(1) - defer cleanRaft(2) - defer cc.Shutdown(ctx) - defer cc2.Shutdown(ctx) - - cc.host.Peerstore().AddAddrs(cc2.host.ID(), cc2.host.Addrs(), peerstore.PermanentAddrTTL) - err := cc.AddPeer(ctx, cc2.host.ID()) - if err != nil { - t.Error("the operation did not make it to the log:", err) - } - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - err = cc2.raft.WaitForPeer(ctx, cc.host.ID().Pretty(), false) - if err != nil { - t.Fatal(err) - } - - peers, err := cc2.raft.Peers(ctx) - if err != nil { - t.Fatal(err) - } - - if len(peers) != 2 { - t.Error("peer was not added") - } -} - -func TestConsensusRmPeer(t *testing.T) { - ctx := context.Background() - cc := testingConsensus(t, 1) - cc2 := testingConsensus(t, 2) - defer cleanRaft(1) - defer cleanRaft(2) - defer cc.Shutdown(ctx) - defer cc2.Shutdown(ctx) - - cc.host.Peerstore().AddAddrs(cc2.host.ID(), cc2.host.Addrs(), peerstore.PermanentAddrTTL) - - err := cc.AddPeer(ctx, 
cc2.host.ID()) - if err != nil { - t.Error("could not add peer:", err) - } - - ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) - defer cancel() - err = cc.raft.WaitForPeer(ctx, cc2.host.ID().Pretty(), false) - if err != nil { - t.Fatal(err) - } - cc.raft.WaitForLeader(ctx) - - err = cc.LogPin(ctx, testPin(test.Cid1)) - if err != nil { - t.Error("could not pin after adding peer:", err) - } - - time.Sleep(2 * time.Second) - - // Remove a nonexistent peer - err = cc.RmPeer(ctx, test.PeerID1) - if err != nil { - t.Fatal("the operation did not make it to the log:", err) - } - - // Remove real peer. At least the leader can succeed - err = cc2.RmPeer(ctx, cc.host.ID()) - err2 := cc.RmPeer(ctx, cc2.host.ID()) - if err != nil && err2 != nil { - t.Fatal("could not remove peer:", err, err2) - } - - err = cc.raft.WaitForPeer(ctx, cc2.host.ID().Pretty(), true) - if err != nil { - t.Fatal(err) - } -} - -func TestConsensusLeader(t *testing.T) { - ctx := context.Background() - cc := testingConsensus(t, 1) - pID := cc.host.ID() - defer cleanRaft(1) - defer cc.Shutdown(ctx) - l, err := cc.Leader(ctx) - if err != nil { - t.Fatal("No leader:", err) - } - - if l != pID { - t.Errorf("expected %s but the leader appears as %s", pID, l) - } -} - -func TestRaftLatestSnapshot(t *testing.T) { - ctx := context.Background() - cc := testingConsensus(t, 1) - defer cleanRaft(1) - defer cc.Shutdown(ctx) - - // Make pin 1 - err := cc.LogPin(ctx, testPin(test.Cid1)) - if err != nil { - t.Error("the first pin did not make it to the log:", err) - } - - time.Sleep(250 * time.Millisecond) - err = cc.raft.Snapshot() - if err != nil { - t.Error("the first snapshot was not taken successfully") - } - - // Make pin 2 - err = cc.LogPin(ctx, testPin(test.Cid2)) - if err != nil { - t.Error("the second pin did not make it to the log:", err) - } - - time.Sleep(250 * time.Millisecond) - err = cc.raft.Snapshot() - if err != nil { - t.Error("the second snapshot was not taken successfully") - } - - // Make pin 3 - err = cc.LogPin(ctx, testPin(test.Cid3)) - if err != nil { - t.Error("the third pin did not make it to the log:", err) - } - - time.Sleep(250 * time.Millisecond) - err = cc.raft.Snapshot() - if err != nil { - t.Error("the third snapshot was not taken successfully") - } - - // Call raft.LastState and ensure we get the correct state - snapState, err := dsstate.New(ctx, inmem.New(), "", dsstate.DefaultHandle()) - if err != nil { - t.Fatal(err) - } - r, snapExists, err := LastStateRaw(cc.config) - if !snapExists { - t.Fatal("No snapshot found by LastStateRaw") - } - if err != nil { - t.Fatal("Error while taking snapshot", err) - } - err = snapState.Unmarshal(r) - if err != nil { - t.Fatal("Snapshot bytes returned could not restore to state: ", err) - } - - out := make(chan api.Pin, 100) - err = snapState.List(ctx, out) - if err != nil { - t.Fatal(err) - } - - var pins []api.Pin - for p := range out { - pins = append(pins, p) - } - - if len(pins) != 3 { - t.Fatal("Latest snapshot not read") - } -} diff --git a/packages/networking/ipfs-cluster/consensus/raft/data_helper.go b/packages/networking/ipfs-cluster/consensus/raft/data_helper.go deleted file mode 100644 index e7fa66c..0000000 --- a/packages/networking/ipfs-cluster/consensus/raft/data_helper.go +++ /dev/null @@ -1,80 +0,0 @@ -package raft - -import ( - "fmt" - "os" - "path/filepath" -) - -// dataBackupHelper helps make and rotate backups from a folder. -// it will name them .old.0, .old.1... and so on. 
-// when a new backup is made, the old.0 is renamed to old.1 and so on. -// when the "keep" number is reached, the oldest is always -// discarded. -type dataBackupHelper struct { - baseDir string - folderName string - keep int -} - -func newDataBackupHelper(dataFolder string, keep int) *dataBackupHelper { - return &dataBackupHelper{ - baseDir: filepath.Dir(dataFolder), - folderName: filepath.Base(dataFolder), - keep: keep, - } -} - -func (dbh *dataBackupHelper) makeName(i int) string { - return filepath.Join(dbh.baseDir, fmt.Sprintf("%s.old.%d", dbh.folderName, i)) -} - -func (dbh *dataBackupHelper) listBackups() []string { - backups := []string{} - for i := 0; i < dbh.keep; i++ { - name := dbh.makeName(i) - if _, err := os.Stat(name); os.IsNotExist(err) { - return backups - } - backups = append(backups, name) - } - return backups -} - -func (dbh *dataBackupHelper) makeBackup() error { - folder := filepath.Join(dbh.baseDir, dbh.folderName) - if _, err := os.Stat(folder); os.IsNotExist(err) { - // nothing to backup - logger.Debug("nothing to backup") - return nil - } - - // make sure config folder exists - err := os.MkdirAll(dbh.baseDir, 0700) - if err != nil { - return err - } - - // list all backups in it - backups := dbh.listBackups() - // remove last / oldest. Ex. if max is five, remove name.old.4 - if len(backups) >= dbh.keep { - os.RemoveAll(backups[len(backups)-1]) - } else { // append new backup folder. Ex, if 2 exist: add name.old.2 - backups = append(backups, dbh.makeName(len(backups))) - } - - // increase number for all backups folders. - // If there are 3: 1->2, 0->1. - // Note in all cases the last backup in the list does not exist - // (either removed or not created, just added to this list) - for i := len(backups) - 1; i > 0; i-- { - err := os.Rename(backups[i-1], backups[i]) - if err != nil { - return err - } - } - - // save new as name.old.0 - return os.Rename(filepath.Join(dbh.baseDir, dbh.folderName), dbh.makeName(0)) -} diff --git a/packages/networking/ipfs-cluster/consensus/raft/data_helper_test.go b/packages/networking/ipfs-cluster/consensus/raft/data_helper_test.go deleted file mode 100644 index c06ca9f..0000000 --- a/packages/networking/ipfs-cluster/consensus/raft/data_helper_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package raft - -import ( - "fmt" - "os" - "testing" -) - -func TestDataBackupHelper(t *testing.T) { - keep := 5 - - cleanup := func() { - os.RemoveAll("data_helper_testing") - for i := 0; i < 2*keep; i++ { - os.RemoveAll(fmt.Sprintf("data_helper_testing.old.%d", i)) - } - } - cleanup() - defer cleanup() - - os.MkdirAll("data_helper_testing", 0700) - helper := newDataBackupHelper("data_helper_testing", keep) - for i := 0; i < 2*keep; i++ { - err := helper.makeBackup() - if err != nil { - t.Fatal(err) - } - backups := helper.listBackups() - if (i < keep && len(backups) != i+1) || - (i >= keep && len(backups) != keep) { - t.Fatal("incorrect number of backups saved") - } - os.MkdirAll("data_helper_testing", 0700) - } -} diff --git a/packages/networking/ipfs-cluster/consensus/raft/log_op.go b/packages/networking/ipfs-cluster/consensus/raft/log_op.go deleted file mode 100644 index 9f18d44..0000000 --- a/packages/networking/ipfs-cluster/consensus/raft/log_op.go +++ /dev/null @@ -1,105 +0,0 @@ -package raft - -import ( - "context" - "errors" - - "go.opencensus.io/tag" - "go.opencensus.io/trace" - - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/state" - - consensus "github.com/libp2p/go-libp2p-consensus" -) - -// Type of 
consensus operation -const ( - LogOpPin = iota + 1 - LogOpUnpin -) - -// LogOpType expresses the type of a consensus Operation -type LogOpType int - -// LogOp represents an operation for the OpLogConsensus system. -// It implements the consensus.Op interface and it is used by the -// Consensus component. -type LogOp struct { - SpanCtx trace.SpanContext `codec:"s,omitempty"` - TagCtx []byte `codec:"t,omitempty"` - Cid api.Pin `codec:"c,omitempty"` - Type LogOpType `codec:"p,omitempty"` - consensus *Consensus `codec:"-"` - tracing bool `codec:"-"` -} - -// ApplyTo applies the operation to the State -func (op *LogOp) ApplyTo(cstate consensus.State) (consensus.State, error) { - var err error - ctx := context.Background() - if op.tracing { - tagmap, err := tag.Decode(op.TagCtx) - if err != nil { - logger.Error(err) - } - ctx = tag.NewContext(ctx, tagmap) - var span *trace.Span - ctx, span = trace.StartSpanWithRemoteParent(ctx, "consensus/raft/logop/ApplyTo", op.SpanCtx) - defer span.End() - } - - state, ok := cstate.(state.State) - if !ok { - // Should never be here - panic("received unexpected state type") - } - - pin := op.Cid - - switch op.Type { - case LogOpPin: - err = state.Add(ctx, pin) - if err != nil { - logger.Error(err) - goto ROLLBACK - } - // Async, we let the PinTracker take care of any problems - op.consensus.rpcClient.GoContext( - ctx, - "", - "PinTracker", - "Track", - pin, - &struct{}{}, - nil, - ) - case LogOpUnpin: - err = state.Rm(ctx, pin.Cid) - if err != nil { - logger.Error(err) - goto ROLLBACK - } - // Async, we let the PinTracker take care of any problems - op.consensus.rpcClient.GoContext( - ctx, - "", - "PinTracker", - "Untrack", - pin, - &struct{}{}, - nil, - ) - default: - logger.Error("unknown LogOp type. Ignoring") - } - return state, nil - -ROLLBACK: - // We failed to apply the operation to the state - // and therefore we need to request a rollback to the - // cluster to the previous state. This operation can only be performed - // by the cluster leader. - logger.Error("Rollbacks are not implemented") - return nil, errors.New("a rollback may be necessary. 
Reason: " + err.Error()) -} diff --git a/packages/networking/ipfs-cluster/consensus/raft/log_op_test.go b/packages/networking/ipfs-cluster/consensus/raft/log_op_test.go deleted file mode 100644 index 27134b9..0000000 --- a/packages/networking/ipfs-cluster/consensus/raft/log_op_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package raft - -import ( - "context" - "testing" - - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/datastore/inmem" - "github.com/ipfs-cluster/ipfs-cluster/state/dsstate" - "github.com/ipfs-cluster/ipfs-cluster/test" -) - -func TestApplyToPin(t *testing.T) { - ctx := context.Background() - cc := testingConsensus(t, 1) - op := &LogOp{ - Cid: api.PinCid(test.Cid1), - Type: LogOpPin, - consensus: cc, - } - defer cleanRaft(1) - defer cc.Shutdown(ctx) - - st, err := dsstate.New(ctx, inmem.New(), "", dsstate.DefaultHandle()) - if err != nil { - t.Fatal(err) - } - op.ApplyTo(st) - - out := make(chan api.Pin, 100) - err = st.List(ctx, out) - if err != nil { - t.Fatal(err) - } - - var pins []api.Pin - for p := range out { - pins = append(pins, p) - } - - if len(pins) != 1 || !pins[0].Cid.Equals(test.Cid1) { - t.Error("the state was not modified correctly") - } -} - -func TestApplyToUnpin(t *testing.T) { - ctx := context.Background() - cc := testingConsensus(t, 1) - op := &LogOp{ - Cid: api.PinCid(test.Cid1), - Type: LogOpUnpin, - consensus: cc, - } - defer cleanRaft(1) - defer cc.Shutdown(ctx) - - st, err := dsstate.New(ctx, inmem.New(), "", dsstate.DefaultHandle()) - if err != nil { - t.Fatal(err) - } - st.Add(ctx, testPin(test.Cid1)) - op.ApplyTo(st) - - out := make(chan api.Pin, 100) - err = st.List(ctx, out) - if err != nil { - t.Fatal(err) - } - if len(out) != 0 { - t.Error("the state was not modified correctly") - } -} - -func TestApplyToBadState(t *testing.T) { - defer func() { - if r := recover(); r == nil { - t.Error("should have recovered an error") - } - }() - - op := &LogOp{ - Cid: api.PinCid(test.Cid1), - Type: LogOpUnpin, - } - - var st interface{} - op.ApplyTo(st) -} diff --git a/packages/networking/ipfs-cluster/consensus/raft/logging.go b/packages/networking/ipfs-cluster/consensus/raft/logging.go deleted file mode 100644 index f6b02ee..0000000 --- a/packages/networking/ipfs-cluster/consensus/raft/logging.go +++ /dev/null @@ -1,230 +0,0 @@ -package raft - -import ( - "fmt" - "io" - "log" - "strings" - "time" - - hclog "github.com/hashicorp/go-hclog" - logging "github.com/ipfs/go-log/v2" -) - -const ( - debug = iota - info - warn - err -) - -var raftLogger = logging.Logger("raftlib") - -// this implements github.com/hashicorp/go-hclog -type hcLogToLogger struct { - extraArgs []interface{} - name string -} - -func (log *hcLogToLogger) formatArgs(args []interface{}) string { - result := "" - args = append(args, log.extraArgs) - for i := 0; i < len(args); i = i + 2 { - key, ok := args[i].(string) - if !ok { - continue - } - val := args[i+1] - result += fmt.Sprintf(" %s=%s.", key, val) - } - return result -} - -func (log *hcLogToLogger) format(msg string, args []interface{}) string { - argstr := log.formatArgs(args) - if len(argstr) > 0 { - argstr = ". 
Args: " + argstr - } - name := log.name - if len(name) > 0 { - name += ": " - } - return name + msg + argstr -} - -func (log *hcLogToLogger) Log(level hclog.Level, msg string, args ...interface{}) { - switch level { - case hclog.Trace, hclog.Debug: - log.Debug(msg, args) - case hclog.NoLevel, hclog.Info: - log.Info(msg, args) - case hclog.Warn: - log.Warn(msg, args) - case hclog.Error: - log.Error(msg, args) - default: - log.Warn(msg, args) - } -} - -func (log *hcLogToLogger) Trace(msg string, args ...interface{}) { - raftLogger.Debug(log.format(msg, args)) -} - -func (log *hcLogToLogger) Debug(msg string, args ...interface{}) { - raftLogger.Debug(log.format(msg, args)) -} - -func (log *hcLogToLogger) Info(msg string, args ...interface{}) { - raftLogger.Info(log.format(msg, args)) -} - -func (log *hcLogToLogger) Warn(msg string, args ...interface{}) { - raftLogger.Warn(log.format(msg, args)) -} - -func (log *hcLogToLogger) Error(msg string, args ...interface{}) { - raftLogger.Error(log.format(msg, args)) -} - -func (log *hcLogToLogger) IsTrace() bool { - return true -} - -func (log *hcLogToLogger) IsDebug() bool { - return true -} - -func (log *hcLogToLogger) IsInfo() bool { - return true -} - -func (log *hcLogToLogger) IsWarn() bool { - return true -} - -func (log *hcLogToLogger) IsError() bool { - return true -} - -func (log *hcLogToLogger) Name() string { - return log.name -} - -func (log *hcLogToLogger) With(args ...interface{}) hclog.Logger { - return &hcLogToLogger{extraArgs: args} -} - -func (log *hcLogToLogger) Named(name string) hclog.Logger { - return &hcLogToLogger{name: log.name + ": " + name} -} - -func (log *hcLogToLogger) ResetNamed(name string) hclog.Logger { - return &hcLogToLogger{name: name} -} - -func (log *hcLogToLogger) SetLevel(level hclog.Level) {} - -func (log *hcLogToLogger) StandardLogger(opts *hclog.StandardLoggerOptions) *log.Logger { - return nil -} - -func (log *hcLogToLogger) StandardWriter(opts *hclog.StandardLoggerOptions) io.Writer { - return nil -} - -func (log *hcLogToLogger) ImpliedArgs() []interface{} { - return nil -} - -const repeatPoolSize = 10 -const repeatReset = time.Minute - -// This provides a custom logger for Raft which intercepts Raft log messages -// and rewrites them to our own logger (for "raft" facility). -type logForwarder struct { - lastMsgs map[int][]string - lastTip map[int]time.Time -} - -var raftStdLogger = log.New(&logForwarder{}, "", 0) - -// Write forwards to our go-log/v2 logger. -// According to https://golang.org/pkg/log/#Logger.Output -// it is called per line. 
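That one-Write-per-line contract of the standard-library log.Logger is what makes the level sniffing in Write workable. A small, self-contained sketch of the same idea, with levelSniffer as an illustrative stand-in for the logForwarder above:

package main

import (
	"fmt"
	"log"
	"strings"
)

// levelSniffer mimics logForwarder: log.Logger invokes Write once per
// emitted line, so the level tag can be parsed out of each line.
type levelSniffer struct{}

func (levelSniffer) Write(p []byte) (int, error) {
	t := strings.TrimSuffix(string(p), "\n")
	if strings.Contains(t, "[WARN]") {
		// a real forwarder would route this to raftLogger.Warn
		fmt.Println("warn:", strings.TrimPrefix(t, "[WARN] raft: "))
	}
	return len(p), nil
}

func main() {
	l := log.New(levelSniffer{}, "", 0)
	l.Println("[WARN] raft: heartbeat timeout reached")
	// prints: warn: heartbeat timeout reached
}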
-func (fw *logForwarder) Write(p []byte) (n int, e error) { - t := strings.TrimSuffix(string(p), "\n") - - switch { - case strings.Contains(t, "[DEBUG]"): - if !fw.repeated(debug, t) { - fw.log(debug, strings.TrimPrefix(t, "[DEBUG] raft: ")) - } - case strings.Contains(t, "[WARN]"): - if !fw.repeated(warn, t) { - fw.log(warn, strings.TrimPrefix(t, "[WARN] raft: ")) - } - case strings.Contains(t, "[ERR]"): - if !fw.repeated(err, t) { - fw.log(err, strings.TrimPrefix(t, "[ERR] raft: ")) - } - case strings.Contains(t, "[INFO]"): - if !fw.repeated(info, t) { - fw.log(info, strings.TrimPrefix(t, "[INFO] raft: ")) - } - default: - fw.log(debug, t) - } - return len(p), nil -} - -func (fw *logForwarder) repeated(t int, msg string) bool { - if fw.lastMsgs == nil { - fw.lastMsgs = make(map[int][]string) - fw.lastTip = make(map[int]time.Time) - } - - // We haven't tipped about repeated log messages - // in a while, do it and forget the list - if time.Now().After(fw.lastTip[t].Add(repeatReset)) { - fw.lastTip[t] = time.Now() - fw.lastMsgs[t] = nil - fw.log(t, "NOTICE: Some RAFT log messages repeat and will only be logged once") - } - - var found string - - // Do we know about this message - for _, lmsg := range fw.lastMsgs[t] { - if lmsg == msg { - found = lmsg - break - } - } - - if found == "" { // new message. Add to slice. - if len(fw.lastMsgs[t]) >= repeatPoolSize { // drop oldest - fw.lastMsgs[t] = fw.lastMsgs[t][1:] - } - fw.lastMsgs[t] = append(fw.lastMsgs[t], msg) - return false // not-repeated - } - - // repeated, don't log - return true -} - -func (fw *logForwarder) log(t int, msg string) { - switch t { - case debug: - raftLogger.Debug(msg) - case info: - raftLogger.Info(msg) - case warn: - raftLogger.Warn(msg) - case err: - raftLogger.Error(msg) - default: - raftLogger.Debug(msg) - } -} diff --git a/packages/networking/ipfs-cluster/consensus/raft/raft.go b/packages/networking/ipfs-cluster/consensus/raft/raft.go deleted file mode 100644 index 9b413cb..0000000 --- a/packages/networking/ipfs-cluster/consensus/raft/raft.go +++ /dev/null @@ -1,696 +0,0 @@ -package raft - -import ( - "context" - "errors" - "fmt" - "io" - "os" - "path/filepath" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/state" - - p2praft "github.com/libp2p/go-libp2p-raft" - host "github.com/libp2p/go-libp2p/core/host" - peer "github.com/libp2p/go-libp2p/core/peer" - - hraft "github.com/hashicorp/raft" - raftboltdb "github.com/hashicorp/raft-boltdb" - "go.opencensus.io/trace" -) - -// ErrWaitingForSelf is returned when we are waiting for ourselves to depart -// the peer set, which won't happen -var errWaitingForSelf = errors.New("waiting for ourselves to depart") - -// RaftMaxSnapshots indicates how many snapshots to keep in the consensus data -// folder. -// TODO: Maybe include this in Config. Not sure how useful it is to touch -// this anyways. -var RaftMaxSnapshots = 5 - -// RaftLogCacheSize is the maximum number of logs to cache in-memory. -// This is used to reduce disk I/O for the recently committed entries. -var RaftLogCacheSize = 512 - -// How long we wait for updates during shutdown before snapshotting -var waitForUpdatesShutdownTimeout = 5 * time.Second -var waitForUpdatesInterval = 400 * time.Millisecond - -// How many times to retry snapshotting when shutting down -var maxShutdownSnapshotRetries = 5 - -// raftWrapper wraps the hraft.Raft object and related things like the -// different stores used or the hraft.Configuration. -// Its methods provide functionality for working with Raft. 
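Bootstrapping builds the voter list from InitPeerset plus the local peer while skipping duplicates (see makeServerConf further down in this file). A string-based sketch of just that dedup step, with the hraft types left out and illustrative names:

package main

import "fmt"

// dedupServers mirrors the peers-plus-self, no-duplicates list that
// makeServerConf turns into hraft.Server entries.
func dedupServers(peers []string, self string) []string {
	seen := make(map[string]struct{})
	var out []string
	for _, p := range append(peers, self) {
		if _, dup := seen[p]; dup {
			continue // avoid duplicate entries
		}
		seen[p] = struct{}{}
		out = append(out, p)
	}
	return out
}

func main() {
	fmt.Println(dedupServers([]string{"QmA", "QmB", "QmA"}, "QmB"))
	// [QmA QmB]
}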
-type raftWrapper struct { - ctx context.Context - cancel context.CancelFunc - raft *hraft.Raft - config *Config - host host.Host - serverConfig hraft.Configuration - transport *hraft.NetworkTransport - snapshotStore hraft.SnapshotStore - logStore hraft.LogStore - stableStore hraft.StableStore - boltdb *raftboltdb.BoltStore - staging bool -} - -// newRaftWrapper creates a Raft instance and initializes -// everything, leaving it ready to use. Note that Bootstrap() should be called -// to make sure the raft instance is usable. -func newRaftWrapper( - host host.Host, - cfg *Config, - fsm hraft.FSM, - staging bool, -) (*raftWrapper, error) { - - raftW := &raftWrapper{} - raftW.config = cfg - raftW.host = host - raftW.staging = staging - // Set correct LocalID - cfg.RaftConfig.LocalID = hraft.ServerID(host.ID().String()) - - df := cfg.GetDataFolder() - err := makeDataFolder(df) - if err != nil { - return nil, err - } - - raftW.makeServerConfig() - - err = raftW.makeTransport() - if err != nil { - return nil, err - } - - err = raftW.makeStores() - if err != nil { - return nil, err - } - - logger.Debug("creating Raft") - raftW.raft, err = hraft.NewRaft( - cfg.RaftConfig, - fsm, - raftW.logStore, - raftW.stableStore, - raftW.snapshotStore, - raftW.transport, - ) - if err != nil { - logger.Error("initializing raft: ", err) - return nil, err - } - - raftW.ctx, raftW.cancel = context.WithCancel(context.Background()) - go raftW.observePeers() - - return raftW, nil -} - -// makeDataFolder creates the folder that is meant to store Raft data. Ensures -// we always set 0700 mode. -func makeDataFolder(folder string) error { - return os.MkdirAll(folder, 0700) -} - -func (rw *raftWrapper) makeTransport() (err error) { - logger.Debug("creating libp2p Raft transport") - rw.transport, err = p2praft.NewLibp2pTransport( - rw.host, - rw.config.NetworkTimeout, - ) - return err -} - -func (rw *raftWrapper) makeStores() error { - logger.Debug("creating BoltDB store") - df := rw.config.GetDataFolder() - store, err := raftboltdb.NewBoltStore(filepath.Join(df, "raft.db")) - if err != nil { - return err - } - - // wraps the store in a LogCache to improve performance. - // See consul/agent/consul/server.go - cacheStore, err := hraft.NewLogCache(RaftLogCacheSize, store) - if err != nil { - return err - } - - logger.Debug("creating raft snapshot store") - snapstore, err := hraft.NewFileSnapshotStoreWithLogger( - df, - RaftMaxSnapshots, - raftStdLogger, - ) - if err != nil { - return err - } - - rw.logStore = cacheStore - rw.stableStore = store - rw.snapshotStore = snapstore - rw.boltdb = store - return nil -} - -// Bootstrap calls BootstrapCluster on the Raft instance with a valid -// Configuration (generated from InitPeerset) when Raft has no state -// and we are not setting up a staging peer. It returns whether Raft -// was bootstrapped (true) and an error. -func (rw *raftWrapper) Bootstrap() (bool, error) { - logger.Debug("checking for existing raft states") - hasState, err := hraft.HasExistingState( - rw.logStore, - rw.stableStore, - rw.snapshotStore, - ) - if err != nil { - return false, err - } - - if hasState { - logger.Debug("raft cluster is already initialized") - - // Inform the user that we are working with a pre-existing peerset - logger.Info("existing Raft state found! 
raft.InitPeerset will be ignored") - cf := rw.raft.GetConfiguration() - if err := cf.Error(); err != nil { - logger.Debug(err) - return false, err - } - currentCfg := cf.Configuration() - srvs := "" - for _, s := range currentCfg.Servers { - srvs += fmt.Sprintf(" %s\n", s.ID) - } - - logger.Debugf("Current Raft Peerset:\n%s\n", srvs) - return false, nil - } - - if rw.staging { - logger.Debug("staging servers do not need initialization") - logger.Info("peer is ready to join a cluster") - return false, nil - } - - voters := "" - for _, s := range rw.serverConfig.Servers { - voters += fmt.Sprintf(" %s\n", s.ID) - } - - logger.Infof("initializing raft cluster with the following voters:\n%s\n", voters) - - future := rw.raft.BootstrapCluster(rw.serverConfig) - if err := future.Error(); err != nil { - logger.Error("bootstrapping cluster: ", err) - return true, err - } - return true, nil -} - -// create Raft servers configuration. The result is used -// by Bootstrap() when it proceeds to Bootstrap. -func (rw *raftWrapper) makeServerConfig() { - rw.serverConfig = makeServerConf(append(rw.config.InitPeerset, rw.host.ID())) -} - -// creates a server configuration with all peers as Voters. -func makeServerConf(peers []peer.ID) hraft.Configuration { - sm := make(map[string]struct{}) - - servers := make([]hraft.Server, 0) - - // Servers are peers + self. We avoid duplicate entries below - for _, pid := range peers { - p := pid.String() - _, ok := sm[p] - if !ok { // avoid dups - sm[p] = struct{}{} - servers = append(servers, hraft.Server{ - Suffrage: hraft.Voter, - ID: hraft.ServerID(p), - Address: hraft.ServerAddress(p), - }) - } - } - return hraft.Configuration{Servers: servers} -} - -// WaitForLeader holds until Raft says we have a leader. -// Returns if ctx is canceled. -func (rw *raftWrapper) WaitForLeader(ctx context.Context) (string, error) { - ctx, span := trace.StartSpan(ctx, "consensus/raft/WaitForLeader") - defer span.End() - - ticker := time.NewTicker(time.Second / 2) - for { - select { - case <-ticker.C: - if l := rw.raft.Leader(); l != "" { - logger.Debug("waitForleaderTimer") - logger.Infof("Current Raft Leader: %s", l) - ticker.Stop() - return string(l), nil - } - case <-ctx.Done(): - return "", ctx.Err() - } - } -} - -func (rw *raftWrapper) WaitForVoter(ctx context.Context) error { - ctx, span := trace.StartSpan(ctx, "consensus/raft/WaitForVoter") - defer span.End() - - logger.Debug("waiting until we are promoted to a voter") - - pid := hraft.ServerID(rw.host.ID().String()) - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - logger.Debugf("%s: get configuration", pid) - configFuture := rw.raft.GetConfiguration() - if err := configFuture.Error(); err != nil { - return err - } - - if isVoter(pid, configFuture.Configuration()) { - return nil - } - logger.Debugf("%s: not voter yet", pid) - - time.Sleep(waitForUpdatesInterval) - } - } -} - -func isVoter(srvID hraft.ServerID, cfg hraft.Configuration) bool { - for _, server := range cfg.Servers { - if server.ID == srvID && server.Suffrage == hraft.Voter { - return true - } - } - return false -} - -// WaitForUpdates holds until Raft has synced to the last index in the log -func (rw *raftWrapper) WaitForUpdates(ctx context.Context) error { - ctx, span := trace.StartSpan(ctx, "consensus/raft/WaitForUpdates") - defer span.End() - - logger.Debug("Raft state is catching up to the latest known version. 
Please wait...") - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - lai := rw.raft.AppliedIndex() - li := rw.raft.LastIndex() - logger.Debugf("current Raft index: %d/%d", - lai, li) - if lai == li { - return nil - } - time.Sleep(waitForUpdatesInterval) - } - } -} - -func (rw *raftWrapper) WaitForPeer(ctx context.Context, pid string, depart bool) error { - ctx, span := trace.StartSpan(ctx, "consensus/raft/WaitForPeer") - defer span.End() - - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - peers, err := rw.Peers(ctx) - if err != nil { - return err - } - - if len(peers) == 1 && pid == peers[0] && depart { - return errWaitingForSelf - } - - found := find(peers, pid) - - // departing - if depart && !found { - return nil - } - - // joining - if !depart && found { - return nil - } - - time.Sleep(50 * time.Millisecond) - } - } -} - -// Snapshot tells Raft to take a snapshot. -func (rw *raftWrapper) Snapshot() error { - future := rw.raft.Snapshot() - err := future.Error() - if err != nil && err.Error() != hraft.ErrNothingNewToSnapshot.Error() { - return err - } - return nil -} - -// snapshotOnShutdown attempts to take a snapshot before a shutdown. -// Snapshotting might fail if the raft applied index is not the last index. -// This waits for the updates and tries to take a snapshot when the -// applied index is up to date. -// It will retry if the snapshot still fails, in case more updates have arrived. -// If waiting for updates times out, it will not try anymore, since something -// is wrong. This is a best-effort solution as there is no way to tell Raft -// to stop processing entries because we want to take a snapshot before -// shutting down. -func (rw *raftWrapper) snapshotOnShutdown() error { - var err error - for i := 0; i < maxShutdownSnapshotRetries; i++ { - ctx, cancel := context.WithTimeout(context.Background(), waitForUpdatesShutdownTimeout) - err = rw.WaitForUpdates(ctx) - cancel() - if err != nil { - logger.Warn("timed out waiting for state updates before shutdown. Snapshotting may fail") - return rw.Snapshot() - } - - err = rw.Snapshot() - if err == nil { - return nil // things worked - } - - // There was an error - err = errors.New("could not snapshot raft: " + err.Error()) - logger.Warnf("retrying to snapshot (%d/%d)...", i+1, maxShutdownSnapshotRetries) - } - return err -} - -// Shutdown shuts down Raft and closes the BoltDB. -func (rw *raftWrapper) Shutdown(ctx context.Context) error { - _, span := trace.StartSpan(ctx, "consensus/raft/Shutdown") - defer span.End() - - errMsgs := "" - - rw.cancel() - - err := rw.snapshotOnShutdown() - if err != nil { - errMsgs += err.Error() + ".\n" - } - - future := rw.raft.Shutdown() - err = future.Error() - if err != nil { - errMsgs += "could not shutdown raft: " + err.Error() + ".\n" - } - - err = rw.boltdb.Close() // important! - if err != nil { - errMsgs += "could not close boltdb: " + err.Error() - } - - if errMsgs != "" { - return errors.New(errMsgs) - } - - return nil -} - -// AddPeer adds a peer to Raft -func (rw *raftWrapper) AddPeer(ctx context.Context, peer string) error { - ctx, span := trace.StartSpan(ctx, "consensus/raft/AddPeer") - defer span.End() - - // Check that we don't already have it, to avoid wasting - // log entries. 
- peers, err := rw.Peers(ctx) - if err != nil { - return err - } - if find(peers, peer) { - logger.Infof("%s is already a raft peer", peer) - return nil - } - - future := rw.raft.AddVoter( - hraft.ServerID(peer), - hraft.ServerAddress(peer), - 0, - 0, - ) // TODO: Extra cfg value? - err = future.Error() - if err != nil { - logger.Error("raft cannot add peer: ", err) - } - return err -} - -// RemovePeer removes a peer from Raft -func (rw *raftWrapper) RemovePeer(ctx context.Context, peer string) error { - ctx, span := trace.StartSpan(ctx, "consensus/RemovePeer") - defer span.End() - - // Check that we have it to not waste - // log entries if we don't. - peers, err := rw.Peers(ctx) - if err != nil { - return err - } - if !find(peers, peer) { - logger.Infof("%s is not among raft peers", peer) - return nil - } - - if len(peers) == 1 && peers[0] == peer { - return errors.New("cannot remove ourselves from a 1-peer cluster") - } - - rmFuture := rw.raft.RemoveServer( - hraft.ServerID(peer), - 0, - 0, - ) // TODO: Extra cfg value? - err = rmFuture.Error() - if err != nil { - logger.Error("raft cannot remove peer: ", err) - return err - } - - return nil -} - -// Leader returns Raft's leader. It may be an empty string if -// there is no leader or it is unknown. -func (rw *raftWrapper) Leader(ctx context.Context) string { - _, span := trace.StartSpan(ctx, "consensus/raft/Leader") - defer span.End() - - return string(rw.raft.Leader()) -} - -func (rw *raftWrapper) Peers(ctx context.Context) ([]string, error) { - _, span := trace.StartSpan(ctx, "consensus/raft/Peers") - defer span.End() - - ids := make([]string, 0) - - configFuture := rw.raft.GetConfiguration() - if err := configFuture.Error(); err != nil { - return nil, err - } - - for _, server := range configFuture.Configuration().Servers { - ids = append(ids, string(server.ID)) - } - - return ids, nil -} - -// latestSnapshot looks for the most recent raft snapshot stored at the -// provided basedir. It returns the snapshot's metadata, and a reader -// to the snapshot's bytes -func latestSnapshot(raftDataFolder string) (*hraft.SnapshotMeta, io.ReadCloser, error) { - store, err := hraft.NewFileSnapshotStore(raftDataFolder, RaftMaxSnapshots, nil) - if err != nil { - return nil, nil, err - } - snapMetas, err := store.List() - if err != nil { - return nil, nil, err - } - if len(snapMetas) == 0 { // no error if snapshot isn't found - return nil, nil, nil - } - meta, r, err := store.Open(snapMetas[0].ID) - if err != nil { - return nil, nil, err - } - return meta, r, nil -} - -// LastStateRaw returns the bytes of the last snapshot stored, its metadata, -// and a flag indicating whether any snapshot was found. -func LastStateRaw(cfg *Config) (io.Reader, bool, error) { - // Read most recent snapshot - dataFolder := cfg.GetDataFolder() - if _, err := os.Stat(dataFolder); os.IsNotExist(err) { - // nothing to read - return nil, false, nil - } - - meta, r, err := latestSnapshot(dataFolder) - if err != nil { - return nil, false, err - } - if meta == nil { // no snapshots could be read - return nil, false, nil - } - return r, true, nil -} - -// SnapshotSave saves the provided state to a snapshot in the -// raft data path. Old raft data is backed up and replaced -// by the new snapshot. 
pids contains the config-specified -// peer ids to include in the snapshot metadata if no snapshot exists -// from which to copy the raft metadata -func SnapshotSave(cfg *Config, newState state.State, pids []peer.ID) error { - dataFolder := cfg.GetDataFolder() - err := makeDataFolder(dataFolder) - if err != nil { - return err - } - meta, _, err := latestSnapshot(dataFolder) - if err != nil { - return err - } - - // make a new raft snapshot - var raftSnapVersion hraft.SnapshotVersion = 1 // As of hraft v1.0.0 this is always 1 - configIndex := uint64(1) - var raftIndex uint64 - var raftTerm uint64 - var srvCfg hraft.Configuration - if meta != nil { - raftIndex = meta.Index - raftTerm = meta.Term - srvCfg = meta.Configuration - CleanupRaft(cfg) - } else { - // Begin the log after the index of a fresh start so that - // the snapshot's state propagates during bootstrap - raftIndex = uint64(2) - raftTerm = uint64(1) - srvCfg = makeServerConf(pids) - } - - snapshotStore, err := hraft.NewFileSnapshotStoreWithLogger(dataFolder, RaftMaxSnapshots, nil) - if err != nil { - return err - } - _, dummyTransport := hraft.NewInmemTransport("") - - sink, err := snapshotStore.Create(raftSnapVersion, raftIndex, raftTerm, srvCfg, configIndex, dummyTransport) - if err != nil { - return err - } - - err = p2praft.EncodeSnapshot(newState, sink) - if err != nil { - sink.Cancel() - return err - } - err = sink.Close() - if err != nil { - return err - } - return nil -} - -// CleanupRaft moves the current data folder to a backup location -func CleanupRaft(cfg *Config) error { - dataFolder := cfg.GetDataFolder() - keep := cfg.BackupsRotate - - meta, _, err := latestSnapshot(dataFolder) - if meta == nil && err == nil { - // no snapshots at all. Avoid creating backups - // from empty state folders. - logger.Infof("cleaning empty Raft data folder (%s)", dataFolder) - os.RemoveAll(dataFolder) - return nil - } - - logger.Infof("cleaning and backing up Raft data folder (%s)", dataFolder) - dbh := newDataBackupHelper(dataFolder, keep) - err = dbh.makeBackup() - if err != nil { - logger.Warn(err) - logger.Warn("the state could not be cleaned properly") - logger.Warn("manual intervention may be needed before starting cluster again") - } - return nil -} - -// only call when Raft is shutdown -func (rw *raftWrapper) Clean() error { - return CleanupRaft(rw.config) -} - -func find(s []string, elem string) bool { - for _, selem := range s { - if selem == elem { - return true - } - } - return false -} - -func (rw *raftWrapper) observePeers() { - obsCh := make(chan hraft.Observation, 1) - defer close(obsCh) - - observer := hraft.NewObserver(obsCh, true, func(o *hraft.Observation) bool { - po, ok := o.Data.(hraft.PeerObservation) - return ok && po.Removed - }) - - rw.raft.RegisterObserver(observer) - defer rw.raft.DeregisterObserver(observer) - - for { - select { - case obs := <-obsCh: - pObs := obs.Data.(hraft.PeerObservation) - logger.Info("raft peer departed. 
Removing from peerstore: ", pObs.Peer.ID) - pID, err := peer.Decode(string(pObs.Peer.ID)) - if err != nil { - logger.Error(err) - continue - } - rw.host.Peerstore().ClearAddrs(pID) - case <-rw.ctx.Done(): - logger.Debug("stopped observing raft peers") - return - } - } -} diff --git a/packages/networking/ipfs-cluster/datastore/badger/badger.go b/packages/networking/ipfs-cluster/datastore/badger/badger.go deleted file mode 100644 index 2a2fabd..0000000 --- a/packages/networking/ipfs-cluster/datastore/badger/badger.go +++ /dev/null @@ -1,38 +0,0 @@ -// Package badger provides a configurable BadgerDB go-datastore for use with -// IPFS Cluster. -package badger - -import ( - "os" - - ds "github.com/ipfs/go-datastore" - badgerds "github.com/ipfs/go-ds-badger" - "github.com/pkg/errors" -) - -// New returns a BadgerDB datastore configured with the given -// configuration. -func New(cfg *Config) (ds.Datastore, error) { - folder := cfg.GetFolder() - err := os.MkdirAll(folder, 0700) - if err != nil { - return nil, errors.Wrap(err, "creating badger folder") - } - opts := badgerds.Options{ - GcDiscardRatio: cfg.GCDiscardRatio, - GcInterval: cfg.GCInterval, - GcSleep: cfg.GCSleep, - Options: cfg.BadgerOptions, - } - return badgerds.NewDatastore(folder, &opts) -} - -// Cleanup deletes the badger datastore. -func Cleanup(cfg *Config) error { - folder := cfg.GetFolder() - if _, err := os.Stat(folder); os.IsNotExist(err) { - return nil - } - return os.RemoveAll(cfg.GetFolder()) - -} diff --git a/packages/networking/ipfs-cluster/datastore/badger/config.go b/packages/networking/ipfs-cluster/datastore/badger/config.go deleted file mode 100644 index 1b57531..0000000 --- a/packages/networking/ipfs-cluster/datastore/badger/config.go +++ /dev/null @@ -1,286 +0,0 @@ -package badger - -import ( - "encoding/json" - "errors" - "path/filepath" - "time" - - "github.com/dgraph-io/badger" - "github.com/dgraph-io/badger/options" - "github.com/imdario/mergo" - "github.com/kelseyhightower/envconfig" - - "github.com/ipfs-cluster/ipfs-cluster/config" -) - -const configKey = "badger" -const envConfigKey = "cluster_badger" - -// Default values for badger Config -const ( - DefaultSubFolder = "badger" -) - -var ( - // DefaultBadgerOptions has to be a var because badger.DefaultOptions - // is. Values are customized during Init(). - DefaultBadgerOptions badger.Options - - // DefaultGCDiscardRatio for GC operations. See Badger docs. - DefaultGCDiscardRatio float64 = 0.2 - // DefaultGCInterval specifies interval between GC cycles. - DefaultGCInterval time.Duration = 15 * time.Minute - // DefaultGCSleep specifies sleep time between GC rounds. - DefaultGCSleep time.Duration = 10 * time.Second -) - -func init() { - // Following go-ds-badger guidance - DefaultBadgerOptions = badger.DefaultOptions("") - DefaultBadgerOptions.CompactL0OnClose = false - DefaultBadgerOptions.Truncate = true - DefaultBadgerOptions.ValueLogLoadingMode = options.FileIO - // Explicitly set this to mmap. This doesn't use much memory anyways. - DefaultBadgerOptions.TableLoadingMode = options.MemoryMap - // Reduce this from 64MiB to 16MiB. That means badger will hold on to - // 20MiB by default instead of 80MiB. - DefaultBadgerOptions.MaxTableSize = 16 << 20 -} - -// Config is used to initialize a BadgerDB datastore. It implements the -// ComponentConfig interface. -type Config struct { - config.Saver - - // The folder for this datastore. Non-absolute paths are relative to - // the base configuration folder. - Folder string - - // For GC operation. 
See Badger documentation. - GCDiscardRatio float64 - - // Interval between GC cycles. Each GC cycle runs one or more - // rounds separated by GCSleep. - GCInterval time.Duration - - // Time between rounds in a GC cycle - GCSleep time.Duration - - BadgerOptions badger.Options -} - -// badgerOptions is a copy of options.BadgerOptions but -// without the Logger as it cannot be marshaled to/from -// JSON. -type badgerOptions struct { - Dir string `json:"dir"` - ValueDir string `json:"value_dir"` - SyncWrites bool `json:"sync_writes"` - TableLoadingMode *options.FileLoadingMode `json:"table_loading_mode"` - ValueLogLoadingMode *options.FileLoadingMode `json:"value_log_loading_mode"` - NumVersionsToKeep int `json:"num_versions_to_keep"` - MaxTableSize int64 `json:"max_table_size"` - LevelSizeMultiplier int `json:"level_size_multiplier"` - MaxLevels int `json:"max_levels"` - ValueThreshold int `json:"value_threshold"` - NumMemtables int `json:"num_memtables"` - NumLevelZeroTables int `json:"num_level_zero_tables"` - NumLevelZeroTablesStall int `json:"num_level_zero_tables_stall"` - LevelOneSize int64 `json:"level_one_size"` - ValueLogFileSize int64 `json:"value_log_file_size"` - ValueLogMaxEntries uint32 `json:"value_log_max_entries"` - NumCompactors int `json:"num_compactors"` - CompactL0OnClose bool `json:"compact_l_0_on_close"` - ReadOnly bool `json:"read_only"` - Truncate bool `json:"truncate"` -} - -func (bo *badgerOptions) Unmarshal() *badger.Options { - badgerOpts := &badger.Options{} - badgerOpts.Dir = bo.Dir - badgerOpts.ValueDir = bo.ValueDir - badgerOpts.SyncWrites = bo.SyncWrites - if tlm := bo.TableLoadingMode; tlm != nil { - badgerOpts.TableLoadingMode = *tlm - } - if vlm := bo.ValueLogLoadingMode; vlm != nil { - badgerOpts.ValueLogLoadingMode = *vlm - } - badgerOpts.NumVersionsToKeep = bo.NumVersionsToKeep - badgerOpts.MaxTableSize = bo.MaxTableSize - badgerOpts.LevelSizeMultiplier = bo.LevelSizeMultiplier - badgerOpts.MaxLevels = bo.MaxLevels - badgerOpts.ValueThreshold = bo.ValueThreshold - badgerOpts.NumMemtables = bo.NumMemtables - badgerOpts.NumLevelZeroTables = bo.NumLevelZeroTables - badgerOpts.NumLevelZeroTablesStall = bo.NumLevelZeroTablesStall - badgerOpts.LevelOneSize = bo.LevelOneSize - badgerOpts.ValueLogFileSize = bo.ValueLogFileSize - badgerOpts.ValueLogMaxEntries = bo.ValueLogMaxEntries - badgerOpts.NumCompactors = bo.NumCompactors - badgerOpts.CompactL0OnClose = bo.CompactL0OnClose - badgerOpts.ReadOnly = bo.ReadOnly - badgerOpts.Truncate = bo.Truncate - - return badgerOpts -} - -func (bo *badgerOptions) Marshal(badgerOpts *badger.Options) { - bo.Dir = badgerOpts.Dir - bo.ValueDir = badgerOpts.ValueDir - bo.SyncWrites = badgerOpts.SyncWrites - bo.TableLoadingMode = &badgerOpts.TableLoadingMode - bo.ValueLogLoadingMode = &badgerOpts.ValueLogLoadingMode - bo.NumVersionsToKeep = badgerOpts.NumVersionsToKeep - bo.MaxTableSize = badgerOpts.MaxTableSize - bo.LevelSizeMultiplier = badgerOpts.LevelSizeMultiplier - bo.MaxLevels = badgerOpts.MaxLevels - bo.ValueThreshold = badgerOpts.ValueThreshold - bo.NumMemtables = badgerOpts.NumMemtables - bo.NumLevelZeroTables = badgerOpts.NumLevelZeroTables - bo.NumLevelZeroTablesStall = badgerOpts.NumLevelZeroTablesStall - bo.LevelOneSize = badgerOpts.LevelOneSize - bo.ValueLogFileSize = badgerOpts.ValueLogFileSize - bo.ValueLogMaxEntries = badgerOpts.ValueLogMaxEntries - bo.NumCompactors = badgerOpts.NumCompactors - bo.CompactL0OnClose = badgerOpts.CompactL0OnClose - bo.ReadOnly = badgerOpts.ReadOnly - bo.Truncate = 
badgerOpts.Truncate -} - -type jsonConfig struct { - Folder string `json:"folder,omitempty"` - GCDiscardRatio float64 `json:"gc_discard_ratio"` - GCInterval string `json:"gc_interval"` - GCSleep string `json:"gc_sleep"` - BadgerOptions badgerOptions `json:"badger_options,omitempty"` -} - -// ConfigKey returns a human-friendly identifier for this type of Datastore. -func (cfg *Config) ConfigKey() string { - return configKey -} - -// Default initializes this Config with sensible values. -func (cfg *Config) Default() error { - cfg.Folder = DefaultSubFolder - cfg.GCDiscardRatio = DefaultGCDiscardRatio - cfg.GCInterval = DefaultGCInterval - cfg.GCSleep = DefaultGCSleep - cfg.BadgerOptions = DefaultBadgerOptions - return nil -} - -// ApplyEnvVars fills in any Config fields found as environment variables. -func (cfg *Config) ApplyEnvVars() error { - jcfg := cfg.toJSONConfig() - - err := envconfig.Process(envConfigKey, jcfg) - if err != nil { - return err - } - - return cfg.applyJSONConfig(jcfg) -} - -// Validate checks that the fields of this Config have working values, -// at least in appearance. -func (cfg *Config) Validate() error { - if cfg.Folder == "" { - return errors.New("folder is unset") - } - - if cfg.GCDiscardRatio <= 0 || cfg.GCDiscardRatio >= 1 { - return errors.New("gc_discard_ratio must be more than 0 and less than 1") - } - - return nil -} - -// LoadJSON reads the fields of this Config from a JSON byteslice as -// generated by ToJSON. -func (cfg *Config) LoadJSON(raw []byte) error { - jcfg := &jsonConfig{} - err := json.Unmarshal(raw, jcfg) - if err != nil { - return err - } - cfg.Default() - - return cfg.applyJSONConfig(jcfg) -} - -func (cfg *Config) applyJSONConfig(jcfg *jsonConfig) error { - config.SetIfNotDefault(jcfg.Folder, &cfg.Folder) - - // 0 is an invalid option anyways. In that case, set default (0.2) - config.SetIfNotDefault(jcfg.GCDiscardRatio, &cfg.GCDiscardRatio) - - // If these durations are set, GC is enabled by default with default - // values. - err := config.ParseDurations("badger", - &config.DurationOpt{Duration: jcfg.GCInterval, Dst: &cfg.GCInterval, Name: "gc_interval"}, - &config.DurationOpt{Duration: jcfg.GCSleep, Dst: &cfg.GCSleep, Name: "gc_sleep"}, - ) - if err != nil { - return err - } - - badgerOpts := jcfg.BadgerOptions.Unmarshal() - - if err := mergo.Merge(&cfg.BadgerOptions, badgerOpts, mergo.WithOverride); err != nil { - return err - } - - if jcfg.BadgerOptions.TableLoadingMode != nil { - cfg.BadgerOptions.TableLoadingMode = *jcfg.BadgerOptions.TableLoadingMode - } - - if jcfg.BadgerOptions.ValueLogLoadingMode != nil { - cfg.BadgerOptions.ValueLogLoadingMode = *jcfg.BadgerOptions.ValueLogLoadingMode - } - - return cfg.Validate() -} - -// ToJSON generates a JSON-formatted human-friendly representation of this -// Config. -func (cfg *Config) ToJSON() (raw []byte, err error) { - jcfg := cfg.toJSONConfig() - - raw, err = config.DefaultJSONMarshal(jcfg) - return -} - -func (cfg *Config) toJSONConfig() *jsonConfig { - jCfg := &jsonConfig{} - - if cfg.Folder != DefaultSubFolder { - jCfg.Folder = cfg.Folder - } - - jCfg.GCDiscardRatio = cfg.GCDiscardRatio - jCfg.GCInterval = cfg.GCInterval.String() - jCfg.GCSleep = cfg.GCSleep.String() - - bo := &badgerOptions{} - bo.Marshal(&cfg.BadgerOptions) - jCfg.BadgerOptions = *bo - - return jCfg -} - -// GetFolder returns the BadgerDB folder. 
-func (cfg *Config) GetFolder() string { - if filepath.IsAbs(cfg.Folder) { - return cfg.Folder - } - - return filepath.Join(cfg.BaseDir, cfg.Folder) -} - -// ToDisplayJSON returns JSON config as a string. -func (cfg *Config) ToDisplayJSON() ([]byte, error) { - return config.DisplayJSON(cfg.toJSONConfig()) -} diff --git a/packages/networking/ipfs-cluster/datastore/badger/config_test.go b/packages/networking/ipfs-cluster/datastore/badger/config_test.go deleted file mode 100644 index c2a9659..0000000 --- a/packages/networking/ipfs-cluster/datastore/badger/config_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package badger - -import ( - "testing" - "time" - - "github.com/dgraph-io/badger" - "github.com/dgraph-io/badger/options" -) - -var cfgJSON = []byte(` -{ - "folder": "test", - "gc_discard_ratio": 0.1, - "gc_sleep": "2m", - "badger_options": { - "max_levels": 4, - "value_log_loading_mode": 0 - } -} -`) - -func TestLoadJSON(t *testing.T) { - cfg := &Config{} - err := cfg.LoadJSON(cfgJSON) - if err != nil { - t.Fatal(err) - } -} - -func TestToJSON(t *testing.T) { - cfg := &Config{} - cfg.LoadJSON(cfgJSON) - - if cfg.GCDiscardRatio != 0.1 { - t.Fatal("GCDiscardRatio should be 0.1") - } - - if cfg.GCInterval != DefaultGCInterval { - t.Fatal("GCInterval should default as it is unset") - } - - if cfg.GCSleep != 2*time.Minute { - t.Fatal("GCSleep should be 2m") - } - - if cfg.BadgerOptions.ValueLogLoadingMode != options.FileIO { - t.Fatalf("got: %d, want: %d", cfg.BadgerOptions.ValueLogLoadingMode, options.FileIO) - } - - if cfg.BadgerOptions.ValueLogFileSize != badger.DefaultOptions("").ValueLogFileSize { - t.Fatalf( - "got: %d, want: %d", - cfg.BadgerOptions.ValueLogFileSize, - badger.DefaultOptions("").ValueLogFileSize, - ) - } - - if cfg.BadgerOptions.TableLoadingMode != badger.DefaultOptions("").TableLoadingMode { - t.Fatalf("TableLoadingMode is not nil: got: %v, want: %v", cfg.BadgerOptions.TableLoadingMode, badger.DefaultOptions("").TableLoadingMode) - } - - if cfg.BadgerOptions.MaxLevels != 4 { - t.Fatalf("MaxLevels should be 4, got: %d", cfg.BadgerOptions.MaxLevels) - } - - newjson, err := cfg.ToJSON() - if err != nil { - t.Fatal(err) - } - - cfg = &Config{} - err = cfg.LoadJSON(newjson) - if err != nil { - t.Fatal(err) - } -} - -func TestDefault(t *testing.T) { - cfg := &Config{} - cfg.Default() - if cfg.Validate() != nil { - t.Fatal("error validating") - } - - cfg.GCDiscardRatio = 0 - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } -} diff --git a/packages/networking/ipfs-cluster/datastore/inmem/inmem.go b/packages/networking/ipfs-cluster/datastore/inmem/inmem.go deleted file mode 100644 index a6ef5e5..0000000 --- a/packages/networking/ipfs-cluster/datastore/inmem/inmem.go +++ /dev/null @@ -1,14 +0,0 @@ -// Package inmem provides a in-memory thread-safe datastore for use with -// Cluster. -package inmem - -import ( - ds "github.com/ipfs/go-datastore" - sync "github.com/ipfs/go-datastore/sync" -) - -// New returns a new thread-safe in-memory go-datastore. 
-func New() ds.Datastore { - mapDs := ds.NewMapDatastore() - return sync.MutexWrap(mapDs) -} diff --git a/packages/networking/ipfs-cluster/datastore/leveldb/config.go b/packages/networking/ipfs-cluster/datastore/leveldb/config.go deleted file mode 100644 index 6f0cdf3..0000000 --- a/packages/networking/ipfs-cluster/datastore/leveldb/config.go +++ /dev/null @@ -1,243 +0,0 @@ -package leveldb - -import ( - "encoding/json" - "errors" - "path/filepath" - - "github.com/imdario/mergo" - "github.com/ipfs-cluster/ipfs-cluster/config" - "github.com/kelseyhightower/envconfig" - goleveldb "github.com/syndtr/goleveldb/leveldb/opt" -) - -const configKey = "leveldb" -const envConfigKey = "cluster_leveldb" - -// Default values for LevelDB Config -const ( - DefaultSubFolder = "leveldb" -) - -var ( - // DefaultLevelDBOptions carries default options. Values are customized during Init(). - DefaultLevelDBOptions goleveldb.Options -) - -func init() { - // go-ipfs uses defaults and only allows to configure compression, but - // otherwise stores a small amount of values in LevelDB. - // We leave defaults. - // Example: - DefaultLevelDBOptions.NoSync = false -} - -// Config is used to initialize a LevelDB datastore. It implements the -// ComponentConfig interface. -type Config struct { - config.Saver - - // The folder for this datastore. Non-absolute paths are relative to - // the base configuration folder. - Folder string - - LevelDBOptions goleveldb.Options -} - -// levelDBOptions allows json serialization in our configuration of the -// goleveldb Options. -type levelDBOptions struct { - BlockCacheCapacity int `json:"block_cache_capacity"` - BlockCacheEvictRemoved bool `json:"block_cache_evict_removed"` - BlockRestartInterval int `json:"block_restart_interval"` - BlockSize int `json:"block_size"` - CompactionExpandLimitFactor int `json:"compaction_expand_limit_factor"` - CompactionGPOverlapsFactor int `json:"compaction_gp_overlaps_factor"` - CompactionL0Trigger int `json:"compaction_l0_trigger"` - CompactionSourceLimitFactor int `json:"compaction_source_limit_factor"` - CompactionTableSize int `json:"compaction_table_size"` - CompactionTableSizeMultiplier float64 `json:"compaction_table_size_multiplier"` - CompactionTableSizeMultiplierPerLevel []float64 `json:"compaction_table_size_multiplier_per_level"` - CompactionTotalSize int `json:"compaction_total_size"` - CompactionTotalSizeMultiplier float64 `json:"compaction_total_size_multiplier"` - CompactionTotalSizeMultiplierPerLevel []float64 `json:"compaction_total_size_multiplier_per_level"` - Compression uint `json:"compression"` - DisableBufferPool bool `json:"disable_buffer_pool"` - DisableBlockCache bool `json:"disable_block_cache"` - DisableCompactionBackoff bool `json:"disable_compaction_backoff"` - DisableLargeBatchTransaction bool `json:"disable_large_batch_transaction"` - IteratorSamplingRate int `json:"iterator_sampling_rate"` - NoSync bool `json:"no_sync"` - NoWriteMerge bool `json:"no_write_merge"` - OpenFilesCacheCapacity int `json:"open_files_cache_capacity"` - ReadOnly bool `json:"read_only"` - Strict uint `json:"strict"` - WriteBuffer int `json:"write_buffer"` - WriteL0PauseTrigger int `json:"write_l0_pause_trigger"` - WriteL0SlowdownTrigger int `json:"write_l0_slowdown_trigger"` -} - -func (ldbo *levelDBOptions) Unmarshal() *goleveldb.Options { - goldbo := &goleveldb.Options{} - goldbo.BlockCacheCapacity = ldbo.BlockCacheCapacity - goldbo.BlockCacheEvictRemoved = ldbo.BlockCacheEvictRemoved - goldbo.BlockRestartInterval = 
ldbo.BlockRestartInterval - goldbo.BlockSize = ldbo.BlockSize - goldbo.CompactionExpandLimitFactor = ldbo.CompactionExpandLimitFactor - goldbo.CompactionGPOverlapsFactor = ldbo.CompactionGPOverlapsFactor - goldbo.CompactionL0Trigger = ldbo.CompactionL0Trigger - goldbo.CompactionSourceLimitFactor = ldbo.CompactionSourceLimitFactor - goldbo.CompactionTableSize = ldbo.CompactionTableSize - goldbo.CompactionTableSizeMultiplier = ldbo.CompactionTableSizeMultiplier - goldbo.CompactionTableSizeMultiplierPerLevel = ldbo.CompactionTableSizeMultiplierPerLevel - goldbo.CompactionTotalSize = ldbo.CompactionTotalSize - goldbo.CompactionTotalSizeMultiplier = ldbo.CompactionTotalSizeMultiplier - goldbo.CompactionTotalSizeMultiplierPerLevel = ldbo.CompactionTotalSizeMultiplierPerLevel - goldbo.Compression = goleveldb.Compression(ldbo.Compression) - goldbo.DisableBufferPool = ldbo.DisableBufferPool - goldbo.DisableBlockCache = ldbo.DisableBlockCache - goldbo.DisableCompactionBackoff = ldbo.DisableCompactionBackoff - goldbo.DisableLargeBatchTransaction = ldbo.DisableLargeBatchTransaction - goldbo.IteratorSamplingRate = ldbo.IteratorSamplingRate - goldbo.NoSync = ldbo.NoSync - goldbo.NoWriteMerge = ldbo.NoWriteMerge - goldbo.OpenFilesCacheCapacity = ldbo.OpenFilesCacheCapacity - goldbo.ReadOnly = ldbo.ReadOnly - goldbo.Strict = goleveldb.Strict(ldbo.Strict) - goldbo.WriteBuffer = ldbo.WriteBuffer - goldbo.WriteL0PauseTrigger = ldbo.WriteL0PauseTrigger - goldbo.WriteL0SlowdownTrigger = ldbo.WriteL0SlowdownTrigger - return goldbo -} - -func (ldbo *levelDBOptions) Marshal(goldbo *goleveldb.Options) { - ldbo.BlockCacheCapacity = goldbo.BlockCacheCapacity - ldbo.BlockCacheEvictRemoved = goldbo.BlockCacheEvictRemoved - ldbo.BlockRestartInterval = goldbo.BlockRestartInterval - ldbo.BlockSize = goldbo.BlockSize - ldbo.CompactionExpandLimitFactor = goldbo.CompactionExpandLimitFactor - ldbo.CompactionGPOverlapsFactor = goldbo.CompactionGPOverlapsFactor - ldbo.CompactionL0Trigger = goldbo.CompactionL0Trigger - ldbo.CompactionSourceLimitFactor = goldbo.CompactionSourceLimitFactor - ldbo.CompactionTableSize = goldbo.CompactionTableSize - ldbo.CompactionTableSizeMultiplier = goldbo.CompactionTableSizeMultiplier - ldbo.CompactionTableSizeMultiplierPerLevel = goldbo.CompactionTableSizeMultiplierPerLevel - ldbo.CompactionTotalSize = goldbo.CompactionTotalSize - ldbo.CompactionTotalSizeMultiplier = goldbo.CompactionTotalSizeMultiplier - ldbo.CompactionTotalSizeMultiplierPerLevel = goldbo.CompactionTotalSizeMultiplierPerLevel - ldbo.Compression = uint(goldbo.Compression) - ldbo.DisableBufferPool = goldbo.DisableBufferPool - ldbo.DisableBlockCache = goldbo.DisableBlockCache - ldbo.DisableCompactionBackoff = goldbo.DisableCompactionBackoff - ldbo.DisableLargeBatchTransaction = goldbo.DisableLargeBatchTransaction - ldbo.IteratorSamplingRate = goldbo.IteratorSamplingRate - ldbo.NoSync = goldbo.NoSync - ldbo.NoWriteMerge = goldbo.NoWriteMerge - ldbo.OpenFilesCacheCapacity = goldbo.OpenFilesCacheCapacity - ldbo.ReadOnly = goldbo.ReadOnly - ldbo.Strict = uint(goldbo.Strict) - ldbo.WriteBuffer = goldbo.WriteBuffer - ldbo.WriteL0PauseTrigger = goldbo.WriteL0PauseTrigger - ldbo.WriteL0SlowdownTrigger = goldbo.WriteL0SlowdownTrigger -} - -type jsonConfig struct { - Folder string `json:"folder,omitempty"` - LevelDBOptions levelDBOptions `json:"leveldb_options,omitempty"` -} - -// ConfigKey returns a human-friendly identifier for this type of Datastore. 
-func (cfg *Config) ConfigKey() string { - return configKey -} - -// Default initializes this Config with sensible values. -func (cfg *Config) Default() error { - cfg.Folder = DefaultSubFolder - cfg.LevelDBOptions = DefaultLevelDBOptions - return nil -} - -// ApplyEnvVars fills in any Config fields found as environment variables. -func (cfg *Config) ApplyEnvVars() error { - jcfg := cfg.toJSONConfig() - - err := envconfig.Process(envConfigKey, jcfg) - if err != nil { - return err - } - - return cfg.applyJSONConfig(jcfg) -} - -// Validate checks that the fields of this Config have working values, -// at least in appearance. -func (cfg *Config) Validate() error { - if cfg.Folder == "" { - return errors.New("folder is unset") - } - - return nil -} - -// LoadJSON reads the fields of this Config from a JSON byteslice as -// generated by ToJSON. -func (cfg *Config) LoadJSON(raw []byte) error { - jcfg := &jsonConfig{} - err := json.Unmarshal(raw, jcfg) - if err != nil { - return err - } - cfg.Default() - - return cfg.applyJSONConfig(jcfg) -} - -func (cfg *Config) applyJSONConfig(jcfg *jsonConfig) error { - config.SetIfNotDefault(jcfg.Folder, &cfg.Folder) - - ldbOpts := jcfg.LevelDBOptions.Unmarshal() - - if err := mergo.Merge(&cfg.LevelDBOptions, ldbOpts, mergo.WithOverride); err != nil { - return err - } - - return cfg.Validate() -} - -// ToJSON generates a JSON-formatted human-friendly representation of this -// Config. -func (cfg *Config) ToJSON() (raw []byte, err error) { - jcfg := cfg.toJSONConfig() - - raw, err = config.DefaultJSONMarshal(jcfg) - return -} - -func (cfg *Config) toJSONConfig() *jsonConfig { - jCfg := &jsonConfig{} - - if cfg.Folder != DefaultSubFolder { - jCfg.Folder = cfg.Folder - } - - bo := &levelDBOptions{} - bo.Marshal(&cfg.LevelDBOptions) - jCfg.LevelDBOptions = *bo - - return jCfg -} - -// GetFolder returns the LevelDB folder. -func (cfg *Config) GetFolder() string { - if filepath.IsAbs(cfg.Folder) { - return cfg.Folder - } - - return filepath.Join(cfg.BaseDir, cfg.Folder) -} - -// ToDisplayJSON returns JSON config as a string. 
-func (cfg *Config) ToDisplayJSON() ([]byte, error) { - return config.DisplayJSON(cfg.toJSONConfig()) -} diff --git a/packages/networking/ipfs-cluster/datastore/leveldb/config_test.go b/packages/networking/ipfs-cluster/datastore/leveldb/config_test.go deleted file mode 100644 index 2831f44..0000000 --- a/packages/networking/ipfs-cluster/datastore/leveldb/config_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package leveldb - -import ( - "testing" -) - -var cfgJSON = []byte(` -{ - "folder": "test", - "leveldb_options": { - "no_sync": true, - "compaction_total_size_multiplier": 1.5 - } -} -`) - -func TestLoadJSON(t *testing.T) { - cfg := &Config{} - err := cfg.LoadJSON(cfgJSON) - if err != nil { - t.Fatal(err) - } -} - -func TestToJSON(t *testing.T) { - cfg := &Config{} - cfg.LoadJSON(cfgJSON) - - if !cfg.LevelDBOptions.NoSync { - t.Fatalf("NoSync should be true") - } - - if cfg.LevelDBOptions.CompactionTotalSizeMultiplier != 1.5 { - t.Fatal("TotalSizeMultiplier should be 1.5") - } - - newjson, err := cfg.ToJSON() - if err != nil { - t.Fatal(err) - } - - cfg = &Config{} - err = cfg.LoadJSON(newjson) - if err != nil { - t.Fatal(err) - } -} diff --git a/packages/networking/ipfs-cluster/datastore/leveldb/leveldb.go b/packages/networking/ipfs-cluster/datastore/leveldb/leveldb.go deleted file mode 100644 index acba1a7..0000000 --- a/packages/networking/ipfs-cluster/datastore/leveldb/leveldb.go +++ /dev/null @@ -1,32 +0,0 @@ -// Package leveldb provides a configurable LevelDB go-datastore for use with -// IPFS Cluster. -package leveldb - -import ( - "os" - - ds "github.com/ipfs/go-datastore" - leveldbds "github.com/ipfs/go-ds-leveldb" - "github.com/pkg/errors" -) - -// New returns a LevelDB datastore configured with the given -// configuration. -func New(cfg *Config) (ds.Datastore, error) { - folder := cfg.GetFolder() - err := os.MkdirAll(folder, 0700) - if err != nil { - return nil, errors.Wrap(err, "creating leveldb folder") - } - return leveldbds.NewDatastore(folder, (*leveldbds.Options)(&cfg.LevelDBOptions)) -} - -// Cleanup deletes the leveldb datastore. -func Cleanup(cfg *Config) error { - folder := cfg.GetFolder() - if _, err := os.Stat(folder); os.IsNotExist(err) { - return nil - } - return os.RemoveAll(cfg.GetFolder()) - -} diff --git a/packages/networking/ipfs-cluster/docker-compose.yml b/packages/networking/ipfs-cluster/docker-compose.yml deleted file mode 100644 index ad25ec9..0000000 --- a/packages/networking/ipfs-cluster/docker-compose.yml +++ /dev/null @@ -1,119 +0,0 @@ -version: '3.4' - -# This is an example docker-compose file to quickly test an IPFS Cluster -# with multiple peers on a contained environment. - -# It runs 3 cluster peers (cluster0, cluster1...) attached to go-ipfs daemons -# (ipfs0, ipfs1...) using the CRDT consensus component. Cluster peers -# autodiscover themselves using mDNS on the docker internal network. -# -# To interact with the cluster use "ipfs-cluster-ctl" (the cluster0 API port is -# exposed to the locahost. You can also "docker exec -ti cluster0 sh" and run -# it from the container. "ipfs-cluster-ctl peers ls" should show all 3 peers a few -# seconds after start. -# -# For persistence, a "compose" folder is created and used to store configurations -# and states. This can be used to edit configurations in subsequent runs. It looks -# as follows: -# -# compose/ -# |-- cluster0 -# |-- cluster1 -# |-- ... -# |-- ipfs0 -# |-- ipfs1 -# |-- ... -# -# During the first start, default configurations are created for all peers. 
- -services: - -################################################################################## -## Cluster PEER 0 ################################################################ -################################################################################## - - ipfs0: - container_name: ipfs0 - image: ipfs/go-ipfs:latest -# ports: -# - "4001:4001" # ipfs swarm - expose if needed/wanted -# - "5001:5001" # ipfs api - expose if needed/wanted -# - "8080:8080" # ipfs gateway - expose if needed/wanted - volumes: - - ./compose/ipfs0:/data/ipfs - - cluster0: - container_name: cluster0 - image: ipfs/ipfs-cluster:latest - depends_on: - - ipfs0 - environment: - CLUSTER_PEERNAME: cluster0 - CLUSTER_SECRET: ${CLUSTER_SECRET} # From shell variable if set - CLUSTER_IPFSHTTP_NODEMULTIADDRESS: /dns4/ipfs0/tcp/5001 - CLUSTER_CRDT_TRUSTEDPEERS: '*' # Trust all peers in Cluster - CLUSTER_RESTAPI_HTTPLISTENMULTIADDRESS: /ip4/0.0.0.0/tcp/9094 # Expose API - CLUSTER_MONITORPINGINTERVAL: 2s # Speed up peer discovery - ports: - # Open API port (allows ipfs-cluster-ctl usage on host) - - "127.0.0.1:9094:9094" - # The cluster swarm port would need to be exposed if this container - # was to connect to cluster peers on other hosts. - # But this is just a testing cluster. - # - "9095:9095" # Cluster IPFS Proxy endpoint - # - "9096:9096" # Cluster swarm endpoint - volumes: - - ./compose/cluster0:/data/ipfs-cluster - -################################################################################## -## Cluster PEER 1 ################################################################ -################################################################################## - -# See Cluster PEER 0 for comments (all removed here and below) - ipfs1: - container_name: ipfs1 - image: ipfs/go-ipfs:latest - volumes: - - ./compose/ipfs1:/data/ipfs - - cluster1: - container_name: cluster1 - image: ipfs/ipfs-cluster:latest - depends_on: - - ipfs1 - environment: - CLUSTER_PEERNAME: cluster1 - CLUSTER_SECRET: ${CLUSTER_SECRET} - CLUSTER_IPFSHTTP_NODEMULTIADDRESS: /dns4/ipfs1/tcp/5001 - CLUSTER_CRDT_TRUSTEDPEERS: '*' - CLUSTER_MONITORPINGINTERVAL: 2s # Speed up peer discovery - volumes: - - ./compose/cluster1:/data/ipfs-cluster - -################################################################################## -## Cluster PEER 2 ################################################################ -################################################################################## - -# See Cluster PEER 0 for comments (all removed here and below) - ipfs2: - container_name: ipfs2 - image: ipfs/go-ipfs:latest - volumes: - - ./compose/ipfs2:/data/ipfs - - cluster2: - container_name: cluster2 - image: ipfs/ipfs-cluster:latest - depends_on: - - ipfs2 - environment: - CLUSTER_PEERNAME: cluster2 - CLUSTER_SECRET: ${CLUSTER_SECRET} - CLUSTER_IPFSHTTP_NODEMULTIADDRESS: /dns4/ipfs2/tcp/5001 - CLUSTER_CRDT_TRUSTEDPEERS: '*' - CLUSTER_MONITORPINGINTERVAL: 2s # Speed up peer discovery - volumes: - - ./compose/cluster2:/data/ipfs-cluster - -# For adding more peers, copy PEER 1 and rename things to ipfs2, cluster2. -# Keep bootstrapping to cluster0. 
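(Editor's note, not part of the patch: the compose file deleted above publishes cluster0's REST API on 127.0.0.1:9094 for use with ipfs-cluster-ctl. As a minimal sketch of talking to that API directly, assuming the stock ipfs-cluster REST API layout where GET /id describes the local peer; the program below is illustrative and was never part of this tree:

    package main

    import (
    	"fmt"
    	"io"
    	"net/http"
    )

    func main() {
    	// Query the REST API port exposed by the cluster0 container above.
    	resp, err := http.Get("http://127.0.0.1:9094/id")
    	if err != nil {
    		panic(err)
    	}
    	defer resp.Body.Close()

    	// The response body is a JSON document describing the local cluster peer.
    	body, err := io.ReadAll(resp.Body)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(body))
    }

The same port serves the rest of the API, e.g. GET /peers should list all three peers once mDNS discovery completes, which is what ipfs-cluster-ctl itself talks to under the hood.)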
diff --git a/packages/networking/ipfs-cluster/go.mod b/packages/networking/ipfs-cluster/go.mod deleted file mode 100644 index 1d23515..0000000 --- a/packages/networking/ipfs-cluster/go.mod +++ /dev/null @@ -1,217 +0,0 @@ -module github.com/ipfs-cluster/ipfs-cluster - -require ( - contrib.go.opencensus.io/exporter/jaeger v0.2.1 - contrib.go.opencensus.io/exporter/prometheus v0.4.2 - github.com/blang/semver v3.5.1+incompatible - github.com/coreos/go-systemd/v22 v22.3.2 - github.com/dgraph-io/badger v1.6.2 - github.com/dustin/go-humanize v1.0.0 - github.com/golang-jwt/jwt/v4 v4.4.2 - github.com/google/uuid v1.3.0 - github.com/gorilla/handlers v1.5.1 - github.com/gorilla/mux v1.8.0 - github.com/hashicorp/go-hclog v1.3.0 - github.com/hashicorp/raft v1.1.1 - github.com/hashicorp/raft-boltdb v0.0.0-20190605210249-ef2e128ed477 - github.com/hsanjuan/ipfs-lite v1.4.2 - github.com/imdario/mergo v0.3.13 - github.com/ipfs/go-block-format v0.0.3 - github.com/ipfs/go-cid v0.3.2 - github.com/ipfs/go-datastore v0.6.0 - github.com/ipfs/go-ds-badger v0.3.0 - github.com/ipfs/go-ds-crdt v0.3.7 - github.com/ipfs/go-ds-leveldb v0.5.0 - github.com/ipfs/go-fs-lock v0.0.7 - github.com/ipfs/go-ipfs-api v0.3.0 - github.com/ipfs/go-ipfs-chunker v0.0.5 - github.com/ipfs/go-ipfs-cmds v0.6.0 - github.com/ipfs/go-ipfs-ds-help v1.1.0 - github.com/ipfs/go-ipfs-files v0.1.1 - github.com/ipfs/go-ipfs-pinner v0.2.1 - github.com/ipfs/go-ipfs-posinfo v0.0.1 - github.com/ipfs/go-ipld-cbor v0.0.6 - github.com/ipfs/go-ipld-format v0.4.0 - github.com/ipfs/go-ipns v0.2.0 - github.com/ipfs/go-log/v2 v2.5.1 - github.com/ipfs/go-merkledag v0.8.1 - github.com/ipfs/go-mfs v0.1.3-0.20210507195338-96fbfa122164 - github.com/ipfs/go-path v0.3.0 - github.com/ipfs/go-unixfs v0.4.0 - github.com/ipld/go-car v0.5.0 - github.com/kelseyhightower/envconfig v1.4.0 - github.com/kishansagathiya/go-dot v0.1.0 - github.com/lanzafame/go-libp2p-ocgorpc v0.1.1 - github.com/libp2p/go-libp2p v0.22.0 - github.com/libp2p/go-libp2p-consensus v0.0.1 - github.com/libp2p/go-libp2p-gorpc v0.5.0 - github.com/libp2p/go-libp2p-gostream v0.5.0 - github.com/libp2p/go-libp2p-http v0.4.0 - github.com/libp2p/go-libp2p-kad-dht v0.18.0 - github.com/libp2p/go-libp2p-pubsub v0.8.0 - github.com/libp2p/go-libp2p-raft v0.2.0 - github.com/libp2p/go-libp2p-record v0.2.0 - github.com/multiformats/go-multiaddr v0.6.0 - github.com/multiformats/go-multiaddr-dns v0.3.1 - github.com/multiformats/go-multicodec v0.6.0 - github.com/multiformats/go-multihash v0.2.1 - github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.13.0 - github.com/rs/cors v1.8.2 - github.com/syndtr/goleveldb v1.0.0 - github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 - github.com/ugorji/go/codec v1.2.7 - github.com/urfave/cli v1.22.10 - github.com/urfave/cli/v2 v2.14.1 - go.opencensus.io v0.23.0 - go.uber.org/multierr v1.8.0 - golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e - google.golang.org/protobuf v1.28.1 -) - -require ( - github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect - github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect - github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 // indirect - github.com/benbjohnson/clock v1.3.0 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/boltdb/bolt v1.3.1 // indirect - github.com/cenkalti/backoff v2.2.1+incompatible // indirect - github.com/cespare/xxhash v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect - github.com/cheekybits/genny v1.0.0 // 
indirect - github.com/containerd/cgroups v1.0.4 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect - github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect - github.com/cskr/pubsub v1.0.2 // indirect - github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect - github.com/dgraph-io/ristretto v0.0.2 // indirect - github.com/docker/go-units v0.4.0 // indirect - github.com/elastic/gosigar v0.14.2 // indirect - github.com/fatih/color v1.13.0 // indirect - github.com/felixge/httpsnoop v1.0.1 // indirect - github.com/flynn/noise v1.0.0 // indirect - github.com/francoispqt/gojay v1.2.13 // indirect - github.com/fsnotify/fsnotify v1.5.4 // indirect - github.com/go-kit/log v0.2.1 // indirect - github.com/go-logfmt/logfmt v0.5.1 // indirect - github.com/go-logr/logr v1.2.3 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect - github.com/godbus/dbus/v5 v5.1.0 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect - github.com/google/gopacket v1.1.19 // indirect - github.com/gorilla/websocket v1.5.0 // indirect - github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-immutable-radix v1.0.0 // indirect - github.com/hashicorp/go-msgpack v0.5.5 // indirect - github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/golang-lru v0.5.4 // indirect - github.com/huin/goupnp v1.0.3 // indirect - github.com/ipfs/bbloom v0.0.4 // indirect - github.com/ipfs/go-bitfield v1.0.0 // indirect - github.com/ipfs/go-bitswap v0.10.0 // indirect - github.com/ipfs/go-blockservice v0.4.0 // indirect - github.com/ipfs/go-cidutil v0.1.0 // indirect - github.com/ipfs/go-fetcher v1.6.1 // indirect - github.com/ipfs/go-ipfs-blockstore v1.2.0 // indirect - github.com/ipfs/go-ipfs-delay v0.0.1 // indirect - github.com/ipfs/go-ipfs-exchange-interface v0.2.0 // indirect - github.com/ipfs/go-ipfs-exchange-offline v0.3.0 // indirect - github.com/ipfs/go-ipfs-pq v0.0.2 // indirect - github.com/ipfs/go-ipfs-provider v0.7.1 // indirect - github.com/ipfs/go-ipfs-util v0.0.2 // indirect - github.com/ipfs/go-ipld-legacy v0.1.0 // indirect - github.com/ipfs/go-log v1.0.5 // indirect - github.com/ipfs/go-metrics-interface v0.0.1 // indirect - github.com/ipfs/go-peertaskqueue v0.7.0 // indirect - github.com/ipfs/go-verifcid v0.0.1 // indirect - github.com/ipld/go-codec-dagpb v1.5.0 // indirect - github.com/ipld/go-ipld-prime v0.18.0 // indirect - github.com/jackpal/go-nat-pmp v1.0.2 // indirect - github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect - github.com/jbenet/goprocess v0.1.4 // indirect - github.com/klauspost/compress v1.15.1 // indirect - github.com/klauspost/cpuid/v2 v2.1.0 // indirect - github.com/koron/go-ssdp v0.0.3 // indirect - github.com/libp2p/go-buffer-pool v0.1.0 // indirect - github.com/libp2p/go-cidranger v1.1.0 // indirect - github.com/libp2p/go-flow-metrics v0.1.0 // indirect - github.com/libp2p/go-libp2p-asn-util v0.2.0 // indirect - github.com/libp2p/go-libp2p-core v0.20.0 // indirect - github.com/libp2p/go-libp2p-kbucket v0.4.7 // indirect - github.com/libp2p/go-libp2p-routing-helpers v0.2.3 // indirect - github.com/libp2p/go-msgio v0.2.0 // indirect - 
github.com/libp2p/go-nat v0.1.0 // indirect - github.com/libp2p/go-netroute v0.2.0 // indirect - github.com/libp2p/go-openssl v0.1.0 // indirect - github.com/libp2p/go-reuseport v0.2.0 // indirect - github.com/libp2p/go-yamux/v3 v3.1.2 // indirect - github.com/libp2p/zeroconf/v2 v2.2.0 // indirect - github.com/lucas-clemente/quic-go v0.28.1 // indirect - github.com/marten-seemann/qtls-go1-16 v0.1.5 // indirect - github.com/marten-seemann/qtls-go1-17 v0.1.2 // indirect - github.com/marten-seemann/qtls-go1-18 v0.1.2 // indirect - github.com/marten-seemann/qtls-go1-19 v0.1.0 // indirect - github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect - github.com/mattn/go-colorable v0.1.12 // indirect - github.com/mattn/go-isatty v0.0.16 // indirect - github.com/mattn/go-pointer v0.0.1 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect - github.com/miekg/dns v1.1.50 // indirect - github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect - github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect - github.com/minio/sha256-simd v1.0.0 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mr-tron/base58 v1.2.0 // indirect - github.com/multiformats/go-base32 v0.0.4 // indirect - github.com/multiformats/go-base36 v0.1.0 // indirect - github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect - github.com/multiformats/go-multibase v0.1.1 // indirect - github.com/multiformats/go-multistream v0.3.3 // indirect - github.com/multiformats/go-varint v0.0.6 // indirect - github.com/nxadm/tail v1.4.8 // indirect - github.com/onsi/ginkgo v1.16.5 // indirect - github.com/opencontainers/runtime-spec v1.0.2 // indirect - github.com/opentracing/opentracing-go v1.2.0 // indirect - github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect - github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e // indirect - github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.37.0 // indirect - github.com/prometheus/procfs v0.8.0 // indirect - github.com/prometheus/statsd_exporter v0.22.7 // indirect - github.com/raulk/go-watchdog v1.3.0 // indirect - github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect - github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/uber/jaeger-client-go v2.25.0+incompatible // indirect - github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158 // indirect - github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect - github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect - github.com/whyrusleeping/tar-utils v0.0.0-20180509141711-8c6c8ba81d5c // indirect - github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee // indirect - github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect - go.opentelemetry.io/otel v1.7.0 // indirect - go.opentelemetry.io/otel/trace v1.7.0 // indirect - go.uber.org/atomic v1.10.0 // indirect - go.uber.org/zap v1.22.0 // indirect - go4.org v0.0.0-20200411211856-f5505b9728dd // indirect - golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect - golang.org/x/net v0.0.0-20220812174116-3211cb980234 // indirect - golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect - golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab // indirect - golang.org/x/tools v0.1.12 // indirect - golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f 
// indirect - google.golang.org/api v0.45.0 // indirect - google.golang.org/genproto v0.0.0-20210510173355-fb37daa5cd7a // indirect - google.golang.org/grpc v1.45.0 // indirect - gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect - lukechampine.com/blake3 v1.1.7 // indirect -) - -go 1.19 diff --git a/packages/networking/ipfs-cluster/go.sum b/packages/networking/ipfs-cluster/go.sum deleted file mode 100644 index b1c43f8..0000000 --- a/packages/networking/ipfs-cluster/go.sum +++ /dev/null @@ -1,1958 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod 
h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -contrib.go.opencensus.io/exporter/jaeger v0.2.1 h1:yGBYzYMewVL0yO9qqJv3Z5+IRhPdU7e9o/2oKpX4YvI= -contrib.go.opencensus.io/exporter/jaeger v0.2.1/go.mod h1:Y8IsLgdxqh1QxYxPC5IgXVmBaeLUeQFfBeBi9PbeZd0= -contrib.go.opencensus.io/exporter/prometheus v0.4.2 h1:sqfsYl5GIY/L570iT+l93ehxaWJs2/OwXtiWwew3oAg= -contrib.go.opencensus.io/exporter/prometheus v0.4.2/go.mod h1:dvEHbiKmgvbr5pjaF9fpw1KeYcjrnC1J8B+JKjsZyRQ= -dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= -dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= -dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= -git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= -github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= -github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= -github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= -github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= -github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod 
h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= -github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM= -github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= -github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= -github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= -github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= -github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= -github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod 
h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= -github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= -github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= -github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= -github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= -github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= -github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= -github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= -github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= -github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= -github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= -github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= -github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= -github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= -github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= -github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= -github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= -github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 h1:SKI1/fuSdodxmNNyVBR8d7X/HuLnRpvvFO0AgyQk764= -github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod 
[… go.sum deletion continues: several hundred removed checksum lines elided — deleted Go module entries (version and `h1:` hashes) for github.com/ipfs/*, github.com/libp2p/*, github.com/hashicorp/*, and related dependencies; the wrapped dump carries no further information beyond the removal of the lock file …]
-github.com/libp2p/go-msgio v0.2.0 h1:W6shmB+FeynDrUVl2dgFQvzfBZcXiyqY4VmpQLu9FqU= -github.com/libp2p/go-msgio v0.2.0/go.mod h1:dBVM1gW3Jk9XqHkU4eKdGvVHdLa51hoGfll6jMJMSlY= -github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= -github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= -github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= -github.com/libp2p/go-nat v0.1.0 h1:MfVsH6DLcpa04Xr+p8hmVRG4juse0s3J8HyNWYHffXg= -github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC3uRBM= -github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= -github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= -github.com/libp2p/go-netroute v0.1.5/go.mod h1:V1SR3AaECRkEQCoFFzYwVYWvYIEtlxx89+O3qcpCl4A= -github.com/libp2p/go-netroute v0.1.6/go.mod h1:AqhkMh0VuWmfgtxKPp3Oc1LdU5QSWS7wl0QLhSZqXxQ= -github.com/libp2p/go-netroute v0.2.0 h1:0FpsbsvuSnAhXFnCY0VLFbJOzaK0VnP0r1QT/o4nWRE= -github.com/libp2p/go-netroute v0.2.0/go.mod h1:Vio7LTzZ+6hoT4CMZi5/6CpY3Snzh2vgZhWgxMNwlQI= -github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= -github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= -github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= -github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= -github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= -github.com/libp2p/go-openssl v0.1.0 h1:LBkKEcUv6vtZIQLVTegAil8jbNpJErQ9AnT+bWV+Ooo= -github.com/libp2p/go-openssl v0.1.0/go.mod h1:OiOxwPpL3n4xlenjx2h7AwSGaFSC/KZvf6gNdOBQMtc= -github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= -github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ= -github.com/libp2p/go-reuseport v0.2.0 h1:18PRvIMlpY6ZK85nIAicSBuXXvrYoSw3dsBAR7zc560= -github.com/libp2p/go-reuseport v0.2.0/go.mod h1:bvVho6eLMm6Bz5hmU0LYN3ixd3nPPvtIlaURZZgOY4k= -github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= -github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= -github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= -github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= -github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= -github.com/libp2p/go-sockaddr v0.1.1/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= -github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= -github.com/libp2p/go-stream-muxer v0.1.0/go.mod h1:8JAVsjeRBCWwPoZeH0W1imLOcriqXJyFvB0mR4A04sQ= -github.com/libp2p/go-stream-muxer-multistream v0.1.1/go.mod h1:zmGdfkQ1AzOECIAcccoL8L//laqawOsO03zX8Sa+eGw= -github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc= -github.com/libp2p/go-stream-muxer-multistream v0.3.0/go.mod h1:yDh8abSIzmZtqtOt64gFJUXEryejzNb0lisTt+fAMJA= -github.com/libp2p/go-tcp-transport v0.0.4/go.mod h1:+E8HvC8ezEVOxIo3V5vCK9l1y/19K427vCzQ+xHKH/o= -github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= -github.com/libp2p/go-tcp-transport v0.1.1/go.mod 
h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= -github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0= -github.com/libp2p/go-tcp-transport v0.2.1/go.mod h1:zskiJ70MEfWz2MKxvFB/Pv+tPIB1PpPUrHIWQ8aFw7M= -github.com/libp2p/go-tcp-transport v0.2.3/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU= -github.com/libp2p/go-testutil v0.0.1/go.mod h1:iAcJc/DKJQanJ5ws2V+u5ywdL2n12X1WbbEG+Jjy69I= -github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc= -github.com/libp2p/go-ws-transport v0.0.5/go.mod h1:Qbl4BxPfXXhhd/o0wcrgoaItHqA9tnZjoFZnxykuaXU= -github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw7yT74kj3raBFuo= -github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM= -github.com/libp2p/go-ws-transport v0.3.0/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= -github.com/libp2p/go-ws-transport v0.4.0/go.mod h1:EcIEKqf/7GDjth6ksuS/6p7R49V4CBY6/E7R/iyhYUA= -github.com/libp2p/go-yamux v1.2.1/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= -github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= -github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= -github.com/libp2p/go-yamux/v2 v2.0.0/go.mod h1:NVWira5+sVUIU6tu1JWvaRn1dRnG+cawOJiflsAM+7U= -github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ= -github.com/libp2p/go-yamux/v3 v3.1.2 h1:lNEy28MBk1HavUAlzKgShp+F6mn/ea1nDYWftZhFW9Q= -github.com/libp2p/go-yamux/v3 v3.1.2/go.mod h1:jeLEQgLXqE2YqX1ilAClIfCMDY+0uXQUKmmb/qp0gT4= -github.com/libp2p/zeroconf/v2 v2.2.0 h1:Cup06Jv6u81HLhIj1KasuNM/RHHrJ8T7wOTS4+Tv53Q= -github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= -github.com/lucas-clemente/quic-go v0.28.1 h1:Uo0lvVxWg5la9gflIF9lwa39ONq85Xq2D91YNEIslzU= -github.com/lucas-clemente/quic-go v0.28.1/go.mod h1:oGz5DKK41cJt5+773+BSO9BXDsREY4HLf7+0odGAPO0= -github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= 
-github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= -github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= -github.com/marten-seemann/qtls-go1-16 v0.1.5 h1:o9JrYPPco/Nukd/HpOHMHZoBDXQqoNtUCmny98/1uqQ= -github.com/marten-seemann/qtls-go1-16 v0.1.5/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= -github.com/marten-seemann/qtls-go1-17 v0.1.2 h1:JADBlm0LYiVbuSySCHeY863dNkcpMmDR7s0bLKJeYlQ= -github.com/marten-seemann/qtls-go1-17 v0.1.2/go.mod h1:C2ekUKcDdz9SDWxec1N/MvcXBpaX9l3Nx67XaR84L5s= -github.com/marten-seemann/qtls-go1-18 v0.1.2 h1:JH6jmzbduz0ITVQ7ShevK10Av5+jBEKAHMntXmIV7kM= -github.com/marten-seemann/qtls-go1-18 v0.1.2/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4= -github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI= -github.com/marten-seemann/qtls-go1-19 v0.1.0 h1:rLFKD/9mp/uq1SYGYuVZhm83wkmU95pK5df3GufyYYU= -github.com/marten-seemann/qtls-go1-19 v0.1.0/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI= -github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= -github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-pointer v0.0.1 h1:n+XhsuGeVO6MEAp7xyEukFINEa+Quek5psIR/ylA6o0= -github.com/mattn/go-pointer v0.0.1/go.mod h1:2zXcozF6qYGgmsG+SeTZz3oAbFLdD3OWqnUbNvJZAlc= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= 
-github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= -github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= -github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= -github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= -github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= -github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= -github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU= -github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= -github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= -github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= -github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= -github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= -github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= -github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= -github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= -github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= -github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= 
-github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= -github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= -github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= -github.com/multiformats/go-base32 v0.0.4 h1:+qMh4a2f37b4xTNs6mqitDinryCI+tfO2dRVMN9mjSE= -github.com/multiformats/go-base32 v0.0.4/go.mod h1:jNLFzjPZtp3aIARHbJRZIaPuspdH0J6q39uUM5pnABM= -github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= -github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= -github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= -github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= -github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= -github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= -github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= -github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr v0.2.1/go.mod h1:s/Apk6IyxfvMjDafnhJgJ3/46z7tZ04iMk5wP4QMGGE= -github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y= -github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= -github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= -github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0= -github.com/multiformats/go-multiaddr v0.6.0 h1:qMnoOPj2s8xxPU5kZ57Cqdr0hHhARz7mFsPMIiYNqzg= -github.com/multiformats/go-multiaddr v0.6.0/go.mod h1:F4IpaKZuPP360tOMn2Tpyu0At8w23aRyVqeK0DbFeGM= -github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= -github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= -github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= -github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= -github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= -github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q= -github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= -github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= -github.com/multiformats/go-multiaddr-net v0.0.1/go.mod h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU= -github.com/multiformats/go-multiaddr-net v0.1.0/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= -github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= -github.com/multiformats/go-multiaddr-net v0.1.2/go.mod h1:QsWt3XK/3hwvNxZJp92iMQKME1qHfpYmyIjFVsSOY6Y= -github.com/multiformats/go-multiaddr-net v0.1.3/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= 
-github.com/multiformats/go-multiaddr-net v0.1.4/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= -github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= -github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA= -github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= -github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= -github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI= -github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8= -github.com/multiformats/go-multicodec v0.6.0 h1:KhH2kSuCARyuJraYMFxrNO3DqIaYhOdS039kbhgVwpE= -github.com/multiformats/go-multicodec v0.6.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= -github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= -github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= -github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= -github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= -github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= -github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= -github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= -github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= -github.com/multiformats/go-multihash v0.0.16/go.mod h1:zhfEIgVnB/rPMfxgFw15ZmGoNaKyNUIE4IWHG/kC+Ag= -github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108= -github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc= -github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= -github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= -github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= -github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= -github.com/multiformats/go-multistream v0.2.0/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= -github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= -github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs= -github.com/multiformats/go-multistream v0.3.3 h1:d5PZpjwRgVlbwfdTDjife7XszfZd8KYWfROYFlGcR8o= -github.com/multiformats/go-multistream v0.3.3/go.mod h1:ODRoqamLUsETKS9BNcII4gcRsJBU5VAwRIv7O39cEXg= -github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= -github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= -github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= -github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2W/KhfNY= -github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack 
v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= -github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak= -github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= -github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= -github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing/basictracer-go v1.0.0/go.mod 
h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= -github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= -github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= -github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= -github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= -github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= -github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e h1:ZOcivgkkFRnjfoTcGsDq3UQYiBmekwLA+qg0OjyB/ls= -github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= 
-github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= -github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/statsd_exporter v0.22.7 h1:7Pji/i2GuhK6Lu7DHrtTkFmNBCudCPT1pX2CziuyQR0= -github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI= -github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= -github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= -github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= -github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= -github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= -github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= -github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= -github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= -github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod 
h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= -github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= -github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= -github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= -github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= -github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= -github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= -github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= -github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= -github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= -github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= -github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= -github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= -github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= -github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v1.0.0 h1:UVQPSSmc3qtTi+zPPkCXvZX9VvW/xT/NsRvKfwY81a8= -github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= -github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= -github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= -github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod 
h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= -github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= -github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= -github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= -github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= -github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= -github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc= -github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= -github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= -github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= -github.com/texttheater/golang-levenshtein v0.0.0-20180516184445-d188e65d659e/go.mod h1:XDKHRm5ThF8YJjx001LtgelzsoaEcvnA7lVWz9EeX3g= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tv42/httpunix 
v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U= -github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0= -github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.10 h1:p8Fspmz3iTctJstry1PYS3HVdllxnEzTEsgIgtxTrCk= -github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli/v2 v2.14.1 h1:0Sx+C9404t2+DPuIJ3UpZFOEFhNG3wPxMj7uZHyZKFA= -github.com/urfave/cli/v2 v2.14.1/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI= -github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= -github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= -github.com/warpfork/go-testmark v0.10.0 h1:E86YlUMYfwIacEsQGlnTvjk1IgYkyTGjPhF0RnwTCmw= -github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= -github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= -github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w= -github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= -github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158 h1:WXhVOwj2USAXB5oMDwRl3piOux2XMV9TANaYxXHdkoE= -github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= -github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= -github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= -github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= -github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= -github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= -github.com/whyrusleeping/go-logging v0.0.1/go.mod h1:lDPYj54zutzG1XYfHAhcc7oNXEburHQBn+Iqd4yS4vE= -github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f/go.mod h1:cZNvX9cFybI01GriPRMXDtczuvUhgbcYr9iCGaNlRv8= -github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA= -github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= -github.com/whyrusleeping/mdns 
v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= -github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= -github.com/whyrusleeping/tar-utils v0.0.0-20180509141711-8c6c8ba81d5c h1:GGsyl0dZ2jJgVT+VvWBf/cNijrHRhkrTjkmp5wg7li0= -github.com/whyrusleeping/tar-utils v0.0.0-20180509141711-8c6c8ba81d5c/go.mod h1:xxcJeBb7SIUl/Wzkz1eVKJE/CB34YNrqX2TQI6jY9zs= -github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow= -github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg= -github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= -github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM= -go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= -go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o= -go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod 
h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= -go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= -go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -go.uber.org/zap v1.22.0 h1:Zcye5DUgBloQ9BaT4qc9BnjOFog5TvBSAGkJ3Nf70c0= -go.uber.org/zap v1.22.0/go.mod h1:H4siCOZOrAolnUPJEkfaSjDqyP+BDS0DdDWzwcgt3+U= -go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= -go4.org v0.0.0-20200411211856-f5505b9728dd h1:BNJlw5kRTzdmyfh5U8F93HA2OwkP7ZGwA51eJ/0wKOU= -go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg= -golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= -golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto 
v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net 
v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220812174116-3211cb980234 h1:RDqmgfe7SvlMWoqC3xwQ2blLO3fcWcxMa3eBLRdRW7E= -golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys 
v0.0.0-20190302025703-b6889370fb10/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190524122548-abf6ff778158/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190524152521-dbbf3f1254d4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190526052359-791d8a0f4d09/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210317225723-c4fcb01b228e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210412220455-f1c623a9e750/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab h1:2QkjZIsXupsJbJIdSjjUOgWK3aEtzyuh2mPt3l/CkeU= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools 
v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f h1:GGU+dLjvlC3qDwqYgL6UgRmHXhOOgns0bZu2Ty5mm6U= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.45.0 h1:pqMffJFLBVUDIoYsHcqtxgQVTsmxMDpYLOc5MT4Jrww= -google.golang.org/api v0.45.0/go.mod h1:ISLIJCedJolbZvDfAk+Ctuq5hf+aJ33WgtUsfyFoLXA= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod 
h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= -google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod 
h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210413151531-c14fb6ef47c3/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210510173355-fb37daa5cd7a h1:tzkHckzMzgPr8SC4taTC3AldLr4+oJivSoq1xf/nhsc= -google.golang.org/genproto v0.0.0-20210510173355-fb37daa5cd7a/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod 
h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8= -gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= -lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod 
h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
-sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
-sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
-sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
diff --git a/packages/networking/ipfs-cluster/informer/disk/config.go b/packages/networking/ipfs-cluster/informer/disk/config.go
deleted file mode 100644
index 9150869..0000000
--- a/packages/networking/ipfs-cluster/informer/disk/config.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package disk
-
-import (
-	"encoding/json"
-	"errors"
-	"time"
-
-	"github.com/ipfs-cluster/ipfs-cluster/config"
-	"github.com/kelseyhightower/envconfig"
-)
-
-const configKey = "disk"
-const envConfigKey = "cluster_disk"
-
-// Default values for disk Config
-const (
-	DefaultMetricTTL  = 30 * time.Second
-	DefaultMetricType = MetricFreeSpace
-)
-
-// Config is used to initialize an Informer and customize
-// the type and parameters of the metric it produces.
-type Config struct {
-	config.Saver
-
-	MetricTTL  time.Duration
-	MetricType MetricType
-}
-
-type jsonConfig struct {
-	MetricTTL  string `json:"metric_ttl"`
-	MetricType string `json:"metric_type"`
-}
-
-// ConfigKey returns a human-friendly identifier for this type of Metric.
-func (cfg *Config) ConfigKey() string {
-	return configKey
-}
-
-// Default initializes this Config with sensible values.
-func (cfg *Config) Default() error {
-	cfg.MetricTTL = DefaultMetricTTL
-	cfg.MetricType = DefaultMetricType
-	return nil
-}
-
-// ApplyEnvVars fills in any Config fields found
-// as environment variables.
-func (cfg *Config) ApplyEnvVars() error {
-	jcfg := cfg.toJSONConfig()
-
-	err := envconfig.Process(envConfigKey, jcfg)
-	if err != nil {
-		return err
-	}
-
-	return cfg.applyJSONConfig(jcfg)
-}
-
-// Validate checks that the fields of this Config have working values,
-// at least in appearance.
-func (cfg *Config) Validate() error {
-	if cfg.MetricTTL <= 0 {
-		return errors.New("disk.metric_ttl is invalid")
-	}
-
-	if cfg.MetricType.String() == "" {
-		return errors.New("disk.metric_type is invalid")
-	}
-	return nil
-}
-
-// LoadJSON reads the fields of this Config from a JSON byteslice as
-// generated by ToJSON.
-func (cfg *Config) LoadJSON(raw []byte) error {
-	jcfg := &jsonConfig{}
-	err := json.Unmarshal(raw, jcfg)
-	if err != nil {
-		logger.Error("Error unmarshaling disk informer config")
-		return err
-	}
-
-	cfg.Default()
-
-	return cfg.applyJSONConfig(jcfg)
-}
-
-func (cfg *Config) applyJSONConfig(jcfg *jsonConfig) error {
-	t, _ := time.ParseDuration(jcfg.MetricTTL)
-	cfg.MetricTTL = t
-
-	switch jcfg.MetricType {
-	case "reposize":
-		cfg.MetricType = MetricRepoSize
-	case "freespace":
-		cfg.MetricType = MetricFreeSpace
-	default:
-		return errors.New("disk.metric_type is invalid")
-	}
-
-	return cfg.Validate()
-}
-
-// ToJSON generates a JSON-formatted human-friendly representation of this
-// Config.
-func (cfg *Config) ToJSON() (raw []byte, err error) {
-	jcfg := cfg.toJSONConfig()
-
-	raw, err = config.DefaultJSONMarshal(jcfg)
-	return
-}
-
-func (cfg *Config) toJSONConfig() *jsonConfig {
-	return &jsonConfig{
-		MetricTTL:  cfg.MetricTTL.String(),
-		MetricType: cfg.MetricType.String(),
-	}
-}
-
-// ToDisplayJSON returns JSON config as a string.
-func (cfg *Config) ToDisplayJSON() ([]byte, error) {
-	return config.DisplayJSON(cfg.toJSONConfig())
-}
diff --git a/packages/networking/ipfs-cluster/informer/disk/config_test.go b/packages/networking/ipfs-cluster/informer/disk/config_test.go
deleted file mode 100644
index acfc211..0000000
--- a/packages/networking/ipfs-cluster/informer/disk/config_test.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package disk
-
-import (
-	"encoding/json"
-	"os"
-	"testing"
-	"time"
-)
-
-var cfgJSON = []byte(`
-{
-    "metric_ttl": "1s",
-    "metric_type": "freespace"
-}
-`)
-
-func TestLoadJSON(t *testing.T) {
-	cfg := &Config{}
-	err := cfg.LoadJSON(cfgJSON)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	j := &jsonConfig{}
-
-	json.Unmarshal(cfgJSON, j)
-	j.MetricTTL = "-10"
-	tst, _ := json.Marshal(j)
-	err = cfg.LoadJSON(tst)
-	if err == nil {
-		t.Error("expected error decoding metric_ttl")
-	}
-
-	j = &jsonConfig{}
-	json.Unmarshal(cfgJSON, j)
-	j.MetricType = "abc"
-	tst, _ = json.Marshal(j)
-	err = cfg.LoadJSON(tst)
-	if err == nil {
-		t.Error("expected error decoding check_interval")
-	}
-
-	j = &jsonConfig{}
-	json.Unmarshal(cfgJSON, j)
-	j.MetricType = "reposize"
-	tst, _ = json.Marshal(j)
-	err = cfg.LoadJSON(tst)
-	if err != nil {
-		t.Error("reposize should be a valid type")
-	}
-
-}
-
-func TestToJSON(t *testing.T) {
-	cfg := &Config{}
-	cfg.LoadJSON(cfgJSON)
-	newjson, err := cfg.ToJSON()
-	if err != nil {
-		t.Fatal(err)
-	}
-	cfg = &Config{}
-	err = cfg.LoadJSON(newjson)
-	if err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestDefault(t *testing.T) {
-	cfg := &Config{}
-	cfg.Default()
-	if cfg.Validate() != nil {
-		t.Fatal("error validating")
-	}
-
-	cfg.MetricTTL = 0
-	if cfg.Validate() == nil {
-		t.Fatal("expected error validating")
-	}
-
-	cfg.Default()
-	cfg.MetricType = MetricRepoSize
-	if cfg.Validate() != nil {
-		t.Fatal("MetricRepoSize is a valid type")
-	}
-}
-
-func TestApplyEnvVars(t *testing.T) {
-	os.Setenv("CLUSTER_DISK_METRICTTL", "22s")
-	cfg := &Config{}
-	cfg.ApplyEnvVars()
-
-	if cfg.MetricTTL != 22*time.Second {
-		t.Fatal("failed to override metric_ttl with env var")
-	}
-}
diff --git a/packages/networking/ipfs-cluster/informer/disk/disk.go b/packages/networking/ipfs-cluster/informer/disk/disk.go
deleted file mode 100644
index cd941a4..0000000
--- a/packages/networking/ipfs-cluster/informer/disk/disk.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Package disk implements an ipfs-cluster informer which can provide different
-// disk-related metrics from the IPFS daemon as an api.Metric.
-package disk
-
-import (
-	"context"
-	"fmt"
-	"sync"
-
-	"github.com/ipfs-cluster/ipfs-cluster/api"
-	"github.com/ipfs-cluster/ipfs-cluster/observations"
-
-	logging "github.com/ipfs/go-log/v2"
-	rpc "github.com/libp2p/go-libp2p-gorpc"
-
-	"go.opencensus.io/stats"
-	"go.opencensus.io/trace"
-)
-
-// MetricType identifies the type of metric to fetch from the IPFS daemon.
-type MetricType int
-
-const (
-	// MetricFreeSpace provides the available space reported by IPFS
-	MetricFreeSpace MetricType = iota
-	// MetricRepoSize provides the used space reported by IPFS
-	MetricRepoSize
-)
-
-// String returns a string representation for MetricType.
-func (t MetricType) String() string {
-	switch t {
-	case MetricFreeSpace:
-		return "freespace"
-	case MetricRepoSize:
-		return "reposize"
-	}
-	return ""
-}
-
-var logger = logging.Logger("diskinfo")
-
-// Informer is a simple object to implement the ipfscluster.Informer
-// and Component interfaces.
-type Informer struct {
-	config *Config // set when created, readonly
-
-	mu        sync.Mutex // guards access to following fields
-	rpcClient *rpc.Client
-}
-
-// NewInformer returns an initialized informer using the given InformerConfig.
-func NewInformer(cfg *Config) (*Informer, error) {
-	err := cfg.Validate()
-	if err != nil {
-		return nil, err
-	}
-
-	return &Informer{
-		config: cfg,
-	}, nil
-}
-
-// Name returns the name of the metric issued by this informer.
-func (disk *Informer) Name() string {
-	return disk.config.MetricType.String()
-}
-
-// SetClient provides us with an rpc.Client which allows
-// contacting other components in the cluster.
-func (disk *Informer) SetClient(c *rpc.Client) {
-	disk.mu.Lock()
-	defer disk.mu.Unlock()
-	disk.rpcClient = c
-}
-
-// Shutdown is called on cluster shutdown. We just invalidate
-// any metrics from this point.
-func (disk *Informer) Shutdown(ctx context.Context) error {
-	_, span := trace.StartSpan(ctx, "informer/disk/Shutdown")
-	defer span.End()
-
-	disk.mu.Lock()
-	defer disk.mu.Unlock()
-
-	disk.rpcClient = nil
-	return nil
-}
-
-// GetMetrics returns the metric obtained by this Informer. It must always
-// return at least one metric.
-func (disk *Informer) GetMetrics(ctx context.Context) []api.Metric {
-	ctx, span := trace.StartSpan(ctx, "informer/disk/GetMetric")
-	defer span.End()
-
-	disk.mu.Lock()
-	rpcClient := disk.rpcClient
-	disk.mu.Unlock()
-
-	if rpcClient == nil {
-		return []api.Metric{
-			{
-				Name:  disk.Name(),
-				Valid: false,
-			},
-		}
-	}
-
-	var repoStat api.IPFSRepoStat
-	var weight uint64
-	var value string
-
-	valid := true
-
-	err := rpcClient.CallContext(
-		ctx,
-		"",
-		"IPFSConnector",
-		"RepoStat",
-		struct{}{},
-		&repoStat,
-	)
-	if err != nil {
-		logger.Error(err)
-		valid = false
-	} else {
-		switch disk.config.MetricType {
-		case MetricFreeSpace:
-			size := repoStat.RepoSize
-			total := repoStat.StorageMax
-			if size < total {
-				weight = total - size
-			} else {
-				// Make sure we don't underflow and stop
-				// sending this metric when space is exhausted.
-				weight = 0
-				valid = false
-				logger.Warn("reported freespace is 0")
-			}
-			value = fmt.Sprintf("%d", weight)
-		case MetricRepoSize:
-			// smaller repositories have more priority
-			weight = -repoStat.RepoSize
-			value = fmt.Sprintf("%d", repoStat.RepoSize)
-		}
-	}
-
-	m := api.Metric{
-		Name:          disk.Name(),
-		Value:         value,
-		Valid:         valid,
-		Weight:        int64(weight),
-		Partitionable: false,
-	}
-
-	m.SetTTL(disk.config.MetricTTL)
-
-	stats.Record(ctx, observations.InformerDisk.M(m.Weight))
-
-	return []api.Metric{m}
-}
diff --git a/packages/networking/ipfs-cluster/informer/disk/disk_test.go b/packages/networking/ipfs-cluster/informer/disk/disk_test.go
deleted file mode 100644
index a68bc0a..0000000
--- a/packages/networking/ipfs-cluster/informer/disk/disk_test.go
+++ /dev/null
@@ -1,127 +0,0 @@
-package disk
-
-import (
-	"context"
-	"errors"
-	"testing"
-
-	"github.com/ipfs-cluster/ipfs-cluster/api"
-	"github.com/ipfs-cluster/ipfs-cluster/test"
-
-	rpc "github.com/libp2p/go-libp2p-gorpc"
-)
-
-type badRPCService struct {
-}
-
-func badRPCClient(t *testing.T) *rpc.Client {
-	s := rpc.NewServer(nil, "mock")
-	c := rpc.NewClientWithServer(nil, "mock", s)
-	err := s.RegisterName("IPFSConnector", &badRPCService{})
-	if err != nil {
-		t.Fatal(err)
-	}
-	return c
-}
-
-func (mock *badRPCService) RepoStat(ctx context.Context, in struct{}, out *api.IPFSRepoStat) error {
-	return errors.New("fake error")
-}
-
-// Returns the first metric
-func getMetrics(t *testing.T, inf *Informer) api.Metric {
-	t.Helper()
-	metrics := inf.GetMetrics(context.Background())
-	if len(metrics) != 1 {
-		t.Fatal("expected 1 metric")
-	}
-	return metrics[0]
-}
-
-func Test(t *testing.T) {
-	ctx := context.Background()
-	cfg := &Config{}
-	cfg.Default()
-	inf, err := NewInformer(cfg)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer inf.Shutdown(ctx)
-	m := getMetrics(t, inf)
-	if m.Valid {
-		t.Error("metric should be invalid")
-	}
-	inf.SetClient(test.NewMockRPCClient(t))
-	m = getMetrics(t, inf)
-	if !m.Valid {
-		t.Error("metric should be valid")
-	}
-}
-
-func TestFreeSpace(t *testing.T) {
-	ctx := context.Background()
-	cfg := &Config{}
-	cfg.Default()
-	cfg.MetricType = MetricFreeSpace
-
-	inf, err := NewInformer(cfg)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer inf.Shutdown(ctx)
-	m := getMetrics(t, inf)
-	if m.Valid {
-		t.Error("metric should be invalid")
-	}
-	inf.SetClient(test.NewMockRPCClient(t))
-	m = getMetrics(t, inf)
-	if !m.Valid {
-		t.Error("metric should be valid")
-	}
-	// The mock client reports 100KB and 2 pins of 1 KB
-	if m.Value != "98000" {
-		t.Error("bad metric value")
-	}
-}
-
-func TestRepoSize(t *testing.T) {
-	ctx := context.Background()
-	cfg := &Config{}
-	cfg.Default()
-	cfg.MetricType = MetricRepoSize
-
-	inf, err := NewInformer(cfg)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer inf.Shutdown(ctx)
-	m := getMetrics(t, inf)
-	if m.Valid {
-		t.Error("metric should be invalid")
-	}
-	inf.SetClient(test.NewMockRPCClient(t))
-	m = getMetrics(t, inf)
-	if !m.Valid {
-		t.Error("metric should be valid")
-	}
-	// The mock client reports 100KB and 2 pins of 1 KB
-	if m.Value != "2000" {
-		t.Error("bad metric value")
-	}
-}
-
-func TestWithErrors(t *testing.T) {
-	ctx := context.Background()
-	cfg := &Config{}
-	cfg.Default()
-	inf, err := NewInformer(cfg)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer inf.Shutdown(ctx)
-	inf.SetClient(badRPCClient(t))
-	m := getMetrics(t, inf)
-	if m.Valid {
-		t.Errorf("metric should be invalid")
-	}
-}
diff --git a/packages/networking/ipfs-cluster/informer/numpin/config.go
b/packages/networking/ipfs-cluster/informer/numpin/config.go deleted file mode 100644 index cf3b0c3..0000000 --- a/packages/networking/ipfs-cluster/informer/numpin/config.go +++ /dev/null @@ -1,102 +0,0 @@ -package numpin - -import ( - "encoding/json" - "errors" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/config" - "github.com/kelseyhightower/envconfig" -) - -const configKey = "numpin" -const envConfigKey = "cluster_numpin" - -// These are the default values for a Config. -const ( - DefaultMetricTTL = 10 * time.Second -) - -// Config allows to initialize an Informer. -type Config struct { - config.Saver - - MetricTTL time.Duration -} - -type jsonConfig struct { - MetricTTL string `json:"metric_ttl"` -} - -// ConfigKey returns a human-friendly identifier for this -// Config's type. -func (cfg *Config) ConfigKey() string { - return configKey -} - -// Default initializes this Config with sensible values. -func (cfg *Config) Default() error { - cfg.MetricTTL = DefaultMetricTTL - return nil -} - -// ApplyEnvVars fills in any Config fields found -// as environment variables. -func (cfg *Config) ApplyEnvVars() error { - jcfg := cfg.toJSONConfig() - - err := envconfig.Process(envConfigKey, jcfg) - if err != nil { - return err - } - - return cfg.applyJSONConfig(jcfg) -} - -// Validate checks that the fields of this configuration have -// sensible values. -func (cfg *Config) Validate() error { - if cfg.MetricTTL <= 0 { - return errors.New("disk.metric_ttl is invalid") - } - - return nil -} - -// LoadJSON parses a raw JSON byte-slice as generated by ToJSON(). -func (cfg *Config) LoadJSON(raw []byte) error { - jcfg := &jsonConfig{} - err := json.Unmarshal(raw, jcfg) - if err != nil { - return err - } - - cfg.Default() - - return cfg.applyJSONConfig(jcfg) -} - -func (cfg *Config) applyJSONConfig(jcfg *jsonConfig) error { - t, _ := time.ParseDuration(jcfg.MetricTTL) - cfg.MetricTTL = t - - return cfg.Validate() -} - -// ToJSON generates a human-friendly JSON representation of this Config. -func (cfg *Config) ToJSON() ([]byte, error) { - jcfg := cfg.toJSONConfig() - - return config.DefaultJSONMarshal(jcfg) -} - -func (cfg *Config) toJSONConfig() *jsonConfig { - return &jsonConfig{ - MetricTTL: cfg.MetricTTL.String(), - } -} - -// ToDisplayJSON returns JSON config as a string. 
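// Environment overrides, as exercised by config_test.go below: envconfig
// derives variable names from the "cluster_numpin" key, so metric_ttl is
// overridden by CLUSTER_NUMPIN_METRICTTL. A sketch with an illustrative
// value:
//
//	os.Setenv("CLUSTER_NUMPIN_METRICTTL", "45s")
//	cfg := &numpin.Config{}
//	cfg.Default()
//	if err := cfg.ApplyEnvVars(); err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(cfg.MetricTTL) // 45s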
-func (cfg *Config) ToDisplayJSON() ([]byte, error) { - return config.DisplayJSON(cfg.toJSONConfig()) -} diff --git a/packages/networking/ipfs-cluster/informer/numpin/config_test.go b/packages/networking/ipfs-cluster/informer/numpin/config_test.go deleted file mode 100644 index a88f259..0000000 --- a/packages/networking/ipfs-cluster/informer/numpin/config_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package numpin - -import ( - "encoding/json" - "os" - "testing" - "time" -) - -var cfgJSON = []byte(` -{ - "metric_ttl": "1s" -} -`) - -func TestLoadJSON(t *testing.T) { - cfg := &Config{} - err := cfg.LoadJSON(cfgJSON) - if err != nil { - t.Fatal(err) - } - - j := &jsonConfig{} - - json.Unmarshal(cfgJSON, j) - j.MetricTTL = "-10" - tst, _ := json.Marshal(j) - err = cfg.LoadJSON(tst) - if err == nil { - t.Error("expected error decoding metric_ttl") - } -} - -func TestToJSON(t *testing.T) { - cfg := &Config{} - cfg.LoadJSON(cfgJSON) - newjson, err := cfg.ToJSON() - if err != nil { - t.Fatal(err) - } - cfg = &Config{} - err = cfg.LoadJSON(newjson) - if err != nil { - t.Fatal(err) - } -} - -func TestDefault(t *testing.T) { - cfg := &Config{} - cfg.Default() - if cfg.Validate() != nil { - t.Fatal("error validating") - } - - cfg.MetricTTL = 0 - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } - -} - -func TestApplyEnvVars(t *testing.T) { - os.Setenv("CLUSTER_NUMPIN_METRICTTL", "22s") - cfg := &Config{} - cfg.ApplyEnvVars() - - if cfg.MetricTTL != 22*time.Second { - t.Fatal("failed to override metric_ttl with env var") - } -} diff --git a/packages/networking/ipfs-cluster/informer/numpin/numpin.go b/packages/networking/ipfs-cluster/informer/numpin/numpin.go deleted file mode 100644 index a070eb0..0000000 --- a/packages/networking/ipfs-cluster/informer/numpin/numpin.go +++ /dev/null @@ -1,124 +0,0 @@ -// Package numpin implements an ipfs-cluster informer which determines how many -// items this peer is pinning and returns it as api.Metric -package numpin - -import ( - "context" - "fmt" - "sync" - - "github.com/ipfs-cluster/ipfs-cluster/api" - - rpc "github.com/libp2p/go-libp2p-gorpc" - - "go.opencensus.io/trace" -) - -// MetricName specifies the name of our metric -var MetricName = "numpin" - -// Informer is a simple object to implement the ipfscluster.Informer -// and Component interfaces -type Informer struct { - config *Config - - mu sync.Mutex - rpcClient *rpc.Client -} - -// NewInformer returns an initialized Informer. -func NewInformer(cfg *Config) (*Informer, error) { - err := cfg.Validate() - if err != nil { - return nil, err - } - - return &Informer{ - config: cfg, - }, nil -} - -// SetClient provides us with an rpc.Client which allows -// contacting other components in the cluster. -func (npi *Informer) SetClient(c *rpc.Client) { - npi.mu.Lock() - npi.rpcClient = c - npi.mu.Unlock() -} - -// Shutdown is called on cluster shutdown. We just invalidate -// any metrics from this point. -func (npi *Informer) Shutdown(ctx context.Context) error { - _, span := trace.StartSpan(ctx, "informer/numpin/Shutdown") - defer span.End() - - npi.mu.Lock() - npi.rpcClient = nil - npi.mu.Unlock() - return nil -} - -// Name returns the name of this informer -func (npi *Informer) Name() string { - return MetricName -} - -// GetMetrics contacts the IPFSConnector component and requests the `pin ls` -// command. We return the number of pins in IPFS. It must always return at -// least one metric. 
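// A sketch of how a caller consumes this informer, following the pattern in
// numpin_test.go further down; the *rpc.Client is assumed to be wired to an
// RPC server exposing IPFSConnector.PinLs (the helper name is hypothetical):
//
//	func numpinMetric(ctx context.Context, c *rpc.Client) api.Metric {
//		cfg := &numpin.Config{}
//		cfg.Default()
//		inf, err := numpin.NewInformer(cfg)
//		if err != nil {
//			log.Fatal(err)
//		}
//		inf.SetClient(c)
//		// GetMetrics always returns at least one metric; Valid reports
//		// whether the PinLs stream completed without error.
//		return inf.GetMetrics(ctx)[0]
//	}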
-func (npi *Informer) GetMetrics(ctx context.Context) []api.Metric { - ctx, span := trace.StartSpan(ctx, "informer/numpin/GetMetric") - defer span.End() - - npi.mu.Lock() - rpcClient := npi.rpcClient - npi.mu.Unlock() - - if rpcClient == nil { - return []api.Metric{ - { - Valid: false, - }, - } - } - - // make use of the RPC API to obtain information - // about the number of pins in IPFS. See RPCAPI docs. - in := make(chan []string, 1) - in <- []string{"recursive", "direct"} - close(in) - out := make(chan api.IPFSPinInfo, 1024) - - errCh := make(chan error, 1) - go func() { - defer close(errCh) - err := rpcClient.Stream( - ctx, - "", // Local call - "IPFSConnector", // Service name - "PinLs", // Method name - in, - out, - ) - errCh <- err - }() - - n := 0 - for range out { - n++ - } - - err := <-errCh - - valid := err == nil - - m := api.Metric{ - Name: MetricName, - Value: fmt.Sprintf("%d", n), - Valid: valid, - Partitionable: false, - } - - m.SetTTL(npi.config.MetricTTL) - return []api.Metric{m} -} diff --git a/packages/networking/ipfs-cluster/informer/numpin/numpin_test.go b/packages/networking/ipfs-cluster/informer/numpin/numpin_test.go deleted file mode 100644 index eb64341..0000000 --- a/packages/networking/ipfs-cluster/informer/numpin/numpin_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package numpin - -import ( - "context" - "testing" - - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/test" - - rpc "github.com/libp2p/go-libp2p-gorpc" -) - -type mockService struct{} - -func mockRPCClient(t *testing.T) *rpc.Client { - s := rpc.NewServer(nil, "mock") - c := rpc.NewClientWithServer(nil, "mock", s) - err := s.RegisterName("IPFSConnector", &mockService{}) - if err != nil { - t.Fatal(err) - } - return c -} - -func (mock *mockService) PinLs(ctx context.Context, in <-chan []string, out chan<- api.IPFSPinInfo) error { - out <- api.IPFSPinInfo{Cid: api.Cid(test.Cid1), Type: api.IPFSPinStatusRecursive} - out <- api.IPFSPinInfo{Cid: api.Cid(test.Cid2), Type: api.IPFSPinStatusRecursive} - close(out) - return nil -} - -func Test(t *testing.T) { - ctx := context.Background() - cfg := &Config{} - cfg.Default() - inf, err := NewInformer(cfg) - if err != nil { - t.Fatal(err) - } - metrics := inf.GetMetrics(ctx) - if len(metrics) != 1 { - t.Fatal("expected 1 metric") - } - m := metrics[0] - - if m.Valid { - t.Error("metric should be invalid") - } - inf.SetClient(mockRPCClient(t)) - - metrics = inf.GetMetrics(ctx) - if len(metrics) != 1 { - t.Fatal("expected 1 metric") - } - m = metrics[0] - if !m.Valid { - t.Error("metric should be valid") - } - if m.Value != "2" { - t.Error("bad metric value") - } -} diff --git a/packages/networking/ipfs-cluster/informer/pinqueue/config.go b/packages/networking/ipfs-cluster/informer/pinqueue/config.go deleted file mode 100644 index f03c60a..0000000 --- a/packages/networking/ipfs-cluster/informer/pinqueue/config.go +++ /dev/null @@ -1,111 +0,0 @@ -package pinqueue - -import ( - "encoding/json" - "errors" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/config" - "github.com/kelseyhightower/envconfig" -) - -const configKey = "pinqueue" -const envConfigKey = "cluster_pinqueue" - -// These are the default values for a Config. -const ( - DefaultMetricTTL = 30 * time.Second - DefaultWeightBucketSize = 100000 // 100k pins -) - -// Config allows to initialize an Informer. 
-type Config struct { - config.Saver - - MetricTTL time.Duration - WeightBucketSize int -} - -type jsonConfig struct { - MetricTTL string `json:"metric_ttl"` - WeightBucketSize int `json:"weight_bucket_size"` -} - -// ConfigKey returns a human-friendly identifier for this -// Config's type. -func (cfg *Config) ConfigKey() string { - return configKey -} - -// Default initializes this Config with sensible values. -func (cfg *Config) Default() error { - cfg.MetricTTL = DefaultMetricTTL - cfg.WeightBucketSize = DefaultWeightBucketSize - return nil -} - -// ApplyEnvVars fills in any Config fields found -// as environment variables. -func (cfg *Config) ApplyEnvVars() error { - jcfg := cfg.toJSONConfig() - - err := envconfig.Process(envConfigKey, jcfg) - if err != nil { - return err - } - - return cfg.applyJSONConfig(jcfg) -} - -// Validate checks that the fields of this configuration have -// sensible values. -func (cfg *Config) Validate() error { - if cfg.MetricTTL <= 0 { - return errors.New("pinqueue.metric_ttl is invalid") - } - if cfg.WeightBucketSize < 0 { - return errors.New("pinqueue.WeightBucketSize is invalid") - } - - return nil -} - -// LoadJSON parses a raw JSON byte-slice as generated by ToJSON(). -func (cfg *Config) LoadJSON(raw []byte) error { - jcfg := &jsonConfig{} - err := json.Unmarshal(raw, jcfg) - if err != nil { - return err - } - - cfg.Default() - - return cfg.applyJSONConfig(jcfg) -} - -func (cfg *Config) applyJSONConfig(jcfg *jsonConfig) error { - t, _ := time.ParseDuration(jcfg.MetricTTL) - cfg.MetricTTL = t - cfg.WeightBucketSize = jcfg.WeightBucketSize - - return cfg.Validate() -} - -// ToJSON generates a human-friendly JSON representation of this Config. -func (cfg *Config) ToJSON() ([]byte, error) { - jcfg := cfg.toJSONConfig() - - return config.DefaultJSONMarshal(jcfg) -} - -func (cfg *Config) toJSONConfig() *jsonConfig { - return &jsonConfig{ - MetricTTL: cfg.MetricTTL.String(), - WeightBucketSize: cfg.WeightBucketSize, - } -} - -// ToDisplayJSON returns JSON config as a string. 
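// The JSON shape accepted by LoadJSON above, using the "metric_ttl" and
// "weight_bucket_size" keys from the jsonConfig struct tags (values are
// illustrative; 100000 is the default bucket size defined earlier):
//
//	cfg := &pinqueue.Config{}
//	raw := []byte(`{"metric_ttl": "30s", "weight_bucket_size": 100000}`)
//	if err := cfg.LoadJSON(raw); err != nil {
//		log.Fatal(err)
//	}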
-func (cfg *Config) ToDisplayJSON() ([]byte, error) { - return config.DisplayJSON(cfg.toJSONConfig()) -} diff --git a/packages/networking/ipfs-cluster/informer/pinqueue/config_test.go b/packages/networking/ipfs-cluster/informer/pinqueue/config_test.go deleted file mode 100644 index b72d747..0000000 --- a/packages/networking/ipfs-cluster/informer/pinqueue/config_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package pinqueue - -import ( - "encoding/json" - "os" - "testing" - "time" -) - -var cfgJSON = []byte(` -{ - "metric_ttl": "1s" -} -`) - -func TestLoadJSON(t *testing.T) { - cfg := &Config{} - err := cfg.LoadJSON(cfgJSON) - if err != nil { - t.Fatal(err) - } - - j := &jsonConfig{} - - json.Unmarshal(cfgJSON, j) - j.MetricTTL = "-10" - tst, _ := json.Marshal(j) - err = cfg.LoadJSON(tst) - if err == nil { - t.Error("expected error decoding metric_ttl") - } -} - -func TestToJSON(t *testing.T) { - cfg := &Config{} - cfg.LoadJSON(cfgJSON) - newjson, err := cfg.ToJSON() - if err != nil { - t.Fatal(err) - } - cfg = &Config{} - err = cfg.LoadJSON(newjson) - if err != nil { - t.Fatal(err) - } -} - -func TestDefault(t *testing.T) { - cfg := &Config{} - cfg.Default() - if cfg.Validate() != nil { - t.Fatal("error validating") - } - - cfg.MetricTTL = 0 - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } - - cfg.Default() - cfg.WeightBucketSize = -2 - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } - -} - -func TestApplyEnvVars(t *testing.T) { - os.Setenv("CLUSTER_PINQUEUE_METRICTTL", "22s") - cfg := &Config{} - cfg.ApplyEnvVars() - - if cfg.MetricTTL != 22*time.Second { - t.Fatal("failed to override metric_ttl with env var") - } -} diff --git a/packages/networking/ipfs-cluster/informer/pinqueue/pinqueue.go b/packages/networking/ipfs-cluster/informer/pinqueue/pinqueue.go deleted file mode 100644 index e5142ec..0000000 --- a/packages/networking/ipfs-cluster/informer/pinqueue/pinqueue.go +++ /dev/null @@ -1,110 +0,0 @@ -// Package pinqueue implements an ipfs-cluster informer which issues the -// current size of the pinning queue. -package pinqueue - -import ( - "context" - "fmt" - "sync" - - "github.com/ipfs-cluster/ipfs-cluster/api" - - rpc "github.com/libp2p/go-libp2p-gorpc" - - "go.opencensus.io/trace" -) - -// MetricName specifies the name of our metric -var MetricName = "pinqueue" - -// Informer is a simple object to implement the ipfscluster.Informer -// and Component interfaces -type Informer struct { - config *Config - - mu sync.Mutex - rpcClient *rpc.Client -} - -// New returns an initialized Informer. -func New(cfg *Config) (*Informer, error) { - err := cfg.Validate() - if err != nil { - return nil, err - } - - return &Informer{ - config: cfg, - }, nil -} - -// SetClient provides us with an rpc.Client which allows -// contacting other components in the cluster. -func (inf *Informer) SetClient(c *rpc.Client) { - inf.mu.Lock() - inf.rpcClient = c - inf.mu.Unlock() -} - -// Shutdown is called on cluster shutdown. We just invalidate -// any metrics from this point. -func (inf *Informer) Shutdown(ctx context.Context) error { - _, span := trace.StartSpan(ctx, "informer/numpin/Shutdown") - defer span.End() - - inf.mu.Lock() - inf.rpcClient = nil - inf.mu.Unlock() - return nil -} - -// Name returns the name of this informer -func (inf *Informer) Name() string { - return MetricName -} - -// GetMetrics contacts the Pintracker component and requests the number of -// queued items for pinning. 
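// A worked example of the weight computation in the function below, matching
// the expectations in pinqueue_test.go: with 42 queued pins and a
// WeightBucketSize of 5, peers are ranked by bucket rather than by exact
// queue length:
//
//	queued := int64(42)
//	weight := -queued // smaller queues sort first
//	if div := 5; div > 0 {
//		weight = weight / int64(div)
//	}
//	fmt.Println(weight) // -8 (Go integer division truncates toward zero)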
-func (inf *Informer) GetMetrics(ctx context.Context) []api.Metric { - ctx, span := trace.StartSpan(ctx, "informer/pinqueue/GetMetric") - defer span.End() - - inf.mu.Lock() - rpcClient := inf.rpcClient - inf.mu.Unlock() - - if rpcClient == nil { - return []api.Metric{ - { - Valid: false, - }, - } - } - - var queued int64 - - err := rpcClient.CallContext( - ctx, - "", - "PinTracker", - "PinQueueSize", - struct{}{}, - &queued, - ) - valid := err == nil - weight := -queued // smaller pin queues have more priority - if div := inf.config.WeightBucketSize; div > 0 { - weight = weight / int64(div) - } - - m := api.Metric{ - Name: MetricName, - Value: fmt.Sprintf("%d", queued), - Valid: valid, - Partitionable: false, - Weight: weight, - } - - m.SetTTL(inf.config.MetricTTL) - return []api.Metric{m} -} diff --git a/packages/networking/ipfs-cluster/informer/pinqueue/pinqueue_test.go b/packages/networking/ipfs-cluster/informer/pinqueue/pinqueue_test.go deleted file mode 100644 index 697517e..0000000 --- a/packages/networking/ipfs-cluster/informer/pinqueue/pinqueue_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package pinqueue - -import ( - "context" - "testing" - - rpc "github.com/libp2p/go-libp2p-gorpc" -) - -type mockService struct{} - -func (mock *mockService) PinQueueSize(ctx context.Context, in struct{}, out *int64) error { - *out = 42 - return nil -} - -func mockRPCClient(t *testing.T) *rpc.Client { - s := rpc.NewServer(nil, "mock") - c := rpc.NewClientWithServer(nil, "mock", s) - err := s.RegisterName("PinTracker", &mockService{}) - if err != nil { - t.Fatal(err) - } - return c -} - -func Test(t *testing.T) { - ctx := context.Background() - cfg := &Config{} - cfg.Default() - cfg.WeightBucketSize = 0 - inf, err := New(cfg) - if err != nil { - t.Fatal(err) - } - metrics := inf.GetMetrics(ctx) - if len(metrics) != 1 { - t.Fatal("expected 1 metric") - } - m := metrics[0] - - if m.Valid { - t.Error("metric should be invalid") - } - inf.SetClient(mockRPCClient(t)) - - metrics = inf.GetMetrics(ctx) - if len(metrics) != 1 { - t.Fatal("expected 1 metric") - } - m = metrics[0] - if !m.Valid { - t.Error("metric should be valid") - } - if m.Value != "42" { - t.Error("bad metric value", m.Value) - } - if m.Partitionable { - t.Error("should not be a partitionable metric") - } - if m.Weight != -42 { - t.Error("weight should be -42") - } - - cfg.WeightBucketSize = 5 - inf, err = New(cfg) - if err != nil { - t.Fatal(err) - } - inf.SetClient(mockRPCClient(t)) - metrics = inf.GetMetrics(ctx) - if len(metrics) != 1 { - t.Fatal("expected 1 metric") - } - m = metrics[0] - if m.Weight != -8 { - t.Error("weight should be -8, not", m.Weight) - } -} diff --git a/packages/networking/ipfs-cluster/informer/tags/config.go b/packages/networking/ipfs-cluster/informer/tags/config.go deleted file mode 100644 index 281c241..0000000 --- a/packages/networking/ipfs-cluster/informer/tags/config.go +++ /dev/null @@ -1,124 +0,0 @@ -package tags - -import ( - "encoding/json" - "errors" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/config" - "github.com/kelseyhightower/envconfig" -) - -const configKey = "tags" -const envConfigKey = "cluster_tags" - -// Default values for tags Config -const ( - DefaultMetricTTL = 30 * time.Second -) - -// Default values for tags config -var ( - DefaultTags = map[string]string{ - "group": "default", - } -) - -// Config is used to initialize an Informer and customize -// the type and parameters of the metric it produces. 
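// The corresponding JSON shape, per the jsonConfig struct tags below and the
// cfgJSON fixture in config_test.go; tag names and values are free-form
// strings (the "ssd" entry here is illustrative), and "group": "default" is
// the default defined above:
//
//	{
//		"metric_ttl": "30s",
//		"tags": { "group": "default", "ssd": "true" }
//	}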
-type Config struct { - config.Saver - - MetricTTL time.Duration - Tags map[string]string -} - -type jsonConfig struct { - MetricTTL string `json:"metric_ttl"` - Tags map[string]string `json:"tags"` -} - -// ConfigKey returns a human-friendly identifier for this type of Metric. -func (cfg *Config) ConfigKey() string { - return configKey -} - -// Default initializes this Config with sensible values. -func (cfg *Config) Default() error { - cfg.MetricTTL = DefaultMetricTTL - cfg.Tags = DefaultTags - return nil -} - -// ApplyEnvVars fills in any Config fields found -// as environment variables. -func (cfg *Config) ApplyEnvVars() error { - jcfg := cfg.toJSONConfig() - - err := envconfig.Process(envConfigKey, jcfg) - if err != nil { - return err - } - - return cfg.applyJSONConfig(jcfg) -} - -// Validate checks that the fields of this Config have working values, -// at least in appearance. -func (cfg *Config) Validate() error { - if cfg.MetricTTL <= 0 { - return errors.New("tags.metric_ttl is invalid") - } - - return nil -} - -// LoadJSON reads the fields of this Config from a JSON byteslice as -// generated by ToJSON. -func (cfg *Config) LoadJSON(raw []byte) error { - jcfg := &jsonConfig{} - err := json.Unmarshal(raw, jcfg) - if err != nil { - logger.Error("Error unmarshaling disk informer config") - return err - } - - cfg.Default() - - return cfg.applyJSONConfig(jcfg) -} - -func (cfg *Config) applyJSONConfig(jcfg *jsonConfig) error { - err := config.ParseDurations( - cfg.ConfigKey(), - &config.DurationOpt{Duration: jcfg.MetricTTL, Dst: &cfg.MetricTTL, Name: "metric_ttl"}, - ) - if err != nil { - return err - } - - cfg.Tags = jcfg.Tags - - return cfg.Validate() -} - -// ToJSON generates a JSON-formatted human-friendly representation of this -// Config. -func (cfg *Config) ToJSON() (raw []byte, err error) { - jcfg := cfg.toJSONConfig() - - raw, err = config.DefaultJSONMarshal(jcfg) - return -} - -func (cfg *Config) toJSONConfig() *jsonConfig { - return &jsonConfig{ - MetricTTL: cfg.MetricTTL.String(), - Tags: cfg.Tags, - } -} - -// ToDisplayJSON returns JSON config as a string. 
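// Environment overrides, as exercised in config_test.go below: envconfig
// maps the "cluster_tags" key to CLUSTER_TAGS_* variables and parses maps
// from comma-separated colon pairs. A sketch with illustrative values:
//
//	os.Setenv("CLUSTER_TAGS_METRICTTL", "22s")
//	os.Setenv("CLUSTER_TAGS_TAGS", "region:us,ssd:true")
//	cfg := &tags.Config{}
//	cfg.Default()
//	if err := cfg.ApplyEnvVars(); err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(cfg.Tags["region"]) // "us"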
-func (cfg *Config) ToDisplayJSON() ([]byte, error) { - return config.DisplayJSON(cfg.toJSONConfig()) -} diff --git a/packages/networking/ipfs-cluster/informer/tags/config_test.go b/packages/networking/ipfs-cluster/informer/tags/config_test.go deleted file mode 100644 index 70e96bc..0000000 --- a/packages/networking/ipfs-cluster/informer/tags/config_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package tags - -import ( - "encoding/json" - "os" - "testing" - "time" -) - -var cfgJSON = []byte(` -{ - "metric_ttl": "1s", - "tags": { "a": "b" } -} -`) - -func TestLoadJSON(t *testing.T) { - cfg := &Config{} - err := cfg.LoadJSON(cfgJSON) - if err != nil { - t.Fatal(err) - } - - if cfg.Tags["a"] != "b" { - t.Fatal("tags not parsed") - } - - j := &jsonConfig{} - json.Unmarshal(cfgJSON, j) - j.MetricTTL = "-10" - tst, _ := json.Marshal(j) - err = cfg.LoadJSON(tst) - if err == nil { - t.Error("expected error decoding metric_ttl") - } -} - -func TestToJSON(t *testing.T) { - cfg := &Config{} - cfg.LoadJSON(cfgJSON) - newjson, err := cfg.ToJSON() - if err != nil { - t.Fatal(err) - } - cfg = &Config{} - err = cfg.LoadJSON(newjson) - if err != nil { - t.Fatal(err) - } -} - -func TestDefault(t *testing.T) { - cfg := &Config{} - cfg.Default() - if cfg.Validate() != nil { - t.Fatal("error validating") - } - - cfg.MetricTTL = 0 - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } - - cfg.Default() - if cfg.Tags["group"] != "default" { - t.Fatal("Tags default not set") - } -} - -func TestApplyEnvVars(t *testing.T) { - os.Setenv("CLUSTER_TAGS_METRICTTL", "22s") - cfg := &Config{} - cfg.ApplyEnvVars() - - if cfg.MetricTTL != 22*time.Second { - t.Fatal("failed to override metric_ttl with env var") - } - - os.Setenv("CLUSTER_TAGS_TAGS", "z:q,y:w") - cfg = &Config{} - cfg.ApplyEnvVars() - - if cfg.Tags["z"] != "q" || cfg.Tags["y"] != "w" { - t.Fatal("could not override tags with env vars") - } -} diff --git a/packages/networking/ipfs-cluster/informer/tags/tags.go b/packages/networking/ipfs-cluster/informer/tags/tags.go deleted file mode 100644 index 45b5c81..0000000 --- a/packages/networking/ipfs-cluster/informer/tags/tags.go +++ /dev/null @@ -1,98 +0,0 @@ -// Package tags implements an ipfs-cluster informer publishes user-defined -// tags as metrics. -package tags - -import ( - "context" - "sync" - - "github.com/ipfs-cluster/ipfs-cluster/api" - - logging "github.com/ipfs/go-log/v2" - rpc "github.com/libp2p/go-libp2p-gorpc" -) - -var logger = logging.Logger("tags") - -// MetricName specifies the name of our metric -var MetricName = "tags" - -// Informer is a simple object to implement the ipfscluster.Informer -// and Component interfaces. -type Informer struct { - config *Config // set when created, readonly - - mu sync.Mutex // guards access to following fields - rpcClient *rpc.Client -} - -// New returns an initialized informer using the given InformerConfig. -func New(cfg *Config) (*Informer, error) { - err := cfg.Validate() - if err != nil { - return nil, err - } - - return &Informer{ - config: cfg, - }, nil -} - -// Name returns the name of this informer. Note the informer issues metrics -// with custom names. -func (tags *Informer) Name() string { - return MetricName -} - -// SetClient provides us with an rpc.Client which allows -// contacting other components in the cluster. -func (tags *Informer) SetClient(c *rpc.Client) { - tags.mu.Lock() - defer tags.mu.Unlock() - tags.rpcClient = c -} - -// Shutdown is called on cluster shutdown. We just invalidate -// any metrics from this point. 
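// Each configured tag becomes one partitionable metric named "tag:<name>",
// as implemented in GetMetrics below; a sketch using only APIs from this
// diff:
//
//	cfg := &tags.Config{}
//	cfg.Default() // Tags = {"group": "default"}
//	inf, err := tags.New(cfg)
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, m := range inf.GetMetrics(context.Background()) {
//		fmt.Println(m.Name, m.Value, m.Partitionable) // tag:group default true
//	}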
-func (tags *Informer) Shutdown(ctx context.Context) error { - tags.mu.Lock() - defer tags.mu.Unlock() - - tags.rpcClient = nil - return nil -} - -// GetMetrics returns one metric for each tag defined in the configuration. -// The metric name is set as "tags:<tag_name>". When no tags are defined, -// a single invalid metric is returned. -func (tags *Informer) GetMetrics(ctx context.Context) []api.Metric { - // Note we could potentially extend the tag:value syntax to include manual weights - // ie: { "region": "us:100", ... } - // This would potentially allow always giving priority to peers of a certain group - - if len(tags.config.Tags) == 0 { - logger.Debug("no tags defined in tags informer") - m := api.Metric{ - Name: "tag:none", - Value: "", - Valid: false, - Partitionable: true, - } - m.SetTTL(tags.config.MetricTTL) - return []api.Metric{m} - } - - metrics := make([]api.Metric, 0, len(tags.config.Tags)) - for n, v := range tags.config.Tags { - m := api.Metric{ - Name: "tag:" + n, - Value: v, - Valid: true, - Partitionable: true, - } - m.SetTTL(tags.config.MetricTTL) - metrics = append(metrics, m) - } - - return metrics -} diff --git a/packages/networking/ipfs-cluster/informer/tags/tags_test.go b/packages/networking/ipfs-cluster/informer/tags/tags_test.go deleted file mode 100644 index 89d39de..0000000 --- a/packages/networking/ipfs-cluster/informer/tags/tags_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package tags - -import ( - "context" - "testing" -) - -func Test(t *testing.T) { - ctx := context.Background() - cfg := &Config{} - cfg.Default() - inf, err := New(cfg) - if err != nil { - t.Fatal(err) - } - defer inf.Shutdown(ctx) - m := inf.GetMetrics(ctx) - if len(m) != 1 || !m[0].Valid { - t.Error("metric should be valid") - } - - inf.config.Tags["x"] = "y" - m = inf.GetMetrics(ctx) - if len(m) != 2 { - t.Error("there should be 2 metrics") - } -} diff --git a/packages/networking/ipfs-cluster/ipfscluster.go b/packages/networking/ipfs-cluster/ipfscluster.go deleted file mode 100644 index 361fc90..0000000 --- a/packages/networking/ipfs-cluster/ipfscluster.go +++ /dev/null @@ -1,197 +0,0 @@ -// Package ipfscluster implements a wrapper for the IPFS daemon which -// allows orchestrating pinning operations among several IPFS nodes. -// -// IPFS Cluster peers form a separate libp2p swarm. A Cluster peer uses -// multiple Cluster Components which perform different tasks like managing -// the underlying IPFS daemons, or providing APIs for external control. -package ipfscluster - -import ( - "context" - - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/state" - - peer "github.com/libp2p/go-libp2p/core/peer" - rpc "github.com/libp2p/go-libp2p-gorpc" -) - -// Component represents a piece of ipfscluster. Cluster components -// usually run their own goroutines (an http server, for example). They -// communicate with the main Cluster component and other components -// (both local and remote), using an instance of rpc.Client. -type Component interface { - SetClient(*rpc.Client) - Shutdown(context.Context) error -} - -// Consensus is a component which keeps a shared state in -// IPFS Cluster and triggers actions on updates to that state. -// Currently, Consensus needs to be able to elect/provide a -// Cluster Leader and the implementation is tightly coupled to -// the Cluster main component. -type Consensus interface { - Component - // Returns a channel to signal that the consensus layer is ready - // allowing the main component to wait for it during start.
- Ready(context.Context) <-chan struct{} - // Logs a pin operation. - LogPin(context.Context, api.Pin) error - // Logs an unpin operation. - LogUnpin(context.Context, api.Pin) error - AddPeer(context.Context, peer.ID) error - RmPeer(context.Context, peer.ID) error - State(context.Context) (state.ReadOnly, error) - // Provides a node which is responsible for performing - // specific tasks which must only run in one cluster peer. - Leader(context.Context) (peer.ID, error) - // Only returns when the consensus state has all log - // updates applied to it. - WaitForSync(context.Context) error - // Clean removes all consensus data. - Clean(context.Context) error - // Peers returns the peerset participating in the Consensus. - Peers(context.Context) ([]peer.ID, error) - // IsTrustedPeer returns true if the given peer is "trusted". - // This will grant access to more rpc endpoints than a - // non-trusted one. This should be fast as it will be - // called repeatedly for every remote RPC request. - IsTrustedPeer(context.Context, peer.ID) bool - // Trust marks a peer as "trusted". - Trust(context.Context, peer.ID) error - // Distrust removes a peer from the "trusted" set. - Distrust(context.Context, peer.ID) error -} - -// API is a component which offers an API for Cluster. This is -// a base component. -type API interface { - Component -} - -// IPFSConnector is a component which allows cluster to interact with -// an IPFS daemon. This is a base component. -type IPFSConnector interface { - Component - ID(context.Context) (api.IPFSID, error) - Pin(context.Context, api.Pin) error - Unpin(context.Context, api.Cid) error - PinLsCid(context.Context, api.Pin) (api.IPFSPinStatus, error) - // PinLs returns pins in the pinset of the given types (recursive, direct...) - PinLs(ctx context.Context, typeFilters []string, out chan<- api.IPFSPinInfo) error - // ConnectSwarms makes sure this peer's IPFS daemon is connected to - // other peers' IPFS daemons. - ConnectSwarms(context.Context) error - // SwarmPeers returns the IPFS daemon's swarm peers. - SwarmPeers(context.Context) ([]peer.ID, error) - // ConfigKey returns the value for a configuration key. - // Subobjects are reached with keypaths as "Parent/Child/GrandChild...". - ConfigKey(keypath string) (interface{}, error) - // RepoStat returns the current repository size and max limit as - // provided by "repo stat". - RepoStat(context.Context) (api.IPFSRepoStat, error) - // RepoGC performs a garbage collection sweep on the IPFS repo. - RepoGC(context.Context) (api.RepoGC, error) - // Resolve returns a cid given a path. - Resolve(context.Context, string) (api.Cid, error) - // BlockStream adds a stream of blocks to IPFS. - BlockStream(context.Context, <-chan api.NodeWithMeta) error - // BlockGet retrieves the raw data of an IPFS block. - BlockGet(context.Context, api.Cid) ([]byte, error) -} - -// Peered represents a component which needs to be aware of the peers -// in the Cluster and of any changes to the peer set. -type Peered interface { - AddPeer(ctx context.Context, p peer.ID) - RmPeer(ctx context.Context, p peer.ID) - //SetPeers(peers []peer.ID) -} - -// PinTracker represents a component which tracks the status of -// the pins in this cluster and ensures they are in sync with the -// IPFS daemon. This component should be thread safe. -type PinTracker interface { - Component - // Track tells the tracker that a Cid is now under its supervision. - // The tracker may decide to perform an IPFS pin.
- Track(context.Context, api.Pin) error - // Untrack tells the tracker that a Cid is to be forgotten. The tracker - // may perform an IPFS unpin operation. - Untrack(context.Context, api.Cid) error - // StatusAll returns the list of pins with their local status. Takes a - // filter to specify which statuses to report. - StatusAll(context.Context, api.TrackerStatus, chan<- api.PinInfo) error - // Status returns the local status of a given Cid. - Status(context.Context, api.Cid) api.PinInfo - // RecoverAll calls Recover() for all pins tracked. - RecoverAll(context.Context, chan<- api.PinInfo) error - // Recover retriggers a Pin/Unpin operation on Cids with error status. - Recover(context.Context, api.Cid) (api.PinInfo, error) - // PinQueueSize returns the current size of the pinning queue. - PinQueueSize(context.Context) (int64, error) -} - -// Informer provides Metric information from a peer. The metrics produced by -// informers are then passed to a PinAllocator which will use them to -// determine where to pin content. The metric is agnostic to the rest of -// Cluster. -type Informer interface { - Component - Name() string - // GetMetrics returns the metrics obtained by this Informer. It must - // always return at least one metric. - GetMetrics(context.Context) []api.Metric -} - -// PinAllocator decides where to pin certain content. In order to make such -// a decision, it receives the pin arguments, the peers which are currently -// allocated to the content and metrics available for all peers which could -// allocate the content. -type PinAllocator interface { - Component - // Allocate returns the list of peers that should be assigned to - // Pin content in order of preference (from the most preferred to the - // least). The "current" map contains valid metrics for peers - // which are currently pinning the content. The candidates map - // contains the metrics for all peers which are eligible for pinning - // the content. - Allocate(ctx context.Context, c api.Cid, current, candidates, priority api.MetricsSet) ([]peer.ID, error) - // Metrics returns the list of metrics that the allocator needs. - Metrics() []string -} - -// PeerMonitor is a component in charge of publishing a peer's metrics and -// reading metrics from other peers in the cluster. The PinAllocator will -// use the metrics provided by the monitor as candidates for Pin allocations. -// -// The PeerMonitor component also provides an Alert channel which is signaled -// when a metric is no longer received and the monitor identifies it -// as a problem. -type PeerMonitor interface { - Component - // LogMetric stores a metric. It can be used to manually inject - // a metric to a monitor. - LogMetric(context.Context, api.Metric) error - // PublishMetric sends a metric to the rest of the peers. - // How to send it, and to whom, is to be decided by the implementation. - PublishMetric(context.Context, api.Metric) error - // LatestMetrics returns a map with the latest valid metrics of matching - // name for the current cluster peers. The result should only contain - // one metric per peer at most. - LatestMetrics(ctx context.Context, name string) []api.Metric - // Returns the latest metric received from a peer. It may be expired. - LatestForPeer(ctx context.Context, name string, pid peer.ID) api.Metric - // MetricNames returns a list of metric names. - MetricNames(ctx context.Context) []string - // Alerts delivers alerts generated when this peer monitor detects - // a problem (i.e. metrics not arriving as expected).
Alerts can be used - // to trigger self-healing measures or re-pinnings of content. - Alerts() <-chan api.Alert -} - -// Tracer implements Component as a way -// to shutdown and flush and remaining traces. -type Tracer interface { - Component -} diff --git a/packages/networking/ipfs-cluster/ipfscluster_test.go b/packages/networking/ipfs-cluster/ipfscluster_test.go deleted file mode 100644 index a6fb64a..0000000 --- a/packages/networking/ipfs-cluster/ipfscluster_test.go +++ /dev/null @@ -1,2245 +0,0 @@ -package ipfscluster - -import ( - "context" - "errors" - "flag" - "fmt" - "math/rand" - "mime/multipart" - "os" - "path/filepath" - "sort" - "strings" - "sync" - "testing" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/allocator/balanced" - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/api/rest" - "github.com/ipfs-cluster/ipfs-cluster/consensus/crdt" - "github.com/ipfs-cluster/ipfs-cluster/consensus/raft" - "github.com/ipfs-cluster/ipfs-cluster/datastore/badger" - "github.com/ipfs-cluster/ipfs-cluster/datastore/inmem" - "github.com/ipfs-cluster/ipfs-cluster/datastore/leveldb" - "github.com/ipfs-cluster/ipfs-cluster/informer/disk" - "github.com/ipfs-cluster/ipfs-cluster/ipfsconn/ipfshttp" - "github.com/ipfs-cluster/ipfs-cluster/monitor/pubsubmon" - "github.com/ipfs-cluster/ipfs-cluster/observations" - "github.com/ipfs-cluster/ipfs-cluster/pintracker/stateless" - "github.com/ipfs-cluster/ipfs-cluster/state" - "github.com/ipfs-cluster/ipfs-cluster/test" - "github.com/ipfs-cluster/ipfs-cluster/version" - - ds "github.com/ipfs/go-datastore" - libp2p "github.com/libp2p/go-libp2p" - dht "github.com/libp2p/go-libp2p-kad-dht" - dual "github.com/libp2p/go-libp2p-kad-dht/dual" - pubsub "github.com/libp2p/go-libp2p-pubsub" - crypto "github.com/libp2p/go-libp2p/core/crypto" - host "github.com/libp2p/go-libp2p/core/host" - peer "github.com/libp2p/go-libp2p/core/peer" - peerstore "github.com/libp2p/go-libp2p/core/peerstore" - routedhost "github.com/libp2p/go-libp2p/p2p/host/routed" - ma "github.com/multiformats/go-multiaddr" -) - -var ( - // number of clusters to create - nClusters = 5 - - // number of pins to pin/unpin/check - nPins = 100 - - logLevel = "FATAL" - customLogLvlFacilities = logFacilities{} - - consensus = "crdt" - datastore = "badger" - - ttlDelayTime = 2 * time.Second // set on Main to diskInf.MetricTTL - testsFolder = "clusterTestsFolder" - - // When testing with fixed ports... - // clusterPort = 10000 - // apiPort = 10100 - // ipfsProxyPort = 10200 -) - -type logFacilities []string - -// String is the method to format the flag's value, part of the flag.Value interface. -func (lg *logFacilities) String() string { - return fmt.Sprint(*lg) -} - -// Set is the method to set the flag value, part of the flag.Value interface. -func (lg *logFacilities) Set(value string) error { - if len(*lg) > 0 { - return errors.New("logFacilities flag already set") - } - for _, lf := range strings.Split(value, ",") { - *lg = append(*lg, lf) - } - return nil -} - -// TestMain runs test initialization. Since Go1.13 we cannot run this on init() -// as flag.Parse() does not work well there -// (see https://golang.org/src/testing/testing.go#L211) -func TestMain(m *testing.M) { - rand.Seed(time.Now().UnixNano()) - ReadyTimeout = 11 * time.Second - - // GossipSub needs to heartbeat to discover newly connected hosts - // This speeds things up a little. 
- pubsub.GossipSubHeartbeatInterval = 50 * time.Millisecond - - flag.Var(&customLogLvlFacilities, "logfacs", "use -logLevel for only the following log facilities; comma-separated") - flag.StringVar(&logLevel, "loglevel", logLevel, "default log level for tests") - flag.IntVar(&nClusters, "nclusters", nClusters, "number of clusters to use") - flag.IntVar(&nPins, "npins", nPins, "number of pins to pin/unpin/check") - flag.StringVar(&consensus, "consensus", consensus, "consensus implementation") - flag.StringVar(&datastore, "datastore", datastore, "datastore backend") - flag.Parse() - - if len(customLogLvlFacilities) <= 0 { - for f := range LoggingFacilities { - SetFacilityLogLevel(f, logLevel) - } - - for f := range LoggingFacilitiesExtra { - SetFacilityLogLevel(f, logLevel) - } - } - - for _, f := range customLogLvlFacilities { - if _, ok := LoggingFacilities[f]; ok { - SetFacilityLogLevel(f, logLevel) - continue - } - if _, ok := LoggingFacilitiesExtra[f]; ok { - SetFacilityLogLevel(f, logLevel) - continue - } - } - - diskInfCfg := &disk.Config{} - diskInfCfg.LoadJSON(testingDiskInfCfg) - ttlDelayTime = diskInfCfg.MetricTTL * 2 - - os.Exit(m.Run()) -} - -func randomBytes() []byte { - bs := make([]byte, 64) - for i := 0; i < len(bs); i++ { - b := byte(rand.Int()) - bs[i] = b - } - return bs -} - -func createComponents( - t *testing.T, - host host.Host, - pubsub *pubsub.PubSub, - dht *dual.DHT, - i int, - staging bool, -) ( - *Config, - ds.Datastore, - Consensus, - []API, - IPFSConnector, - PinTracker, - PeerMonitor, - PinAllocator, - Informer, - Tracer, - *test.IpfsMock, -) { - ctx := context.Background() - mock := test.NewIpfsMock(t) - - //apiAddr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", apiPort+i)) - // Bind on port 0 - apiAddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0") - // Bind on Port 0 - // proxyAddr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", ipfsProxyPort+i)) - proxyAddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0") - nodeAddr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", mock.Addr, mock.Port)) - - peername := fmt.Sprintf("peer_%d", i) - - ident, clusterCfg, apiCfg, ipfsproxyCfg, ipfshttpCfg, badgerCfg, levelDBCfg, raftCfg, crdtCfg, statelesstrackerCfg, psmonCfg, allocBalancedCfg, diskInfCfg, tracingCfg := testingConfigs() - - ident.ID = host.ID() - ident.PrivateKey = host.Peerstore().PrivKey(host.ID()) - clusterCfg.Peername = peername - clusterCfg.LeaveOnShutdown = false - clusterCfg.SetBaseDir(filepath.Join(testsFolder, host.ID().Pretty())) - - apiCfg.HTTPListenAddr = []ma.Multiaddr{apiAddr} - - ipfsproxyCfg.ListenAddr = []ma.Multiaddr{proxyAddr} - ipfsproxyCfg.NodeAddr = nodeAddr - - ipfshttpCfg.NodeAddr = nodeAddr - - raftCfg.DataFolder = filepath.Join(testsFolder, host.ID().Pretty()) - - badgerCfg.Folder = filepath.Join(testsFolder, host.ID().Pretty(), "badger") - levelDBCfg.Folder = filepath.Join(testsFolder, host.ID().Pretty(), "leveldb") - - api, err := rest.NewAPI(ctx, apiCfg) - if err != nil { - t.Fatal(err) - } - - ipfsProxy, err := rest.NewAPI(ctx, apiCfg) - if err != nil { - t.Fatal(err) - } - - ipfs, err := ipfshttp.NewConnector(ipfshttpCfg) - if err != nil { - t.Fatal(err) - } - - alloc, err := balanced.New(allocBalancedCfg) - if err != nil { - t.Fatal(err) - } - inf, err := disk.NewInformer(diskInfCfg) - if err != nil { - t.Fatal(err) - } - - store := makeStore(t, badgerCfg, levelDBCfg) - cons := makeConsensus(t, store, host, pubsub, dht, raftCfg, staging, crdtCfg) - tracker := stateless.New(statelesstrackerCfg, ident.ID, 
clusterCfg.Peername, cons.State) - - var peersF func(context.Context) ([]peer.ID, error) - if consensus == "raft" { - peersF = cons.Peers - } - mon, err := pubsubmon.New(ctx, psmonCfg, pubsub, peersF) - if err != nil { - t.Fatal(err) - } - tracingCfg.ServiceName = peername - tracer, err := observations.SetupTracing(tracingCfg) - if err != nil { - t.Fatal(err) - } - - return clusterCfg, store, cons, []API{api, ipfsProxy}, ipfs, tracker, mon, alloc, inf, tracer, mock -} - -func makeStore(t *testing.T, badgerCfg *badger.Config, levelDBCfg *leveldb.Config) ds.Datastore { - switch consensus { - case "crdt": - if datastore == "badger" { - dstr, err := badger.New(badgerCfg) - if err != nil { - t.Fatal(err) - } - return dstr - } - dstr, err := leveldb.New(levelDBCfg) - if err != nil { - t.Fatal(err) - } - return dstr - default: - return inmem.New() - } -} - -func makeConsensus(t *testing.T, store ds.Datastore, h host.Host, psub *pubsub.PubSub, dht *dual.DHT, raftCfg *raft.Config, staging bool, crdtCfg *crdt.Config) Consensus { - switch consensus { - case "raft": - raftCon, err := raft.NewConsensus(h, raftCfg, store, staging) - if err != nil { - t.Fatal(err) - } - return raftCon - case "crdt": - crdtCon, err := crdt.New(h, dht, psub, crdtCfg, store) - if err != nil { - t.Fatal(err) - } - return crdtCon - default: - panic("bad consensus") - } -} - -func createCluster(t *testing.T, host host.Host, dht *dual.DHT, clusterCfg *Config, store ds.Datastore, consensus Consensus, apis []API, ipfs IPFSConnector, tracker PinTracker, mon PeerMonitor, alloc PinAllocator, inf Informer, tracer Tracer) *Cluster { - cl, err := NewCluster(context.Background(), host, dht, clusterCfg, store, consensus, apis, ipfs, tracker, mon, alloc, []Informer{inf}, tracer) - if err != nil { - t.Fatal(err) - } - return cl -} - -func createOnePeerCluster(t *testing.T, nth int, clusterSecret []byte) (*Cluster, *test.IpfsMock) { - hosts, pubsubs, dhts := createHosts(t, clusterSecret, 1) - clusterCfg, store, consensus, api, ipfs, tracker, mon, alloc, inf, tracer, mock := createComponents(t, hosts[0], pubsubs[0], dhts[0], nth, false) - cl := createCluster(t, hosts[0], dhts[0], clusterCfg, store, consensus, api, ipfs, tracker, mon, alloc, inf, tracer) - <-cl.Ready() - return cl, mock -} - -func createHosts(t *testing.T, clusterSecret []byte, nClusters int) ([]host.Host, []*pubsub.PubSub, []*dual.DHT) { - hosts := make([]host.Host, nClusters) - pubsubs := make([]*pubsub.PubSub, nClusters) - dhts := make([]*dual.DHT, nClusters) - - tcpaddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0") - quicAddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/udp/0/quic") - for i := range hosts { - priv, _, err := crypto.GenerateKeyPair(crypto.RSA, 2048) - if err != nil { - t.Fatal(err) - } - - h, p, d := createHost(t, priv, clusterSecret, []ma.Multiaddr{quicAddr, tcpaddr}) - hosts[i] = h - dhts[i] = d - pubsubs[i] = p - } - - return hosts, pubsubs, dhts -} - -func createHost(t *testing.T, priv crypto.PrivKey, clusterSecret []byte, listen []ma.Multiaddr) (host.Host, *pubsub.PubSub, *dual.DHT) { - ctx := context.Background() - - h, err := newHost(ctx, clusterSecret, priv, libp2p.ListenAddrs(listen...)) - if err != nil { - t.Fatal(err) - } - - // DHT needs to be created BEFORE connecting the peers - d, err := newTestDHT(ctx, h) - if err != nil { - t.Fatal(err) - } - - // Pubsub needs to be created BEFORE connecting the peers, - // otherwise they are not picked up. 
- psub, err := newPubSub(ctx, h) - if err != nil { - t.Fatal(err) - } - - return routedhost.Wrap(h, d), psub, d -} - -func newTestDHT(ctx context.Context, h host.Host) (*dual.DHT, error) { - return newDHT(ctx, h, nil, - dual.DHTOption(dht.RoutingTableRefreshPeriod(600*time.Millisecond)), - dual.DHTOption(dht.RoutingTableRefreshQueryTimeout(300*time.Millisecond)), - ) -} - -func createClusters(t *testing.T) ([]*Cluster, []*test.IpfsMock) { - ctx := context.Background() - os.RemoveAll(testsFolder) - cfgs := make([]*Config, nClusters) - stores := make([]ds.Datastore, nClusters) - cons := make([]Consensus, nClusters) - apis := make([][]API, nClusters) - ipfss := make([]IPFSConnector, nClusters) - trackers := make([]PinTracker, nClusters) - mons := make([]PeerMonitor, nClusters) - allocs := make([]PinAllocator, nClusters) - infs := make([]Informer, nClusters) - tracers := make([]Tracer, nClusters) - ipfsMocks := make([]*test.IpfsMock, nClusters) - - clusters := make([]*Cluster, nClusters) - - // Uncomment when testing with fixed ports - // clusterPeers := make([]ma.Multiaddr, nClusters, nClusters) - - hosts, pubsubs, dhts := createHosts(t, testingClusterSecret, nClusters) - - for i := 0; i < nClusters; i++ { - // staging = true for all except first (i==0) - cfgs[i], stores[i], cons[i], apis[i], ipfss[i], trackers[i], mons[i], allocs[i], infs[i], tracers[i], ipfsMocks[i] = createComponents(t, hosts[i], pubsubs[i], dhts[i], i, i != 0) - } - - // Start first node - clusters[0] = createCluster(t, hosts[0], dhts[0], cfgs[0], stores[0], cons[0], apis[0], ipfss[0], trackers[0], mons[0], allocs[0], infs[0], tracers[0]) - <-clusters[0].Ready() - bootstrapAddr := clusterAddr(clusters[0]) - - // Start the rest and join - for i := 1; i < nClusters; i++ { - clusters[i] = createCluster(t, hosts[i], dhts[i], cfgs[i], stores[i], cons[i], apis[i], ipfss[i], trackers[i], mons[i], allocs[i], infs[i], tracers[i]) - err := clusters[i].Join(ctx, bootstrapAddr) - if err != nil { - logger.Error(err) - t.Fatal(err) - } - <-clusters[i].Ready() - } - - // connect all hosts - for _, h := range hosts { - for _, h2 := range hosts { - if h.ID() != h2.ID() { - h.Peerstore().AddAddrs(h2.ID(), h2.Addrs(), peerstore.PermanentAddrTTL) - _, err := h.Network().DialPeer(ctx, h2.ID()) - if err != nil { - t.Log(err) - } - } - - } - } - - waitForLeader(t, clusters) - waitForClustersHealthy(t, clusters) - - return clusters, ipfsMocks -} - -func shutdownClusters(t *testing.T, clusters []*Cluster, m []*test.IpfsMock) { - for i, c := range clusters { - shutdownCluster(t, c, m[i]) - } - os.RemoveAll(testsFolder) -} - -func shutdownCluster(t *testing.T, c *Cluster, m *test.IpfsMock) { - err := c.Shutdown(context.Background()) - if err != nil { - t.Error(err) - } - c.dht.Close() - c.host.Close() - c.datastore.Close() - m.Close() -} - -func collectGlobalPinInfos(t *testing.T, out <-chan api.GlobalPinInfo, timeout time.Duration) []api.GlobalPinInfo { - t.Helper() - - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - - var gpis []api.GlobalPinInfo - for { - select { - case <-ctx.Done(): - t.Error(ctx.Err()) - return gpis - case gpi, ok := <-out: - if !ok { - return gpis - } - gpis = append(gpis, gpi) - } - } -} - -func collectPinInfos(t *testing.T, out <-chan api.PinInfo) []api.PinInfo { - t.Helper() - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - var pis []api.PinInfo - for { - select { - case <-ctx.Done(): - t.Error(ctx.Err()) - return pis - case pi, ok := 
<-out: - if !ok { - return pis - } - pis = append(pis, pi) - } - } -} - -func runF(t *testing.T, clusters []*Cluster, f func(*testing.T, *Cluster)) { - t.Helper() - var wg sync.WaitGroup - for _, c := range clusters { - wg.Add(1) - go func(c *Cluster) { - defer wg.Done() - f(t, c) - }(c) - - } - wg.Wait() -} - -// //////////////////////////////////// -// Delay and wait functions -// -// Delays are used in tests to wait for certain events to happen: -// - ttlDelay() waits for metrics to arrive. If you pin something -// and your next operation depends on updated metrics, you need to wait -// - pinDelay() accounts for the time necessary to pin something and for the new -// log entry to be visible in all cluster peers -// - delay just sleeps a second or two. -// - waitForLeader functions make sure there is a raft leader, for example, -// after killing the leader. -// -// The values for delays are a result of testing and adjusting so tests pass -// in travis, jenkins etc., taking into account the values used in the -// testing configuration (config_test.go). -func delay() { - var d int - if nClusters > 10 { - d = 3000 - } else { - d = 2000 - } - time.Sleep(time.Duration(d) * time.Millisecond) -} - -func pinDelay() { - time.Sleep(800 * time.Millisecond) -} - -func ttlDelay() { - time.Sleep(ttlDelayTime) -} - -// Like waitForLeader but letting metrics expire before waiting, and -// waiting for new metrics to arrive afterwards. -func waitForLeaderAndMetrics(t *testing.T, clusters []*Cluster) { - ttlDelay() - waitForLeader(t, clusters) - ttlDelay() -} - -// Makes sure there is a leader and everyone knows about it. -func waitForLeader(t *testing.T, clusters []*Cluster) { - if consensus == "crdt" { - return // yai - } - ctx := context.Background() - timer := time.NewTimer(time.Minute) - ticker := time.NewTicker(100 * time.Millisecond) - -loop: - for { - select { - case <-timer.C: - t.Fatal("timed out waiting for a leader") - case <-ticker.C: - for _, cl := range clusters { - if cl.shutdownB { - continue // skip shutdown clusters - } - _, err := cl.consensus.Leader(ctx) - if err != nil { - continue loop - } - } - break loop - } - } -} - -func waitForClustersHealthy(t *testing.T, clusters []*Cluster) { - t.Helper() - if len(clusters) == 0 { - return - } - - timer := time.NewTimer(15 * time.Second) - for { - ttlDelay() - metrics := clusters[0].monitor.LatestMetrics(context.Background(), clusters[0].informers[0].Name()) - healthy := 0 - for _, m := range metrics { - if !m.Expired() { - healthy++ - } - } - if len(clusters) == healthy { - return - } - - select { - case <-timer.C: - t.Fatal("timed out waiting for clusters to be healthy") - default: - } - } -} - -///////////////////////////////////////// - -func TestClustersVersion(t *testing.T) { - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - f := func(t *testing.T, c *Cluster) { - v := c.Version() - if v != version.Version.String() { - t.Error("Bad version") - } - } - runF(t, clusters, f) -} - -func TestClustersPeers(t *testing.T) { - ctx := context.Background() - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - - delay() - - j := rand.Intn(nClusters) // choose a random cluster peer - - out := make(chan api.ID, len(clusters)) - clusters[j].Peers(ctx, out) - - if len(out) != nClusters { - t.Fatal("expected as many peers as clusters") - } - - clusterIDMap := make(map[peer.ID]api.ID) - peerIDMap := make(map[peer.ID]api.ID) - - for _, c := range clusters { - id := c.ID(ctx) - 
clusterIDMap[id.ID] = id - } - - for p := range out { - if p.Error != "" { - t.Error(p.ID, p.Error) - continue - } - peerIDMap[p.ID] = p - } - - for k, id := range clusterIDMap { - id2, ok := peerIDMap[k] - if !ok { - t.Fatal("expected id in both maps") - } - //if !crypto.KeyEqual(id.PublicKey, id2.PublicKey) { - // t.Error("expected same public key") - //} - if id.IPFS.ID != id2.IPFS.ID { - t.Error("expected same ipfs daemon ID") - } - } -} - -func TestClustersPin(t *testing.T) { - ctx := context.Background() - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - prefix := test.Cid1.Prefix() - - ttlDelay() - - for i := 0; i < nPins; i++ { - j := rand.Intn(nClusters) // choose a random cluster peer - h, err := prefix.Sum(randomBytes()) // create random cid - if err != nil { - t.Fatal(err) - } - _, err = clusters[j].Pin(ctx, api.NewCid(h), api.PinOptions{}) - if err != nil { - t.Errorf("error pinning %s: %s", h, err) - } - // // Test re-pin - // err = clusters[j].Pin(ctx, api.PinCid(h)) - // if err != nil { - // t.Errorf("error repinning %s: %s", h, err) - // } - } - switch consensus { - case "crdt": - time.Sleep(10 * time.Second) - default: - delay() - } - fpinned := func(t *testing.T, c *Cluster) { - out := make(chan api.PinInfo, 10) - - go func() { - err := c.tracker.StatusAll(ctx, api.TrackerStatusUndefined, out) - if err != nil { - t.Error(err) - } - }() - - status := collectPinInfos(t, out) - - for _, v := range status { - if v.Status != api.TrackerStatusPinned { - t.Errorf("%s should have been pinned but it is %s", v.Cid, v.Status) - } - } - if l := len(status); l != nPins { - t.Errorf("Pinned %d out of %d requests", l, nPins) - } - } - runF(t, clusters, fpinned) - - // Unpin everything - pinList, err := clusters[0].pinsSlice(ctx) - if err != nil { - t.Fatal(err) - } - - if len(pinList) != nPins { - t.Fatalf("pin list has %d but pinned %d", len(pinList), nPins) - } - - for i := 0; i < len(pinList); i++ { - // unpin every pin once; this should succeed - j := rand.Intn(nClusters) // choose a random cluster peer - _, err := clusters[j].Unpin(ctx, pinList[i].Cid) - if err != nil { - t.Errorf("error unpinning %s: %s", pinList[i].Cid, err) - } - } - - switch consensus { - case "crdt": - time.Sleep(10 * time.Second) - default: - delay() - } - - for i := 0; i < len(pinList); i++ { - // test that re-unpinning fails - j := rand.Intn(nClusters) // choose a random cluster peer - _, err := clusters[j].Unpin(ctx, pinList[i].Cid) - if err == nil { - t.Errorf("expected error re-unpinning %s", pinList[i].Cid) - } - } - - delay() - - funpinned := func(t *testing.T, c *Cluster) { - out := make(chan api.PinInfo) - go func() { - err := c.tracker.StatusAll(ctx, api.TrackerStatusUndefined, out) - if err != nil { - t.Error(err) - } - }() - - status := collectPinInfos(t, out) - for _, v := range status { - t.Errorf("%s should have been unpinned but it is %s", v.Cid, v.Status) - } - } - runF(t, clusters, funpinned) -} - -func TestClustersPinUpdate(t *testing.T) { - ctx := context.Background() - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - prefix := test.Cid1.Prefix() - - ttlDelay() - - h, _ := prefix.Sum(randomBytes()) // create random cid - h2, _ := prefix.Sum(randomBytes()) // create random cid - - _, err := clusters[0].PinUpdate(ctx, api.NewCid(h), api.NewCid(h2), api.PinOptions{}) - if err == nil || err != state.ErrNotFound { - t.Fatal("pin update should fail when from is not pinned") - } - - _, err = clusters[0].Pin(ctx, api.NewCid(h), api.PinOptions{}) - if err != nil { - t.Errorf("error 
pinning %s: %s", h, err) - } - - pinDelay() - expiry := time.Now().AddDate(1, 0, 0) - opts2 := api.PinOptions{ - UserAllocations: []peer.ID{clusters[0].host.ID()}, // should not be used - PinUpdate: api.NewCid(h), - Name: "new name", - ExpireAt: expiry, - } - - _, err = clusters[0].Pin(ctx, api.NewCid(h2), opts2) // should call PinUpdate - if err != nil { - t.Errorf("error pin-updating %s: %s", h2, err) - } - - pinDelay() - - f := func(t *testing.T, c *Cluster) { - pinget, err := c.PinGet(ctx, api.NewCid(h2)) - if err != nil { - t.Fatal(err) - } - - if len(pinget.Allocations) != 0 { - t.Error("new pin should be allocated everywhere like pin1") - } - - if pinget.MaxDepth != -1 { - t.Error("updated pin should be recursive like pin1") - } - // We compare Unix seconds because our protobuf serde will have - // lost any sub-second precision. - if pinget.ExpireAt.Unix() != expiry.Unix() { - t.Errorf("Expiry didn't match. Expected: %s. Got: %s", expiry, pinget.ExpireAt) - } - - if pinget.Name != "new name" { - t.Error("name should be kept") - } - } - runF(t, clusters, f) -} - -func TestClustersPinDirect(t *testing.T) { - ctx := context.Background() - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - prefix := test.Cid1.Prefix() - - ttlDelay() - - h, _ := prefix.Sum(randomBytes()) // create random cid - - _, err := clusters[0].Pin(ctx, api.NewCid(h), api.PinOptions{Mode: api.PinModeDirect}) - if err != nil { - t.Fatal(err) - } - - pinDelay() - - f := func(t *testing.T, c *Cluster, mode api.PinMode) { - pinget, err := c.PinGet(ctx, api.NewCid(h)) - if err != nil { - t.Fatal(err) - } - - if pinget.Mode != mode { - t.Error("pin should be pinned in direct mode") - } - - if pinget.MaxDepth != mode.ToPinDepth() { - t.Errorf("pin should have max-depth %d but has %d", mode.ToPinDepth(), pinget.MaxDepth) - } - - pInfo := c.StatusLocal(ctx, api.NewCid(h)) - if pInfo.Error != "" { - t.Error(pInfo.Error) - } - if pInfo.Status != api.TrackerStatusPinned { - t.Error(pInfo.Error) - t.Error("the status should show the hash as pinned") - } - } - - runF(t, clusters, func(t *testing.T, c *Cluster) { - f(t, c, api.PinModeDirect) - }) - - // Convert into a recursive mode - _, err = clusters[0].Pin(ctx, api.NewCid(h), api.PinOptions{Mode: api.PinModeRecursive}) - if err != nil { - t.Fatal(err) - } - - pinDelay() - - runF(t, clusters, func(t *testing.T, c *Cluster) { - f(t, c, api.PinModeRecursive) - }) - - // This should fail as we cannot convert back to direct - _, err = clusters[0].Pin(ctx, api.NewCid(h), api.PinOptions{Mode: api.PinModeDirect}) - if err == nil { - t.Error("a recursive pin cannot be converted back to direct pin") - } -} - -func TestClustersStatusAll(t *testing.T) { - ctx := context.Background() - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - h := test.Cid1 - clusters[0].Pin(ctx, h, api.PinOptions{Name: "test"}) - pinDelay() - // Global status - f := func(t *testing.T, c *Cluster) { - out := make(chan api.GlobalPinInfo, 10) - go func() { - err := c.StatusAll(ctx, api.TrackerStatusUndefined, out) - if err != nil { - t.Error(err) - } - }() - - statuses := collectGlobalPinInfos(t, out, 5*time.Second) - if len(statuses) != 1 { - t.Fatal("bad status. 
Expected one item") - } - if !statuses[0].Cid.Equals(h) { - t.Error("bad cid in status") - } - - if statuses[0].Name != "test" { - t.Error("globalPinInfo should have the name") - } - - info := statuses[0].PeerMap - if len(info) != nClusters { - t.Error("bad info in status") - } - - for _, pi := range info { - if pi.IPFS != test.PeerID1 { - t.Error("ipfs not set in pin status") - } - } - - pid := c.host.ID().String() - if info[pid].Status != api.TrackerStatusPinned { - t.Error("the hash should have been pinned") - } - - status, err := c.Status(ctx, h) - if err != nil { - t.Error(err) - } - - pinfo, ok := status.PeerMap[pid] - if !ok { - t.Fatal("Host not in status") - } - - if pinfo.Status != api.TrackerStatusPinned { - t.Error(pinfo.Error) - t.Error("the status should show the hash as pinned") - } - } - runF(t, clusters, f) -} - -func TestClustersStatusAllWithErrors(t *testing.T) { - ctx := context.Background() - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - h := test.Cid1 - clusters[0].Pin(ctx, h, api.PinOptions{Name: "test"}) - pinDelay() - - // shutdown 1 cluster peer - clusters[1].Shutdown(ctx) - clusters[1].host.Close() - delay() - - f := func(t *testing.T, c *Cluster) { - // skip if it's the shutdown peer - if c.ID(ctx).ID == clusters[1].ID(ctx).ID { - return - } - - out := make(chan api.GlobalPinInfo, 10) - go func() { - err := c.StatusAll(ctx, api.TrackerStatusUndefined, out) - if err != nil { - t.Error(err) - } - }() - - statuses := collectGlobalPinInfos(t, out, 5*time.Second) - - if len(statuses) != 1 { - t.Fatal("bad status. Expected one item") - } - - if !statuses[0].Cid.Equals(h) { - t.Error("wrong Cid in globalPinInfo") - } - - if statuses[0].Name != "test" { - t.Error("wrong Name in globalPinInfo") - } - - // Raft and CRDT behave differently here - switch consensus { - case "raft": - // Raft will have all statuses with one of them - // being in ERROR because the peer is off - - stts := statuses[0] - if len(stts.PeerMap) != nClusters { - t.Error("bad number of peers in status") - } - - pid := clusters[1].id.String() - errst := stts.PeerMap[pid] - - if errst.Status != api.TrackerStatusClusterError { - t.Error("erroring status should be set to ClusterError:", errst.Status) - } - if errst.PeerName != "peer_1" { - t.Error("peername should have been set in the erroring peer too from the cache") - } - - if errst.IPFS != test.PeerID1 { - t.Error("IPFS ID should have been set in the erroring peer too from the cache") - } - - // now check with Cid status - status, err := c.Status(ctx, h) - if err != nil { - t.Error(err) - } - - pinfo := status.PeerMap[pid] - - if pinfo.Status != api.TrackerStatusClusterError { - t.Error("erroring status should be ClusterError:", pinfo.Status) - } - - if pinfo.PeerName != "peer_1" { - t.Error("peername should have been set in the erroring peer too from the cache") - } - - if pinfo.IPFS != test.PeerID1 { - t.Error("IPFS ID should have been set in the erroring peer too from the cache") - } - case "crdt": - // CRDT will not have contacted the offline peer because - // its metric expired and therefore is not in the - // peerset. 
- if len(statuses[0].PeerMap) != nClusters-1 { - t.Error("expected a different number of statuses") - } - default: - t.Fatal("bad consensus") - - } - - } - runF(t, clusters, f) -} - -func TestClustersRecoverLocal(t *testing.T) { - ctx := context.Background() - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - h := test.ErrorCid // This cid always fails - h2 := test.Cid2 - - ttlDelay() - - clusters[0].Pin(ctx, h, api.PinOptions{}) - clusters[0].Pin(ctx, h2, api.PinOptions{}) - pinDelay() - pinDelay() - - f := func(t *testing.T, c *Cluster) { - _, err := c.RecoverLocal(ctx, h) - if err != nil { - t.Fatal(err) - } - // Wait for queue to be processed - delay() - - info := c.StatusLocal(ctx, h) - if info.Status != api.TrackerStatusPinError { - t.Errorf("element is %s and not PinError", info.Status) - } - - // Recover good ID - info, _ = c.RecoverLocal(ctx, h2) - if info.Status != api.TrackerStatusPinned { - t.Error("element should be in Pinned state") - } - } - // Test Local syncs - runF(t, clusters, f) -} - -func TestClustersRecover(t *testing.T) { - ctx := context.Background() - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - h := test.ErrorCid // This cid always fails - h2 := test.Cid2 - - ttlDelay() - - clusters[0].Pin(ctx, h, api.PinOptions{}) - clusters[0].Pin(ctx, h2, api.PinOptions{}) - - pinDelay() - pinDelay() - - j := rand.Intn(nClusters) - ginfo, err := clusters[j].Recover(ctx, h) - if err != nil { - // we always attempt to return a valid response - // with errors contained in GlobalPinInfo - t.Fatal("did not expect an error") - } - if len(ginfo.PeerMap) != nClusters { - t.Error("number of peers does not match") - } - // Wait for queue to be processed - delay() - - ginfo, err = clusters[j].Status(ctx, h) - if err != nil { - t.Fatal(err) - } - - pinfo, ok := ginfo.PeerMap[clusters[j].host.ID().String()] - if !ok { - t.Fatal("should have info for this host") - } - if pinfo.Error == "" { - t.Error("pinInfo error should not be empty") - } - - for _, c := range clusters { - inf, ok := ginfo.PeerMap[c.host.ID().String()] - if !ok { - t.Fatal("GlobalPinInfo should not be empty for this host") - } - - if inf.Status != api.TrackerStatusPinError { - t.Logf("%+v", inf) - t.Error("should be PinError in all peers") - } - } - - // Test with a good Cid - j = rand.Intn(nClusters) - ginfo, err = clusters[j].Recover(ctx, h2) - if err != nil { - t.Fatal(err) - } - if !ginfo.Cid.Equals(h2) { - t.Error("GlobalPinInfo should be for test.Cid2") - } - if len(ginfo.PeerMap) != nClusters { - t.Error("number of peers does not match") - } - - for _, c := range clusters { - inf, ok := ginfo.PeerMap[c.host.ID().String()] - if !ok { - t.Fatal("GlobalPinInfo should have this cluster") - } - if inf.Status != api.TrackerStatusPinned { - t.Error("the GlobalPinInfo should show Pinned in all peers") - } - } -} - -func TestClustersRecoverAll(t *testing.T) { - ctx := context.Background() - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - h1 := test.Cid1 - hError := test.ErrorCid - - ttlDelay() - - clusters[0].Pin(ctx, h1, api.PinOptions{}) - clusters[0].Pin(ctx, hError, api.PinOptions{}) - - pinDelay() - - out := make(chan api.GlobalPinInfo) - go func() { - err := clusters[rand.Intn(nClusters)].RecoverAll(ctx, out) - if err != nil { - t.Error(err) - } - }() - - gInfos := collectGlobalPinInfos(t, out, 5*time.Second) - - if len(gInfos) != 1 { - t.Error("expected one item") - } - - for _, gInfo := range gInfos { - if 
len(gInfo.PeerMap) != nClusters { - t.Error("number of peers does not match") - } - } -} - -func TestClustersShutdown(t *testing.T) { - ctx := context.Background() - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - - f := func(t *testing.T, c *Cluster) { - err := c.Shutdown(ctx) - if err != nil { - t.Error("should be able to shutdown cleanly") - } - } - // Shutdown 3 times - runF(t, clusters, f) - runF(t, clusters, f) - runF(t, clusters, f) -} - -func TestClustersReplicationOverall(t *testing.T) { - ctx := context.Background() - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - for _, c := range clusters { - c.config.ReplicationFactorMin = nClusters - 1 - c.config.ReplicationFactorMax = nClusters - 1 - } - - // Why is replication factor nClusters - 1? - // Because that way we know that pinning nClusters - // pins with a strategy like numpins/disk - // will result in each peer holding locally exactly - // nClusters pins. - - prefix := test.Cid1.Prefix() - - for i := 0; i < nClusters; i++ { - // Pick a random cluster and hash - j := rand.Intn(nClusters) // choose a random cluster peer - h, err := prefix.Sum(randomBytes()) // create random cid - if err != nil { - t.Fatal(err) - } - _, err = clusters[j].Pin(ctx, api.NewCid(h), api.PinOptions{}) - if err != nil { - t.Error(err) - } - pinDelay() - - // check that it is held by exactly nClusters - 1 peers - gpi, err := clusters[j].Status(ctx, api.NewCid(h)) - if err != nil { - t.Fatal(err) - } - - numLocal := 0 - numRemote := 0 - for _, v := range gpi.PeerMap { - if v.Status == api.TrackerStatusPinned { - numLocal++ - } else if v.Status == api.TrackerStatusRemote { - numRemote++ - } - } - if numLocal != nClusters-1 { - t.Errorf( - "We wanted replication %d but it's only %d", - nClusters-1, - numLocal, - ) - } - - if numRemote != 1 { - t.Errorf("We wanted 1 peer to track as remote but %d do", numRemote) - } - ttlDelay() - } - - f := func(t *testing.T, c *Cluster) { - // confirm that the pintracker state matches the current global state - out := make(chan api.PinInfo, 100) - - go func() { - err := c.tracker.StatusAll(ctx, api.TrackerStatusUndefined, out) - if err != nil { - t.Error(err) - } - }() - pinfos := collectPinInfos(t, out) - if len(pinfos) != nClusters { - t.Error("Pinfos does not have the expected number of pins") - } - - numRemote := 0 - numLocal := 0 - for _, pi := range pinfos { - switch pi.Status { - case api.TrackerStatusPinned: - numLocal++ - - case api.TrackerStatusRemote: - numRemote++ - } - } - if numLocal != nClusters-1 { - t.Errorf("%s: Expected %d local pins but got %d", c.id.String(), nClusters-1, numLocal) - } - - if numRemote != 1 { - t.Errorf("%s: Expected 1 remote pin but got %d", c.id.String(), numRemote) - } - - outPins := make(chan api.Pin) - go func() { - err := c.Pins(ctx, outPins) - if err != nil { - t.Error(err) - } - }() - for pin := range outPins { - allocs := pin.Allocations - if len(allocs) != nClusters-1 { - t.Errorf("Allocations are [%s]", allocs) - } - for _, a := range allocs { - if a == c.id { - pinfo := c.tracker.Status(ctx, pin.Cid) - if pinfo.Status != api.TrackerStatusPinned { - t.Errorf("Peer %s was allocated but it is not pinning cid", c.id) - } - } - } - } - } - - runF(t, clusters, f) -} - -// This test checks that we pin with ReplicationFactorMax when -// we can -func TestClustersReplicationFactorMax(t *testing.T) { - ctx := context.Background() - if nClusters < 3 { - t.Skip("Need at least 3 peers") - } - - clusters, mock := createClusters(t) - defer 
shutdownClusters(t, clusters, mock) - for _, c := range clusters { - c.config.ReplicationFactorMin = 1 - c.config.ReplicationFactorMax = nClusters - 1 - } - - ttlDelay() - - h := test.Cid1 - _, err := clusters[0].Pin(ctx, h, api.PinOptions{}) - if err != nil { - t.Fatal(err) - } - - pinDelay() - - f := func(t *testing.T, c *Cluster) { - p, err := c.PinGet(ctx, h) - if err != nil { - t.Fatal(err) - } - - if len(p.Allocations) != nClusters-1 { - t.Error("should have pinned nClusters - 1 allocations") - } - - if p.ReplicationFactorMin != 1 { - t.Error("rplMin should be 1") - } - - if p.ReplicationFactorMax != nClusters-1 { - t.Error("rplMax should be nClusters-1") - } - } - runF(t, clusters, f) -} - -// This test checks that repinning something that is overpinned -// removes some allocations -func TestClustersReplicationFactorMaxLower(t *testing.T) { - ctx := context.Background() - if nClusters < 5 { - t.Skip("Need at least 5 peers") - } - - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - for _, c := range clusters { - c.config.ReplicationFactorMin = 1 - c.config.ReplicationFactorMax = nClusters - } - - ttlDelay() // make sure we have places to pin - - h := test.Cid1 - _, err := clusters[0].Pin(ctx, h, api.PinOptions{}) - if err != nil { - t.Fatal(err) - } - - pinDelay() - - p1, err := clusters[0].PinGet(ctx, h) - if err != nil { - t.Fatal(err) - } - - if len(p1.Allocations) != nClusters { - t.Fatal("allocations should be nClusters") - } - - opts := api.PinOptions{ - ReplicationFactorMin: 1, - ReplicationFactorMax: 2, - } - _, err = clusters[0].Pin(ctx, h, opts) - if err != nil { - t.Fatal(err) - } - - pinDelay() - - p2, err := clusters[0].PinGet(ctx, h) - if err != nil { - t.Fatal(err) - } - - if len(p2.Allocations) != 2 { - t.Fatal("allocations should have been reduced to 2") - } -} - -// This test checks that when not all nodes are available, -// we pin in as many as we can aiming for ReplicationFactorMax -func TestClustersReplicationFactorInBetween(t *testing.T) { - ctx := context.Background() - if nClusters < 5 { - t.Skip("Need at least 5 peers") - } - - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - for _, c := range clusters { - c.config.ReplicationFactorMin = 1 - c.config.ReplicationFactorMax = nClusters - } - - ttlDelay() - - // Shutdown two peers - clusters[nClusters-1].Shutdown(ctx) - clusters[nClusters-2].Shutdown(ctx) - - waitForLeaderAndMetrics(t, clusters) - - h := test.Cid1 - _, err := clusters[0].Pin(ctx, h, api.PinOptions{}) - if err != nil { - t.Fatal(err) - } - - pinDelay() - - f := func(t *testing.T, c *Cluster) { - if c == clusters[nClusters-1] || c == clusters[nClusters-2] { - return - } - p, err := c.PinGet(ctx, h) - if err != nil { - t.Fatal(err) - } - - if len(p.Allocations) != nClusters-2 { - t.Error("should have pinned nClusters-2 allocations") - } - - if p.ReplicationFactorMin != 1 { - t.Error("rplMin should be 1") - } - - if p.ReplicationFactorMax != nClusters { - t.Error("rplMax should be nClusters") - } - } - runF(t, clusters, f) -} - -// This test checks that we do not pin something for which -// we cannot reach ReplicationFactorMin -func TestClustersReplicationFactorMin(t *testing.T) { - ctx := context.Background() - if nClusters < 5 { - t.Skip("Need at least 5 peers") - } - - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - for _, c := range clusters { - c.config.ReplicationFactorMin = nClusters - 1 - c.config.ReplicationFactorMax = nClusters - } - - // 
Shutdown two peers - clusters[nClusters-1].Shutdown(ctx) - waitForLeaderAndMetrics(t, clusters) - clusters[nClusters-2].Shutdown(ctx) - waitForLeaderAndMetrics(t, clusters) - - h := test.Cid1 - _, err := clusters[0].Pin(ctx, h, api.PinOptions{}) - if err == nil { - t.Error("Pin should have failed as rplMin cannot be satisfied") - } - t.Log(err) - if !strings.Contains(err.Error(), "not enough peers to allocate CID") { - t.Fatal(err) - } -} - -// This test checks that repinning something that has become -// underpinned actually changes nothing if it's sufficiently pinned -func TestClustersReplicationMinMaxNoRealloc(t *testing.T) { - ctx := context.Background() - if nClusters < 5 { - t.Skip("Need at least 5 peers") - } - - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - for _, c := range clusters { - c.config.ReplicationFactorMin = 1 - c.config.ReplicationFactorMax = nClusters - } - - ttlDelay() - - h := test.Cid1 - _, err := clusters[0].Pin(ctx, h, api.PinOptions{}) - if err != nil { - t.Fatal(err) - } - - pinDelay() - - // Shutdown two peers - clusters[nClusters-1].Shutdown(ctx) - waitForLeaderAndMetrics(t, clusters) - clusters[nClusters-2].Shutdown(ctx) - waitForLeaderAndMetrics(t, clusters) - - _, err = clusters[0].Pin(ctx, h, api.PinOptions{}) - if err != nil { - t.Fatal(err) - } - - pinDelay() - - p, err := clusters[0].PinGet(ctx, h) - if err != nil { - t.Fatal(err) - } - - if len(p.Allocations) != nClusters { - t.Error("allocations should still be nClusters even if not all available") - } - - if p.ReplicationFactorMax != nClusters { - t.Error("rplMax should have not changed") - } -} - -// This test checks that repinning something that has become -// underpinned does re-allocations when it's not sufficiently -// pinned anymore. -// FIXME: The manual repin only works if the pin options changed. -func TestClustersReplicationMinMaxRealloc(t *testing.T) { - ctx := context.Background() - if nClusters < 5 { - t.Skip("Need at least 5 peers") - } - - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - for _, c := range clusters { - c.config.ReplicationFactorMin = 3 - c.config.ReplicationFactorMax = 4 - } - - ttlDelay() // make sure metrics are in - - h := test.Cid1 - _, err := clusters[0].Pin(ctx, h, api.PinOptions{ - Name: "a", - }) - if err != nil { - t.Fatal(err) - } - - pinDelay() - - p, err := clusters[0].PinGet(ctx, h) - if err != nil { - t.Fatal(err) - } - - firstAllocations := p.Allocations - - peerIDMap := make(map[peer.ID]*Cluster) - for _, a := range clusters { - peerIDMap[a.id] = a - } - - // kill two of the allocations - // Only the first allocated peer (or the second if the first is - // alerting) will automatically repin. - alloc1 := peerIDMap[firstAllocations[1]] - alloc2 := peerIDMap[firstAllocations[2]] - safePeer := peerIDMap[firstAllocations[0]] - - alloc1.Shutdown(ctx) - alloc2.Shutdown(ctx) - - waitForLeaderAndMetrics(t, clusters) - - // Repin. Although this should have been taken care of automatically, - // as alerts happen for the shutdown nodes, we force re-allocation by - // changing the name. 
- _, err = safePeer.Pin(ctx, h, api.PinOptions{ - Name: "b", - }) - if err != nil { - t.Fatal(err) - } - - pinDelay() - - p, err = safePeer.PinGet(ctx, h) - if err != nil { - t.Fatal(err) - } - - secondAllocations := p.Allocations - - strings1 := api.PeersToStrings(firstAllocations) - strings2 := api.PeersToStrings(secondAllocations) - sort.Strings(strings1) - sort.Strings(strings2) - t.Logf("Allocs1: %s", strings1) - t.Logf("Allocs2: %s", strings2) - - if fmt.Sprintf("%s", strings1) == fmt.Sprintf("%s", strings2) { - t.Error("allocations should have changed") - } - - lenSA := len(secondAllocations) - expected := minInt(nClusters-2, 4) - if lenSA != expected { - t.Errorf("Insufficient reallocation, could have allocated to %d peers but instead only allocated to %d peers", expected, lenSA) - } - - if lenSA < 3 { - t.Error("allocations should be at least rplMin") - } -} - -// In this test we check that repinning something -// when a node has gone down will re-assign the pin -func TestClustersReplicationRealloc(t *testing.T) { - ctx := context.Background() - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - for _, c := range clusters { - c.config.ReplicationFactorMin = nClusters - 1 - c.config.ReplicationFactorMax = nClusters - 1 - } - - ttlDelay() - - j := rand.Intn(nClusters) - h := test.Cid1 - _, err := clusters[j].Pin(ctx, h, api.PinOptions{}) - if err != nil { - t.Fatal(err) - } - - // Let the pin arrive - pinDelay() - - pinList, err := clusters[j].pinsSlice(ctx) - if err != nil { - t.Fatal(err) - } - pin := pinList[0] - allocs := sort.StringSlice(api.PeersToStrings(pin.Allocations)) - allocs.Sort() - allocsStr := fmt.Sprintf("%s", allocs) - - // Re-pin should work and be allocated to the same - // nodes - _, err = clusters[j].Pin(ctx, h, api.PinOptions{}) - if err != nil { - t.Fatal(err) - } - - pinDelay() - - pinList2, err := clusters[j].pinsSlice(ctx) - if err != nil { - t.Fatal(err) - } - pin2 := pinList2[0] - allocs2 := sort.StringSlice(api.PeersToStrings(pin2.Allocations)) - allocs2.Sort() - allocsStr2 := fmt.Sprintf("%s", allocs2) - if allocsStr != allocsStr2 { - t.Fatal("allocations changed without reason") - } - //t.Log(allocsStr) - //t.Log(allocsStr2) - - var killedClusterIndex int - // find someone that pinned it and kill that cluster - for i, c := range clusters { - pinfo := c.tracker.Status(ctx, h) - if pinfo.Status == api.TrackerStatusPinned { - //t.Logf("Killing %s", c.id.Pretty()) - killedClusterIndex = i - t.Logf("Shutting down %s", c.ID(ctx).ID) - c.Shutdown(ctx) - break - } - } - - // let metrics expire and give time for the cluster to - // see if they have lost the leader - waitForLeaderAndMetrics(t, clusters) - - // Make sure we haven't killed our randomly - // selected cluster - for j == killedClusterIndex { - j = rand.Intn(nClusters) - } - - // now pin should succeed - _, err = clusters[j].Pin(ctx, h, api.PinOptions{}) - if err != nil { - t.Fatal(err) - } - - pinDelay() - - numPinned := 0 - for i, c := range clusters { - if i == killedClusterIndex { - continue - } - pinfo := c.tracker.Status(ctx, h) - if pinfo.Status == api.TrackerStatusPinned { - //t.Log(pinfo.Peer.Pretty()) - numPinned++ - } - } - - if numPinned != nClusters-1 { - t.Error("pin should have been correctly re-assigned") - } -} - -// In this test we try to pin something when there are not -// as many available peers as we need. It's like before, except -// more peers are killed. 
-func TestClustersReplicationNotEnoughPeers(t *testing.T) { - ctx := context.Background() - if nClusters < 5 { - t.Skip("Need at least 5 peers") - } - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - for _, c := range clusters { - c.config.ReplicationFactorMin = nClusters - 1 - c.config.ReplicationFactorMax = nClusters - 1 - } - - ttlDelay() - - j := rand.Intn(nClusters) - _, err := clusters[j].Pin(ctx, test.Cid1, api.PinOptions{}) - if err != nil { - t.Fatal(err) - } - - // Let the pin arrive - pinDelay() - - clusters[0].Shutdown(ctx) - clusters[1].Shutdown(ctx) - - waitForLeaderAndMetrics(t, clusters) - - _, err = clusters[2].Pin(ctx, test.Cid2, api.PinOptions{}) - if err == nil { - t.Fatal("expected an error") - } - if !strings.Contains(err.Error(), "not enough peers to allocate") { - t.Error("different error than expected") - t.Error(err) - } - //t.Log(err) -} - -func TestClustersRebalanceOnPeerDown(t *testing.T) { - ctx := context.Background() - if nClusters < 5 { - t.Skip("Need at least 5 peers") - } - - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - for _, c := range clusters { - c.config.ReplicationFactorMin = nClusters - 1 - c.config.ReplicationFactorMax = nClusters - 1 - } - - // pin something - h := test.Cid1 - clusters[0].Pin(ctx, h, api.PinOptions{}) - pinDelay() - pinLocal := 0 - pinRemote := 0 - var localPinner string - var remotePinner string - var remotePinnerCluster *Cluster - - status, _ := clusters[0].Status(ctx, h) - - // check it was correctly pinned - for p, pinfo := range status.PeerMap { - if pinfo.Status == api.TrackerStatusPinned { - pinLocal++ - localPinner = p - } else if pinfo.Status == api.TrackerStatusRemote { - pinRemote++ - remotePinner = p - } - } - - if pinLocal != nClusters-1 || pinRemote != 1 { - t.Fatal("Not pinned as expected") - } - - // kill the local pinner - for _, c := range clusters { - clid := c.id.String() - if clid == localPinner { - c.Shutdown(ctx) - } else if clid == remotePinner { - remotePinnerCluster = c - } - } - - delay() - waitForLeaderAndMetrics(t, clusters) // in case we killed the leader - - // It should be now pinned in the remote pinner - if s := remotePinnerCluster.tracker.Status(ctx, h).Status; s != api.TrackerStatusPinned { - t.Errorf("it should be pinned and is %s", s) - } -} - -// Helper function for verifying cluster graph. Will only pass if exactly the -// peers in clusterIDs are fully connected to each other and the expected ipfs -// mock connectivity exists. 
Cluster peers not in clusterIDs are assumed to - be disconnected and the graph should reflect this -func validateClusterGraph(t *testing.T, graph api.ConnectGraph, clusterIDs map[string]struct{}, peerNum int) { - // Check that all cluster peers see each other as peers - for id1, peers := range graph.ClusterLinks { - if _, ok := clusterIDs[id1]; !ok { - if len(peers) != 0 { - t.Errorf("disconnected peer %s is still connected in graph", id1) - } - continue - } - t.Logf("id: %s, peers: %v\n", id1, peers) - if len(peers) > len(clusterIDs)-1 { - t.Errorf("More peers recorded in graph than expected") - } - // Make lookup index for peers connected to id1 - peerIndex := make(map[string]struct{}) - for _, p := range peers { - peerIndex[p.String()] = struct{}{} - } - for id2 := range clusterIDs { - if _, ok := peerIndex[id2]; id1 != id2 && !ok { - t.Errorf("Expected graph to see peer %s connected to peer %s", id1, id2) - } - } - } - if len(graph.ClusterLinks) != peerNum { - t.Errorf("Unexpected number of cluster nodes in graph") - } - - // Check that all cluster peers are recorded as nodes in the graph - for id := range clusterIDs { - if _, ok := graph.ClusterLinks[id]; !ok { - t.Errorf("Expected graph to record peer %s as a node", id) - } - } - - if len(graph.ClusterTrustLinks) != peerNum { - t.Errorf("Unexpected number of trust links in graph") - } - - // Check that the mocked ipfs swarm is recorded - if len(graph.IPFSLinks) != 1 { - t.Error("Expected exactly one ipfs peer for all cluster nodes, the mocked peer") - } - links, ok := graph.IPFSLinks[test.PeerID1.String()] - if !ok { - t.Error("Expected the mocked ipfs peer to be a node in the graph") - } else { - if len(links) != 2 || links[0] != test.PeerID4 || - links[1] != test.PeerID5 { - t.Error("Swarm peers of mocked ipfs are not those expected") - } - } - - // Check that the cluster to ipfs connections are all recorded - for id := range clusterIDs { - if ipfsID, ok := graph.ClustertoIPFS[id]; !ok { - t.Errorf("Expected graph to record peer %s's ipfs connection", id) - } else { - if ipfsID != test.PeerID1 { - t.Errorf("Unexpected ipfs ID %s", ipfsID) - } - } - } - if len(graph.ClustertoIPFS) > len(clusterIDs) { - t.Error("More cluster to ipfs links recorded in graph than expected") - } -} - -// In this test we get a cluster graph report from a random peer in a healthy -// fully connected cluster and verify that it is formed as expected. -func TestClustersGraphConnected(t *testing.T) { - ctx := context.Background() - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - - ttlDelay() - - j := rand.Intn(nClusters) // choose a random cluster peer to query - graph, err := clusters[j].ConnectGraph() - if err != nil { - t.Fatal(err) - } - - clusterIDs := make(map[string]struct{}) - for _, c := range clusters { - id := c.ID(ctx).ID.String() - clusterIDs[id] = struct{}{} - } - validateClusterGraph(t, graph, clusterIDs, nClusters) -} - -// Similar to the previous test, we get a cluster graph report from a peer. 
-// However, now 2 peers have been shut down, so we do not expect to see - // them in the graph -func TestClustersGraphUnhealthy(t *testing.T) { - ctx := context.Background() - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - if nClusters < 5 { - t.Skip("Need at least 5 peers") - } - - j := rand.Intn(nClusters) // choose a random cluster peer to query - // choose the clusters to shut down - discon1 := -1 - discon2 := -1 - for i := range clusters { - if i != j { - if discon1 == -1 { - discon1 = i - } else { - discon2 = i - break - } - } - } - - clusters[discon1].Shutdown(ctx) - clusters[discon1].host.Close() - clusters[discon2].Shutdown(ctx) - clusters[discon2].host.Close() - - waitForLeaderAndMetrics(t, clusters) - - graph, err := clusters[j].ConnectGraph() - if err != nil { - t.Fatal(err) - } - - clusterIDs := make(map[string]struct{}) - for i, c := range clusters { - if i == discon1 || i == discon2 { - continue - } - id := c.ID(ctx).ID.String() - clusterIDs[id] = struct{}{} - } - peerNum := nClusters - switch consensus { - case "crdt": - peerNum = nClusters - 2 - } - - validateClusterGraph(t, graph, clusterIDs, peerNum) -} - -// Check that the pin is not re-assigned when a node -// that has disabled repinning goes down. -func TestClustersDisabledRepinning(t *testing.T) { - ctx := context.Background() - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - for _, c := range clusters { - c.config.ReplicationFactorMin = nClusters - 1 - c.config.ReplicationFactorMax = nClusters - 1 - c.config.DisableRepinning = true - } - - ttlDelay() - - j := rand.Intn(nClusters) - h := test.Cid1 - _, err := clusters[j].Pin(ctx, h, api.PinOptions{}) - if err != nil { - t.Fatal(err) - } - - // Let the pin arrive - pinDelay() - - var killedClusterIndex int - // find someone that pinned it and kill that cluster - for i, c := range clusters { - pinfo := c.tracker.Status(ctx, h) - if pinfo.Status == api.TrackerStatusPinned { - killedClusterIndex = i - t.Logf("Shutting down %s", c.ID(ctx).ID) - c.Shutdown(ctx) - break - } - } - - // let metrics expire and give time for the cluster to - // see if they have lost the leader - waitForLeaderAndMetrics(t, clusters) - - // Make sure we haven't killed our randomly - // selected cluster - for j == killedClusterIndex { - j = rand.Intn(nClusters) - } - - numPinned := 0 - for i, c := range clusters { - if i == killedClusterIndex { - continue - } - pinfo := c.tracker.Status(ctx, h) - if pinfo.Status == api.TrackerStatusPinned { - //t.Log(pinfo.Peer.Pretty()) - numPinned++ - } - } - - if numPinned != nClusters-2 { - t.Errorf("expected %d replicas for pin, got %d", nClusters-2, numPinned) - } -} - -func TestRepoGC(t *testing.T) { - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - f := func(t *testing.T, c *Cluster) { - gRepoGC, err := c.RepoGC(context.Background()) - if err != nil { - t.Fatal("gc should have worked:", err) - } - - if gRepoGC.PeerMap == nil { - t.Fatal("expected a non-nil peer map") - } - - if len(gRepoGC.PeerMap) != nClusters { - t.Errorf("expected repo gc information for %d peers", nClusters) - } - for _, repoGC := range gRepoGC.PeerMap { - testRepoGC(t, repoGC) - } - } - - runF(t, clusters, f) -} - -func TestClustersFollowerMode(t *testing.T) { - ctx := context.Background() - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - - _, err := clusters[0].Pin(ctx, test.Cid1, api.PinOptions{}) - if err != nil { - t.Fatal(err) - } - _, err = 
clusters[0].Pin(ctx, test.ErrorCid, api.PinOptions{}) - if err != nil { - t.Fatal(err) - } - - // Let the pins arrive - pinDelay() - - // Set Cluster1 to follower mode - clusters[1].config.FollowerMode = true - - t.Run("follower cannot pin", func(t *testing.T) { - _, err := clusters[1].PinPath(ctx, "/ipfs/"+test.Cid2.String(), api.PinOptions{}) - if err != errFollowerMode { - t.Error("expected follower mode error") - } - _, err = clusters[1].Pin(ctx, test.Cid2, api.PinOptions{}) - if err != errFollowerMode { - t.Error("expected follower mode error") - } - }) - - t.Run("follower cannot unpin", func(t *testing.T) { - _, err := clusters[1].UnpinPath(ctx, "/ipfs/"+test.Cid1.String()) - if err != errFollowerMode { - t.Error("expected follower mode error") - } - _, err = clusters[1].Unpin(ctx, test.Cid1) - if err != errFollowerMode { - t.Error("expected follower mode error") - } - }) - - t.Run("follower cannot add", func(t *testing.T) { - sth := test.NewShardingTestHelper() - defer sth.Clean(t) - params := api.DefaultAddParams() - params.Shard = false - params.Name = "testlocal" - mfr, closer := sth.GetTreeMultiReader(t) - defer closer.Close() - r := multipart.NewReader(mfr, mfr.Boundary()) - _, err = clusters[1].AddFile(ctx, r, params) - if err != errFollowerMode { - t.Error("expected follower mode error") - } - }) - - t.Run("follower status itself only", func(t *testing.T) { - gpi, err := clusters[1].Status(ctx, test.Cid1) - if err != nil { - t.Error("status should work") - } - if len(gpi.PeerMap) != 1 { - t.Fatal("globalPinInfo should only have one peer") - } - }) -} - -func TestClusterPinsWithExpiration(t *testing.T) { - ctx := context.Background() - - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - - ttlDelay() - - cl := clusters[rand.Intn(nClusters)] // choose a random cluster peer to query - - c := test.Cid1 - expireIn := 1 * time.Second - opts := api.PinOptions{ - ExpireAt: time.Now().Add(expireIn), - } - _, err := cl.Pin(ctx, c, opts) - if err != nil { - t.Fatal("pin should have worked:", err) - } - - pinDelay() - - pins, err := cl.pinsSlice(ctx) - if err != nil { - t.Fatal(err) - } - if len(pins) != 1 { - t.Error("pin should be part of the state") - } - - // wait till expiry time - time.Sleep(expireIn) - - // manually call state sync on all peers, so we don't have to wait till - // state sync interval - for _, c := range clusters { - err = c.StateSync(ctx) - if err != nil { - t.Error(err) - } - } - - pinDelay() - - // state sync should have unpinned expired pin - pins, err = cl.pinsSlice(ctx) - if err != nil { - t.Fatal(err) - } - if len(pins) != 0 { - t.Error("pin should not be part of the state") - } -} - -func TestClusterAlerts(t *testing.T) { - ctx := context.Background() - clusters, mock := createClusters(t) - defer shutdownClusters(t, clusters, mock) - - if len(clusters) < 2 { - t.Skip("need at least 2 nodes for this test") - } - - ttlDelay() - - for _, c := range clusters[1:] { - c.Shutdown(ctx) - } - - ttlDelay() - - alerts := clusters[0].Alerts() - if len(alerts) == 0 { - t.Error("expected at least one alert") - } -} diff --git a/packages/networking/ipfs-cluster/ipfsconn/ipfshttp/config.go b/packages/networking/ipfs-cluster/ipfsconn/ipfshttp/config.go deleted file mode 100644 index ef4a909..0000000 --- a/packages/networking/ipfs-cluster/ipfsconn/ipfshttp/config.go +++ /dev/null @@ -1,230 +0,0 @@ -package ipfshttp - -import ( - "encoding/json" - "errors" - "fmt" - "time" - - "github.com/kelseyhightower/envconfig" - - 
"github.com/ipfs-cluster/ipfs-cluster/config" - - ma "github.com/multiformats/go-multiaddr" -) - -const configKey = "ipfshttp" -const envConfigKey = "cluster_ipfshttp" - -// Default values for Config. -const ( - DefaultNodeAddr = "/ip4/127.0.0.1/tcp/5001" - DefaultConnectSwarmsDelay = 30 * time.Second - DefaultIPFSRequestTimeout = 5 * time.Minute - DefaultPinTimeout = 2 * time.Minute - DefaultUnpinTimeout = 3 * time.Hour - DefaultRepoGCTimeout = 24 * time.Hour - DefaultInformerTriggerInterval = 0 // disabled - DefaultUnpinDisable = false -) - -// Config is used to initialize a Connector and allows to customize -// its behavior. It implements the config.ComponentConfig interface. -type Config struct { - config.Saver - - // Host/Port for the IPFS daemon. - NodeAddr ma.Multiaddr - - // ConnectSwarmsDelay specifies how long to wait after startup before - // attempting to open connections from this peer's IPFS daemon to the - // IPFS daemons of other peers. - ConnectSwarmsDelay time.Duration - - // IPFS Daemon HTTP Client POST timeout - IPFSRequestTimeout time.Duration - - // Pin Operation timeout - PinTimeout time.Duration - - // Unpin Operation timeout - UnpinTimeout time.Duration - - // RepoGC Operation timeout - RepoGCTimeout time.Duration - - // How many pin and block/put operations need to happen before we do a - // special broadcast informer metrics to the network. 0 to disable. - InformerTriggerInterval int - - // Disables the unpin operation and returns an error. - UnpinDisable bool - - // Tracing flag used to skip tracing specific paths when not enabled. - Tracing bool -} - -type jsonConfig struct { - NodeMultiaddress string `json:"node_multiaddress"` - ConnectSwarmsDelay string `json:"connect_swarms_delay"` - IPFSRequestTimeout string `json:"ipfs_request_timeout"` - PinTimeout string `json:"pin_timeout"` - UnpinTimeout string `json:"unpin_timeout"` - RepoGCTimeout string `json:"repogc_timeout"` - InformerTriggerInterval int `json:"informer_trigger_interval"` - UnpinDisable bool `json:"unpin_disable,omitempty"` -} - -// ConfigKey provides a human-friendly identifier for this type of Config. -func (cfg *Config) ConfigKey() string { - return configKey -} - -// Default sets the fields of this Config to sensible default values. -func (cfg *Config) Default() error { - node, _ := ma.NewMultiaddr(DefaultNodeAddr) - cfg.NodeAddr = node - cfg.ConnectSwarmsDelay = DefaultConnectSwarmsDelay - cfg.IPFSRequestTimeout = DefaultIPFSRequestTimeout - cfg.PinTimeout = DefaultPinTimeout - cfg.UnpinTimeout = DefaultUnpinTimeout - cfg.RepoGCTimeout = DefaultRepoGCTimeout - cfg.InformerTriggerInterval = DefaultInformerTriggerInterval - cfg.UnpinDisable = DefaultUnpinDisable - - return nil -} - -// ApplyEnvVars fills in any Config fields found -// as environment variables. -func (cfg *Config) ApplyEnvVars() error { - jcfg, err := cfg.toJSONConfig() - if err != nil { - return err - } - - err = envconfig.Process(envConfigKey, jcfg) - if err != nil { - return err - } - - return cfg.applyJSONConfig(jcfg) -} - -// Validate checks that the fields of this Config have sensible values, -// at least in appearance. 
-func (cfg *Config) Validate() error { - var err error - if cfg.NodeAddr == nil { - err = errors.New("ipfshttp.node_multiaddress not set") - } - - if cfg.ConnectSwarmsDelay < 0 { - err = errors.New("ipfshttp.connect_swarms_delay is invalid") - } - - if cfg.IPFSRequestTimeout < 0 { - err = errors.New("ipfshttp.ipfs_request_timeout invalid") - } - - if cfg.PinTimeout < 0 { - err = errors.New("ipfshttp.pin_timeout invalid") - } - - if cfg.UnpinTimeout < 0 { - err = errors.New("ipfshttp.unpin_timeout invalid") - } - - if cfg.RepoGCTimeout < 0 { - err = errors.New("ipfshttp.repogc_timeout invalid") - } - if cfg.InformerTriggerInterval < 0 { - err = errors.New("ipfshttp.update_metrics_after") - } - - return err - -} - -// LoadJSON parses a JSON representation of this Config as generated by ToJSON. -func (cfg *Config) LoadJSON(raw []byte) error { - jcfg := &jsonConfig{} - err := json.Unmarshal(raw, jcfg) - if err != nil { - logger.Error("Error unmarshaling ipfshttp config") - return err - } - - cfg.Default() - - return cfg.applyJSONConfig(jcfg) -} - -func (cfg *Config) applyJSONConfig(jcfg *jsonConfig) error { - nodeAddr, err := ma.NewMultiaddr(jcfg.NodeMultiaddress) - if err != nil { - return fmt.Errorf("error parsing ipfs_node_multiaddress: %s", err) - } - - cfg.NodeAddr = nodeAddr - cfg.UnpinDisable = jcfg.UnpinDisable - cfg.InformerTriggerInterval = jcfg.InformerTriggerInterval - - err = config.ParseDurations( - "ipfshttp", - &config.DurationOpt{Duration: jcfg.ConnectSwarmsDelay, Dst: &cfg.ConnectSwarmsDelay, Name: "connect_swarms_delay"}, - &config.DurationOpt{Duration: jcfg.IPFSRequestTimeout, Dst: &cfg.IPFSRequestTimeout, Name: "ipfs_request_timeout"}, - &config.DurationOpt{Duration: jcfg.PinTimeout, Dst: &cfg.PinTimeout, Name: "pin_timeout"}, - &config.DurationOpt{Duration: jcfg.UnpinTimeout, Dst: &cfg.UnpinTimeout, Name: "unpin_timeout"}, - &config.DurationOpt{Duration: jcfg.RepoGCTimeout, Dst: &cfg.RepoGCTimeout, Name: "repogc_timeout"}, - ) - if err != nil { - return err - } - - return cfg.Validate() -} - -// ToJSON generates a human-friendly JSON representation of this Config. -func (cfg *Config) ToJSON() (raw []byte, err error) { - jcfg, err := cfg.toJSONConfig() - if err != nil { - return - } - - raw, err = config.DefaultJSONMarshal(jcfg) - return -} - -func (cfg *Config) toJSONConfig() (jcfg *jsonConfig, err error) { - // Multiaddress String() may panic - defer func() { - if r := recover(); r != nil { - err = fmt.Errorf("%s", r) - } - }() - - jcfg = &jsonConfig{} - - // Set all configuration fields - jcfg.NodeMultiaddress = cfg.NodeAddr.String() - jcfg.ConnectSwarmsDelay = cfg.ConnectSwarmsDelay.String() - jcfg.IPFSRequestTimeout = cfg.IPFSRequestTimeout.String() - jcfg.PinTimeout = cfg.PinTimeout.String() - jcfg.UnpinTimeout = cfg.UnpinTimeout.String() - jcfg.RepoGCTimeout = cfg.RepoGCTimeout.String() - jcfg.InformerTriggerInterval = cfg.InformerTriggerInterval - jcfg.UnpinDisable = cfg.UnpinDisable - - return -} - -// ToDisplayJSON returns JSON config as a string. 
-func (cfg *Config) ToDisplayJSON() ([]byte, error) { - jcfg, err := cfg.toJSONConfig() - if err != nil { - return nil, err - } - - return config.DisplayJSON(jcfg) -} diff --git a/packages/networking/ipfs-cluster/ipfsconn/ipfshttp/config_test.go b/packages/networking/ipfs-cluster/ipfsconn/ipfshttp/config_test.go deleted file mode 100644 index 87e1258..0000000 --- a/packages/networking/ipfs-cluster/ipfsconn/ipfshttp/config_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package ipfshttp - -import ( - "encoding/json" - "os" - "testing" - "time" -) - -var cfgJSON = []byte(` -{ - "node_multiaddress": "/ip4/127.0.0.1/tcp/5001", - "connect_swarms_delay": "7s", - "ipfs_request_timeout": "5m0s", - "pin_timeout": "2m", - "unpin_timeout": "3h", - "repogc_timeout": "24h", - "informer_trigger_interval": 10 -} -`) - -func TestLoadJSON(t *testing.T) { - cfg := &Config{} - err := cfg.LoadJSON(cfgJSON) - if err != nil { - t.Fatal(err) - } - - j := &jsonConfig{} - json.Unmarshal(cfgJSON, j) - - if cfg.InformerTriggerInterval != 10 { - t.Error("missing value") - } - - j.NodeMultiaddress = "abc" - tst, _ := json.Marshal(j) - err = cfg.LoadJSON(tst) - if err == nil { - t.Error("expected error in node_multiaddress") - } -} - -func TestToJSON(t *testing.T) { - cfg := &Config{} - cfg.LoadJSON(cfgJSON) - newjson, err := cfg.ToJSON() - if err != nil { - t.Fatal(err) - } - cfg = &Config{} - err = cfg.LoadJSON(newjson) - if err != nil { - t.Fatal(err) - } -} - -func TestDefault(t *testing.T) { - cfg := &Config{} - cfg.Default() - if cfg.Validate() != nil { - t.Fatal("error validating") - } - - cfg.NodeAddr = nil - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } -} - -func TestApplyEnvVar(t *testing.T) { - os.Setenv("CLUSTER_IPFSHTTP_PINTIMEOUT", "22m") - cfg := &Config{} - cfg.Default() - cfg.ApplyEnvVars() - - if cfg.PinTimeout != 22*time.Minute { - t.Fatal("failed to override pin_timeout with env var") - } -} diff --git a/packages/networking/ipfs-cluster/ipfsconn/ipfshttp/ipfshttp.go b/packages/networking/ipfs-cluster/ipfsconn/ipfshttp/ipfshttp.go deleted file mode 100644 index ee397ec..0000000 --- a/packages/networking/ipfs-cluster/ipfsconn/ipfshttp/ipfshttp.go +++ /dev/null @@ -1,1239 +0,0 @@ -// Package ipfshttp implements an IPFS Cluster IPFSConnector component. It -// uses the IPFS HTTP API to communicate to IPFS. 
-package ipfshttp - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/observations" - "github.com/tv42/httpunix" - - cid "github.com/ipfs/go-cid" - files "github.com/ipfs/go-ipfs-files" - ipfspinner "github.com/ipfs/go-ipfs-pinner" - logging "github.com/ipfs/go-log/v2" - gopath "github.com/ipfs/go-path" - rpc "github.com/libp2p/go-libp2p-gorpc" - peer "github.com/libp2p/go-libp2p/core/peer" - "github.com/multiformats/go-multiaddr" - madns "github.com/multiformats/go-multiaddr-dns" - manet "github.com/multiformats/go-multiaddr/net" - "github.com/multiformats/go-multicodec" - multihash "github.com/multiformats/go-multihash" - - "go.opencensus.io/plugin/ochttp" - "go.opencensus.io/plugin/ochttp/propagation/tracecontext" - "go.opencensus.io/stats" - "go.opencensus.io/trace" -) - -// DNSTimeout is used when resolving DNS multiaddresses in this module -var DNSTimeout = 5 * time.Second - -var logger = logging.Logger("ipfshttp") - -// Connector implements the IPFSConnector interface -// and provides a component which is used to perform -// on-demand requests against the configured IPFS daemon -// (such as a pin request). -type Connector struct { - ctx context.Context - cancel func() - - config *Config - nodeAddr string - nodeAddrScheme string - - rpcClient *rpc.Client - rpcReady chan struct{} - - client *http.Client // client to ipfs daemon - - updateMetricCount uint64 - - ipfsPinCount int64 - - shutdownLock sync.Mutex - shutdown bool - wg sync.WaitGroup -} - -type ipfsError struct { - path string - code int - Message string -} - -func (ie ipfsError) Error() string { - return fmt.Sprintf( - "IPFS error (%s). Code: %d. 
Message: %s", - ie.path, - ie.code, - ie.Message, - ) -} - -type ipfsUnpinnedError ipfsError - -func (unpinned ipfsUnpinnedError) Is(target error) bool { - ierr, ok := target.(ipfsError) - if !ok { - return false - } - return strings.HasSuffix(ierr.Message, "not pinned") -} - -func (unpinned ipfsUnpinnedError) Error() string { - return ipfsError(unpinned).Error() -} - -type ipfsIDResp struct { - ID string - Addresses []string -} - -type ipfsResolveResp struct { - Path string -} - -type ipfsRepoGCResp struct { - Key cid.Cid - Error string -} - -type ipfsPinsResp struct { - Pins []string - Progress int -} - -type ipfsSwarmPeersResp struct { - Peers []ipfsPeer -} - -type ipfsBlockPutResp struct { - Key api.Cid - Size int -} - -type ipfsPeer struct { - Peer string -} - -// NewConnector creates the component and leaves it ready to be started -func NewConnector(cfg *Config) (*Connector, error) { - err := cfg.Validate() - if err != nil { - return nil, err - } - - nodeMAddr := cfg.NodeAddr - // dns multiaddresses need to be resolved first - if madns.Matches(nodeMAddr) { - ctx, cancel := context.WithTimeout(context.Background(), DNSTimeout) - defer cancel() - resolvedAddrs, err := madns.Resolve(ctx, cfg.NodeAddr) - if err != nil { - logger.Error(err) - return nil, err - } - nodeMAddr = resolvedAddrs[0] - } - - _, nodeAddr, err := manet.DialArgs(nodeMAddr) - if err != nil { - return nil, err - } - - var c *http.Client - var nodeAddrScheme string - if unixSocketPath, err := nodeMAddr.ValueForProtocol(multiaddr.P_UNIX); err == nil { - u := &httpunix.Transport{} - u.RegisterLocation("ipfsapiunix", unixSocketPath) - nodeAddr = "ipfsapiunix" - - t := &http.Transport{} - t.RegisterProtocol(httpunix.Scheme, u) - - c = &http.Client{ - Transport: t, - } - nodeAddrScheme = "http+unix" - } else { - c = &http.Client{} // timeouts are handled by context timeouts - nodeAddrScheme = "http" - } - - if cfg.Tracing { - c.Transport = &ochttp.Transport{ - Base: http.DefaultTransport, - Propagation: &tracecontext.HTTPFormat{}, - StartOptions: trace.StartOptions{SpanKind: trace.SpanKindClient}, - FormatSpanName: func(req *http.Request) string { return req.Host + ":" + req.URL.Path + ":" + req.Method }, - NewClientTrace: ochttp.NewSpanAnnotatingClientTrace, - } - } - - ctx, cancel := context.WithCancel(context.Background()) - - ipfs := &Connector{ - ctx: ctx, - config: cfg, - cancel: cancel, - nodeAddr: nodeAddr, - nodeAddrScheme: nodeAddrScheme, - rpcReady: make(chan struct{}, 1), - client: c, - } - - initializeMetrics(ctx) - - go ipfs.run() - return ipfs, nil -} - -func initializeMetrics(ctx context.Context) { - // initialize metrics - stats.Record(ctx, observations.PinsIpfsPins.M(0)) - stats.Record(ctx, observations.PinsPinAdd.M(0)) - stats.Record(ctx, observations.PinsPinAddError.M(0)) - stats.Record(ctx, observations.BlocksPut.M(0)) - stats.Record(ctx, observations.BlocksAddedSize.M(0)) - stats.Record(ctx, observations.BlocksAdded.M(0)) - stats.Record(ctx, observations.BlocksAddedError.M(0)) -} - -// connects all ipfs daemons when -// we receive the rpcReady signal. -func (ipfs *Connector) run() { - <-ipfs.rpcReady - - // Do not shutdown while launching threads - // -- prevents race conditions with ipfs.wg. - ipfs.shutdownLock.Lock() - defer ipfs.shutdownLock.Unlock() - - if ipfs.config.ConnectSwarmsDelay == 0 { - return - } - - // This runs ipfs swarm connect to the daemons of other cluster members - ipfs.wg.Add(1) - go func() { - defer ipfs.wg.Done() - - // It does not hurt to wait a little bit. i.e. 
think cluster - // peers which are started at the same time as the ipfs - // daemon... - tmr := time.NewTimer(ipfs.config.ConnectSwarmsDelay) - defer tmr.Stop() - select { - case <-tmr.C: - // do not hang this goroutine if this call hangs - // otherwise we hang during shutdown - go ipfs.ConnectSwarms(ipfs.ctx) - case <-ipfs.ctx.Done(): - return - } - }() -} - -// SetClient makes the component ready to perform RPC -// requests. -func (ipfs *Connector) SetClient(c *rpc.Client) { - ipfs.rpcClient = c - ipfs.rpcReady <- struct{}{} -} - -// Shutdown stops any listeners and stops the component from taking -// any requests. -func (ipfs *Connector) Shutdown(ctx context.Context) error { - _, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/Shutdown") - defer span.End() - - ipfs.shutdownLock.Lock() - defer ipfs.shutdownLock.Unlock() - - if ipfs.shutdown { - logger.Debug("already shutdown") - return nil - } - - logger.Info("stopping IPFS Connector") - - ipfs.cancel() - close(ipfs.rpcReady) - - ipfs.wg.Wait() - ipfs.shutdown = true - - return nil -} - -// ID performs an ID request against the configured -// IPFS daemon. It returns the fetched information. -// If the request fails, or the parsing fails, it -// returns an error. -func (ipfs *Connector) ID(ctx context.Context) (api.IPFSID, error) { - ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/ID") - defer span.End() - - ctx, cancel := context.WithTimeout(ctx, ipfs.config.IPFSRequestTimeout) - defer cancel() - - body, err := ipfs.postCtx(ctx, "id", "", nil) - if err != nil { - return api.IPFSID{}, err - } - - var res ipfsIDResp - err = json.Unmarshal(body, &res) - if err != nil { - return api.IPFSID{}, err - } - - pID, err := peer.Decode(res.ID) - if err != nil { - return api.IPFSID{}, err - } - - id := api.IPFSID{ - ID: pID, - } - - mAddrs := make([]api.Multiaddr, len(res.Addresses)) - for i, strAddr := range res.Addresses { - mAddr, err := api.NewMultiaddr(strAddr) - if err != nil { - id.Error = err.Error() - return id, err - } - mAddrs[i] = mAddr - } - id.Addresses = mAddrs - return id, nil -} - -func pinArgs(maxDepth api.PinDepth) string { - q := url.Values{} - switch { - case maxDepth < 0: - q.Set("recursive", "true") - case maxDepth == 0: - q.Set("recursive", "false") - default: - q.Set("recursive", "true") - q.Set("max-depth", strconv.Itoa(int(maxDepth))) - } - return q.Encode() -} - -// Pin performs a pin request against the configured IPFS -// daemon. -func (ipfs *Connector) Pin(ctx context.Context, pin api.Pin) error { - ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/Pin") - defer span.End() - - hash := pin.Cid - maxDepth := pin.MaxDepth - - pinStatus, err := ipfs.PinLsCid(ctx, pin) - if err != nil { - return err - } - - if pinStatus.IsPinned(maxDepth) { - logger.Debug("IPFS object is already pinned: ", hash) - return nil - } - - defer ipfs.updateInformerMetric(ctx) - - ctx, cancelRequest := context.WithCancel(ctx) - defer cancelRequest() - - // If the pin has origins, tell ipfs to connect to a maximum of 10. - bound := len(pin.Origins) - if bound > 10 { - bound = 10 - } - for _, orig := range pin.Origins[0:bound] { - // do it in the background, ignoring errors. 
- go func(o string) { - logger.Debugf("swarm-connect to origin before pinning: %s", o) - _, err := ipfs.postCtx( - ctx, - fmt.Sprintf("swarm/connect?arg=%s", o), - "", - nil, - ) - if err != nil { - logger.Debug(err) - return - } - logger.Debugf("swarm-connect success to origin: %s", o) - }(url.QueryEscape(orig.String())) - } - - // If we have a pin-update, and the old object - // is pinned recursively, then do pin/update. - // Otherwise do a normal pin. - if from := pin.PinUpdate; from.Defined() { - fromPin := api.PinWithOpts(from, pin.PinOptions) - pinStatus, _ := ipfs.PinLsCid(ctx, fromPin) - if pinStatus.IsPinned(-1) { // pinned recursively. - // As a side note, if PinUpdate == pin.Cid, we are - // somehow pinning an already pinned thing and we'd - // better use update for that - return ipfs.pinUpdate(ctx, from, pin.Cid) - } - } - - // Pin request and timeout if there is no progress - outPins := make(chan int) - go func() { - var lastProgress int - lastProgressTime := time.Now() - - ticker := time.NewTicker(ipfs.config.PinTimeout) - defer ticker.Stop() - for { - select { - case <-ticker.C: - if time.Since(lastProgressTime) > ipfs.config.PinTimeout { - // timeout request - cancelRequest() - return - } - case p := <-outPins: - // ipfs will send status messages every second - // or so but we need make sure there was - // progress by looking at number of nodes - // fetched. - if p > lastProgress { - lastProgress = p - lastProgressTime = time.Now() - } - case <-ctx.Done(): - return - } - } - }() - - stats.Record(ipfs.ctx, observations.PinsPinAdd.M(1)) - err = ipfs.pinProgress(ctx, hash, maxDepth, outPins) - if err != nil { - stats.Record(ipfs.ctx, observations.PinsPinAddError.M(1)) - return err - } - totalPins := atomic.AddInt64(&ipfs.ipfsPinCount, 1) - stats.Record(ipfs.ctx, observations.PinsIpfsPins.M(totalPins)) - - logger.Info("IPFS Pin request succeeded: ", hash) - return nil -} - -// pinProgress pins an item and sends fetched node's progress on a -// channel. Blocks until done or error. pinProgress will always close the out -// channel. pinProgress will not block on sending to the channel if it is full. -func (ipfs *Connector) pinProgress(ctx context.Context, hash api.Cid, maxDepth api.PinDepth, out chan<- int) error { - defer close(out) - - ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/pinsProgress") - defer span.End() - - pinArgs := pinArgs(maxDepth) - path := fmt.Sprintf("pin/add?arg=%s&%s&progress=true", hash, pinArgs) - res, err := ipfs.doPostCtx(ctx, ipfs.client, ipfs.apiURL(), path, "", nil) - if err != nil { - return err - } - defer res.Body.Close() - - _, err = checkResponse(path, res) - if err != nil { - return err - } - - dec := json.NewDecoder(res.Body) - for { - var pins ipfsPinsResp - if err := dec.Decode(&pins); err != nil { - // If we canceled the request we should tell the user - // (in case dec.Decode() exited cleanly with an EOF). - select { - case <-ctx.Done(): - return ctx.Err() - default: - if err == io.EOF { - return nil // clean exit. Pinned! 
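// For context on what the decoder above consumes: with progress=true,
// the daemon streams JSON objects roughly of this shape (CID made up):
//
//	{"Progress":12}
//	{"Progress":34}
//	{"Pins":["bafybeih..."]}
//
// Only the Progress field is inspected here; the watchdog goroutine in
// Pin cancels the request when Progress stops increasing for longer
// than PinTimeout, turning a stalled fetch into an error.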
- } - return err // error decoding - } - } - - select { - case out <- pins.Progress: - default: - } - } -} - -func (ipfs *Connector) pinUpdate(ctx context.Context, from, to api.Cid) error { - ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/pinUpdate") - defer span.End() - - path := fmt.Sprintf("pin/update?arg=%s&arg=%s&unpin=false", from, to) - _, err := ipfs.postCtx(ctx, path, "", nil) - if err != nil { - return err - } - totalPins := atomic.AddInt64(&ipfs.ipfsPinCount, 1) - stats.Record(ipfs.ctx, observations.PinsIpfsPins.M(totalPins)) - logger.Infof("IPFS Pin Update request succeeded. %s -> %s (unpin=false)", from, to) - return nil -} - -// Unpin performs an unpin request against the configured IPFS -// daemon. -func (ipfs *Connector) Unpin(ctx context.Context, hash api.Cid) error { - ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/Unpin") - defer span.End() - - if ipfs.config.UnpinDisable { - return errors.New("ipfs unpinning is disallowed by configuration on this peer") - } - - defer ipfs.updateInformerMetric(ctx) - - path := fmt.Sprintf("pin/rm?arg=%s", hash) - - ctx, cancel := context.WithTimeout(ctx, ipfs.config.UnpinTimeout) - defer cancel() - - // We will call unpin in any case, if the CID is not pinned, - // then we ignore the error (although this is a bit flaky). - _, err := ipfs.postCtx(ctx, path, "", nil) - if err != nil { - ipfsErr, ok := err.(ipfsError) - if !ok || ipfsErr.Message != ipfspinner.ErrNotPinned.Error() { - return err - } - logger.Debug("IPFS object is already unpinned: ", hash) - return nil - } - - totalPins := atomic.AddInt64(&ipfs.ipfsPinCount, -1) - stats.Record(ipfs.ctx, observations.PinsIpfsPins.M(totalPins)) - - logger.Info("IPFS Unpin request succeeded:", hash) - return nil -} - -// PinLs performs a "pin ls --type typeFilter" request against the configured -// IPFS daemon and sends the results on the given channel. Returns when done. -func (ipfs *Connector) PinLs(ctx context.Context, typeFilters []string, out chan<- api.IPFSPinInfo) error { - defer close(out) - bodies := make([]io.ReadCloser, len(typeFilters)) - - ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/PinLs") - defer span.End() - - ctx, cancel := context.WithTimeout(ctx, ipfs.config.IPFSRequestTimeout) - defer cancel() - - var err error - var totalPinCount int64 - defer func() { - if err != nil { - atomic.StoreInt64(&ipfs.ipfsPinCount, totalPinCount) - stats.Record(ipfs.ctx, observations.PinsIpfsPins.M(totalPinCount)) - } - }() - -nextFilter: - for i, typeFilter := range typeFilters { - // Post and read streaming response - path := "pin/ls?stream=true&type=" + typeFilter - bodies[i], err = ipfs.postCtxStreamResponse(ctx, path, "", nil) - if err != nil { - logger.Error("error querying pinset: %s", err) - return err - } - defer bodies[i].Close() - - dec := json.NewDecoder(bodies[i]) - - for { - select { - case <-ctx.Done(): - err = fmt.Errorf("aborting pin/ls operation: %w", ctx.Err()) - logger.Error(err) - return err - default: - } - - var ipfsPin api.IPFSPinInfo - err = dec.Decode(&ipfsPin) - if err == io.EOF { - break nextFilter - } - if err != nil { - err = fmt.Errorf("error decoding ipfs pin: %w", err) - return err - } - - select { - case <-ctx.Done(): - err = fmt.Errorf("aborting pin/ls operation: %w", ctx.Err()) - logger.Error(err) - return err - case out <- ipfsPin: - totalPinCount++ - } - } - } - - return nil -} - -// PinLsCid performs a "pin ls " request. It will use "type=recursive" or -// "type=direct" (or other) depending on the given pin's MaxDepth setting. 
-// It returns an api.IPFSPinStatus for that hash.
-func (ipfs *Connector) PinLsCid(ctx context.Context, pin api.Pin) (api.IPFSPinStatus, error) {
-    ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/PinLsCid")
-    defer span.End()
-
-    ctx, cancel := context.WithTimeout(ctx, ipfs.config.IPFSRequestTimeout)
-    defer cancel()
-
-    if !pin.Defined() {
-        return api.IPFSPinStatusBug, errors.New("calling PinLsCid without a defined CID")
-    }
-
-    pinType := pin.MaxDepth.ToPinMode().String()
-    lsPath := fmt.Sprintf("pin/ls?stream=true&arg=%s&type=%s", pin.Cid, pinType)
-    body, err := ipfs.postCtxStreamResponse(ctx, lsPath, "", nil)
-    if err != nil {
-        if errors.Is(ipfsUnpinnedError{}, err) {
-            return api.IPFSPinStatusUnpinned, nil
-        }
-        return api.IPFSPinStatusError, err
-    }
-    defer body.Close()
-
-    var res api.IPFSPinInfo
-    dec := json.NewDecoder(body)
-
-    err = dec.Decode(&res)
-    if err != nil {
-        logger.Error("error parsing pin/ls?arg=cid response")
-        return api.IPFSPinStatusError, err
-    }
-
-    return res.Type, nil
-}
-
-func (ipfs *Connector) doPostCtx(ctx context.Context, client *http.Client, apiURL, path string, contentType string, postBody io.Reader) (*http.Response, error) {
-    logger.Debugf("posting /%s", path)
-    urlstr := fmt.Sprintf("%s/%s", apiURL, path)
-
-    req, err := http.NewRequest("POST", urlstr, postBody)
-    if err != nil {
-        logger.Error("error creating POST request:", err)
-        return nil, err
-    }
-
-    req.Header.Set("Content-Type", contentType)
-    req = req.WithContext(ctx)
-    res, err := ipfs.client.Do(req)
-    if err != nil {
-        logger.Error("error posting to IPFS:", err)
-    }
-
-    return res, err
-}
-
-// checkResponse tries to parse an error message on non-StatusOK responses
-// from ipfs.
-func checkResponse(path string, res *http.Response) ([]byte, error) {
-    if res.StatusCode == http.StatusOK {
-        return nil, nil
-    }
-
-    body, err := io.ReadAll(res.Body)
-    res.Body.Close()
-    if err == nil {
-        var ipfsErr ipfsError
-        if err := json.Unmarshal(body, &ipfsErr); err == nil {
-            ipfsErr.code = res.StatusCode
-            ipfsErr.path = path
-            return body, ipfsErr
-        }
-    }
-
-    // No error response with useful message from ipfs
-    return nil, fmt.Errorf(
-        "IPFS request failed (is it running?) (%s). Code %d: %s",
-        path,
-        res.StatusCode,
-        string(body))
-}
-
-// postCtx makes a POST request against
-// the ipfs daemon, reads the full body of the response and
-// returns it after checking for errors.
-func (ipfs *Connector) postCtx(ctx context.Context, path string, contentType string, postBody io.Reader) ([]byte, error) {
-    rdr, err := ipfs.postCtxStreamResponse(ctx, path, contentType, postBody)
-    if err != nil {
-        return nil, err
-    }
-    defer rdr.Close()
-
-    body, err := io.ReadAll(rdr)
-    if err != nil {
-        logger.Errorf("error reading response body: %s", err)
-        return nil, err
-    }
-    return body, nil
-}
-
-// postCtxStreamResponse makes a POST request against the ipfs daemon, and
-// returns the body reader after checking the request for errors.
-func (ipfs *Connector) postCtxStreamResponse(ctx context.Context, path string, contentType string, postBody io.Reader) (io.ReadCloser, error) {
-    res, err := ipfs.doPostCtx(ctx, ipfs.client, ipfs.apiURL(), path, contentType, postBody)
-    if err != nil {
-        return nil, err
-    }
-
-    _, err = checkResponse(path, res)
-    if err != nil {
-        return nil, err
-    }
-    return res.Body, nil
-}
-
-// apiURL is a short-hand for building the url of the IPFS
-func (ipfs *Connector) apiURL() string { - return fmt.Sprintf("%s://%s/api/v0", ipfs.nodeAddrScheme, ipfs.nodeAddr) -} - -// ConnectSwarms requests the ipfs addresses of other peers and -// triggers ipfs swarm connect requests -func (ipfs *Connector) ConnectSwarms(ctx context.Context) error { - ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/ConnectSwarms") - defer span.End() - - ctx, cancel := context.WithTimeout(ctx, ipfs.config.IPFSRequestTimeout) - defer cancel() - - in := make(chan struct{}) - close(in) - out := make(chan api.ID) - go func() { - err := ipfs.rpcClient.Stream( - ctx, - "", - "Cluster", - "Peers", - in, - out, - ) - if err != nil { - logger.Error(err) - } - }() - - for id := range out { - ipfsID := id.IPFS - if id.Error != "" || ipfsID.Error != "" { - continue - } - for _, addr := range ipfsID.Addresses { - // This is a best effort attempt - // We ignore errors which happens - // when passing in a bunch of addresses - _, err := ipfs.postCtx( - ctx, - fmt.Sprintf("swarm/connect?arg=%s", url.QueryEscape(addr.String())), - "", - nil, - ) - if err != nil { - logger.Debug(err) - continue - } - logger.Debugf("ipfs successfully connected to %s", addr) - } - } - return nil -} - -// ConfigKey fetches the IPFS daemon configuration and retrieves the value for -// a given configuration key. For example, "Datastore/StorageMax" will return -// the value for StorageMax in the Datastore configuration object. -func (ipfs *Connector) ConfigKey(keypath string) (interface{}, error) { - ctx, cancel := context.WithTimeout(ipfs.ctx, ipfs.config.IPFSRequestTimeout) - defer cancel() - res, err := ipfs.postCtx(ctx, "config/show", "", nil) - if err != nil { - logger.Error(err) - return nil, err - } - - var cfg map[string]interface{} - err = json.Unmarshal(res, &cfg) - if err != nil { - logger.Error(err) - return nil, err - } - - path := strings.SplitN(keypath, "/", 2) - if len(path) == 0 { - return nil, errors.New("cannot lookup without a path") - } - - return getConfigValue(path, cfg) -} - -func getConfigValue(path []string, cfg map[string]interface{}) (interface{}, error) { - value, ok := cfg[path[0]] - if !ok { - return nil, errors.New("key not found in configuration") - } - - if len(path) == 1 { - return value, nil - } - - switch v := value.(type) { - case map[string]interface{}: - return getConfigValue(path[1:], v) - default: - return nil, errors.New("invalid path") - } -} - -// RepoStat returns the DiskUsage and StorageMax repo/stat values from the -// ipfs daemon, in bytes, wrapped as an IPFSRepoStat object. -func (ipfs *Connector) RepoStat(ctx context.Context) (api.IPFSRepoStat, error) { - ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/RepoStat") - defer span.End() - - ctx, cancel := context.WithTimeout(ctx, ipfs.config.IPFSRequestTimeout) - defer cancel() - res, err := ipfs.postCtx(ctx, "repo/stat?size-only=true", "", nil) - if err != nil { - logger.Error(err) - return api.IPFSRepoStat{}, err - } - - var stats api.IPFSRepoStat - err = json.Unmarshal(res, &stats) - if err != nil { - logger.Error(err) - return api.IPFSRepoStat{}, err - } - return stats, nil -} - -// RepoGC performs a garbage collection sweep on the cluster peer's IPFS repo. 
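// For context, with stream-errors=true the repo/gc endpoint streams one
// JSON object per removed block or per error, roughly (CIDs made up):
//
//	{"Key":{"/":"bafybeih..."}}
//	{"Key":{"/":"bafybeig..."}}
//	{"Error":"could not remove <cid>: ..."}
//
// Each decoded object becomes one api.IPFSRepoGC entry below, so partial
// results survive even when the sweep is aborted mid-way.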
-func (ipfs *Connector) RepoGC(ctx context.Context) (api.RepoGC, error) { - ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/RepoGC") - defer span.End() - - ctx, cancel := context.WithTimeout(ctx, ipfs.config.RepoGCTimeout) - defer cancel() - - res, err := ipfs.doPostCtx(ctx, ipfs.client, ipfs.apiURL(), "repo/gc?stream-errors=true", "", nil) - if err != nil { - logger.Error(err) - return api.RepoGC{}, err - } - defer res.Body.Close() - - dec := json.NewDecoder(res.Body) - repoGC := api.RepoGC{ - Keys: []api.IPFSRepoGC{}, - } - for { - resp := ipfsRepoGCResp{} - - if err := dec.Decode(&resp); err != nil { - // If we canceled the request we should tell the user - // (in case dec.Decode() exited cleanly with an EOF). - select { - case <-ctx.Done(): - return repoGC, ctx.Err() - default: - if err == io.EOF { - return repoGC, nil // clean exit - } - logger.Error(err) - return repoGC, err // error decoding - } - } - - repoGC.Keys = append(repoGC.Keys, api.IPFSRepoGC{Key: api.NewCid(resp.Key), Error: resp.Error}) - } -} - -// Resolve accepts ipfs or ipns path and resolves it into a cid -func (ipfs *Connector) Resolve(ctx context.Context, path string) (api.Cid, error) { - ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/Resolve") - defer span.End() - - validPath, err := gopath.ParsePath(path) - if err != nil { - logger.Error("could not parse path: " + err.Error()) - return api.CidUndef, err - } - if !strings.HasPrefix(path, "/ipns") && validPath.IsJustAKey() { - ci, _, err := gopath.SplitAbsPath(validPath) - return api.NewCid(ci), err - } - - ctx, cancel := context.WithTimeout(ctx, ipfs.config.IPFSRequestTimeout) - defer cancel() - res, err := ipfs.postCtx(ctx, "resolve?arg="+url.QueryEscape(path), "", nil) - if err != nil { - logger.Error(err) - return api.CidUndef, err - } - - var resp ipfsResolveResp - err = json.Unmarshal(res, &resp) - if err != nil { - logger.Error("could not unmarshal response: " + err.Error()) - return api.CidUndef, err - } - - ci, _, err := gopath.SplitAbsPath(gopath.FromString(resp.Path)) - return api.NewCid(ci), err -} - -// SwarmPeers returns the peers currently connected to this ipfs daemon. -func (ipfs *Connector) SwarmPeers(ctx context.Context) ([]peer.ID, error) { - ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/SwarmPeers") - defer span.End() - - ctx, cancel := context.WithTimeout(ctx, ipfs.config.IPFSRequestTimeout) - defer cancel() - - res, err := ipfs.postCtx(ctx, "swarm/peers", "", nil) - if err != nil { - logger.Error(err) - return nil, err - } - var peersRaw ipfsSwarmPeersResp - err = json.Unmarshal(res, &peersRaw) - if err != nil { - logger.Error(err) - return nil, err - } - - swarm := make([]peer.ID, len(peersRaw.Peers)) - for i, p := range peersRaw.Peers { - pID, err := peer.Decode(p.Peer) - if err != nil { - logger.Error(err) - return swarm, err - } - swarm[i] = pID - } - return swarm, nil -} - -// chanDirectory implements the files.Directory interface -type chanDirectory struct { - iterator files.DirIterator -} - -// Close is a no-op and it is not used. -func (cd *chanDirectory) Close() error { - return nil -} - -// not implemented, I think not needed for multipart. -func (cd *chanDirectory) Size() (int64, error) { - return 0, nil -} - -func (cd *chanDirectory) Entries() files.DirIterator { - return cd.iterator -} - -// chanIterator implements the files.DirIterator interface. 
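// A minimal sketch of how the multipart writer ends up driving this
// iterator (not the actual go-ipfs-files internals, just the contract):
//
//	for it.Next() {
//		name := it.Name() // CID string of the current block
//		node := it.Node() // block bytes wrapped as a files.Node
//		// ...emit one multipart part named after the CID...
//	}
//	err := it.Err() // context cancellation, if any
//
// Next blocks on the channel, so the multipart body is produced at
// exactly the pace at which blocks arrive.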
-type chanIterator struct {
-    ctx    context.Context
-    blocks <-chan api.NodeWithMeta
-
-    current api.NodeWithMeta
-    peeked  api.NodeWithMeta
-    done    bool
-    err     error
-
-    seenMu sync.Mutex
-    seen   *multihash.Set
-}
-
-func (ci *chanIterator) Name() string {
-    if !ci.current.Cid.Defined() {
-        return ""
-    }
-    return ci.current.Cid.String()
-}
-
-// Node returns the current block wrapped as a BytesFile.
-func (ci *chanIterator) Node() files.Node {
-    if !ci.current.Cid.Defined() {
-        return nil
-    }
-    logger.Debugf("it.node(): %s", ci.current.Cid)
-    ci.seenMu.Lock()
-    ci.seen.Add(ci.current.Cid.Hash())
-    ci.seenMu.Unlock()
-
-    stats.Record(ci.ctx, observations.BlocksAdded.M(1))
-    stats.Record(ci.ctx, observations.BlocksAddedSize.M(int64(len(ci.current.Data))))
-
-    return files.NewBytesFile(ci.current.Data)
-}
-
-func (ci *chanIterator) Seen(c api.Cid) bool {
-    ci.seenMu.Lock()
-    has := ci.seen.Has(c.Cid.Hash())
-    ci.seen.Remove(c.Cid.Hash())
-    ci.seenMu.Unlock()
-    return has
-}
-
-func (ci *chanIterator) Done() bool {
-    return ci.done
-}
-
-// Peek reads one block from the channel but saves it so that Next also
-// returns it.
-func (ci *chanIterator) Peek() (api.NodeWithMeta, bool) {
-    if ci.done {
-        return api.NodeWithMeta{}, false
-    }
-
-    select {
-    case <-ci.ctx.Done():
-        return api.NodeWithMeta{}, false
-    case next, ok := <-ci.blocks:
-        if !ok {
-            return api.NodeWithMeta{}, false
-        }
-        ci.peeked = next
-        return next, true
-    }
-}
-
-func (ci *chanIterator) Next() bool {
-    if ci.done {
-        return false
-    }
-    if ci.peeked.Cid.Defined() {
-        ci.current = ci.peeked
-        ci.peeked = api.NodeWithMeta{}
-        return true
-    }
-    select {
-    case <-ci.ctx.Done():
-        ci.done = true
-        ci.err = ci.ctx.Err()
-        return false
-    case next, ok := <-ci.blocks:
-        if !ok {
-            ci.done = true
-            return false
-        }
-        logger.Debugf("it.Next() %s", next.Cid)
-        ci.current = next
-        return true
-    }
-}
-
-func (ci *chanIterator) Err() error {
-    return ci.err
-}
-
-func blockPutQuery(prefix cid.Prefix) (url.Values, error) {
-    q := make(url.Values, 3)
-
-    codec := multicodec.Code(prefix.Codec).String()
-    if codec == "" {
-        return q, fmt.Errorf("cannot find name for the blocks' CID codec: %x", prefix.Codec)
-    }
-
-    mhType, ok := multihash.Codes[prefix.MhType]
-    if !ok {
-        return q, fmt.Errorf("cannot find name for the blocks' Multihash type: %x", prefix.MhType)
-    }
-
-    // From go-ipfs 0.13.0 format is deprecated and we use cid-codec
-    q.Set("cid-codec", codec)
-    q.Set("mhtype", mhType)
-    q.Set("mhlen", strconv.Itoa(prefix.MhLength))
-    q.Set("pin", "false")
-    q.Set("allow-big-block", "true")
-    return q, nil
-}
-
-// BlockStream performs a multipart request to block/put with the blocks
-// received on the channel.
-func (ipfs *Connector) BlockStream(ctx context.Context, blocks <-chan api.NodeWithMeta) error {
-    ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/BlockStream")
-    defer span.End()
-
-    logger.Debug("streaming blocks to IPFS")
-    defer ipfs.updateInformerMetric(ctx)
-
-    it := &chanIterator{
-        ctx:    ctx,
-        blocks: blocks,
-        seen:   multihash.NewSet(),
-    }
-    dir := &chanDirectory{
-        iterator: it,
-    }
-
-    // We need to peek into the first block to know which Cid prefix we
-    // are writing blocks with, so that ipfs calculates the expected
-    // multihash (we select the function used). This means that all blocks
-    // in a stream should use the same prefix.
-    peek, ok := it.Peek()
-    if !ok {
-        return errors.New("BlockStream: no blocks to peek in blocks channel")
-    }
-
-    q, err := blockPutQuery(peek.Cid.Prefix())
-    if err != nil {
-        return err
-    }
-    url := "block/put?"
+ q.Encode() - - // Now we stream the blocks to ipfs. In case of error, we return - // directly, but leave a goroutine draining the channel until it is - // closed, which should be soon after returning. - stats.Record(ctx, observations.BlocksPut.M(1)) - multiFileR := files.NewMultiFileReader(dir, true) - contentType := "multipart/form-data; boundary=" + multiFileR.Boundary() - body, err := ipfs.postCtxStreamResponse(ctx, url, contentType, multiFileR) - if err != nil { - return err - } - defer body.Close() - - dec := json.NewDecoder(body) - for { - var res ipfsBlockPutResp - err = dec.Decode(&res) - if err == io.EOF { - return nil - } - if err != nil { - logger.Error(err) - break - } - logger.Debugf("response block: %s", res.Key) - if !it.Seen(res.Key) { - logger.Warningf("blockPut response CID (%s) does not match the multihash of any blocks sent", res.Key) - } - } - - // keep draining blocks channel until closed. - go func() { - for range blocks { - } - }() - - if err != nil { - stats.Record(ipfs.ctx, observations.BlocksAddedError.M(1)) - } - return err -} - -// BlockGet retrieves an ipfs block with the given cid -func (ipfs *Connector) BlockGet(ctx context.Context, c api.Cid) ([]byte, error) { - ctx, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/BlockGet") - defer span.End() - - ctx, cancel := context.WithTimeout(ctx, ipfs.config.IPFSRequestTimeout) - defer cancel() - url := "block/get?arg=" + c.String() - return ipfs.postCtx(ctx, url, "", nil) -} - -// // FetchRefs asks IPFS to download blocks recursively to the given depth. -// // It discards the response, but waits until it completes. -// func (ipfs *Connector) FetchRefs(ctx context.Context, c api.Cid, maxDepth int) error { -// ctx, cancel := context.WithTimeout(ipfs.ctx, ipfs.config.PinTimeout) -// defer cancel() - -// q := url.Values{} -// q.Set("recursive", "true") -// q.Set("unique", "false") // same memory on IPFS side -// q.Set("max-depth", fmt.Sprintf("%d", maxDepth)) -// q.Set("arg", c.String()) - -// url := fmt.Sprintf("refs?%s", q.Encode()) -// err := ipfs.postDiscardBodyCtx(ctx, url) -// if err != nil { -// return err -// } -// logger.Debugf("refs for %s successfully fetched", c) -// return nil -// } - -// Returns true every updateMetricsMod-th time that we -// call this function. -func (ipfs *Connector) shouldUpdateMetric() bool { - if ipfs.config.InformerTriggerInterval <= 0 { - return false - } - curCount := atomic.AddUint64(&ipfs.updateMetricCount, 1) - if curCount%uint64(ipfs.config.InformerTriggerInterval) == 0 { - atomic.StoreUint64(&ipfs.updateMetricCount, 0) - return true - } - return false -} - -// Trigger a broadcast of the local informer metrics. 
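// Worked example: with InformerTriggerInterval = 10, shouldUpdateMetric
// returns true on every 10th call, so roughly one in ten pin, unpin or
// block-put operations triggers a broadcast; with an interval of 0 (or
// less) the trigger is disabled entirely.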
-func (ipfs *Connector) updateInformerMetric(ctx context.Context) error { - _, span := trace.StartSpan(ctx, "ipfsconn/ipfshttp/updateInformerMetric") - defer span.End() - ctx = trace.NewContext(ipfs.ctx, span) - - if !ipfs.shouldUpdateMetric() { - return nil - } - - err := ipfs.rpcClient.GoContext( - ctx, - "", - "Cluster", - "SendInformersMetrics", - struct{}{}, - &struct{}{}, - nil, - ) - if err != nil { - logger.Error(err) - } - return err -} diff --git a/packages/networking/ipfs-cluster/ipfsconn/ipfshttp/ipfshttp_test.go b/packages/networking/ipfs-cluster/ipfsconn/ipfshttp/ipfshttp_test.go deleted file mode 100644 index f82b2d2..0000000 --- a/packages/networking/ipfs-cluster/ipfsconn/ipfshttp/ipfshttp_test.go +++ /dev/null @@ -1,507 +0,0 @@ -package ipfshttp - -import ( - "bytes" - "context" - "fmt" - "testing" - "time" - - logging "github.com/ipfs/go-log/v2" - ma "github.com/multiformats/go-multiaddr" - - merkledag "github.com/ipfs/go-merkledag" - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/test" -) - -func init() { - _ = logging.Logger - logging.SetLogLevel("*", "DEBUG") -} - -func testIPFSConnector(t *testing.T) (*Connector, *test.IpfsMock) { - mock := test.NewIpfsMock(t) - nodeMAddr := ma.StringCast(fmt.Sprintf("/ip4/%s/tcp/%d", mock.Addr, mock.Port)) - - cfg := &Config{} - cfg.Default() - cfg.NodeAddr = nodeMAddr - cfg.ConnectSwarmsDelay = 0 - cfg.InformerTriggerInterval = 10 - - ipfs, err := NewConnector(cfg) - if err != nil { - t.Fatal("creating an IPFSConnector should work: ", err) - } - - ipfs.SetClient(test.NewMockRPCClient(t)) - return ipfs, mock -} - -func TestNewConnector(t *testing.T) { - ctx := context.Background() - ipfs, mock := testIPFSConnector(t) - defer mock.Close() - defer ipfs.Shutdown(ctx) -} - -func TestIPFSID(t *testing.T) { - ctx := context.Background() - ipfs, mock := testIPFSConnector(t) - defer ipfs.Shutdown(ctx) - id, err := ipfs.ID(ctx) - if err != nil { - t.Fatal(err) - } - if id.ID != test.PeerID1 { - t.Error("expected testPeerID") - } - if len(id.Addresses) != 2 { - t.Error("expected 2 address") - } - if id.Error != "" { - t.Error("expected no error") - } - mock.Close() - _, err = ipfs.ID(ctx) - if err == nil { - t.Error("expected an error") - } -} - -func TestPin(t *testing.T) { - ctx := context.Background() - ipfs, mock := testIPFSConnector(t) - defer mock.Close() - defer ipfs.Shutdown(ctx) - - pin := api.PinCid(test.Cid1) - pin.Origins = []api.Multiaddr{ - api.NewMultiaddrWithValue(ma.StringCast("/ip4/1.2.3.4/tcp/1234/p2p/12D3KooWKewdAMAU3WjYHm8qkAJc5eW6KHbHWNigWraXXtE1UCng")), - api.NewMultiaddrWithValue(ma.StringCast("/ip4/2.3.3.4/tcp/1234/p2p/12D3KooWF6BgwX966ge5AVFs9Gd2wVTBmypxZVvaBR12eYnUmXkR")), - } - err := ipfs.Pin(ctx, pin) - if err != nil { - t.Error("expected success pinning cid:", err) - } - pinSt, err := ipfs.PinLsCid(ctx, pin) - if err != nil { - t.Fatal("expected success doing ls:", err) - } - if !pinSt.IsPinned(-1) { - t.Error("cid should have been pinned") - } - - pin2 := api.PinCid(test.ErrorCid) - err = ipfs.Pin(ctx, pin2) - if err == nil { - t.Error("expected error pinning cid") - } - - ipfs.config.PinTimeout = 5 * time.Second - c4 := test.SlowCid1 - err = ipfs.Pin(ctx, api.PinCid(c4)) - if err == nil { - t.Error("expected error pinning cid") - } -} - -func TestPinUpdate(t *testing.T) { - ctx := context.Background() - ipfs, mock := testIPFSConnector(t) - defer mock.Close() - defer ipfs.Shutdown(ctx) - - pin := api.PinCid(test.Cid1) - pin.PinUpdate = test.Cid1 - err := ipfs.Pin(ctx, pin) - 
if err != nil { - t.Error("pin update should have worked even if not pinned") - } - - err = ipfs.Pin(ctx, pin) - if err != nil { - t.Fatal(err) - } - - // This should trigger the pin/update path - pin.Cid = test.Cid2 - err = ipfs.Pin(ctx, pin) - if err != nil { - t.Fatal(err) - } - - if mock.GetCount("pin/update") != 1 { - t.Error("pin/update should have been called once") - } - - if mock.GetCount("pin/add") != 1 { - t.Error("pin/add should have been called once") - } -} - -func TestIPFSUnpin(t *testing.T) { - ctx := context.Background() - ipfs, mock := testIPFSConnector(t) - defer mock.Close() - defer ipfs.Shutdown(ctx) - c := test.Cid1 - err := ipfs.Unpin(ctx, c) - if err != nil { - t.Error("expected success unpinning non-pinned cid") - } - ipfs.Pin(ctx, api.PinCid(c)) - err = ipfs.Unpin(ctx, c) - if err != nil { - t.Error("expected success unpinning pinned cid") - } -} - -func TestIPFSUnpinDisabled(t *testing.T) { - ctx := context.Background() - ipfs, mock := testIPFSConnector(t) - defer mock.Close() - defer ipfs.Shutdown(ctx) - ipfs.config.UnpinDisable = true - err := ipfs.Pin(ctx, api.PinCid(test.Cid1)) - if err != nil { - t.Fatal(err) - } - - err = ipfs.Unpin(ctx, test.Cid1) - if err == nil { - t.Fatal("pin should be disabled") - } -} - -func TestIPFSPinLsCid(t *testing.T) { - ctx := context.Background() - ipfs, mock := testIPFSConnector(t) - defer mock.Close() - defer ipfs.Shutdown(ctx) - c := test.Cid1 - c2 := test.Cid2 - - pin := api.PinCid(c) - ipfs.Pin(ctx, pin) - ips, err := ipfs.PinLsCid(ctx, pin) - if err != nil { - t.Error(err) - } - - if !ips.IsPinned(-1) { - t.Error("c should appear pinned") - } - - ips, err = ipfs.PinLsCid(ctx, api.PinCid(c2)) - if err != nil || ips != api.IPFSPinStatusUnpinned { - t.Error("c2 should appear unpinned") - } -} - -func TestIPFSPinLsCid_DifferentEncoding(t *testing.T) { - ctx := context.Background() - ipfs, mock := testIPFSConnector(t) - defer mock.Close() - defer ipfs.Shutdown(ctx) - c := test.Cid4 // ipfs mock treats this specially - - pin := api.PinCid(c) - ipfs.Pin(ctx, pin) - ips, err := ipfs.PinLsCid(ctx, pin) - if err != nil { - t.Error(err) - } - - if !ips.IsPinned(-1) { - t.Error("c should appear pinned") - } -} - -func collectPins(t *testing.T, pch <-chan api.IPFSPinInfo) []api.IPFSPinInfo { - t.Helper() - - var pins []api.IPFSPinInfo - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - - for { - select { - case <-ctx.Done(): - t.Fatal(ctx.Err()) - return nil - case p, ok := <-pch: - if !ok { - return pins - } - pins = append(pins, p) - } - } -} - -func TestIPFSPinLs(t *testing.T) { - ctx := context.Background() - ipfs, mock := testIPFSConnector(t) - defer mock.Close() - defer ipfs.Shutdown(ctx) - c := test.Cid1 - c2 := test.Cid2 - - ipfs.Pin(ctx, api.PinCid(c)) - ipfs.Pin(ctx, api.PinCid(c2)) - pinCh := make(chan api.IPFSPinInfo, 10) - go func() { - err := ipfs.PinLs(ctx, []string{""}, pinCh) - if err != nil { - t.Error("should not error") - } - }() - - pins := collectPins(t, pinCh) - - if len(pins) != 2 { - t.Fatal("the pin list does not contain the expected number of keys") - } - - if !pins[0].Type.IsPinned(-1) || !pins[1].Type.IsPinned(-1) { - t.Error("c1 and c2 should appear pinned") - } -} - -func TestIPFSShutdown(t *testing.T) { - ctx := context.Background() - ipfs, mock := testIPFSConnector(t) - defer mock.Close() - if err := ipfs.Shutdown(ctx); err != nil { - t.Error("expected a clean shutdown") - } - if err := ipfs.Shutdown(ctx); err != nil { - t.Error("expected a second clean 
shutdown") - } -} - -func TestConnectSwarms(t *testing.T) { - // In order to interactively test uncomment the following. - // Otherwise there is no good way to test this with the - // ipfs mock - // logging.SetDebugLogging() - - ctx := context.Background() - ipfs, mock := testIPFSConnector(t) - defer mock.Close() - defer ipfs.Shutdown(ctx) - time.Sleep(time.Second) -} - -func TestSwarmPeers(t *testing.T) { - ctx := context.Background() - ipfs, mock := testIPFSConnector(t) - defer mock.Close() - defer ipfs.Shutdown(ctx) - - swarmPeers, err := ipfs.SwarmPeers(ctx) - if err != nil { - t.Fatal(err) - } - if len(swarmPeers) != 2 { - t.Fatal("expected 2 swarm peers") - } - if swarmPeers[0] != test.PeerID4 { - t.Error("unexpected swarm peer") - } - if swarmPeers[1] != test.PeerID5 { - t.Error("unexpected swarm peer") - } -} - -func TestBlockStream(t *testing.T) { - ctx := context.Background() - ipfs, mock := testIPFSConnector(t) - defer mock.Close() - defer ipfs.Shutdown(ctx) - - blocks := make(chan api.NodeWithMeta, 10) - blocks <- api.NodeWithMeta{ - Data: []byte(test.Cid4Data), - Cid: test.Cid4, - } - - // Because this has a different prefix, - // it will produce a warning. - blocks <- api.NodeWithMeta{ - Data: []byte(test.Cid5Data), - Cid: test.Cid5, - } - close(blocks) - - err := ipfs.BlockStream(ctx, blocks) - if err != nil { - t.Error(err) - } - - // Try only adding v0 cid now - blocks2 := make(chan api.NodeWithMeta, 1) - blocks2 <- api.NodeWithMeta{ - Data: []byte(test.Cid5Data), - Cid: test.Cid5, - } - close(blocks2) - - err = ipfs.BlockStream(ctx, blocks2) - if err != nil { - t.Error(err) - } -} - -func TestBlockGet(t *testing.T) { - ctx := context.Background() - ipfs, mock := testIPFSConnector(t) - defer mock.Close() - defer ipfs.Shutdown(ctx) - - shardCid := test.ShardCid - // Fail when getting before putting - _, err := ipfs.BlockGet(ctx, shardCid) - if err == nil { - t.Fatal("expected to fail getting unput block") - } - - blocks := make(chan api.NodeWithMeta, 1) - blocks <- api.NodeWithMeta{ - Data: test.ShardData, - Cid: test.ShardCid, - } - close(blocks) - err = ipfs.BlockStream(ctx, blocks) - if err != nil { - t.Fatal(err) - } - - data, err := ipfs.BlockGet(ctx, shardCid) - if err != nil { - t.Error(err) - } - if !bytes.Equal(data, test.ShardData) { - t.Fatal("unexpected data returned") - } -} - -func TestRepoStat(t *testing.T) { - ctx := context.Background() - ipfs, mock := testIPFSConnector(t) - defer mock.Close() - defer ipfs.Shutdown(ctx) - - s, err := ipfs.RepoStat(ctx) - if err != nil { - t.Fatal(err) - } - // See the ipfs mock implementation - if s.RepoSize != 0 { - t.Error("expected 0 bytes of size") - } - - c := test.Cid1 - err = ipfs.Pin(ctx, api.PinCid(c)) - if err != nil { - t.Error("expected success pinning cid") - } - - s, err = ipfs.RepoStat(ctx) - if err != nil { - t.Fatal(err) - } - if s.RepoSize != 1000 { - t.Error("expected 1000 bytes of size") - } -} - -func TestResolve(t *testing.T) { - ctx := context.Background() - ipfs, mock := testIPFSConnector(t) - defer mock.Close() - defer ipfs.Shutdown(ctx) - - s, err := ipfs.Resolve(ctx, test.PathIPFS2) - if err != nil { - t.Error(err) - } - if !s.Equals(test.CidResolved) { - t.Errorf("expected different cid, expected: %s, found: %s\n", test.CidResolved, s.String()) - } -} - -func TestConfigKey(t *testing.T) { - ctx := context.Background() - ipfs, mock := testIPFSConnector(t) - defer mock.Close() - defer ipfs.Shutdown(ctx) - - v, err := ipfs.ConfigKey("Datastore/StorageMax") - if err != nil { - t.Fatal(err) - } - sto, 
ok := v.(string)
-    if !ok {
-        t.Fatal("error converting to string")
-    }
-    if sto != "10G" {
-        t.Error("StorageMax should be 10G")
-    }
-
-    v, err = ipfs.ConfigKey("Datastore")
-    if err != nil {
-        t.Fatal(err)
-    }
-    _, ok = v.(map[string]interface{})
-    if !ok {
-        t.Error("should have returned the whole Datastore config object")
-    }
-
-    _, err = ipfs.ConfigKey("")
-    if err == nil {
-        t.Error("should not work with an empty path")
-    }
-
-    _, err = ipfs.ConfigKey("Datastore/abc")
-    if err == nil {
-        t.Error("should not work with a bad path")
-    }
-}
-
-func TestRepoGC(t *testing.T) {
-    ctx := context.Background()
-    ipfs, mock := testIPFSConnector(t)
-    defer mock.Close()
-    defer ipfs.Shutdown(ctx)
-
-    res, err := ipfs.RepoGC(ctx)
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    if res.Error != "" {
-        t.Errorf("expected error to be empty: %s", res.Error)
-    }
-
-    if res.Keys == nil {
-        t.Fatal("expected a non-nil array of IPFSRepoGC")
-    }
-
-    if len(res.Keys) < 5 {
-        t.Fatal("expected at least five keys")
-    }
-
-    if !res.Keys[0].Key.Equals(test.Cid1) {
-        t.Errorf("expected different cid, expected: %s, found: %s\n", test.Cid1, res.Keys[0].Key)
-    }
-
-    if !res.Keys[3].Key.Equals(test.Cid4) {
-        t.Errorf("expected different cid, expected: %s, found: %s\n", test.Cid4, res.Keys[3].Key)
-    }
-
-    if res.Keys[4].Error != merkledag.ErrLinkNotFound.Error() {
-        t.Errorf("expected different error, expected: %s, found: %s\n", merkledag.ErrLinkNotFound, res.Keys[4].Error)
-    }
-}
diff --git a/packages/networking/ipfs-cluster/logging.go b/packages/networking/ipfs-cluster/logging.go
deleted file mode 100644
index 25a30a9..0000000
--- a/packages/networking/ipfs-cluster/logging.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package ipfscluster
-
-import (
-    logging "github.com/ipfs/go-log/v2"
-)
-
-var logger = logging.Logger("cluster")
-
-// LoggingFacilities provides a list of logging identifiers
-// used by cluster and their default logging level.
-var LoggingFacilities = map[string]string{
-    "cluster":      "INFO",
-    "restapi":      "INFO",
-    "restapilog":   "INFO",
-    "pinsvcapi":    "INFO",
-    "pinsvcapilog": "INFO",
-    "ipfsproxy":    "INFO",
-    "ipfsproxylog": "INFO",
-    "ipfshttp":     "INFO",
-    "monitor":      "INFO",
-    "dsstate":      "INFO",
-    "raft":         "INFO",
-    "crdt":         "INFO",
-    "pintracker":   "INFO",
-    "diskinfo":     "INFO",
-    "tags":         "INFO",
-    "apitypes":     "INFO",
-    "config":       "INFO",
-    "shardingdags": "INFO",
-    "singledags":   "INFO",
-    "adder":        "INFO",
-    "optracker":    "INFO",
-    "pstoremgr":    "INFO",
-    "allocator":    "INFO",
-}
-
-// LoggingFacilitiesExtra provides logging identifiers used in
-// ipfs-cluster dependencies which may be useful to display,
-// along with their default values.
-var LoggingFacilitiesExtra = map[string]string{
-    "p2p-gorpc":   "ERROR",
-    "swarm2":      "ERROR",
-    "libp2p-raft": "FATAL",
-    "raftlib":     "ERROR",
-    "badger":      "INFO",
-}
-
-// SetFacilityLogLevel sets the log level for a given module
-func SetFacilityLogLevel(f, l string) {
-    /*
-        case "debug", "DEBUG":
-            *l = DebugLevel
-        case "info", "INFO", "": // make the zero value useful
-            *l = InfoLevel
-        case "warn", "WARN":
-            *l = WarnLevel
-        case "error", "ERROR":
-            *l = ErrorLevel
-        case "dpanic", "DPANIC":
-            *l = DPanicLevel
-        case "panic", "PANIC":
-            *l = PanicLevel
-        case "fatal", "FATAL":
-            *l = FatalLevel
-    */
-    logging.SetLogLevel(f, l)
-}
diff --git a/packages/networking/ipfs-cluster/monitor/metrics/checker.go b/packages/networking/ipfs-cluster/monitor/metrics/checker.go
deleted file mode 100644
index dc3f2f9..0000000
--- a/packages/networking/ipfs-cluster/monitor/metrics/checker.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package metrics
-
-import (
-    "context"
-    "errors"
-    "sync"
-    "time"
-
-    "github.com/ipfs-cluster/ipfs-cluster/api"
-
-    peer "github.com/libp2p/go-libp2p/core/peer"
-)
-
-// AlertChannelCap specifies how much buffer the alerts channel has.
-var AlertChannelCap = 256
-
-// MaxAlertThreshold specifies how many alerts are sent for a peer and
-// metric before further alerts are suppressed (until reset).
-var MaxAlertThreshold = 1
-
-// ErrAlertChannelFull is returned if the alert channel is full.
-var ErrAlertChannelFull = errors.New("alert channel is full")
-
-// Checker provides utilities to find expired metrics
-// for a given peerset and send alerts for them.
-type Checker struct {
-    ctx     context.Context
-    alertCh chan api.Alert
-    metrics *Store
-
-    failedPeersMu sync.Mutex
-    failedPeers   map[peer.ID]map[string]int
-}
-
-// NewChecker creates a Checker using the given metrics Store. A metric
-// is considered failed once it has expired, that is, once its TTL has
-// passed without a fresh value arriving.
-func NewChecker(ctx context.Context, metrics *Store) *Checker {
-    return &Checker{
-        ctx:         ctx,
-        alertCh:     make(chan api.Alert, AlertChannelCap),
-        metrics:     metrics,
-        failedPeers: make(map[peer.ID]map[string]int),
-    }
-}
-
-// CheckPeers will trigger alerts based on the latest metrics from the given peerset
-// when they have expired and no alert has been sent before.
-func (mc *Checker) CheckPeers(peers []peer.ID) error {
-    for _, name := range mc.metrics.MetricNames() {
-        for _, peer := range peers {
-            for _, metric := range mc.metrics.PeerMetricAll(name, peer) {
-                if mc.FailedMetric(metric.Name, peer) {
-                    err := mc.alert(peer, metric.Name)
-                    if err != nil {
-                        return err
-                    }
-                }
-            }
-        }
-    }
-    return nil
-}
-
-// CheckAll will trigger alerts for all latest metrics when they have expired
-// and no alert has been sent before.
-func (mc *Checker) CheckAll() error {
-    for _, metric := range mc.metrics.AllMetrics() {
-        if mc.FailedMetric(metric.Name, metric.Peer) {
-            err := mc.alert(metric.Peer, metric.Name)
-            if err != nil {
-                return err
-            }
-        }
-    }
-
-    return nil
-}
-
-// ResetAlerts resets the count of how many times a peer has alerted for a
-// given metric. Thus, if it was over the threshold, it will start alerting again.
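// For illustration, a hypothetical consumer of the checker might look
// like this (caller code, not part of this package):
//
//	for alert := range checker.Alerts() {
//		log.Printf("peer %s failed metric %q", alert.Metric.Peer, alert.Metric.Name)
//		// once the peer is considered recovered:
//		checker.ResetAlerts(alert.Metric.Peer, alert.Metric.Name)
//	}
//
// At most MaxAlertThreshold alerts are emitted per peer and metric until
// reset, and alerts are dropped with ErrAlertChannelFull once the
// buffered channel fills up.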
-func (mc *Checker) ResetAlerts(pid peer.ID, metricName string) { - mc.failedPeersMu.Lock() - defer mc.failedPeersMu.Unlock() - - failedMetrics, ok := mc.failedPeers[pid] - if !ok { - return - } - delete(failedMetrics, metricName) - if len(mc.failedPeers[pid]) == 0 { - delete(mc.failedPeers, pid) - } -} - -func (mc *Checker) alert(pid peer.ID, metricName string) error { - mc.failedPeersMu.Lock() - defer mc.failedPeersMu.Unlock() - - if _, ok := mc.failedPeers[pid]; !ok { - mc.failedPeers[pid] = make(map[string]int) - } - failedMetrics := mc.failedPeers[pid] - lastMetric := mc.metrics.PeerLatest(metricName, pid) - if !lastMetric.Defined() { - lastMetric = api.Metric{ - Name: metricName, - Peer: pid, - } - } - - failedMetrics[metricName]++ - // If above threshold, do not send alert - if failedMetrics[metricName] > MaxAlertThreshold { - // Cleanup old metrics eventually - if failedMetrics[metricName] >= 300 { - delete(failedMetrics, metricName) - if len(mc.failedPeers[pid]) == 0 { - delete(mc.failedPeers, pid) - } - } - return nil - } - - alrt := api.Alert{ - Metric: lastMetric, - TriggeredAt: time.Now(), - } - select { - case mc.alertCh <- alrt: - default: - return ErrAlertChannelFull - } - return nil -} - -// Alerts returns a channel which gets notified by CheckPeers. -func (mc *Checker) Alerts() <-chan api.Alert { - return mc.alertCh -} - -// Watch will trigger regular CheckPeers on the given interval. It will call -// peersF to obtain a peerset. It can be stopped by canceling the context. -// Usually you want to launch this in a goroutine. -func (mc *Checker) Watch(ctx context.Context, peersF func(context.Context) ([]peer.ID, error), interval time.Duration) { - ticker := time.NewTicker(interval) - for { - select { - case <-ticker.C: - if peersF != nil { - peers, err := peersF(ctx) - if err != nil { - continue - } - mc.CheckPeers(peers) - } else { - mc.CheckAll() - } - case <-ctx.Done(): - ticker.Stop() - return - } - } -} - -// FailedMetric returns if a peer is marked as failed for a particular metric. 
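// In this simplified model, "failed" means the latest metric has
// outlived its TTL. A hedged example (assuming a store and checker
// constructed as above):
//
//	m := api.Metric{Name: "ping", Peer: pid, Valid: true}
//	m.SetTTL(2 * time.Second)
//	store.Add(m)
//	// ...more than 2s later:
//	checker.FailedMetric("ping", pid) // == true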
-func (mc *Checker) FailedMetric(metric string, pid peer.ID) bool { - latest := mc.metrics.PeerLatest(metric, pid) - return latest.Expired() -} diff --git a/packages/networking/ipfs-cluster/monitor/metrics/checker_test.go b/packages/networking/ipfs-cluster/monitor/metrics/checker_test.go deleted file mode 100644 index 20c9a85..0000000 --- a/packages/networking/ipfs-cluster/monitor/metrics/checker_test.go +++ /dev/null @@ -1,200 +0,0 @@ -package metrics - -import ( - "context" - "testing" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/test" - - peer "github.com/libp2p/go-libp2p/core/peer" -) - -func TestChecker_CheckPeers(t *testing.T) { - t.Run("check with single metric", func(t *testing.T) { - metrics := NewStore() - checker := NewChecker(context.Background(), metrics) - - metr := api.Metric{ - Name: "ping", - Peer: test.PeerID1, - Value: "1", - Valid: true, - } - metr.SetTTL(2 * time.Second) - - metrics.Add(metr) - - checker.CheckPeers([]peer.ID{test.PeerID1}) - select { - case <-checker.Alerts(): - t.Error("there should not be an alert yet") - default: - } - - time.Sleep(3 * time.Second) - err := checker.CheckPeers([]peer.ID{test.PeerID1}) - if err != nil { - t.Fatal(err) - } - - select { - case <-checker.Alerts(): - default: - t.Error("an alert should have been triggered") - } - - checker.CheckPeers([]peer.ID{test.PeerID2}) - select { - case <-checker.Alerts(): - t.Error("there should not be alerts for different peer") - default: - } - }) -} - -func TestChecker_CheckAll(t *testing.T) { - t.Run("checkall with single metric", func(t *testing.T) { - metrics := NewStore() - checker := NewChecker(context.Background(), metrics) - - metr := api.Metric{ - Name: "ping", - Peer: test.PeerID1, - Value: "1", - Valid: true, - } - metr.SetTTL(2 * time.Second) - - metrics.Add(metr) - - checker.CheckAll() - select { - case <-checker.Alerts(): - t.Error("there should not be an alert yet") - default: - } - - time.Sleep(3 * time.Second) - err := checker.CheckAll() - if err != nil { - t.Fatal(err) - } - - select { - case <-checker.Alerts(): - default: - t.Error("an alert should have been triggered") - } - - checker.CheckAll() - select { - case <-checker.Alerts(): - t.Error("there should not be alerts for different peer") - default: - } - }) -} - -func TestChecker_Watch(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - metrics := NewStore() - checker := NewChecker(context.Background(), metrics) - - metr := api.Metric{ - Name: "ping", - Peer: test.PeerID1, - Value: "1", - Valid: true, - } - metr.SetTTL(100 * time.Millisecond) - metrics.Add(metr) - - peersF := func(context.Context) ([]peer.ID, error) { - return []peer.ID{test.PeerID1}, nil - } - - go checker.Watch(ctx, peersF, 200*time.Millisecond) - - select { - case a := <-checker.Alerts(): - t.Log("received alert:", a) - case <-ctx.Done(): - t.Fatal("should have received an alert") - } -} - -func TestChecker_Failed(t *testing.T) { - t.Run("standard failure check", func(t *testing.T) { - metrics := NewStore() - checker := NewChecker(context.Background(), metrics) - - metrics.Add(makePeerMetric(test.PeerID1, "1", 100*time.Millisecond)) - time.Sleep(50 * time.Millisecond) - got := checker.FailedMetric("ping", test.PeerID1) - if got { - t.Error("should not have failed so soon") - } - time.Sleep(100 * time.Millisecond) - got = checker.FailedMetric("ping", test.PeerID1) - if !got { - t.Error("should have failed") - } - }) -} - -func TestChecker_alert(t 
*testing.T) { - t.Run("remove peer from store after alert", func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - metrics := NewStore() - checker := NewChecker(ctx, metrics) - - metr := api.Metric{ - Name: "ping", - Peer: test.PeerID1, - Value: "1", - Valid: true, - } - metr.SetTTL(100 * time.Millisecond) - metrics.Add(metr) - - peersF := func(context.Context) ([]peer.ID, error) { - return []peer.ID{test.PeerID1}, nil - } - - go checker.Watch(ctx, peersF, 200*time.Millisecond) - - var alertCount int - for { - select { - case a := <-checker.Alerts(): - t.Log("received alert:", a) - alertCount++ - if alertCount > MaxAlertThreshold { - t.Fatalf("there should no more than %d alert", MaxAlertThreshold) - } - case <-ctx.Done(): - if alertCount < 1 { - t.Fatal("should have received an alert") - } - return - } - } - }) -} - -func makePeerMetric(pid peer.ID, value string, ttl time.Duration) api.Metric { - metr := api.Metric{ - Name: "ping", - Peer: pid, - Value: value, - Valid: true, - } - metr.SetTTL(ttl) - return metr -} diff --git a/packages/networking/ipfs-cluster/monitor/metrics/store.go b/packages/networking/ipfs-cluster/monitor/metrics/store.go deleted file mode 100644 index c91c064..0000000 --- a/packages/networking/ipfs-cluster/monitor/metrics/store.go +++ /dev/null @@ -1,188 +0,0 @@ -package metrics - -import ( - "sort" - "sync" - - "github.com/ipfs-cluster/ipfs-cluster/api" - - peer "github.com/libp2p/go-libp2p/core/peer" -) - -// PeerMetrics maps a peer IDs to a metrics window. -type PeerMetrics map[peer.ID]*Window - -// Store can be used to store and access metrics. -type Store struct { - mux sync.RWMutex - byName map[string]PeerMetrics -} - -// NewStore can be used to create a Store. -func NewStore() *Store { - return &Store{ - byName: make(map[string]PeerMetrics), - } -} - -// Add inserts a new metric in Metrics. -func (mtrs *Store) Add(m api.Metric) { - mtrs.mux.Lock() - defer mtrs.mux.Unlock() - - name := m.Name - peer := m.Peer - mbyp, ok := mtrs.byName[name] - if !ok { - mbyp = make(PeerMetrics) - mtrs.byName[name] = mbyp - } - window, ok := mbyp[peer] - if !ok { - // We always lock the outer map, so we can use unsafe - // Window. - window = NewWindow(DefaultWindowCap) - mbyp[peer] = window - } - - window.Add(m) -} - -// RemovePeer removes all metrics related to a peer from the Store. -func (mtrs *Store) RemovePeer(pid peer.ID) { - mtrs.mux.Lock() - for _, metrics := range mtrs.byName { - delete(metrics, pid) - } - mtrs.mux.Unlock() -} - -// RemovePeerMetrics removes all metrics of a given name for a given peer ID. -func (mtrs *Store) RemovePeerMetrics(pid peer.ID, name string) { - mtrs.mux.Lock() - metrics := mtrs.byName[name] - delete(metrics, pid) - mtrs.mux.Unlock() -} - -// LatestValid returns all the last known valid metrics of a given type. A metric -// is valid if it has not expired. -func (mtrs *Store) LatestValid(name string) []api.Metric { - mtrs.mux.RLock() - defer mtrs.mux.RUnlock() - - byPeer, ok := mtrs.byName[name] - if !ok { - return []api.Metric{} - } - - metrics := make([]api.Metric, 0, len(byPeer)) - for _, window := range byPeer { - m, err := window.Latest() - // TODO(ajl): for accrual, does it matter if a ping has expired? - if err != nil || m.Discard() { - continue - } - metrics = append(metrics, m) - } - - sortedMetrics := api.MetricSlice(metrics) - sort.Stable(sortedMetrics) - return sortedMetrics -} - -// AllMetrics returns the latest metrics for all peers and metrics types. 
It -// may return expired metrics. -func (mtrs *Store) AllMetrics() []api.Metric { - mtrs.mux.RLock() - defer mtrs.mux.RUnlock() - - result := make([]api.Metric, 0) - - for _, byPeer := range mtrs.byName { - for _, window := range byPeer { - metric, err := window.Latest() - if err != nil || !metric.Valid { - continue - } - result = append(result, metric) - } - } - return result -} - -// PeerMetrics returns the latest metrics for a given peer ID for -// all known metrics types. It may return expired metrics. -func (mtrs *Store) PeerMetrics(pid peer.ID) []api.Metric { - mtrs.mux.RLock() - defer mtrs.mux.RUnlock() - - result := make([]api.Metric, 0) - - for _, byPeer := range mtrs.byName { - window, ok := byPeer[pid] - if !ok { - continue - } - metric, err := window.Latest() - if err != nil || !metric.Valid { - continue - } - result = append(result, metric) - } - return result -} - -// PeerMetricAll returns all of a particular metrics for a -// particular peer. -func (mtrs *Store) PeerMetricAll(name string, pid peer.ID) []api.Metric { - mtrs.mux.RLock() - defer mtrs.mux.RUnlock() - - byPeer, ok := mtrs.byName[name] - if !ok { - return nil - } - - window, ok := byPeer[pid] - if !ok { - return nil - } - ms := window.All() - return ms -} - -// PeerLatest returns the latest of a particular metric for a -// particular peer. It may return an expired metric. -func (mtrs *Store) PeerLatest(name string, pid peer.ID) api.Metric { - mtrs.mux.RLock() - defer mtrs.mux.RUnlock() - - byPeer, ok := mtrs.byName[name] - if !ok { - return api.Metric{} - } - - window, ok := byPeer[pid] - if !ok { - return api.Metric{} - } - m, err := window.Latest() - if err != nil { - // ignoring error, as nil metric is indicative enough - return api.Metric{} - } - return m -} - -// MetricNames returns all the known metric names -func (mtrs *Store) MetricNames() []string { - mtrs.mux.RLock() - defer mtrs.mux.RUnlock() - - list := make([]string, 0, len(mtrs.byName)) - for k := range mtrs.byName { - list = append(list, k) - } - return list -} diff --git a/packages/networking/ipfs-cluster/monitor/metrics/store_test.go b/packages/networking/ipfs-cluster/monitor/metrics/store_test.go deleted file mode 100644 index 2813311..0000000 --- a/packages/networking/ipfs-cluster/monitor/metrics/store_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package metrics - -import ( - "testing" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/test" -) - -func TestStoreLatest(t *testing.T) { - store := NewStore() - - metr := api.Metric{ - Name: "test", - Peer: test.PeerID1, - Value: "1", - Valid: true, - } - metr.SetTTL(200 * time.Millisecond) - store.Add(metr) - - latest := store.LatestValid("test") - if len(latest) != 1 { - t.Error("expected 1 metric") - } - - time.Sleep(220 * time.Millisecond) - - latest = store.LatestValid("test") - if len(latest) != 0 { - t.Error("expected no metrics") - } -} - -func TestRemovePeer(t *testing.T) { - store := NewStore() - - metr := api.Metric{ - Name: "test", - Peer: test.PeerID1, - Value: "1", - Valid: true, - } - metr.SetTTL(200 * time.Millisecond) - store.Add(metr) - - if pmtrs := store.PeerMetrics(test.PeerID1); len(pmtrs) <= 0 { - t.Errorf("there should be one peer metric; got: %v", pmtrs) - } - store.RemovePeer(test.PeerID1) - if pmtrs := store.PeerMetrics(test.PeerID1); len(pmtrs) > 0 { - t.Errorf("there should be no peer metrics; got: %v", pmtrs) - } -} diff --git a/packages/networking/ipfs-cluster/monitor/metrics/util.go 
b/packages/networking/ipfs-cluster/monitor/metrics/util.go deleted file mode 100644 index 2dcaf8d..0000000 --- a/packages/networking/ipfs-cluster/monitor/metrics/util.go +++ /dev/null @@ -1,28 +0,0 @@ -package metrics - -import ( - "github.com/ipfs-cluster/ipfs-cluster/api" - - peer "github.com/libp2p/go-libp2p/core/peer" -) - -// PeersetFilter removes all metrics not belonging to the given -// peerset -func PeersetFilter(metrics []api.Metric, peerset []peer.ID) []api.Metric { - peerMap := make(map[peer.ID]struct{}) - for _, pid := range peerset { - peerMap[pid] = struct{}{} - } - - filtered := make([]api.Metric, 0, len(metrics)) - - for _, metric := range metrics { - _, ok := peerMap[metric.Peer] - if !ok { - continue - } - filtered = append(filtered, metric) - } - - return filtered -} diff --git a/packages/networking/ipfs-cluster/monitor/metrics/window.go b/packages/networking/ipfs-cluster/monitor/metrics/window.go deleted file mode 100644 index 1166c87..0000000 --- a/packages/networking/ipfs-cluster/monitor/metrics/window.go +++ /dev/null @@ -1,93 +0,0 @@ -// Package metrics provides common functionality for working with metrics, -// particularly useful for monitoring components. It includes types to store, -// check and filter metrics. -package metrics - -import ( - "container/ring" - "errors" - "sync" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/api" -) - -// DefaultWindowCap sets the amount of metrics to store per peer. -var DefaultWindowCap = 25 - -// ErrNoMetrics is returned when there are no metrics in a Window. -var ErrNoMetrics = errors.New("no metrics have been added to this window") - -// Window implements a circular queue to store metrics. -type Window struct { - wMu sync.RWMutex - window *ring.Ring -} - -// NewWindow creates an instance with the given -// window capacity. -func NewWindow(windowCap int) *Window { - if windowCap <= 0 { - panic("invalid windowCap") - } - - w := ring.New(windowCap) - return &Window{ - window: w, - } -} - -// Add adds a new metric to the window. If the window capacity -// has been reached, the oldest metric (by the time it was added), -// will be discarded. Add leaves the cursor on the next spot, -// which is either empty or the oldest record. -func (mw *Window) Add(m api.Metric) { - m.ReceivedAt = time.Now().UnixNano() - - mw.wMu.Lock() - mw.window.Value = m - mw.window = mw.window.Next() - mw.wMu.Unlock() -} - -// Latest returns the last metric added. It returns an error -// if no metrics were added. -func (mw *Window) Latest() (api.Metric, error) { - var last api.Metric - var ok bool - - mw.wMu.RLock() - // This just returns the previous ring and - // doesn't set the window "cursor" to the previous - // ring. Therefore this is just a read operation - // as well. - prevRing := mw.window.Prev() - mw.wMu.RUnlock() - - last, ok = prevRing.Value.(api.Metric) - - if !ok || !last.Defined() { - return last, ErrNoMetrics - } - - return last, nil -} - -// All returns all the metrics in the window, in the inverse order -// they were Added. That is, result[0] will be the last added -// metric. -func (mw *Window) All() []api.Metric { - values := make([]api.Metric, 0, mw.window.Len()) - - mw.wMu.RLock() - mw.window.Do(func(v interface{}) { - i, ok := v.(api.Metric) - if ok { - // append younger values to older value - values = append([]api.Metric{i}, values...) 
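// Note on ordering: ring.Do starts at the current position, which
// Add always leaves pointing at the oldest slot, so the walk runs
// oldest to newest; prepending each element as above therefore
// yields a newest-first result, matching the documented contract
// that result[0] is the last added metric.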
- } - }) - mw.wMu.RUnlock() - - return values -} diff --git a/packages/networking/ipfs-cluster/monitor/metrics/window_test.go b/packages/networking/ipfs-cluster/monitor/metrics/window_test.go deleted file mode 100644 index dcc99e8..0000000 --- a/packages/networking/ipfs-cluster/monitor/metrics/window_test.go +++ /dev/null @@ -1,333 +0,0 @@ -package metrics - -import ( - "fmt" - "testing" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/api" -) - -func makeMetric(value string) api.Metric { - metr := api.Metric{ - Name: "test", - Peer: "peer1", - Value: value, - Valid: true, - } - metr.SetTTL(5 * time.Second) - return metr -} - -func TestNewWindow(t *testing.T) { - w := NewWindow(10) - w.window.Next() -} - -func TestWindow_Race(t *testing.T) { - t.SkipNow() - w := NewWindow(DefaultWindowCap) - start := make(chan struct{}) - done := make(chan struct{}) - log := make(chan string, 100) - - // go routine to add metrics at regular interval - addTicker := time.NewTicker(10 * time.Millisecond) - go func() { - var i int - <-start - for { - select { - case <-addTicker.C: - if i >= 25 { - i = 0 - } - time.Sleep(time.Duration(i) * time.Millisecond) - w.Add(makeMetric("1")) - i++ - case <-done: - return - } - } - }() - - // go routine to query latest at regular interval - latestTicker := time.NewTicker(20 * time.Millisecond) - go func() { - <-start - for { - select { - case <-latestTicker.C: - // l, _ := w.Latest() - w.Latest() - // log <- fmt.Sprintf("latest: %v", l) - case <-done: - return - } - } - }() - - // go routine to query all at regular interval - allTicker := time.NewTicker(30 * time.Millisecond) - go func() { - <-start - for { - select { - case <-allTicker.C: - w.All() - // log <- fmt.Sprintf("all: %v", w.All()) - case <-done: - return - } - } - }() - - go func() { - <-start - <-done - for s := range log { - fmt.Println(s) - } - close(done) - }() - - close(start) - time.Sleep(50 * time.Millisecond) - done <- struct{}{} - <-done -} - -func TestWindow_Add(t *testing.T) { - t.Run("add single value", func(t *testing.T) { - mw := NewWindow(4) - want := makeMetric("1") - mw.Add(want) - - mw.wMu.RLock() - prevRing := mw.window.Prev() - got, ok := prevRing.Value.(api.Metric) - mw.wMu.RUnlock() - if !ok { - t.Error("value in window isn't an api.Metric") - } - - // We need to do this for metrics to be equal since ReceivedAt - // is added by the window. 
- want.ReceivedAt = got.ReceivedAt - - if got != want { - t.Errorf("got = %v, want = %v", got, want) - } - }) -} - -func BenchmarkWindow_Add(b *testing.B) { - b.Run("window size 10", func(b *testing.B) { - mw := NewWindow(10) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - mw.Add(makeMetric("1")) - } - }) - - b.Run("window size 25", func(b *testing.B) { - mw := NewWindow(25) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - mw.Add(makeMetric("1")) - } - }) - - b.Run("window size 1000", func(b *testing.B) { - mw := NewWindow(1000) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - mw.Add(makeMetric("1")) - } - }) -} - -func TestWindow_Latest(t *testing.T) { - t.Run("no metrics error", func(t *testing.T) { - mw := NewWindow(4) - _, err := mw.Latest() - if err != ErrNoMetrics { - t.Error("expected ErrNoMetrics") - } - }) - - t.Run("single latest value", func(t *testing.T) { - mw := NewWindow(4) - mw.Add(makeMetric("1")) - - metr, err := mw.Latest() - if err != nil { - t.Fatal(err) - } - - if metr.Value != "1" { - t.Error("expected different value") - } - }) -} - -func BenchmarkWindow_Latest(b *testing.B) { - b.Run("window size 10", func(b *testing.B) { - mw := NewWindow(10) - for i := 0; i < 10; i++ { - mw.Add(makeMetric("1")) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - mw.Add(makeMetric("1")) - } - }) - - b.Run("window size 25", func(b *testing.B) { - mw := NewWindow(25) - for i := 0; i < 25; i++ { - mw.Add(makeMetric("1")) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - mw.Add(makeMetric("1")) - } - }) - - b.Run("window size 1000", func(b *testing.B) { - mw := NewWindow(1000) - for i := 0; i < 1000; i++ { - mw.Add(makeMetric("1")) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - mw.Add(makeMetric("1")) - } - }) -} - -func TestWindow_All(t *testing.T) { - t.Run("empty window", func(t *testing.T) { - mw := NewWindow(4) - if len(mw.All()) != 0 { - t.Error("expected 0 metrics") - } - }) - - t.Run("half capacity", func(t *testing.T) { - mw := NewWindow(4) - mw.Add(makeMetric("1")) - mw.Add(makeMetric("2")) - - all := mw.All() - if len(all) != 2 { - t.Fatalf("should only be storing 2 metrics: got: %d", len(all)) - } - - if all[0].Value != "2" { - t.Error("newest metric should be first") - } - - if all[1].Value != "1" { - t.Error("older metric should be second") - } - }) - - t.Run("full capacity", func(t *testing.T) { - mw := NewWindow(4) - mw.Add(makeMetric("1")) - mw.Add(makeMetric("2")) - mw.Add(makeMetric("3")) - mw.Add(makeMetric("4")) - - all := mw.All() - if len(all) != 4 { - t.Fatalf("should only be storing 4 metrics: got: %d", len(all)) - } - - if all[len(all)-1].Value != "1" { - t.Error("oldest metric should be 1") - } - }) - - t.Run("over flow capacity", func(t *testing.T) { - mw := NewWindow(4) - mw.Add(makeMetric("1")) - mw.Add(makeMetric("2")) - mw.Add(makeMetric("3")) - mw.Add(makeMetric("4")) - mw.Add(makeMetric("5")) - - all := mw.All() - if len(all) != 4 { - t.Fatalf("should only be storing 4 metrics: got: %d", len(all)) - } - - if all[len(all)-1].Value != "2" { - t.Error("oldest metric should be 2") - } - - }) -} - -func TestWindow_AddParallel(t *testing.T) { - t.Parallel() - - mw := NewWindow(10) - - t.Run("parallel adder 1", func(t *testing.T) { - for i := 0; i < 100; i++ { - mw.Add(makeMetric("adder 1")) - } - }) - - t.Run("parallel adder 2", func(t *testing.T) { - for i := 0; i < 100; i++ { - mw.Add(makeMetric("adder 2")) - } - }) -} - -func BenchmarkWindow_All(b *testing.B) { - b.Run("window size 10", func(b *testing.B) { - mw := NewWindow(10) - for i 
:= 0; i < 10; i++ { - mw.Add(makeMetric("1")) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - mw.All() - } - }) - - b.Run("window size 25", func(b *testing.B) { - mw := NewWindow(25) - for i := 0; i < 25; i++ { - mw.Add(makeMetric("1")) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - mw.All() - } - }) - - b.Run("window size 1000", func(b *testing.B) { - mw := NewWindow(1000) - for i := 0; i < 1000; i++ { - mw.Add(makeMetric("1")) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - mw.All() - } - }) -} diff --git a/packages/networking/ipfs-cluster/monitor/pubsubmon/config.go b/packages/networking/ipfs-cluster/monitor/pubsubmon/config.go deleted file mode 100644 index c6dbded..0000000 --- a/packages/networking/ipfs-cluster/monitor/pubsubmon/config.go +++ /dev/null @@ -1,103 +0,0 @@ -package pubsubmon - -import ( - "encoding/json" - "errors" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/config" - "github.com/kelseyhightower/envconfig" -) - -const configKey = "pubsubmon" -const envConfigKey = "cluster_pubsubmon" - -// Default values for this Config. -const ( - DefaultCheckInterval = 15 * time.Second -) - -// Config allows to initialize a Monitor and customize some parameters. -type Config struct { - config.Saver - - CheckInterval time.Duration -} - -type jsonConfig struct { - CheckInterval string `json:"check_interval"` -} - -// ConfigKey provides a human-friendly identifier for this type of Config. -func (cfg *Config) ConfigKey() string { - return configKey -} - -// Default sets the fields of this Config to sensible values. -func (cfg *Config) Default() error { - cfg.CheckInterval = DefaultCheckInterval - return nil -} - -// ApplyEnvVars fills in any Config fields found -// as environment variables. -func (cfg *Config) ApplyEnvVars() error { - jcfg := cfg.toJSONConfig() - - err := envconfig.Process(envConfigKey, jcfg) - if err != nil { - return err - } - - return cfg.applyJSONConfig(jcfg) -} - -// Validate checks that the fields of this Config have working values, -// at least in appearance. -func (cfg *Config) Validate() error { - if cfg.CheckInterval <= 0 { - return errors.New("pubsubmon.check_interval too low") - } - - return nil -} - -// LoadJSON sets the fields of this Config to the values defined by the JSON -// representation of it, as generated by ToJSON. -func (cfg *Config) LoadJSON(raw []byte) error { - jcfg := &jsonConfig{} - err := json.Unmarshal(raw, jcfg) - if err != nil { - logger.Error("Error unmarshaling pubsubmon monitor config") - return err - } - - cfg.Default() - - return cfg.applyJSONConfig(jcfg) -} - -func (cfg *Config) applyJSONConfig(jcfg *jsonConfig) error { - interval, _ := time.ParseDuration(jcfg.CheckInterval) - cfg.CheckInterval = interval - - return cfg.Validate() -} - -// ToJSON generates a human-friendly JSON representation of this Config. -func (cfg *Config) ToJSON() ([]byte, error) { - jcfg := cfg.toJSONConfig() - - return json.MarshalIndent(jcfg, "", " ") -} - -func (cfg *Config) toJSONConfig() *jsonConfig { - return &jsonConfig{ - CheckInterval: cfg.CheckInterval.String(), - } -} - -// ToDisplayJSON returns JSON config as a string. 
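//
// For reference, a sketch of the resulting "pubsubmon" JSON section, which
// matches the jsonConfig struct above and consists of a single Go duration
// string:
//
//	{
//	  "check_interval": "15s"
//	}
//
// Note that applyJSONConfig discards time.ParseDuration's error, so an
// unparsable value decodes to 0 and is then rejected by Validate as
// "pubsubmon.check_interval too low"; the config test below relies on this.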
-func (cfg *Config) ToDisplayJSON() ([]byte, error) { - return config.DisplayJSON(cfg.toJSONConfig()) -} diff --git a/packages/networking/ipfs-cluster/monitor/pubsubmon/config_test.go b/packages/networking/ipfs-cluster/monitor/pubsubmon/config_test.go deleted file mode 100644 index 9de24ad..0000000 --- a/packages/networking/ipfs-cluster/monitor/pubsubmon/config_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package pubsubmon - -import ( - "encoding/json" - "os" - "testing" - "time" -) - -var cfgJSON = []byte(` -{ - "check_interval": "15s" -} -`) - -func TestLoadJSON(t *testing.T) { - cfg := &Config{} - err := cfg.LoadJSON(cfgJSON) - if err != nil { - t.Fatal(err) - } - - j := &jsonConfig{} - - json.Unmarshal(cfgJSON, j) - j.CheckInterval = "-10" - tst, _ := json.Marshal(j) - err = cfg.LoadJSON(tst) - if err == nil { - t.Error("expected error decoding check_interval") - } -} - -func TestToJSON(t *testing.T) { - cfg := &Config{} - cfg.LoadJSON(cfgJSON) - newjson, err := cfg.ToJSON() - if err != nil { - t.Fatal(err) - } - cfg = &Config{} - err = cfg.LoadJSON(newjson) - if err != nil { - t.Fatal(err) - } -} - -func TestDefault(t *testing.T) { - cfg := &Config{} - cfg.Default() - if cfg.Validate() != nil { - t.Fatal("error validating") - } - - cfg.CheckInterval = 0 - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } -} - -func TestApplyEnvVars(t *testing.T) { - os.Setenv("CLUSTER_PUBSUBMON_CHECKINTERVAL", "22s") - cfg := &Config{} - cfg.ApplyEnvVars() - - if cfg.CheckInterval != 22*time.Second { - t.Fatal("failed to override check_interval with env var") - } -} diff --git a/packages/networking/ipfs-cluster/monitor/pubsubmon/pubsubmon.go b/packages/networking/ipfs-cluster/monitor/pubsubmon/pubsubmon.go deleted file mode 100644 index 62cb31d..0000000 --- a/packages/networking/ipfs-cluster/monitor/pubsubmon/pubsubmon.go +++ /dev/null @@ -1,296 +0,0 @@ -// Package pubsubmon implements a PeerMonitor component for IPFS Cluster that -// uses PubSub to send and receive metrics. -package pubsubmon - -import ( - "bytes" - "context" - "time" - - "sync" - - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/monitor/metrics" - - logging "github.com/ipfs/go-log/v2" - peer "github.com/libp2p/go-libp2p/core/peer" - rpc "github.com/libp2p/go-libp2p-gorpc" - pubsub "github.com/libp2p/go-libp2p-pubsub" - gocodec "github.com/ugorji/go/codec" - - "go.opencensus.io/trace" -) - -var logger = logging.Logger("monitor") - -// PubsubTopic specifies the topic used to publish Cluster metrics. -var PubsubTopic = "monitor.metrics" - -var msgpackHandle = &gocodec.MsgpackHandle{} - -// Monitor is a component in charge of monitoring peers, logging -// metrics and detecting failures -type Monitor struct { - ctx context.Context - cancel func() - rpcClient *rpc.Client - rpcReady chan struct{} - - pubsub *pubsub.PubSub - topic *pubsub.Topic - subscription *pubsub.Subscription - peers PeersFunc - - metrics *metrics.Store - checker *metrics.Checker - - config *Config - - shutdownLock sync.Mutex - shutdown bool - wg sync.WaitGroup -} - -// PeersFunc allows the Monitor to filter and discard metrics -// that do not belong to a given peerset. -type PeersFunc func(context.Context) ([]peer.ID, error) - -// New creates a new PubSub monitor, using the given host, config and -// PeersFunc. The PeersFunc can be nil. In this case, no metric filtering is -// done based on peers (any peer is considered part of the peerset). 
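//
// A minimal construction sketch (assumed wiring; compare the testPeerMonitor
// helper in the test file below, and note that rpcClient stands in for your
// go-libp2p-gorpc client):
//
//	psub, _ := pubsub.NewGossipSub(ctx, h) // h is a libp2p host
//	cfg := &Config{}
//	cfg.Default()
//	mon, err := New(ctx, cfg, psub, nil) // nil PeersFunc: no peerset filtering
//	if err != nil {
//		// handle err
//	}
//	mon.SetClient(rpcClient) // watching and logging start once RPC is ready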
-func New( - ctx context.Context, - cfg *Config, - psub *pubsub.PubSub, - peers PeersFunc, -) (*Monitor, error) { - err := cfg.Validate() - if err != nil { - return nil, err - } - - ctx, cancel := context.WithCancel(ctx) - - mtrs := metrics.NewStore() - checker := metrics.NewChecker(ctx, mtrs) - - topic, err := psub.Join(PubsubTopic) - if err != nil { - cancel() - return nil, err - } - subscription, err := topic.Subscribe() - if err != nil { - cancel() - return nil, err - } - - mon := &Monitor{ - ctx: ctx, - cancel: cancel, - rpcReady: make(chan struct{}, 1), - - pubsub: psub, - topic: topic, - subscription: subscription, - peers: peers, - - metrics: mtrs, - checker: checker, - config: cfg, - } - - go mon.run() - return mon, nil -} - -func (mon *Monitor) run() { - select { - case <-mon.rpcReady: - go mon.logFromPubsub() - go mon.checker.Watch(mon.ctx, mon.peers, mon.config.CheckInterval) - case <-mon.ctx.Done(): - } -} - -// logFromPubsub logs metrics received in the subscribed topic. -func (mon *Monitor) logFromPubsub() { - ctx, span := trace.StartSpan(mon.ctx, "monitor/pubsub/logFromPubsub") - defer span.End() - - decodeWarningPrinted := false - // Previous versions use multicodec with the following header, which - // we need to remove. - multicodecPrefix := append([]byte{byte(9)}, []byte("/msgpack\n")...) - - for { - select { - case <-ctx.Done(): - return - default: - msg, err := mon.subscription.Next(ctx) - if err != nil { // context canceled enters here - continue - } - - data := msg.GetData() - buf := bytes.NewBuffer(data) - dec := gocodec.NewDecoder(buf, msgpackHandle) - metric := api.Metric{} - err = dec.Decode(&metric) - if err != nil { - if bytes.HasPrefix(data, multicodecPrefix) { - buf := bytes.NewBuffer(data[len(multicodecPrefix):]) - dec := gocodec.NewDecoder(buf, msgpackHandle) - err = dec.Decode(&metric) - if err != nil { - logger.Error(err) - continue - } - // managed to decode an older version metric. Warn about it once. - if !decodeWarningPrinted { - logger.Warning("Peers in versions <= v0.13.3 detected. These peers will not receive metrics from this or other newer peers. Please upgrade them.") - decodeWarningPrinted = true - } - } else { - logger.Error(err) - continue - } - } - - debug("received", metric) - - err = mon.LogMetric(ctx, metric) - if err != nil { - logger.Error(err) - continue - } - } - } -} - -// SetClient saves the given rpc.Client for later use -func (mon *Monitor) SetClient(c *rpc.Client) { - mon.rpcClient = c - mon.rpcReady <- struct{}{} -} - -// Shutdown stops the peer monitor. It particular, it will -// not deliver any alerts. -func (mon *Monitor) Shutdown(ctx context.Context) error { - _, span := trace.StartSpan(ctx, "monitor/pubsub/Shutdown") - defer span.End() - - mon.shutdownLock.Lock() - defer mon.shutdownLock.Unlock() - - if mon.shutdown { - logger.Warn("Monitor already shut down") - return nil - } - - logger.Info("stopping Monitor") - close(mon.rpcReady) - - mon.cancel() - - mon.wg.Wait() - mon.shutdown = true - return nil -} - -// LogMetric stores a metric so it can later be retrieved. -func (mon *Monitor) LogMetric(ctx context.Context, m api.Metric) error { - _, span := trace.StartSpan(ctx, "monitor/pubsub/LogMetric") - defer span.End() - - mon.metrics.Add(m) - debug("logged", m) - if !m.Discard() { // We received a valid metric so avoid alerting. - mon.checker.ResetAlerts(m.Peer, m.Name) - } - return nil -} - -// PublishMetric broadcasts a metric to all current cluster peers. 
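//
// Metrics are encoded as plain msgpack (ugorji/go codec) and published on
// the "monitor.metrics" topic; logFromPubsub above implements the matching
// decode path, including the legacy multicodec-prefix fallback for peers
// running v0.13.3 or older.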
-func (mon *Monitor) PublishMetric(ctx context.Context, m api.Metric) error { - ctx, span := trace.StartSpan(ctx, "monitor/pubsub/PublishMetric") - defer span.End() - - if m.Discard() { - logger.Warnf("discarding invalid metric: %+v", m) - return nil - } - - var b bytes.Buffer - - enc := gocodec.NewEncoder(&b, msgpackHandle) - err := enc.Encode(m) - if err != nil { - logger.Error(err) - return err - } - - debug("publish", m) - - err = mon.topic.Publish(ctx, b.Bytes()) - if err != nil { - logger.Error(err) - return err - } - - return nil -} - -// LatestMetrics returns last known VALID metrics of a given type. A metric -// is only valid if it has not expired and belongs to a current cluster peer. -func (mon *Monitor) LatestMetrics(ctx context.Context, name string) []api.Metric { - ctx, span := trace.StartSpan(ctx, "monitor/pubsub/LatestMetrics") - defer span.End() - - latest := mon.metrics.LatestValid(name) - - if mon.peers == nil { - return latest - } - - // Make sure we only return metrics in the current peerset if we have - // a peerset provider. - peers, err := mon.peers(ctx) - if err != nil { - return []api.Metric{} - } - - return metrics.PeersetFilter(latest, peers) -} - -// LatestForPeer returns the latest metric received for a peer (it may have -// expired). It returns nil if no metric exists. -func (mon *Monitor) LatestForPeer(ctx context.Context, name string, pid peer.ID) api.Metric { - return mon.metrics.PeerLatest(name, pid) -} - -// Alerts returns a channel on which alerts are sent when the -// monitor detects a failure. -func (mon *Monitor) Alerts() <-chan api.Alert { - return mon.checker.Alerts() -} - -// MetricNames lists all metric names. -func (mon *Monitor) MetricNames(ctx context.Context) []string { - _, span := trace.StartSpan(ctx, "monitor/pubsub/MetricNames") - defer span.End() - - return mon.metrics.MetricNames() -} - -func debug(event string, m api.Metric) { - logger.Debugf( - "%s metric: '%s' - '%s' - '%s' - '%s'", - event, - m.Peer, - m.Name, - m.Value, - time.Unix(0, m.Expire), - ) -} diff --git a/packages/networking/ipfs-cluster/monitor/pubsubmon/pubsubmon_test.go b/packages/networking/ipfs-cluster/monitor/pubsubmon/pubsubmon_test.go deleted file mode 100644 index 5dd4351..0000000 --- a/packages/networking/ipfs-cluster/monitor/pubsubmon/pubsubmon_test.go +++ /dev/null @@ -1,313 +0,0 @@ -package pubsubmon - -import ( - "context" - "fmt" - - "strconv" - "sync" - "testing" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/test" - - libp2p "github.com/libp2p/go-libp2p" - host "github.com/libp2p/go-libp2p/core/host" - peer "github.com/libp2p/go-libp2p/core/peer" - pubsub "github.com/libp2p/go-libp2p-pubsub" -) - -func init() { - // GossipSub needs to heartbeat to discover newly connected hosts - // This speeds things up a little. 
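	// (GossipSubHeartbeatInterval is a package-level variable in
	// go-libp2p-pubsub, 1s by default, so lowering it in init() affects
	// every GossipSub router this test binary creates.)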
- pubsub.GossipSubHeartbeatInterval = 50 * time.Millisecond -} - -type metricFactory struct { - l sync.Mutex - counter int -} - -func newMetricFactory() *metricFactory { - return &metricFactory{ - counter: 0, - } -} - -func (mf *metricFactory) newMetric(n string, p peer.ID) api.Metric { - mf.l.Lock() - defer mf.l.Unlock() - m := api.Metric{ - Name: n, - Peer: p, - Value: fmt.Sprintf("%d", mf.counter), - Valid: true, - } - m.SetTTL(5 * time.Second) - mf.counter++ - return m -} - -func (mf *metricFactory) count() int { - mf.l.Lock() - defer mf.l.Unlock() - return mf.counter -} - -func peers(ctx context.Context) ([]peer.ID, error) { - return []peer.ID{test.PeerID1, test.PeerID2, test.PeerID3}, nil -} - -func testPeerMonitor(t *testing.T) (*Monitor, host.Host, func()) { - ctx := context.Background() - h, err := libp2p.New( - libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"), - ) - if err != nil { - t.Fatal(err) - } - - psub, err := pubsub.NewGossipSub( - ctx, - h, - pubsub.WithMessageSigning(true), - pubsub.WithStrictSignatureVerification(true), - ) - if err != nil { - h.Close() - t.Fatal(err) - } - - mock := test.NewMockRPCClientWithHost(t, h) - cfg := &Config{} - cfg.Default() - cfg.CheckInterval = 2 * time.Second - mon, err := New(ctx, cfg, psub, peers) - if err != nil { - t.Fatal(err) - } - mon.SetClient(mock) - - shutdownF := func() { - mon.Shutdown(ctx) - h.Close() - } - - return mon, h, shutdownF -} - -func TestPeerMonitorShutdown(t *testing.T) { - ctx := context.Background() - pm, _, shutdown := testPeerMonitor(t) - defer shutdown() - - err := pm.Shutdown(ctx) - if err != nil { - t.Error(err) - } - - err = pm.Shutdown(ctx) - if err != nil { - t.Error(err) - } -} - -func TestLogMetricConcurrent(t *testing.T) { - ctx := context.Background() - pm, _, shutdown := testPeerMonitor(t) - defer shutdown() - - var wg sync.WaitGroup - wg.Add(3) - - // Insert 25 metrics - f := func() { - defer wg.Done() - for i := 0; i < 25; i++ { - mt := api.Metric{ - Name: "test", - Peer: test.PeerID1, - Value: fmt.Sprintf("%d", time.Now().UnixNano()), - Valid: true, - } - mt.SetTTL(150 * time.Millisecond) - pm.LogMetric(ctx, mt) - time.Sleep(75 * time.Millisecond) - } - } - go f() - go f() - go f() - - // Wait for at least two metrics to be inserted - time.Sleep(200 * time.Millisecond) - last := time.Now().Add(-500 * time.Millisecond) - - for i := 0; i <= 20; i++ { - lastMtrcs := pm.LatestMetrics(ctx, "test") - - // There should always 1 valid LatestMetric "test" - if len(lastMtrcs) != 1 { - t.Error("no valid metrics", len(lastMtrcs), i) - time.Sleep(75 * time.Millisecond) - continue - } - - n, err := strconv.Atoi(lastMtrcs[0].Value) - if err != nil { - t.Fatal(err) - } - - // The timestamp of the metric cannot be older than - // the timestamp from the last - current := time.Unix(0, int64(n)) - if current.Before(last) { - t.Errorf("expected newer metric: Current: %s, Last: %s", current, last) - } - last = current - time.Sleep(75 * time.Millisecond) - } - - wg.Wait() -} - -func TestPeerMonitorLogMetric(t *testing.T) { - ctx := context.Background() - pm, _, shutdown := testPeerMonitor(t) - defer shutdown() - mf := newMetricFactory() - - // dont fill window - pm.LogMetric(ctx, mf.newMetric("test", test.PeerID1)) - pm.LogMetric(ctx, mf.newMetric("test", test.PeerID2)) - pm.LogMetric(ctx, mf.newMetric("test", test.PeerID3)) - - // fill window - pm.LogMetric(ctx, mf.newMetric("test2", test.PeerID3)) - pm.LogMetric(ctx, mf.newMetric("test2", test.PeerID3)) - pm.LogMetric(ctx, mf.newMetric("test2", test.PeerID3)) - 
pm.LogMetric(ctx, mf.newMetric("test2", test.PeerID3)) - - latestMetrics := pm.LatestMetrics(ctx, "testbad") - if len(latestMetrics) != 0 { - t.Logf("%+v", latestMetrics) - t.Error("metrics should be empty") - } - - latestMetrics = pm.LatestMetrics(ctx, "test") - if len(latestMetrics) != 3 { - t.Error("metrics should correspond to 3 hosts") - } - - for _, v := range latestMetrics { - switch v.Peer { - case test.PeerID1: - if v.Value != "0" { - t.Error("bad metric value") - } - case test.PeerID2: - if v.Value != "1" { - t.Error("bad metric value") - } - case test.PeerID3: - if v.Value != "2" { - t.Error("bad metric value") - } - default: - t.Error("bad peer") - } - } - - latestMetrics = pm.LatestMetrics(ctx, "test2") - if len(latestMetrics) != 1 { - t.Fatal("should only be one metric") - } - if latestMetrics[0].Value != fmt.Sprintf("%d", mf.count()-1) { - t.Error("metric is not last") - } -} - -func TestPeerMonitorPublishMetric(t *testing.T) { - ctx := context.Background() - pm, host, shutdown := testPeerMonitor(t) - defer shutdown() - - pm2, host2, shutdown2 := testPeerMonitor(t) - defer shutdown2() - - time.Sleep(200 * time.Millisecond) - - err := host.Connect( - context.Background(), - peer.AddrInfo{ - ID: host2.ID(), - Addrs: host2.Addrs(), - }, - ) - if err != nil { - t.Fatal(err) - } - - time.Sleep(200 * time.Millisecond) - - mf := newMetricFactory() - - metric := mf.newMetric("test", test.PeerID1) - err = pm.PublishMetric(ctx, metric) - if err != nil { - t.Fatal(err) - } - - time.Sleep(500 * time.Millisecond) - - checkMetric := func(t *testing.T, pm *Monitor) { - latestMetrics := pm.LatestMetrics(ctx, "test") - if len(latestMetrics) != 1 { - t.Fatal(host.ID(), "expected 1 published metric") - } - t.Log(host.ID(), "received metric") - - receivedMetric := latestMetrics[0] - if receivedMetric.Peer != metric.Peer || - receivedMetric.Expire != metric.Expire || - receivedMetric.Value != metric.Value || - receivedMetric.Valid != metric.Valid || - receivedMetric.Name != metric.Name { - t.Fatal("it should be exactly the same metric we published") - } - } - - t.Log("pm1") - checkMetric(t, pm) - t.Log("pm2") - checkMetric(t, pm2) -} - -func TestPeerMonitorAlerts(t *testing.T) { - ctx := context.Background() - pm, _, shutdown := testPeerMonitor(t) - defer shutdown() - mf := newMetricFactory() - - mtr := mf.newMetric("test", test.PeerID1) - mtr.SetTTL(0) - pm.LogMetric(ctx, mtr) - time.Sleep(time.Second) - timeout := time.NewTimer(time.Second * 5) - - // it should alert once. - for i := 0; i < 1; i++ { - select { - case <-timeout.C: - t.Fatal("should have thrown an alert by now") - case alrt := <-pm.Alerts(): - if alrt.Name != "test" { - t.Error("Alert should be for test") - } - if alrt.Peer != test.PeerID1 { - t.Error("Peer should be TestPeerID1") - } - } - } -} diff --git a/packages/networking/ipfs-cluster/observations/config.go b/packages/networking/ipfs-cluster/observations/config.go deleted file mode 100644 index 66086ef..0000000 --- a/packages/networking/ipfs-cluster/observations/config.go +++ /dev/null @@ -1,269 +0,0 @@ -package observations - -import ( - "encoding/json" - "errors" - "fmt" - "time" - - "github.com/kelseyhightower/envconfig" - - ma "github.com/multiformats/go-multiaddr" - - "github.com/ipfs-cluster/ipfs-cluster/config" -) - -const metricsConfigKey = "metrics" -const tracingConfigKey = "tracing" -const metricsEnvConfigKey = "cluster_metrics" -const tracingEnvConfigKey = "cluster_tracing" - -// Default values for this Config. 
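//
// (Both sections here are opt-in: stats and tracing are disabled by default
// and, as exercised in config_test.go below, can be flipped on through the
// envconfig keys above, e.g. CLUSTER_METRICS_ENABLESTATS=true or
// CLUSTER_TRACING_ENABLETRACING=true.)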
-const ( - DefaultEnableStats = false - DefaultPrometheusEndpoint = "/ip4/127.0.0.1/tcp/8888" - DefaultReportingInterval = 2 * time.Second - - DefaultEnableTracing = false - DefaultJaegerAgentEndpoint = "/ip4/0.0.0.0/udp/6831" - DefaultSamplingProb = 0.3 - DefaultServiceName = "cluster-daemon" -) - -// MetricsConfig configures metrics collection. -type MetricsConfig struct { - config.Saver - - EnableStats bool - PrometheusEndpoint ma.Multiaddr - ReportingInterval time.Duration -} - -type jsonMetricsConfig struct { - EnableStats bool `json:"enable_stats"` - PrometheusEndpoint string `json:"prometheus_endpoint"` - ReportingInterval string `json:"reporting_interval"` -} - -// ConfigKey provides a human-friendly identifier for this type of Config. -func (cfg *MetricsConfig) ConfigKey() string { - return metricsConfigKey -} - -// Default sets the fields of this Config to sensible values. -func (cfg *MetricsConfig) Default() error { - cfg.EnableStats = DefaultEnableStats - endpointAddr, _ := ma.NewMultiaddr(DefaultPrometheusEndpoint) - cfg.PrometheusEndpoint = endpointAddr - cfg.ReportingInterval = DefaultReportingInterval - - return nil -} - -// ApplyEnvVars fills in any Config fields found -// as environment variables. -func (cfg *MetricsConfig) ApplyEnvVars() error { - jcfg := cfg.toJSONConfig() - - err := envconfig.Process(metricsEnvConfigKey, jcfg) - if err != nil { - return err - } - - return cfg.applyJSONConfig(jcfg) -} - -// Validate checks that the fields of this Config have working values, -// at least in appearance. -func (cfg *MetricsConfig) Validate() error { - if cfg.EnableStats { - if cfg.PrometheusEndpoint == nil { - return errors.New("metrics.prometheus_endpoint is undefined") - } - if cfg.ReportingInterval < 0 { - return errors.New("metrics.reporting_interval is invalid") - } - } - return nil -} - -// LoadJSON sets the fields of this Config to the values defined by the JSON -// representation of it, as generated by ToJSON. -func (cfg *MetricsConfig) LoadJSON(raw []byte) error { - jcfg := &jsonMetricsConfig{} - err := json.Unmarshal(raw, jcfg) - if err != nil { - logger.Error("Error unmarshaling observations config") - return err - } - - cfg.Default() - - return cfg.applyJSONConfig(jcfg) -} - -func (cfg *MetricsConfig) applyJSONConfig(jcfg *jsonMetricsConfig) error { - err := cfg.loadMetricsOptions(jcfg) - if err != nil { - return err - } - - return cfg.Validate() -} - -func (cfg *MetricsConfig) loadMetricsOptions(jcfg *jsonMetricsConfig) error { - cfg.EnableStats = jcfg.EnableStats - endpointAddr, err := ma.NewMultiaddr(jcfg.PrometheusEndpoint) - if err != nil { - return fmt.Errorf("loadMetricsOptions: PrometheusEndpoint multiaddr: %v", err) - } - cfg.PrometheusEndpoint = endpointAddr - - return config.ParseDurations( - metricsConfigKey, - &config.DurationOpt{ - Duration: jcfg.ReportingInterval, - Dst: &cfg.ReportingInterval, - Name: "metrics.reporting_interval", - }, - ) -} - -// ToJSON generates a human-friendly JSON representation of this Config. -func (cfg *MetricsConfig) ToJSON() ([]byte, error) { - jcfg := cfg.toJSONConfig() - - return config.DefaultJSONMarshal(jcfg) -} - -func (cfg *MetricsConfig) toJSONConfig() *jsonMetricsConfig { - return &jsonMetricsConfig{ - EnableStats: cfg.EnableStats, - PrometheusEndpoint: cfg.PrometheusEndpoint.String(), - ReportingInterval: cfg.ReportingInterval.String(), - } -} - -// ToDisplayJSON returns JSON config as a string. 
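//
// An illustrative "metrics" section matching jsonMetricsConfig above; the
// endpoint is a multiaddr and the interval a Go duration string:
//
//	{
//	  "enable_stats": true,
//	  "prometheus_endpoint": "/ip4/127.0.0.1/tcp/8888",
//	  "reporting_interval": "2s"
//	}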
-func (cfg *MetricsConfig) ToDisplayJSON() ([]byte, error) { - return config.DisplayJSON(cfg.toJSONConfig()) -} - -// TracingConfig configures tracing. -type TracingConfig struct { - config.Saver - - EnableTracing bool - JaegerAgentEndpoint ma.Multiaddr - SamplingProb float64 - ServiceName string - ClusterID string - ClusterPeername string -} - -type jsonTracingConfig struct { - EnableTracing bool `json:"enable_tracing"` - JaegerAgentEndpoint string `json:"jaeger_agent_endpoint"` - SamplingProb float64 `json:"sampling_prob"` - ServiceName string `json:"service_name"` -} - -// ConfigKey provides a human-friendly identifier for this type of Config. -func (cfg *TracingConfig) ConfigKey() string { - return tracingConfigKey -} - -// Default sets the fields of this Config to sensible values. -func (cfg *TracingConfig) Default() error { - cfg.EnableTracing = DefaultEnableTracing - agentAddr, _ := ma.NewMultiaddr(DefaultJaegerAgentEndpoint) - cfg.JaegerAgentEndpoint = agentAddr - cfg.SamplingProb = DefaultSamplingProb - cfg.ServiceName = DefaultServiceName - return nil -} - -// ApplyEnvVars fills in any Config fields found -// as environment variables. -func (cfg *TracingConfig) ApplyEnvVars() error { - jcfg := cfg.toJSONConfig() - - err := envconfig.Process(tracingEnvConfigKey, jcfg) - if err != nil { - return err - } - - return cfg.applyJSONConfig(jcfg) -} - -// Validate checks that the fields of this Config have working values, -// at least in appearance. -func (cfg *TracingConfig) Validate() error { - if cfg.EnableTracing { - if cfg.JaegerAgentEndpoint == nil { - return errors.New("tracing.jaeger_agent_endpoint is undefined") - } - if cfg.SamplingProb < 0 { - return errors.New("tracing.sampling_prob is invalid") - } - } - return nil -} - -// LoadJSON sets the fields of this Config to the values defined by the JSON -// representation of it, as generated by ToJSON. -func (cfg *TracingConfig) LoadJSON(raw []byte) error { - jcfg := &jsonTracingConfig{} - err := json.Unmarshal(raw, jcfg) - if err != nil { - logger.Error("Error unmarshaling observations config") - return err - } - - cfg.Default() - - return cfg.applyJSONConfig(jcfg) -} - -func (cfg *TracingConfig) applyJSONConfig(jcfg *jsonTracingConfig) error { - err := cfg.loadTracingOptions(jcfg) - if err != nil { - return err - } - - return cfg.Validate() -} - -func (cfg *TracingConfig) loadTracingOptions(jcfg *jsonTracingConfig) error { - cfg.EnableTracing = jcfg.EnableTracing - agentAddr, err := ma.NewMultiaddr(jcfg.JaegerAgentEndpoint) - if err != nil { - return fmt.Errorf("loadTracingOptions: JaegerAgentEndpoint multiaddr: %v", err) - } - cfg.JaegerAgentEndpoint = agentAddr - cfg.SamplingProb = jcfg.SamplingProb - cfg.ServiceName = jcfg.ServiceName - - return nil -} - -// ToJSON generates a human-friendly JSON representation of this Config. -func (cfg *TracingConfig) ToJSON() ([]byte, error) { - jcfg := cfg.toJSONConfig() - - return config.DefaultJSONMarshal(jcfg) -} - -func (cfg *TracingConfig) toJSONConfig() *jsonTracingConfig { - return &jsonTracingConfig{ - EnableTracing: cfg.EnableTracing, - JaegerAgentEndpoint: cfg.JaegerAgentEndpoint.String(), - SamplingProb: cfg.SamplingProb, - ServiceName: cfg.ServiceName, - } -} - -// ToDisplayJSON returns JSON config as a string. 
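//
// The corresponding "tracing" section (sketch; values shown are the
// defaults defined above):
//
//	{
//	  "enable_tracing": true,
//	  "jaeger_agent_endpoint": "/ip4/0.0.0.0/udp/6831",
//	  "sampling_prob": 0.3,
//	  "service_name": "cluster-daemon"
//	}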
-func (cfg *TracingConfig) ToDisplayJSON() ([]byte, error) { - return config.DisplayJSON(cfg.toJSONConfig()) -} diff --git a/packages/networking/ipfs-cluster/observations/config_test.go b/packages/networking/ipfs-cluster/observations/config_test.go deleted file mode 100644 index 2dd3b5e..0000000 --- a/packages/networking/ipfs-cluster/observations/config_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package observations - -import ( - "os" - "testing" -) - -func TestApplyEnvVars(t *testing.T) { - os.Setenv("CLUSTER_METRICS_ENABLESTATS", "true") - mcfg := &MetricsConfig{} - mcfg.Default() - mcfg.ApplyEnvVars() - - if !mcfg.EnableStats { - t.Fatal("failed to override enable_stats with env var") - } - - os.Setenv("CLUSTER_TRACING_ENABLETRACING", "true") - tcfg := &TracingConfig{} - tcfg.Default() - tcfg.ApplyEnvVars() - - if !tcfg.EnableTracing { - t.Fatal("failed to override enable_tracing with env var") - } -} diff --git a/packages/networking/ipfs-cluster/observations/metrics.go b/packages/networking/ipfs-cluster/observations/metrics.go deleted file mode 100644 index adf7d39..0000000 --- a/packages/networking/ipfs-cluster/observations/metrics.go +++ /dev/null @@ -1,146 +0,0 @@ -// Package observations sets up metric and trace exporting for IPFS cluster. -package observations - -import ( - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - - logging "github.com/ipfs/go-log/v2" -) - -var logger = logging.Logger("observations") - -var ( -// taken from ocgrpc (https://github.com/census-instrumentation/opencensus-go/blob/master/plugin/ocgrpc/stats_common.go) -// latencyDistribution = view.Distribution(0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) -// bytesDistribution = view.Distribution(0, 24, 32, 64, 128, 256, 512, 1024, 2048, 4096, 16384, 65536, 262144, 1048576) -// messageCountDistribution = view.Distribution(1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536) -) - -// attributes -var ( - ClientIPAttribute = "http.client.ip" -) - -// keys -var ( - HostKey = makeKey("host") - RemotePeerKey = makeKey("remote_peer") -) - -// metrics -var ( - // This metric is managed in state/dsstate. - Pins = stats.Int64("pins", "Total number of cluster pins", stats.UnitDimensionless) - - // These metrics are managed by the pintracker/optracker module. - PinsQueued = stats.Int64("pins/pin_queued", "Current number of pins queued for pinning", stats.UnitDimensionless) - PinsPinning = stats.Int64("pins/pinning", "Current number of pins currently pinning", stats.UnitDimensionless) - PinsPinError = stats.Int64("pins/pin_error", "Current number of pins in pin_error state", stats.UnitDimensionless) - - // These metrics and managed in the ipfshttp module. 
- PinsIpfsPins = stats.Int64("pins/ipfs_pins", "Current number of items pinned on IPFS", stats.UnitDimensionless) - PinsPinAdd = stats.Int64("pins/pin_add", "Total number of IPFS pin requests", stats.UnitDimensionless) - PinsPinAddError = stats.Int64("pins/pin_add_errors", "Total number of failed pin requests", stats.UnitDimensionless) - BlocksPut = stats.Int64("blocks/put", "Total number of blocks/put requests", stats.UnitDimensionless) - BlocksAddedSize = stats.Int64("blocks/added_size", "Total size of blocks added in bytes", stats.UnitBytes) - - BlocksAdded = stats.Int64("blocks/added", "Total number of blocks added", stats.UnitDimensionless) - BlocksAddedError = stats.Int64("blocks/put_errors", "Total number of block/put errors", stats.UnitDimensionless) - - InformerDisk = stats.Int64("informer/disk", "The metric value weight issued by disk informer", stats.UnitDimensionless) -) - -// views, which is just the aggregation of the metrics -var ( - PinsView = &view.View{ - Measure: Pins, - // This would add a tag to the metric if a value for this key - // is present in the context when recording the observation. - - //TagKeys: []tag.Key{HostKey}, - Aggregation: view.LastValue(), - } - - PinsQueuedView = &view.View{ - Measure: PinsQueued, - //TagKeys: []tag.Key{HostKey}, - Aggregation: view.LastValue(), - } - - PinsPinningView = &view.View{ - Measure: PinsPinning, - //TagKeys: []tag.Key{HostKey}, - Aggregation: view.LastValue(), - } - - PinsPinErrorView = &view.View{ - Measure: PinsPinError, - //TagKeys: []tag.Key{HostKey}, - Aggregation: view.LastValue(), - } - - PinsIpfsPinsView = &view.View{ - Measure: PinsIpfsPins, - Aggregation: view.LastValue(), - } - - PinsPinAddView = &view.View{ - Measure: PinsPinAdd, - Aggregation: view.Sum(), - } - - PinsPinAddErrorView = &view.View{ - Measure: PinsPinAddError, - Aggregation: view.Sum(), - } - - BlocksPutView = &view.View{ - Measure: BlocksPut, - Aggregation: view.Sum(), - } - - BlocksAddedSizeView = &view.View{ - Measure: BlocksAddedSize, - Aggregation: view.Sum(), - } - - BlocksAddedView = &view.View{ - Measure: BlocksAdded, - Aggregation: view.Sum(), - } - - BlocksAddedErrorView = &view.View{ - Measure: BlocksAddedError, - Aggregation: view.Sum(), - } - - InformerDiskView = &view.View{ - Measure: InformerDisk, - Aggregation: view.LastValue(), - } - - DefaultViews = []*view.View{ - PinsView, - PinsQueuedView, - PinsPinningView, - PinsPinErrorView, - PinsIpfsPinsView, - PinsPinAddView, - PinsPinAddErrorView, - BlocksPutView, - BlocksAddedSizeView, - BlocksAddedView, - BlocksAddedErrorView, - InformerDiskView, - } -) - -func makeKey(name string) tag.Key { - key, err := tag.NewKey(name) - if err != nil { - logger.Fatal(err) - } - return key -} diff --git a/packages/networking/ipfs-cluster/observations/setup.go b/packages/networking/ipfs-cluster/observations/setup.go deleted file mode 100644 index 07b98b2..0000000 --- a/packages/networking/ipfs-cluster/observations/setup.go +++ /dev/null @@ -1,159 +0,0 @@ -package observations - -import ( - "context" - "expvar" - "net/http" - "net/http/pprof" - - rpc "github.com/libp2p/go-libp2p-gorpc" - manet "github.com/multiformats/go-multiaddr/net" - - "contrib.go.opencensus.io/exporter/jaeger" - "contrib.go.opencensus.io/exporter/prometheus" - ocgorpc "github.com/lanzafame/go-libp2p-ocgorpc" - prom "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/collectors" - "go.opencensus.io/plugin/ochttp" - "go.opencensus.io/stats/view" - "go.opencensus.io/trace" - 
"go.opencensus.io/zpages" -) - -// SetupMetrics configures and starts stats tooling, -// if enabled. -func SetupMetrics(cfg *MetricsConfig) error { - if cfg.EnableStats { - logger.Infof("stats collection enabled on %s", cfg.PrometheusEndpoint) - return setupMetrics(cfg) - } - return nil -} - -// JaegerTracer implements ipfscluster.Tracer. -type JaegerTracer struct { - jaeger *jaeger.Exporter -} - -// SetClient no-op. -func (t *JaegerTracer) SetClient(*rpc.Client) {} - -// Shutdown the tracer and flush any remaining traces. -func (t *JaegerTracer) Shutdown(context.Context) error { - // nil check for testing, where tracer may not be configured - if t != (*JaegerTracer)(nil) && t.jaeger != nil { - t.jaeger.Flush() - } - return nil -} - -// SetupTracing configures and starts tracing tooling, -// if enabled. -func SetupTracing(cfg *TracingConfig) (*JaegerTracer, error) { - if !cfg.EnableTracing { - return nil, nil - } - logger.Info("tracing enabled...") - je, err := setupTracing(cfg) - if err != nil { - return nil, err - } - return &JaegerTracer{je}, nil -} - -func setupMetrics(cfg *MetricsConfig) error { - // setup Prometheus - registry := prom.NewRegistry() - goCollector := collectors.NewGoCollector() - procCollector := collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}) - registry.MustRegister(goCollector, procCollector) - pe, err := prometheus.NewExporter(prometheus.Options{ - Namespace: "ipfscluster", - Registry: registry, - }) - if err != nil { - return err - } - - // register prometheus with opencensus - view.RegisterExporter(pe) - view.SetReportingPeriod(cfg.ReportingInterval) - - // register the metrics views of interest - if err := view.Register(DefaultViews...); err != nil { - return err - } - if err := view.Register( - ochttp.ClientCompletedCount, - ochttp.ClientRoundtripLatencyDistribution, - ochttp.ClientReceivedBytesDistribution, - ochttp.ClientSentBytesDistribution, - ); err != nil { - return err - } - if err := view.Register( - ochttp.ServerRequestCountView, - ochttp.ServerRequestBytesView, - ochttp.ServerResponseBytesView, - ochttp.ServerLatencyView, - ochttp.ServerRequestCountByMethod, - ochttp.ServerResponseCountByStatusCode, - ); err != nil { - return err - } - if err := view.Register(ocgorpc.DefaultServerViews...); err != nil { - return err - } - - _, promAddr, err := manet.DialArgs(cfg.PrometheusEndpoint) - if err != nil { - return err - } - go func() { - mux := http.NewServeMux() - zpages.Handle(mux, "/debug") - mux.Handle("/metrics", pe) - mux.Handle("/debug/vars", expvar.Handler()) - mux.HandleFunc("/debug/pprof/", pprof.Index) - mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) - mux.HandleFunc("/debug/pprof/profile", pprof.Profile) - mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) - mux.HandleFunc("/debug/pprof/trace", pprof.Trace) - mux.Handle("/debug/pprof/block", pprof.Handler("block")) - mux.Handle("/debug/pprof/goroutine", pprof.Handler("goroutine")) - mux.Handle("/debug/pprof/heap", pprof.Handler("heap")) - mux.Handle("/debug/pprof/mutex", pprof.Handler("mutex")) - mux.Handle("/debug/pprof/threadcreate", pprof.Handler("threadcreate")) - if err := http.ListenAndServe(promAddr, mux); err != nil { - logger.Fatalf("Failed to run Prometheus /metrics endpoint: %v", err) - } - }() - return nil -} - -// setupTracing configures a OpenCensus Tracing exporter for Jaeger. 
-func setupTracing(cfg *TracingConfig) (*jaeger.Exporter, error) { - _, agentAddr, err := manet.DialArgs(cfg.JaegerAgentEndpoint) - if err != nil { - return nil, err - } - // setup Jaeger - je, err := jaeger.NewExporter(jaeger.Options{ - AgentEndpoint: agentAddr, - Process: jaeger.Process{ - ServiceName: cfg.ServiceName + "-" + cfg.ClusterPeername, - Tags: []jaeger.Tag{ - jaeger.StringTag("cluster_id", cfg.ClusterID), - }, - }, - }) - if err != nil { - return nil, err - } - - // register jaeger with opencensus - trace.RegisterExporter(je) - // configure tracing - trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(cfg.SamplingProb)}) - return je, nil -} diff --git a/packages/networking/ipfs-cluster/peer_manager_test.go b/packages/networking/ipfs-cluster/peer_manager_test.go deleted file mode 100644 index 237a947..0000000 --- a/packages/networking/ipfs-cluster/peer_manager_test.go +++ /dev/null @@ -1,729 +0,0 @@ -package ipfscluster - -import ( - "context" - "fmt" - "math/rand" - "sync" - "testing" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/config" - "github.com/ipfs-cluster/ipfs-cluster/test" - - host "github.com/libp2p/go-libp2p/core/host" - peer "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" -) - -func peers(ctx context.Context, t *testing.T, c *Cluster) []api.ID { - t.Helper() - out := make(chan api.ID) - go func() { - c.Peers(ctx, out) - }() - var ids []api.ID - for id := range out { - ids = append(ids, id) - } - return ids -} - -func peerManagerClusters(t *testing.T) ([]*Cluster, []*test.IpfsMock, host.Host) { - cls := make([]*Cluster, nClusters) - mocks := make([]*test.IpfsMock, nClusters) - var wg sync.WaitGroup - for i := 0; i < nClusters; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - cl, m := createOnePeerCluster(t, i, testingClusterSecret) - cls[i] = cl - mocks[i] = m - }(i) - } - wg.Wait() - - // Creat an identity - ident, err := config.NewIdentity() - if err != nil { - t.Fatal(err) - } - // Create a config - cfg := &Config{} - cfg.Default() - listen, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0") - cfg.ListenAddr = []ma.Multiaddr{listen} - cfg.Secret = testingClusterSecret - - h, _, _ := createHost(t, ident.PrivateKey, testingClusterSecret, cfg.ListenAddr) - - // Connect host to all peers. This will allow that they can discover - // each others via DHT. 
- for i := 0; i < nClusters; i++ { - err := h.Connect( - context.Background(), - peer.AddrInfo{ - ID: cls[i].host.ID(), - Addrs: cls[i].host.Addrs(), - }, - ) - if err != nil { - t.Fatal(err) - } - } - return cls, mocks, h -} - -func clusterAddr(c *Cluster) ma.Multiaddr { - for _, a := range c.host.Addrs() { - if _, err := a.ValueForProtocol(ma.P_IP4); err == nil { - p := c.id.String() - cAddr, _ := ma.NewMultiaddr(fmt.Sprintf("%s/p2p/%s", a, p)) - return cAddr - } - } - return nil -} - -func TestClustersPeerAdd(t *testing.T) { - ctx := context.Background() - clusters, mocks, boot := peerManagerClusters(t) - defer shutdownClusters(t, clusters, mocks) - defer boot.Close() - - if len(clusters) < 2 { - t.Skip("need at least 2 nodes for this test") - } - - for i := 1; i < len(clusters); i++ { - id, err := clusters[0].PeerAdd(ctx, clusters[i].id) - if err != nil { - t.Fatal(err) - } - if !containsPeer(id.ClusterPeers, clusters[0].id) { - // ClusterPeers is originally empty and contains nodes as we add them - t.Log(i, id.ClusterPeers) - t.Fatal("cluster peers should be up to date with the cluster") - } - } - - h := test.Cid1 - _, err := clusters[1].Pin(ctx, h, api.PinOptions{}) - if err != nil { - t.Fatal(err) - } - ttlDelay() - - f := func(t *testing.T, c *Cluster) { - ids := peers(ctx, t, c) - - // check they are tracked by the peer manager - if len(ids) != nClusters { - //t.Log(ids) - t.Error("added clusters are not part of clusters") - } - - // Check that they are part of the consensus - pins, err := c.pinsSlice(ctx) - if err != nil { - t.Fatal(err) - } - if len(pins) != 1 { - t.Log(pins) - t.Error("expected 1 pin everywhere") - } - - if len(c.ID(ctx).ClusterPeers) != nClusters { - t.Log(c.ID(ctx).ClusterPeers) - t.Error("By now cluster peers should reflect all peers") - } - } - runF(t, clusters, f) - - for _, c := range clusters { - c.Shutdown(ctx) - } - - f2 := func(t *testing.T, c *Cluster) { - // check that all peers are part of the peerstore - // (except ourselves) - addrs := c.peerManager.LoadPeerstore() - peerMap := make(map[peer.ID]struct{}) - for _, a := range addrs { - pinfo, err := peer.AddrInfoFromP2pAddr(a) - if err != nil { - t.Fatal(err) - } - peerMap[pinfo.ID] = struct{}{} - } - - if len(peerMap) == 0 { - t.Errorf("%s: peerstore to store at least 1 peer", c.id) - } - - } - runF(t, clusters, f2) -} - -func TestClustersJoinBadPeer(t *testing.T) { - ctx := context.Background() - clusters, mocks, boot := peerManagerClusters(t) - defer shutdownClusters(t, clusters[0:1], mocks[0:1]) - defer boot.Close() - - addr := clusterAddr(clusters[1]) - - if len(clusters) < 2 { - t.Skip("need at least 2 nodes for this test") - } - - for _, c := range clusters[1:] { - c.Shutdown(ctx) - } - - // We add a cluster that has been shutdown - // (closed transports) - // Let the OS actually close the ports. - // Sometimes we hang otherwise. 
- delay() - err := clusters[0].Join(ctx, addr) - if err == nil { - t.Error("expected an error") - } - ids := peers(ctx, t, clusters[0]) - if len(ids) != 1 { - t.Error("cluster should have only one member") - } -} - -func TestClustersPeerAddInUnhealthyCluster(t *testing.T) { - ctx := context.Background() - clusters, mocks, boot := peerManagerClusters(t) - defer shutdownClusters(t, clusters, mocks) - defer boot.Close() - - if len(clusters) < 3 { - t.Skip("need at least 3 nodes for this test") - } - - clusters[0].PeerAdd(ctx, clusters[1].id) - ttlDelay() - ids := peers(ctx, t, clusters[1]) - // raft will have only 2 peers - // crdt will have all peers autodiscovered by now - if len(ids) < 2 { - t.Error("expected at least 2 peers") - } - - // Now we shutdown the one member of the running cluster - // and try to add someone else. - err := clusters[1].Shutdown(ctx) - if err != nil { - t.Error("Shutdown should be clean: ", err) - } - switch consensus { - case "raft": - delay() // This makes sure the leader realizes that it's not - // leader anymore. Otherwise it commits fine. - - _, err = clusters[0].PeerAdd(ctx, clusters[2].id) - - if err == nil { - t.Error("expected an error") - } - - ids = peers(ctx, t, clusters[0]) - if len(ids) != 2 { - t.Error("cluster should still have 2 peers") - } - case "crdt": - // crdt does not really care whether we add or remove - - delay() // let metrics expire - _, err = clusters[0].PeerAdd(ctx, clusters[2].id) - - if err != nil { - t.Error(err) - } - - ttlDelay() - ids = peers(ctx, t, clusters[0]) - if len(ids) < 2 { - t.Error("cluster should have at least 2 peers after removing and adding 1") - } - default: - t.Fatal("bad consensus") - } -} - -func TestClustersPeerRemove(t *testing.T) { - ctx := context.Background() - clusters, mocks := createClusters(t) - defer shutdownClusters(t, clusters, mocks) - - if len(clusters) < 2 { - t.Skip("test needs at least 2 clusters") - } - - switch consensus { - case "crdt": - // Peer Rm is a no op. - return - case "raft": - p := clusters[1].ID(ctx).ID - err := clusters[0].PeerRemove(ctx, p) - if err != nil { - t.Error(err) - } - - delay() - - f := func(t *testing.T, c *Cluster) { - if c.ID(ctx).ID == p { //This is the removed cluster - _, ok := <-c.Done() - if ok { - t.Error("removed peer should have exited") - } - } else { - ids := peers(ctx, t, c) - if len(ids) != nClusters-1 { - t.Error("should have removed 1 peer") - } - } - } - - runF(t, clusters, f) - default: - t.Fatal("bad consensus") - } -} - -func TestClustersPeerRemoveSelf(t *testing.T) { - ctx := context.Background() - // this test hangs sometimes if there are problems - clusters, mocks := createClusters(t) - defer shutdownClusters(t, clusters, mocks) - - switch consensus { - case "crdt": - // remove is a no op in CRDTs - return - - case "raft": - for i := 0; i < len(clusters); i++ { - waitForLeaderAndMetrics(t, clusters) - peers := peers(ctx, t, clusters[i]) - t.Logf("Current cluster size: %d", len(peers)) - if len(peers) != (len(clusters) - i) { - t.Fatal("Previous peers not removed correctly") - } - err := clusters[i].PeerRemove(ctx, clusters[i].ID(ctx).ID) - // Last peer member won't be able to remove itself - // In this case, we shut it down. 
- if err != nil { - if i != len(clusters)-1 { //not last - t.Error(err) - } else { - err := clusters[i].Shutdown(ctx) - if err != nil { - t.Fatal(err) - } - } - } - // potential hanging place - _, more := <-clusters[i].Done() - if more { - t.Error("should be done") - } - } - default: - t.Fatal("bad consensus") - } -} - -func TestClustersPeerRemoveLeader(t *testing.T) { - ctx := context.Background() - // this test is like the one above, except it always - // removes the current leader. - // this test hangs sometimes if there are problems - clusters, mocks := createClusters(t) - defer shutdownClusters(t, clusters, mocks) - - switch consensus { - case "crdt": - return - case "raft": - - findLeader := func(t *testing.T) *Cluster { - var l peer.ID - for _, c := range clusters { - if !c.shutdownB { - waitForLeaderAndMetrics(t, clusters) - l, _ = c.consensus.Leader(ctx) - } - } - for _, c := range clusters { - if c.id == l { - return c - } - } - t.Fatal("no leader found") - return nil - } - - for i := 0; i < len(clusters); i++ { - leader := findLeader(t) - peers := peers(ctx, t, leader) - t.Logf("Current cluster size: %d", len(peers)) - if len(peers) != (len(clusters) - i) { - t.Fatal("Previous peers not removed correctly") - } - err := leader.PeerRemove(ctx, leader.id) - // Last peer member won't be able to remove itself - // In this case, we shut it down. - if err != nil { - if i != len(clusters)-1 { //not last - t.Error(err) - } else { - err := leader.Shutdown(ctx) - if err != nil { - t.Fatal(err) - } - } - } - _, more := <-leader.Done() - if more { - t.Error("should be done") - } - time.Sleep(time.Second / 2) - } - default: - t.Fatal("bad consensus") - } -} - -func TestClustersPeerRemoveReallocsPins(t *testing.T) { - // This test is testing that the peers are vacated upon - // removal. - - ctx := context.Background() - clusters, mocks := createClusters(t) - - if len(clusters) < 3 { - t.Skip("test needs at least 3 clusters") - } - - // Adjust the replication factor for re-allocation - for _, c := range clusters { - c.config.ReplicationFactorMin = nClusters - 1 - c.config.ReplicationFactorMax = nClusters - 1 - } - - // We choose to remove the leader, to make things even more interesting - chosenID, err := clusters[0].consensus.Leader(ctx) - if err != nil { - // choose a random peer - crdt - i := rand.Intn(nClusters) - chosenID = clusters[i].host.ID() - } - - var chosen *Cluster - var chosenIndex int - for i, cl := range clusters { - if id := cl.ID(ctx).ID; id == chosenID { - chosen = cl - chosenIndex = i - break - } - } - if chosen == nil { - shutdownClusters(t, clusters, mocks) - t.Fatal("did not get to choose a peer?") - } - - chosenMock := mocks[chosenIndex] - - // Remove the chosen peer from set - clusters = append(clusters[:chosenIndex], clusters[chosenIndex+1:]...) - mocks = append(mocks[:chosenIndex], mocks[chosenIndex+1:]...) - defer chosen.Shutdown(ctx) - defer chosenMock.Close() - defer shutdownClusters(t, clusters, mocks) - - prefix := test.Cid1.Prefix() - - // Pin nCluster random pins. This ensures each peer will - // pin the same number of Cids. - for i := 0; i < nClusters; i++ { - h, err := prefix.Sum(randomBytes()) - if err != nil { - t.Fatal(err) - } - _, err = chosen.Pin(ctx, api.NewCid(h), api.PinOptions{}) - if err != nil { - t.Fatal(err) - } - ttlDelay() - } - - pinDelay() - - // At this point, all peers must have nClusters -1 pins - // associated to them. - // Find out which pins are associated to the chosen peer. 
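	// (With ReplicationFactorMin/Max forced to nClusters-1 above, each of
	// the nClusters pins is allocated to all peers but one, so exactly
	// nClusters-1 of them should name the chosen peer; the length check a
	// few lines down asserts precisely that.)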
- interestingCids := []api.Cid{} - - pins, err := chosen.pinsSlice(ctx) - if err != nil { - t.Fatal(err) - } - if len(pins) != nClusters { - t.Fatal("expected number of tracked pins to be nClusters") - } - for _, p := range pins { - if containsPeer(p.Allocations, chosenID) { - //t.Logf("%s pins %s", chosenID, p.Cid) - interestingCids = append(interestingCids, p.Cid) - } - } - - if len(interestingCids) != nClusters-1 { - t.Fatalf("Expected %d allocated CIDs but got %d", nClusters-1, - len(interestingCids)) - } - - // Now the chosen removes itself. Ignoring errors as they will - // be caught below and crdt does error here. - chosen.PeerRemove(ctx, chosenID) - - delay() - waitForLeaderAndMetrics(t, clusters) - delay() // this seems to fail when not waiting enough... - - for _, icid := range interestingCids { - // Now check that the allocations are new. - newPin, err := clusters[1].PinGet(ctx, icid) - if err != nil { - t.Fatal("error getting the new allocations for", icid) - } - if containsPeer(newPin.Allocations, chosenID) { - t.Fatal("pin should not be allocated to the removed peer") - } - } -} - -func TestClustersPeerJoin(t *testing.T) { - ctx := context.Background() - clusters, mocks, boot := peerManagerClusters(t) - defer shutdownClusters(t, clusters, mocks) - defer boot.Close() - - if len(clusters) < 3 { - t.Skip("test needs at least 3 clusters") - } - - for i := 1; i < len(clusters); i++ { - err := clusters[i].Join(ctx, clusterAddr(clusters[0])) - if err != nil { - t.Fatal(err) - } - } - - h := test.Cid1 - clusters[0].Pin(ctx, h, api.PinOptions{}) - ttlDelay() - - for _, p := range clusters { - t.Log(p.id.String()) - } - - f := func(t *testing.T, c *Cluster) { - peers := peers(ctx, t, c) - str := c.id.String() + "\n" - for _, p := range peers { - str += " - " + p.ID.String() + "\n" - } - t.Log(str) - if len(peers) != nClusters { - t.Error("all peers should be connected") - } - pins, err := c.pinsSlice(ctx) - if err != nil { - t.Fatal(err) - } - if len(pins) != 1 || !pins[0].Cid.Equals(h) { - t.Error("all peers should have pinned the cid") - } - } - runF(t, clusters, f) -} - -func TestClustersPeerJoinAllAtOnce(t *testing.T) { - ctx := context.Background() - clusters, mocks, boot := peerManagerClusters(t) - defer shutdownClusters(t, clusters, mocks) - defer boot.Close() - - if len(clusters) < 2 { - t.Skip("test needs at least 2 clusters") - } - - f := func(t *testing.T, c *Cluster) { - err := c.Join(ctx, clusterAddr(clusters[0])) - if err != nil { - t.Fatal(err) - } - } - runF(t, clusters[1:], f) - - h := test.Cid1 - clusters[0].Pin(ctx, h, api.PinOptions{}) - ttlDelay() - - f2 := func(t *testing.T, c *Cluster) { - peers := peers(ctx, t, c) - if len(peers) != nClusters { - t.Error("all peers should be connected") - } - pins, err := c.pinsSlice(ctx) - if err != nil { - t.Fatal(err) - } - if len(pins) != 1 || !pins[0].Cid.Equals(h) { - t.Error("all peers should have pinned the cid") - } - } - runF(t, clusters, f2) -} - -// This test fails a lot when re-use port is not available (MacOS, Windows) -// func TestClustersPeerJoinAllAtOnceWithRandomBootstrap(t *testing.T) { -// clusters, mocks,boot := peerManagerClusters(t) -// defer shutdownClusters(t, clusters, mocks) -// defer boot.Close() -// if len(clusters) < 3 { -// t.Skip("test needs at least 3 clusters") -// } - -// delay() - -// // We have a 2 node cluster and the rest of nodes join -// // one of the two seeds randomly - -// err := clusters[1].Join(clusterAddr(clusters[0])) -// if err != nil { -// t.Fatal(err) -// } - -// f := func(t 
*testing.T, c *Cluster) { -// j := rand.Intn(2) -// err := c.Join(clusterAddr(clusters[j])) -// if err != nil { -// t.Fatal(err) -// } -// } -// runF(t, clusters[2:], f) - -// hash := test.Cid1 -// clusters[0].Pin(api.PinCid(hash)) -// delay() - -// f2 := func(t *testing.T, c *Cluster) { -// peers := c.Peers() -// if len(peers) != nClusters { -// peersIds := []peer.ID{} -// for _, p := range peers { -// peersIds = append(peersIds, p.ID) -// } -// t.Errorf("%s sees %d peers: %s", c.id, len(peers), peersIds) -// } -// pins := c.Pins() -// if len(pins) != 1 || !pins[0].Cid.Equals(hash) { -// t.Error("all peers should have pinned the cid") -// } -// } -// runF(t, clusters, f2) -// } - -// Tests that a peer catches up on the state correctly after rejoining -func TestClustersPeerRejoin(t *testing.T) { - ctx := context.Background() - clusters, mocks, boot := peerManagerClusters(t) - defer shutdownClusters(t, clusters, mocks) - defer boot.Close() - - // pin something in c0 - pin1 := test.Cid1 - _, err := clusters[0].Pin(ctx, pin1, api.PinOptions{}) - if err != nil { - t.Fatal(err) - } - - // add all clusters - for i := 1; i < len(clusters); i++ { - err := clusters[i].Join(ctx, clusterAddr(clusters[0])) - if err != nil { - t.Fatal(err) - } - } - - delay() - - // all added peers should have the content - for i := 1; i < len(clusters); i++ { - pinfo := clusters[i].tracker.Status(ctx, pin1) - if pinfo.Status != api.TrackerStatusPinned { - t.Error("Added peers should pin the content") - } - } - - clusters[0].config.LeaveOnShutdown = true - err = clusters[0].Shutdown(ctx) - if err != nil { - t.Fatal(err) - } - mocks[0].Close() - - delay() - - // Forget peer so we can re-add one in same address/port - f := func(t *testing.T, c *Cluster) { - c.peerManager.RmPeer(clusters[0].id) // errors ignore for crdts - } - runF(t, clusters[1:], f) - - // Pin something on the rest - pin2 := test.Cid2 - _, err = clusters[1].Pin(ctx, pin2, api.PinOptions{}) - if err != nil { - t.Fatal(err) - } - - pinDelay() - - // Rejoin c0 - c0, m0 := createOnePeerCluster(t, 0, testingClusterSecret) - clusters[0] = c0 - mocks[0] = m0 - - delay() - - err = c0.Join(ctx, clusterAddr(clusters[1])) - if err != nil { - t.Fatal(err) - } - - delay() - - pinfo := clusters[0].tracker.Status(ctx, pin2) - if pinfo.Status != api.TrackerStatusPinned { - t.Error("re-joined cluster should have caught up") - } - - pinfo = clusters[0].tracker.Status(ctx, pin1) - if pinfo.Status != api.TrackerStatusPinned { - t.Error("re-joined cluster should have original pin") - } -} diff --git a/packages/networking/ipfs-cluster/pintracker/optracker/operation.go b/packages/networking/ipfs-cluster/pintracker/optracker/operation.go deleted file mode 100644 index 748c18f..0000000 --- a/packages/networking/ipfs-cluster/pintracker/optracker/operation.go +++ /dev/null @@ -1,321 +0,0 @@ -package optracker - -import ( - "context" - "fmt" - "strings" - "sync" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/api" - "go.opencensus.io/trace" -) - -//go:generate stringer -type=OperationType - -// OperationType represents the kinds of operations that the PinTracker -// performs and the operationTracker tracks the status of. -type OperationType int - -const ( - // OperationUnknown represents an unknown operation. - OperationUnknown OperationType = iota - // OperationPin represents a pin operation. - OperationPin - // OperationUnpin represents an unpin operation. 
-	OperationUnpin
-	// OperationRemote represents a no-op operation
-	OperationRemote
-	// OperationShard represents a meta pin. We don't
-	// pin these.
-	OperationShard
-)
-
-//go:generate stringer -type=Phase
-
-// Phase represents the multiple phases that an operation can be in.
-type Phase int
-
-const (
-	// PhaseError represents an error state.
-	PhaseError Phase = iota
-	// PhaseQueued represents the queued phase of an operation.
-	PhaseQueued
-	// PhaseInProgress represents the operation as in progress.
-	PhaseInProgress
-	// PhaseDone represents the operation once finished.
-	PhaseDone
-)
-
-// Operation represents an ongoing operation involving a
-// particular Cid. It provides the type and phase of operation
-// and a way to mark the operation finished (also used to cancel).
-type Operation struct {
-	ctx    context.Context
-	cancel func()
-
-	tracker *OperationTracker
-
-	// RO fields
-	opType OperationType
-	pin    api.Pin
-
-	// RW fields
-	mu           sync.RWMutex
-	phase        Phase
-	attemptCount int
-	priority     bool
-	error        string
-	ts           time.Time
-}
-
-// newOperation creates a new Operation.
-func newOperation(ctx context.Context, pin api.Pin, typ OperationType, ph Phase, tracker *OperationTracker) *Operation {
-	ctx, span := trace.StartSpan(ctx, "optracker/NewOperation")
-	defer span.End()
-
-	ctx, cancel := context.WithCancel(ctx)
-	op := &Operation{
-		ctx:    ctx,
-		cancel: cancel,
-
-		tracker: tracker,
-
-		pin:          pin,
-		opType:       typ,
-		phase:        ph,
-		attemptCount: 0,
-		priority:     false,
-		ts:           time.Now(),
-		error:        "",
-	}
-	return op
-}
-
-// String returns a string representation of an Operation.
-func (op *Operation) String() string {
-	var b strings.Builder
-
-	fmt.Fprintf(&b, "type: %s\n", op.Type().String())
-	fmt.Fprint(&b, "pin:\n")
-	pinstr := op.Pin().String()
-	pinstrs := strings.Split(pinstr, "\n")
-	for _, s := range pinstrs {
-		fmt.Fprintf(&b, "\t%s\n", s)
-	}
-	fmt.Fprintf(&b, "phase: %s\n", op.Phase().String())
-	fmt.Fprintf(&b, "attemptCount: %d\n", op.AttemptCount())
-	fmt.Fprintf(&b, "error: %s\n", op.Error())
-	fmt.Fprintf(&b, "timestamp: %s\n", op.Timestamp().String())
-
-	return b.String()
-}
-
-// Cid returns the Cid associated to this operation.
-func (op *Operation) Cid() api.Cid {
-	return op.pin.Cid
-}
-
-// Context returns the context associated to this operation.
-func (op *Operation) Context() context.Context {
-	return op.ctx
-}
-
-// Cancel will cancel the context associated to this operation.
-func (op *Operation) Cancel() {
-	_, span := trace.StartSpan(op.ctx, "optracker/Cancel")
-	op.cancel()
-	span.End()
-}
-
-// Phase returns the Phase.
-func (op *Operation) Phase() Phase {
-	var ph Phase
-
-	op.mu.RLock()
-	ph = op.phase
-	op.mu.RUnlock()
-
-	return ph
-}
-
-// SetPhase changes the Phase and updates the timestamp.
-func (op *Operation) SetPhase(ph Phase) {
-	_, span := trace.StartSpan(op.ctx, "optracker/SetPhase")
-	op.mu.Lock()
-	{
-		op.tracker.recordMetricUnsafe(op, -1)
-		op.phase = ph
-		op.ts = time.Now()
-		op.tracker.recordMetricUnsafe(op, 1)
-	}
-	op.mu.Unlock()
-
-	span.End()
-}
-
-// AttemptCount returns the number of times that this operation has been in
-// progress.
-func (op *Operation) AttemptCount() int {
-	var retries int
-
-	op.mu.RLock()
-	retries = op.attemptCount
-	op.mu.RUnlock()
-
-	return retries
-}
-
-// IncAttempt does a plus-one on the AttemptCount.
-func (op *Operation) IncAttempt() {
-	op.mu.Lock()
-	op.attemptCount++
-	op.mu.Unlock()
-}
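The Operation type deleted above pairs a cancellable context with a sync.RWMutex: the RW fields are only read or written under the lock, while cancellation travels out-of-band through the context, so checking for it never blocks. A minimal standalone sketch of that pattern, with illustrative names rather than the vendored API:

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type Phase int

const (
	PhaseQueued Phase = iota
	PhaseInProgress
	PhaseDone
)

// op models the Operation pattern: read-only fields set at construction,
// read-write fields guarded by mu, cancellation via the context.
type op struct {
	ctx    context.Context
	cancel context.CancelFunc

	mu    sync.RWMutex
	phase Phase
	ts    time.Time
}

func newOp(ctx context.Context) *op {
	ctx, cancel := context.WithCancel(ctx)
	return &op{ctx: ctx, cancel: cancel, phase: PhaseQueued, ts: time.Now()}
}

// SetPhase updates the phase and timestamp under the write lock.
func (o *op) SetPhase(p Phase) {
	o.mu.Lock()
	o.phase = p
	o.ts = time.Now()
	o.mu.Unlock()
}

// Canceled is a non-blocking poll of the operation's context.
func (o *op) Canceled() bool {
	select {
	case <-o.ctx.Done():
		return true
	default:
		return false
	}
}

func main() {
	o := newOp(context.Background())
	o.SetPhase(PhaseInProgress)
	fmt.Println(o.Canceled()) // false: still running
	o.cancel()
	fmt.Println(o.Canceled()) // true: context canceled
}
```

Keeping cancellation in the context rather than in a locked flag also lets other goroutines select on the operation's Done() channel, which is how in-flight pins get aborted.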
-
-// PriorityPin returns true if the pin has been marked as a priority pin.
-func (op *Operation) PriorityPin() bool {
-	var p bool
-	op.mu.RLock()
-	p = op.priority
-	op.mu.RUnlock()
-	return p
-}
-
-// SetPriorityPin marks or unmarks the pin as a priority pin.
-func (op *Operation) SetPriorityPin(p bool) {
-	op.mu.Lock()
-	op.priority = p
-	op.mu.Unlock()
-}
-
-// Error returns any error message attached to the operation.
-func (op *Operation) Error() string {
-	var err string
-	op.mu.RLock()
-	err = op.error
-	op.mu.RUnlock()
-	return err
-}
-
-// SetError sets the phase to PhaseError along with
-// an error message. It updates the timestamp.
-func (op *Operation) SetError(err error) {
-	_, span := trace.StartSpan(op.ctx, "optracker/SetError")
-	op.mu.Lock()
-	{
-		op.tracker.recordMetricUnsafe(op, -1)
-		op.phase = PhaseError
-		op.error = err.Error()
-		op.ts = time.Now()
-		op.tracker.recordMetricUnsafe(op, 1)
-	}
-	op.mu.Unlock()
-	span.End()
-}
-
-// Type returns the operation Type.
-func (op *Operation) Type() OperationType {
-	return op.opType
-}
-
-// Pin returns the Pin object associated to the operation.
-func (op *Operation) Pin() api.Pin {
-	return op.pin
-}
-
-// Timestamp returns the time when this operation was
-// last modified (phase changed, error was set...).
-func (op *Operation) Timestamp() time.Time {
-	var ts time.Time
-	op.mu.RLock()
-	ts = op.ts
-	op.mu.RUnlock()
-	return ts
-}
-
-// Canceled returns whether the context for this
-// operation has been canceled.
-func (op *Operation) Canceled() bool {
-	ctx, span := trace.StartSpan(op.ctx, "optracker/Canceled")
-	_ = ctx
-	defer span.End()
-	select {
-	case <-op.ctx.Done():
-		return true
-	default:
-		return false
-	}
-}
-
-// ToTrackerStatus returns an api.TrackerStatus reflecting
-// the current status of this operation. It's a translation
-// from the Type and the Phase.
-func (op *Operation) ToTrackerStatus() api.TrackerStatus {
-	typ := op.Type()
-	ph := op.Phase()
-	switch typ {
-	case OperationPin:
-		switch ph {
-		case PhaseError:
-			return api.TrackerStatusPinError
-		case PhaseQueued:
-			return api.TrackerStatusPinQueued
-		case PhaseInProgress:
-			return api.TrackerStatusPinning
-		case PhaseDone:
-			return api.TrackerStatusPinned
-		default:
-			return api.TrackerStatusUndefined
-		}
-	case OperationUnpin:
-		switch ph {
-		case PhaseError:
-			return api.TrackerStatusUnpinError
-		case PhaseQueued:
-			return api.TrackerStatusUnpinQueued
-		case PhaseInProgress:
-			return api.TrackerStatusUnpinning
-		case PhaseDone:
-			return api.TrackerStatusUnpinned
-		default:
-			return api.TrackerStatusUndefined
-		}
-	case OperationRemote:
-		return api.TrackerStatusRemote
-	case OperationShard:
-		return api.TrackerStatusSharded
-	default:
-		return api.TrackerStatusUndefined
-	}
-}
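ToTrackerStatus above and TrackerStatusToOperationPhase below are meant to be inverses: the externally visible tracker status is a pure function of (operation type, phase), and statuses can be mapped back when reconstructing operations. A hedged sketch of that round-trip property using stand-in types and status names, not the real api constants:

```go
package main

import "fmt"

type OpType int
type Phase int
type Status string

const (
	OpPin OpType = iota
	OpUnpin
)

const (
	Queued Phase = iota
	InProgress
	Done
	Error
)

// toStatus mirrors ToTrackerStatus: (type, phase) -> status.
func toStatus(t OpType, p Phase) Status {
	m := map[OpType]map[Phase]Status{
		OpPin:   {Queued: "pin_queued", InProgress: "pinning", Done: "pinned", Error: "pin_error"},
		OpUnpin: {Queued: "unpin_queued", InProgress: "unpinning", Done: "unpinned", Error: "unpin_error"},
	}
	return m[t][p]
}

// fromStatus mirrors TrackerStatusToOperationPhase: the inverse translation.
func fromStatus(s Status) (OpType, Phase) {
	for _, t := range []OpType{OpPin, OpUnpin} {
		for _, p := range []Phase{Queued, InProgress, Done, Error} {
			if toStatus(t, p) == s {
				return t, p
			}
		}
	}
	return OpPin, Error // unknown statuses collapse to an error phase
}

func main() {
	// Every (type, phase) pair survives the round trip.
	for _, t := range []OpType{OpPin, OpUnpin} {
		for _, p := range []Phase{Queued, InProgress, Done, Error} {
			t2, p2 := fromStatus(toStatus(t, p))
			fmt.Println(toStatus(t, p), t2 == t && p2 == p)
		}
	}
}
```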
-
-// TrackerStatusToOperationPhase takes an api.TrackerStatus and
-// converts it to an OpType and Phase.
-func TrackerStatusToOperationPhase(status api.TrackerStatus) (OperationType, Phase) {
-	switch status {
-	case api.TrackerStatusPinError:
-		return OperationPin, PhaseError
-	case api.TrackerStatusPinQueued:
-		return OperationPin, PhaseQueued
-	case api.TrackerStatusPinning:
-		return OperationPin, PhaseInProgress
-	case api.TrackerStatusPinned:
-		return OperationPin, PhaseDone
-	case api.TrackerStatusUnpinError:
-		return OperationUnpin, PhaseError
-	case api.TrackerStatusUnpinQueued:
-		return OperationUnpin, PhaseQueued
-	case api.TrackerStatusUnpinning:
-		return OperationUnpin, PhaseInProgress
-	case api.TrackerStatusUnpinned:
-		return OperationUnpin, PhaseDone
-	case api.TrackerStatusRemote:
-		return OperationRemote, PhaseDone
-	case api.TrackerStatusSharded:
-		return OperationShard, PhaseDone
-	default:
-		return OperationUnknown, PhaseError
-	}
-}
diff --git a/packages/networking/ipfs-cluster/pintracker/optracker/operation_test.go b/packages/networking/ipfs-cluster/pintracker/optracker/operation_test.go
deleted file mode 100644
index 9a09853..0000000
--- a/packages/networking/ipfs-cluster/pintracker/optracker/operation_test.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package optracker
-
-import (
-	"context"
-	"errors"
-	"testing"
-	"time"
-
-	"github.com/ipfs-cluster/ipfs-cluster/api"
-	"github.com/ipfs-cluster/ipfs-cluster/test"
-)
-
-func TestOperation(t *testing.T) {
-	tim := time.Now().Add(-2 * time.Second)
-	op := newOperation(context.Background(), api.PinCid(test.Cid1), OperationUnpin, PhaseQueued, nil)
-	if !op.Cid().Equals(test.Cid1) {
-		t.Error("bad cid")
-	}
-	if op.Phase() != PhaseQueued {
-		t.Error("bad phase")
-	}
-
-	op.SetError(errors.New("fake error"))
-	if op.Error() != "fake error" {
-		t.Error("bad error")
-	}
-
-	op.SetPhase(PhaseInProgress)
-	if op.Phase() != PhaseInProgress {
-		t.Error("bad phase")
-	}
-
-	if op.Type() != OperationUnpin {
-		t.Error("bad type")
-	}
-
-	if !op.Timestamp().After(tim) {
-		t.Error("bad timestamp")
-	}
-
-	if op.Canceled() {
-		t.Error("should not be canceled")
-	}
-
-	op.Cancel()
-	if !op.Canceled() {
-		t.Error("should be canceled")
-	}
-
-	if op.ToTrackerStatus() != api.TrackerStatusUnpinning {
-		t.Error("should be unpinning")
-	}
-}
diff --git a/packages/networking/ipfs-cluster/pintracker/optracker/operationtracker.go b/packages/networking/ipfs-cluster/pintracker/optracker/operationtracker.go
deleted file mode 100644
index 473f9bf..0000000
--- a/packages/networking/ipfs-cluster/pintracker/optracker/operationtracker.go
+++ /dev/null
@@ -1,398 +0,0 @@
-// Package optracker implements functionality to track the status of pin and
-// unpin operations as needed by implementations of the pintracker component.
-// It particularly allows one to obtain status information for a given Cid,
-// to skip re-tracking already ongoing operations, or to cancel ongoing
-// operations when opposing ones arrive.
-package optracker
-
-import (
-	"context"
-	"fmt"
-	"strings"
-	"sync"
-	"sync/atomic"
-	"time"
-
-	"github.com/ipfs-cluster/ipfs-cluster/api"
-	"github.com/ipfs-cluster/ipfs-cluster/observations"
-
-	logging "github.com/ipfs/go-log/v2"
-	peer "github.com/libp2p/go-libp2p/core/peer"
-
-	"go.opencensus.io/stats"
-	"go.opencensus.io/trace"
-)
-
-var logger = logging.Logger("optracker")
-
-// OperationTracker tracks and manages all inflight Operations.
-type OperationTracker struct {
-	ctx      context.Context // parent context for all ops
-	pid      peer.ID
-	peerName string
-
-	mu         sync.RWMutex
-	operations map[api.Cid]*Operation
-
-	pinningCount   int64
-	pinErrorCount  int64
-	pinQueuedCount int64
-}
-
-func (opt *OperationTracker) String() string {
-	var b strings.Builder
-	fmt.Fprintf(&b, "pid: %v\n", opt.pid)
-	fmt.Fprintf(&b, "name: %s\n", opt.peerName)
-
-	fmt.Fprint(&b, "operations:\n")
-	opt.mu.RLock()
-	defer opt.mu.RUnlock()
-	for _, op := range opt.operations {
-		opstr := op.String()
-		opstrs := strings.Split(opstr, "\n")
-		for _, s := range opstrs {
-			fmt.Fprintf(&b, "\t%s\n", s)
-		}
-	}
-	return b.String()
-}
-
-// NewOperationTracker creates a new OperationTracker.
-func NewOperationTracker(ctx context.Context, pid peer.ID, peerName string) *OperationTracker {
-	initializeMetrics(ctx)
-
-	return &OperationTracker{
-		ctx:        ctx,
-		pid:        pid,
-		peerName:   peerName,
-		operations: make(map[api.Cid]*Operation),
-	}
-}
-
-// TrackNewOperation will create, track and return a new operation unless
-// one already exists to do the same thing, in which case nil is returned.
-//
-// If an operation exists and it is of a different type, it is
-// canceled and the new one replaces it in the tracker.
-func (opt *OperationTracker) TrackNewOperation(ctx context.Context, pin api.Pin, typ OperationType, ph Phase) *Operation {
-	ctx = trace.NewContext(opt.ctx, trace.FromContext(ctx))
-	ctx, span := trace.StartSpan(ctx, "optracker/TrackNewOperation")
-	defer span.End()
-
-	opt.mu.Lock()
-	defer opt.mu.Unlock()
-
-	op, ok := opt.operations[pin.Cid]
-	if ok { // operation exists for the CID
-		if op.Type() == typ && op.Phase() != PhaseError && op.Phase() != PhaseDone {
-			// an ongoing operation of the same
-			// type. i.e. pinning, or queued.
-			return nil
-		}
-		// i.e. operations in error phase
-		// i.e. pin operations that need to be canceled for unpinning
-		op.tracker.recordMetric(op, -1)
-		op.Cancel() // cancel ongoing operation and replace it
-	}
-
-	op2 := newOperation(ctx, pin, typ, ph, opt)
-	if ok && op.Type() == typ {
-		// Carry over the attempt count when doing an operation of the
-		// same type. The old operation exists and was canceled.
-		op2.attemptCount = op.AttemptCount() // carry the count
-	}
-	logger.Debugf("'%s' on cid '%s' has been created with phase '%s'", typ, pin.Cid, ph)
-	opt.operations[pin.Cid] = op2
-	opt.recordMetricUnsafe(op2, 1)
-	return op2
-}
-
-// Clean deletes an operation from the tracker if it is the one we are tracking
-// (compares pointers).
-func (opt *OperationTracker) Clean(ctx context.Context, op *Operation) {
-	opt.mu.Lock()
-	defer opt.mu.Unlock()
-	op2, ok := opt.operations[op.Cid()]
-	if ok && op == op2 { // same pointer
-		delete(opt.operations, op.Cid())
-	}
-}
-
-// Status returns the TrackerStatus associated to the last operation known
-// with the given Cid. It returns false if we are not tracking any operation
-// for the given Cid.
-func (opt *OperationTracker) Status(ctx context.Context, c api.Cid) (api.TrackerStatus, bool) {
-	opt.mu.RLock()
-	defer opt.mu.RUnlock()
-	op, ok := opt.operations[c]
-	if !ok {
-		return 0, false
-	}
-
-	return op.ToTrackerStatus(), true
-}
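TrackNewOperation above is the deduplication point for the whole pintracker: a same-type ongoing operation yields nil, an opposing or finished one is canceled and replaced, and same-type retries inherit the attempt count. A compressed, self-contained sketch of just that decision logic (illustrative names; the vendored code additionally records metrics and tracing spans):

```go
package main

import (
	"context"
	"fmt"
	"sync"
)

type OpType int

const (
	Pin OpType = iota
	Unpin
)

type op struct {
	typ      OpType
	attempts int
	done     bool // a real tracker flips this when the op finishes
	cancel   context.CancelFunc
}

type tracker struct {
	mu  sync.Mutex
	ops map[string]*op
}

// trackNew returns nil for a duplicate ongoing operation; otherwise it
// cancels any previous operation on the CID and replaces it, carrying the
// attempt count over for same-type retries.
func (t *tracker) trackNew(cid string, typ OpType) *op {
	t.mu.Lock()
	defer t.mu.Unlock()

	old, ok := t.ops[cid]
	if ok && old.typ == typ && !old.done {
		return nil // same-type operation already ongoing
	}
	if ok {
		old.cancel() // opposing or finished: cancel and replace
	}

	_, cancel := context.WithCancel(context.Background())
	n := &op{typ: typ, cancel: cancel}
	if ok && old.typ == typ {
		n.attempts = old.attempts // retry keeps its count
	}
	t.ops[cid] = n
	return n
}

func main() {
	t := &tracker{ops: map[string]*op{}}
	fmt.Println(t.trackNew("cid1", Pin) != nil)   // true: tracked
	fmt.Println(t.trackNew("cid1", Pin) == nil)   // true: duplicate ignored
	fmt.Println(t.trackNew("cid1", Unpin) != nil) // true: unpin replaces pin
}
```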
-
-// SetError transitions an operation for a Cid into PhaseError if its Status
-// is PhaseDone. Any other phases are considered in-flight and not touched.
-// For things already in error, the error message is updated.
-// Remote pins are ignored too.
-// Only used in tests right now.
-func (opt *OperationTracker) SetError(ctx context.Context, c api.Cid, err error) {
-	opt.mu.Lock()
-	defer opt.mu.Unlock()
-	op, ok := opt.operations[c]
-	if !ok {
-		return
-	}
-
-	if ty := op.Type(); ty == OperationRemote {
-		return
-	}
-
-	if ph := op.Phase(); ph == PhaseDone || ph == PhaseError {
-		op.SetPhase(PhaseError)
-		op.SetError(err)
-	}
-}
-
-func (opt *OperationTracker) unsafePinInfo(ctx context.Context, op *Operation, ipfs api.IPFSID) api.PinInfo {
-	if op == nil {
-		return api.PinInfo{
-			Cid:      api.CidUndef,
-			Name:     "",
-			Peer:     opt.pid,
-			Origins:  nil,
-			//Created: 0,
-			Metadata: nil,
-			PinInfoShort: api.PinInfoShort{
-				PeerName:     opt.peerName,
-				IPFS:         "",
-				Status:       api.TrackerStatusUnpinned,
-				TS:           time.Now(),
-				AttemptCount: 0,
-				PriorityPin:  false,
-				Error:        "",
-			},
-		}
-	}
-	return api.PinInfo{
-		Cid:         op.Cid(),
-		Name:        op.Pin().Name,
-		Peer:        opt.pid,
-		Allocations: op.Pin().Allocations,
-		Origins:     op.Pin().Origins,
-		Created:     op.Pin().Timestamp,
-		Metadata:    op.Pin().Metadata,
-		PinInfoShort: api.PinInfoShort{
-			PeerName:      opt.peerName,
-			IPFS:          ipfs.ID,
-			IPFSAddresses: ipfs.Addresses,
-			Status:        op.ToTrackerStatus(),
-			TS:            op.Timestamp(),
-			AttemptCount:  op.AttemptCount(),
-			PriorityPin:   op.PriorityPin(),
-			Error:         op.Error(),
-		},
-	}
-}
-
-// Get returns a PinInfo object for Cid.
-func (opt *OperationTracker) Get(ctx context.Context, c api.Cid, ipfs api.IPFSID) api.PinInfo {
-	ctx, span := trace.StartSpan(ctx, "optracker/Get")
-	defer span.End()
-
-	opt.mu.RLock()
-	defer opt.mu.RUnlock()
-	op := opt.operations[c]
-	pInfo := opt.unsafePinInfo(ctx, op, ipfs)
-	if !pInfo.Cid.Defined() {
-		pInfo.Cid = c
-	}
-	return pInfo
-}
-
-// GetExists returns a PinInfo object for a Cid only if there exists
-// an associated Operation.
-func (opt *OperationTracker) GetExists(ctx context.Context, c api.Cid, ipfs api.IPFSID) (api.PinInfo, bool) {
-	ctx, span := trace.StartSpan(ctx, "optracker/GetExists")
-	defer span.End()
-
-	opt.mu.RLock()
-	defer opt.mu.RUnlock()
-	op, ok := opt.operations[c]
-	if !ok {
-		return api.PinInfo{}, false
-	}
-	pInfo := opt.unsafePinInfo(ctx, op, ipfs)
-	return pInfo, true
-}
-
-// GetAll returns PinInfo objects for all known operations.
-func (opt *OperationTracker) GetAll(ctx context.Context, ipfs api.IPFSID) []api.PinInfo {
-	ctx, span := trace.StartSpan(ctx, "optracker/GetAll")
-	defer span.End()
-
-	ch := make(chan api.PinInfo, 1024)
-	var pinfos []api.PinInfo
-	go opt.GetAllChannel(ctx, api.TrackerStatusUndefined, ipfs, ch)
-	for pinfo := range ch {
-		pinfos = append(pinfos, pinfo)
-	}
-	return pinfos
-}
-
-// GetAllChannel returns all known operations that match the filter on the
-// provided channel. Blocks until done.
-func (opt *OperationTracker) GetAllChannel(ctx context.Context, filter api.TrackerStatus, ipfs api.IPFSID, out chan<- api.PinInfo) error {
-	defer close(out)
-
-	opt.mu.RLock()
-	defer opt.mu.RUnlock()
-
-	for _, op := range opt.operations {
-		pinfo := opt.unsafePinInfo(ctx, op, ipfs)
-		if pinfo.Status.Match(filter) {
-			select {
-			case <-ctx.Done():
-				return fmt.Errorf("listing operations aborted: %w", ctx.Err())
-			default:
-			}
-
-			select {
-			case <-ctx.Done():
-				return fmt.Errorf("listing operations aborted: %w", ctx.Err())
-			case out <- pinfo:
-			}
-		}
-	}
-	return nil
-}
-
-// CleanAllDone deletes any operation from the tracker that is in PhaseDone.
-func (opt *OperationTracker) CleanAllDone(ctx context.Context) { - opt.mu.Lock() - defer opt.mu.Unlock() - for _, op := range opt.operations { - if op.Phase() == PhaseDone { - delete(opt.operations, op.Cid()) - } - } -} - -// OpContext gets the context of an operation, if any. -func (opt *OperationTracker) OpContext(ctx context.Context, c api.Cid) context.Context { - opt.mu.RLock() - defer opt.mu.RUnlock() - op, ok := opt.operations[c] - if !ok { - return nil - } - return op.Context() -} - -// Filter returns a slice of api.PinInfos that had associated -// Operations that matched the provided filter. Note, only supports -// filters of type OperationType or Phase, any other type -// will result in a nil slice being returned. -func (opt *OperationTracker) Filter(ctx context.Context, ipfs api.IPFSID, filters ...interface{}) []api.PinInfo { - var pinfos []api.PinInfo - opt.mu.RLock() - defer opt.mu.RUnlock() - ops := filterOpsMap(ctx, opt.operations, filters) - for _, op := range ops { - pinfo := opt.unsafePinInfo(ctx, op, ipfs) - pinfos = append(pinfos, pinfo) - } - return pinfos -} - -// filterOps returns a slice that only contains operations -// with the matching filter. Note, only supports -// filters of type OperationType or Phase, any other type -// will result in a nil slice being returned. -// Only used in tests right now. -func (opt *OperationTracker) filterOps(ctx context.Context, filters ...interface{}) []*Operation { - var fltops []*Operation - opt.mu.RLock() - defer opt.mu.RUnlock() - for _, op := range filterOpsMap(ctx, opt.operations, filters) { - fltops = append(fltops, op) - } - return fltops -} - -func filterOpsMap(ctx context.Context, ops map[api.Cid]*Operation, filters []interface{}) map[api.Cid]*Operation { - fltops := make(map[api.Cid]*Operation) - if len(filters) < 1 { - return nil - } - - if len(filters) == 1 { - filter(ctx, ops, fltops, filters[0]) - return fltops - } - - mainFilter, filters := filters[0], filters[1:] - filter(ctx, ops, fltops, mainFilter) - - return filterOpsMap(ctx, fltops, filters) -} - -func filter(ctx context.Context, in, out map[api.Cid]*Operation, filter interface{}) { - for _, op := range in { - switch filter.(type) { - case OperationType: - if op.Type() == filter { - out[op.Cid()] = op - } - case Phase: - if op.Phase() == filter { - out[op.Cid()] = op - } - } - } -} - -func initializeMetrics(ctx context.Context) { - stats.Record(ctx, observations.PinsPinError.M(0)) - stats.Record(ctx, observations.PinsQueued.M(0)) - stats.Record(ctx, observations.PinsPinning.M(0)) -} - -func (opt *OperationTracker) recordMetricUnsafe(op *Operation, val int64) { - if opt == nil || op == nil { - return - } - - if op.opType == OperationPin { - switch op.phase { - case PhaseError: - pinErrors := atomic.AddInt64(&opt.pinErrorCount, val) - stats.Record(op.Context(), observations.PinsPinError.M(pinErrors)) - case PhaseQueued: - pinQueued := atomic.AddInt64(&opt.pinQueuedCount, val) - stats.Record(op.Context(), observations.PinsQueued.M(pinQueued)) - case PhaseInProgress: - pinning := atomic.AddInt64(&opt.pinningCount, val) - stats.Record(op.Context(), observations.PinsPinning.M(pinning)) - case PhaseDone: - // we have no metric to log anything - } - } -} - -func (opt *OperationTracker) recordMetric(op *Operation, val int64) { - if op == nil { - return - } - op.mu.RLock() - { - opt.recordMetricUnsafe(op, val) - } - op.mu.RUnlock() -} - -// PinQueueSize returns the current number of items queued to pin. 
-func (opt *OperationTracker) PinQueueSize() int64 { - return atomic.LoadInt64(&opt.pinQueuedCount) -} diff --git a/packages/networking/ipfs-cluster/pintracker/optracker/operationtracker_test.go b/packages/networking/ipfs-cluster/pintracker/optracker/operationtracker_test.go deleted file mode 100644 index fcf6b45..0000000 --- a/packages/networking/ipfs-cluster/pintracker/optracker/operationtracker_test.go +++ /dev/null @@ -1,258 +0,0 @@ -package optracker - -import ( - "context" - "errors" - "testing" - - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/test" -) - -func testOperationTracker(t *testing.T) *OperationTracker { - ctx := context.Background() - return NewOperationTracker(ctx, test.PeerID1, test.PeerName1) -} - -func TestOperationTracker_TrackNewOperation(t *testing.T) { - ctx := context.Background() - opt := testOperationTracker(t) - op := opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationPin, PhaseQueued) - - t.Run("track new operation", func(t *testing.T) { - if op == nil { - t.Fatal("nil op") - } - if op.Phase() != PhaseQueued { - t.Error("bad phase") - } - - if op.Type() != OperationPin { - t.Error("bad type") - } - - if op.Canceled() != false { - t.Error("should not be canceled") - } - - if op.ToTrackerStatus() != api.TrackerStatusPinQueued { - t.Error("bad state") - } - }) - - t.Run("track when ongoing operation", func(t *testing.T) { - op2 := opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationPin, PhaseInProgress) - if op2 != nil { - t.Fatal("should not have created new operation") - } - }) - - t.Run("track of different type", func(t *testing.T) { - op2 := opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationUnpin, PhaseQueued) - if op2 == nil { - t.Fatal("should have created a new operation") - } - - if !op.Canceled() { - t.Fatal("should have canceled the original operation") - } - }) - - t.Run("track of same type when done", func(t *testing.T) { - op2 := opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationPin, PhaseDone) - if op2 == nil { - t.Fatal("should have created a new operation") - } - - op3 := opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationPin, PhaseQueued) - if op3 == nil { - t.Fatal("should have created a new operation when other is in Done") - } - }) - - t.Run("track of same type when error", func(t *testing.T) { - op4 := opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationUnpin, PhaseError) - if op4 == nil { - t.Fatal("should have created a new operation") - } - - op5 := opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationUnpin, PhaseQueued) - if op5 == nil { - t.Fatal("should have created a new operation") - } - }) -} - -func TestOperationTracker_Clean(t *testing.T) { - ctx := context.Background() - opt := testOperationTracker(t) - op := opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationPin, PhaseQueued) - op2 := opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationUnpin, PhaseQueued) - t.Run("clean older operation", func(t *testing.T) { - opt.Clean(ctx, op) - st, ok := opt.Status(ctx, test.Cid1) - if !ok || st != api.TrackerStatusUnpinQueued { - t.Fatal("should not have cleaned the latest op") - } - }) - - t.Run("clean current operation", func(t *testing.T) { - opt.Clean(ctx, op2) - _, ok := opt.Status(ctx, test.Cid1) - if ok { - t.Fatal("should have cleaned the latest op") - } - }) -} - -func TestOperationTracker_Status(t *testing.T) { - ctx := context.Background() - opt := testOperationTracker(t) - opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), 
OperationRemote, PhaseDone)
-	st, ok := opt.Status(ctx, test.Cid1)
-	if !ok || st != api.TrackerStatusRemote {
-		t.Error("should provide status remote")
-	}
-
-	_, ok = opt.Status(ctx, test.Cid2)
-	if ok {
-		t.Error("should signal nonexistent status")
-	}
-}
-
-func TestOperationTracker_SetError(t *testing.T) {
-	ctx := context.Background()
-	opt := testOperationTracker(t)
-	opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationPin, PhaseDone)
-	opt.SetError(ctx, test.Cid1, errors.New("fake error"))
-	pinfo := opt.Get(ctx, test.Cid1, api.IPFSID{})
-	if pinfo.Status != api.TrackerStatusPinError {
-		t.Error("should have updated the status")
-	}
-	if pinfo.Error != "fake error" {
-		t.Error("should have set the error message")
-	}
-
-	opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationUnpin, PhaseQueued)
-	opt.SetError(ctx, test.Cid1, errors.New("fake error"))
-	st, ok := opt.Status(ctx, test.Cid1)
-	if !ok || st != api.TrackerStatusUnpinQueued {
-		t.Error("should not have set an error on in-flight items")
-	}
-}
-
-func TestOperationTracker_Get(t *testing.T) {
-	ctx := context.Background()
-	opt := testOperationTracker(t)
-	opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationPin, PhaseDone)
-
-	t.Run("Get with existing item", func(t *testing.T) {
-		pinfo := opt.Get(ctx, test.Cid1, api.IPFSID{})
-		if pinfo.Status != api.TrackerStatusPinned {
-			t.Error("bad status")
-		}
-		if !pinfo.Cid.Equals(test.Cid1) {
-			t.Error("bad cid")
-		}
-
-		if pinfo.Peer != test.PeerID1 {
-			t.Error("bad peer ID")
-		}
-	})
-
-	t.Run("Get with nonexistent item", func(t *testing.T) {
-		pinfo := opt.Get(ctx, test.Cid2, api.IPFSID{})
-		if pinfo.Status != api.TrackerStatusUnpinned {
-			t.Error("bad status")
-		}
-		if !pinfo.Cid.Equals(test.Cid2) {
-			t.Error("bad cid")
-		}
-
-		if pinfo.Peer != test.PeerID1 {
-			t.Error("bad peer ID")
-		}
-	})
-}
-
-func TestOperationTracker_GetAll(t *testing.T) {
-	ctx := context.Background()
-	opt := testOperationTracker(t)
-	opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationPin, PhaseInProgress)
-	pinfos := opt.GetAll(ctx, api.IPFSID{})
-	if len(pinfos) != 1 {
-		t.Fatal("expected 1 item")
-	}
-	if pinfos[0].Status != api.TrackerStatusPinning {
-		t.Fatal("bad status")
-	}
-}
-
-func TestOperationTracker_OpContext(t *testing.T) {
-	ctx := context.Background()
-	opt := testOperationTracker(t)
-	op := opt.TrackNewOperation(ctx, api.PinCid(test.Cid1), OperationPin, PhaseInProgress)
-	ctx1 := op.Context()
-	ctx2 := opt.OpContext(ctx, test.Cid1)
-	if ctx1 != ctx2 {
-		t.Fatal("didn't get the right context")
-	}
-}
-
-func TestOperationTracker_filterOps(t *testing.T) {
-	ctx := context.Background()
-	testOpsMap := map[api.Cid]*Operation{
-		test.Cid1: {pin: api.PinCid(test.Cid1), opType: OperationPin, phase: PhaseQueued},
-		test.Cid2: {pin: api.PinCid(test.Cid2), opType: OperationPin, phase: PhaseInProgress},
-		test.Cid3: {pin: api.PinCid(test.Cid3), opType: OperationUnpin, phase: PhaseInProgress},
-	}
-	opt := &OperationTracker{ctx: ctx, operations: testOpsMap}
-
-	t.Run("filter ops to pin operations", func(t *testing.T) {
-		wantLen := 2
-		wantOp := OperationPin
-		got := opt.filterOps(ctx, wantOp)
-		if len(got) != wantLen {
-			t.Errorf("want: %d %s operations; got: %d", wantLen, wantOp.String(), len(got))
-		}
-		for i := range got {
-			if got[i].Type() != wantOp {
-				t.Errorf("want: %v; got: %v", wantOp.String(), got[i])
-			}
-		}
-	})
-
-	t.Run("filter ops to in progress phase", func(t *testing.T) {
-		wantLen := 2
-		wantPhase := PhaseInProgress
-		got := opt.filterOps(ctx, 
PhaseInProgress) - if len(got) != wantLen { - t.Errorf("want: %d %s operations; got: %d", wantLen, wantPhase.String(), len(got)) - } - for i := range got { - if got[i].Phase() != wantPhase { - t.Errorf("want: %s; got: %v", wantPhase.String(), got[i]) - } - } - }) - - t.Run("filter ops to queued pins", func(t *testing.T) { - wantLen := 1 - wantPhase := PhaseQueued - wantOp := OperationPin - got := opt.filterOps(ctx, OperationPin, PhaseQueued) - if len(got) != wantLen { - t.Errorf("want: %d %s operations; got: %d", wantLen, wantPhase.String(), len(got)) - } - for i := range got { - if got[i].Phase() != wantPhase { - t.Errorf("want: %s; got: %v", wantPhase.String(), got[i]) - } - - if got[i].Type() != wantOp { - t.Errorf("want: %s; got: %v", wantOp.String(), got[i]) - } - } - }) -} diff --git a/packages/networking/ipfs-cluster/pintracker/optracker/operationtype_string.go b/packages/networking/ipfs-cluster/pintracker/optracker/operationtype_string.go deleted file mode 100644 index 5661142..0000000 --- a/packages/networking/ipfs-cluster/pintracker/optracker/operationtype_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type=OperationType"; DO NOT EDIT. - -package optracker - -import "strconv" - -const _OperationType_name = "OperationUnknownOperationPinOperationUnpinOperationRemoteOperationShard" - -var _OperationType_index = [...]uint8{0, 16, 28, 42, 57, 71} - -func (i OperationType) String() string { - if i < 0 || i >= OperationType(len(_OperationType_index)-1) { - return "OperationType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _OperationType_name[_OperationType_index[i]:_OperationType_index[i+1]] -} diff --git a/packages/networking/ipfs-cluster/pintracker/optracker/phase_string.go b/packages/networking/ipfs-cluster/pintracker/optracker/phase_string.go deleted file mode 100644 index 2c14fe6..0000000 --- a/packages/networking/ipfs-cluster/pintracker/optracker/phase_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type=Phase"; DO NOT EDIT. - -package optracker - -import "strconv" - -const _Phase_name = "PhaseErrorPhaseQueuedPhaseInProgressPhaseDone" - -var _Phase_index = [...]uint8{0, 10, 21, 36, 45} - -func (i Phase) String() string { - if i < 0 || i >= Phase(len(_Phase_index)-1) { - return "Phase(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Phase_name[_Phase_index[i]:_Phase_index[i+1]] -} diff --git a/packages/networking/ipfs-cluster/pintracker/pintracker_test.go b/packages/networking/ipfs-cluster/pintracker/pintracker_test.go deleted file mode 100644 index b173d79..0000000 --- a/packages/networking/ipfs-cluster/pintracker/pintracker_test.go +++ /dev/null @@ -1,572 +0,0 @@ -// Package pintracker_test tests the multiple implementations -// of the PinTracker interface. -// -// These tests are legacy from the time when there were several -// pintracker implementations. 
-package pintracker_test - -import ( - "context" - "sort" - "testing" - "time" - - ipfscluster "github.com/ipfs-cluster/ipfs-cluster" - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/datastore/inmem" - "github.com/ipfs-cluster/ipfs-cluster/pintracker/stateless" - "github.com/ipfs-cluster/ipfs-cluster/state" - "github.com/ipfs-cluster/ipfs-cluster/state/dsstate" - "github.com/ipfs-cluster/ipfs-cluster/test" - - peer "github.com/libp2p/go-libp2p/core/peer" -) - -var ( - pinOpts = api.PinOptions{ - ReplicationFactorMax: -1, - ReplicationFactorMin: -1, - } -) - -var sortPinInfoByCid = func(p []api.PinInfo) { - sort.Slice(p, func(i, j int) bool { - return p[i].Cid.String() < p[j].Cid.String() - }) -} - -// prefilledState return a state instance with some pins: -// - Cid1 - pin everywhere -// - Cid2 - weird / remote // replication factor set to 0, no allocations -// - Cid3 - remote - this pin is on ipfs -// - Cid4 - pin everywhere - this pin is not on ipfs -func prefilledState(ctx context.Context) (state.ReadOnly, error) { - st, err := dsstate.New(ctx, inmem.New(), "", dsstate.DefaultHandle()) - if err != nil { - return nil, err - } - - remote := api.PinWithOpts(test.Cid3, api.PinOptions{ - ReplicationFactorMax: 1, - ReplicationFactorMin: 1, - }) - remote.Allocations = []peer.ID{test.PeerID2} - - pins := []api.Pin{ - api.PinWithOpts(test.Cid1, pinOpts), - api.PinCid(test.Cid2), - remote, - api.PinWithOpts(test.Cid4, pinOpts), - } - - for _, pin := range pins { - err = st.Add(ctx, pin) - if err != nil { - return nil, err - } - } - return st, nil -} - -func testStatelessPinTracker(t testing.TB) *stateless.Tracker { - t.Helper() - - cfg := &stateless.Config{} - cfg.Default() - spt := stateless.New(cfg, test.PeerID1, test.PeerName1, prefilledState) - spt.SetClient(test.NewMockRPCClient(t)) - return spt -} - -func TestPinTracker_Track(t *testing.T) { - type args struct { - c api.Pin - tracker ipfscluster.PinTracker - } - tests := []struct { - name string - args args - wantErr bool - }{ - { - "basic stateless track", - args{ - api.PinWithOpts(test.Cid1, pinOpts), - testStatelessPinTracker(t), - }, - false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if err := tt.args.tracker.Track(context.Background(), tt.args.c); (err != nil) != tt.wantErr { - t.Errorf("PinTracker.Track() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func BenchmarkPinTracker_Track(b *testing.B) { - type args struct { - c api.Pin - tracker ipfscluster.PinTracker - } - tests := []struct { - name string - args args - }{ - { - "basic stateless track", - args{ - api.PinWithOpts(test.Cid1, pinOpts), - testStatelessPinTracker(b), - }, - }, - } - for _, tt := range tests { - b.Run(tt.name, func(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; i++ { - if err := tt.args.tracker.Track(context.Background(), tt.args.c); err != nil { - b.Errorf("PinTracker.Track() error = %v", err) - } - } - }) - } -} - -func TestPinTracker_Untrack(t *testing.T) { - type args struct { - c api.Cid - tracker ipfscluster.PinTracker - } - tests := []struct { - name string - args args - wantErr bool - }{ - { - "basic stateless untrack", - args{ - test.Cid1, - testStatelessPinTracker(t), - }, - false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if err := tt.args.tracker.Untrack(context.Background(), tt.args.c); (err != nil) != tt.wantErr { - t.Errorf("PinTracker.Untrack() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func 
collectPinInfos(t *testing.T, out chan api.PinInfo) []api.PinInfo { - t.Helper() - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - var pis []api.PinInfo - for { - select { - case <-ctx.Done(): - t.Error("took too long") - return nil - case pi, ok := <-out: - if !ok { - return pis - } - pis = append(pis, pi) - } - } - -} - -func TestPinTracker_StatusAll(t *testing.T) { - type args struct { - c api.Pin - tracker ipfscluster.PinTracker - } - tests := []struct { - name string - args args - want []api.PinInfo - }{ - { - "basic stateless statusall", - args{ - api.PinWithOpts(test.Cid1, pinOpts), - testStatelessPinTracker(t), - }, - []api.PinInfo{ - { - Cid: test.Cid1, - PinInfoShort: api.PinInfoShort{ - Status: api.TrackerStatusPinned, - }, - }, - { - Cid: test.Cid2, - PinInfoShort: api.PinInfoShort{ - Status: api.TrackerStatusRemote, - }, - }, - { - Cid: test.Cid3, - PinInfoShort: api.PinInfoShort{ - Status: api.TrackerStatusRemote, - }, - }, - { - // in state but not on IPFS - Cid: test.Cid4, - PinInfoShort: api.PinInfoShort{ - Status: api.TrackerStatusUnexpectedlyUnpinned, - }, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if err := tt.args.tracker.Track(context.Background(), tt.args.c); err != nil { - t.Errorf("PinTracker.Track() error = %v", err) - } - time.Sleep(200 * time.Millisecond) - infos := make(chan api.PinInfo) - go func() { - err := tt.args.tracker.StatusAll(context.Background(), api.TrackerStatusUndefined, infos) - if err != nil { - t.Error() - } - }() - - got := collectPinInfos(t, infos) - - if len(got) != len(tt.want) { - for _, pi := range got { - t.Logf("pinfo: %v", pi) - } - t.Errorf("got len = %d, want = %d", len(got), len(tt.want)) - t.FailNow() - } - - sortPinInfoByCid(got) - sortPinInfoByCid(tt.want) - - for i := range tt.want { - if got[i].Cid != tt.want[i].Cid { - t.Errorf("got: %v\nwant: %v", got, tt.want) - } - if got[i].Status != tt.want[i].Status { - t.Errorf("for cid %v:\n got: %s\nwant: %s", got[i].Cid, got[i].Status, tt.want[i].Status) - } - } - }) - } -} - -func TestPinTracker_Status(t *testing.T) { - type args struct { - c api.Cid - tracker ipfscluster.PinTracker - } - tests := []struct { - name string - args args - want api.PinInfo - }{ - { - "basic stateless status", - args{ - test.Cid1, - testStatelessPinTracker(t), - }, - api.PinInfo{ - Cid: test.Cid1, - PinInfoShort: api.PinInfoShort{ - Status: api.TrackerStatusPinned, - }, - }, - }, - { - "basic stateless status/unpinned", - args{ - test.Cid5, - testStatelessPinTracker(t), - }, - api.PinInfo{ - Cid: test.Cid5, - PinInfoShort: api.PinInfoShort{ - Status: api.TrackerStatusUnpinned, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := tt.args.tracker.Status(context.Background(), tt.args.c) - - if got.Cid != tt.want.Cid { - t.Errorf("PinTracker.Status() = %v, want %v", got.Cid, tt.want.Cid) - } - - if got.Status != tt.want.Status { - t.Errorf("PinTracker.Status() = %v, want %v", got.Status, tt.want.Status) - } - }) - } -} - -func TestPinTracker_RecoverAll(t *testing.T) { - type args struct { - tracker ipfscluster.PinTracker - } - tests := []struct { - name string - args args - want []api.PinInfo - wantErr bool - }{ - { - "basic stateless recoverall", - args{ - testStatelessPinTracker(t), - }, - // The only CID to recover is test.Cid4 which is in error. - []api.PinInfo{ - { - // This will recover and status - // is ignored as it could come back as - // queued, pinning or error. 
- - Cid: test.Cid4, - PinInfoShort: api.PinInfoShort{ - Status: api.TrackerStatusPinError, - }, - }, - }, - false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - infos := make(chan api.PinInfo) - go func() { - err := tt.args.tracker.RecoverAll(context.Background(), infos) - if (err != nil) != tt.wantErr { - t.Errorf("PinTracker.RecoverAll() error = %v, wantErr %v", err, tt.wantErr) - return - } - }() - - got := collectPinInfos(t, infos) - - if len(got) != len(tt.want) { - for _, pi := range got { - t.Logf("pinfo: %v", pi) - } - t.Fatalf("got len = %d, want = %d", len(got), len(tt.want)) - } - - sortPinInfoByCid(got) - sortPinInfoByCid(tt.want) - - for i := range tt.want { - if got[i].Cid != tt.want[i].Cid { - t.Errorf("\ngot: %v,\nwant: %v", got[i].Cid, tt.want[i].Cid) - } - - // Cid4 needs to be recovered, we do not care - // on what status it finds itself. - if got[i].Cid == test.Cid4 { - continue - } - if got[i].Status != tt.want[i].Status { - t.Errorf("for cid: %v:\ngot: %v,\nwant: %v", tt.want[i].Cid, got[i].Status, tt.want[i].Status) - } - } - }) - } -} - -func TestPinTracker_Recover(t *testing.T) { - type args struct { - c api.Cid - tracker ipfscluster.PinTracker - } - tests := []struct { - name string - args args - want api.PinInfo - wantErr bool - }{ - { - "basic stateless recover", - args{ - test.Cid1, - testStatelessPinTracker(t), - }, - api.PinInfo{ - Cid: test.Cid1, - PinInfoShort: api.PinInfoShort{ - Status: api.TrackerStatusPinned, - }, - }, - false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := tt.args.tracker.Recover(context.Background(), tt.args.c) - if (err != nil) != tt.wantErr { - t.Errorf("PinTracker.Recover() error = %v, wantErr %v", err, tt.wantErr) - return - } - - if got.Cid != tt.want.Cid { - t.Errorf("PinTracker.Recover() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestUntrackTrack(t *testing.T) { - type args struct { - c api.Cid - tracker ipfscluster.PinTracker - } - tests := []struct { - name string - args args - want api.PinInfo - wantErr bool - }{ - { - "basic stateless untrack track", - args{ - test.Cid1, - testStatelessPinTracker(t), - }, - api.PinInfo{ - Cid: test.Cid1, - PinInfoShort: api.PinInfoShort{ - Status: api.TrackerStatusPinned, - }, - }, - false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.args.tracker.Track(context.Background(), api.PinWithOpts(tt.args.c, pinOpts)) - if err != nil { - t.Fatal(err) - } - - time.Sleep(200 * time.Millisecond) - - err = tt.args.tracker.Untrack(context.Background(), tt.args.c) - if err != nil { - t.Fatal(err) - } - }) - } -} - -func TestTrackUntrackWithCancel(t *testing.T) { - type args struct { - c api.Cid - tracker ipfscluster.PinTracker - } - tests := []struct { - name string - args args - want api.PinInfo - wantErr bool - }{ - { - "stateless tracker untrack w/ cancel", - args{ - test.SlowCid1, - testStatelessPinTracker(t), - }, - api.PinInfo{ - Cid: test.SlowCid1, - PinInfoShort: api.PinInfoShort{ - Status: api.TrackerStatusPinned, - }, - }, - false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - p := api.PinWithOpts(tt.args.c, pinOpts) - err := tt.args.tracker.Track(context.Background(), p) - if err != nil { - t.Fatal(err) - } - - time.Sleep(200 * time.Millisecond) // let pinning start - - pInfo := tt.args.tracker.Status(context.Background(), tt.args.c) - if pInfo.Status == api.TrackerStatusUnpinned { - t.Fatal("slowPin should be tracked") - } - - if 
pInfo.Status == api.TrackerStatusPinning {
-				go func() {
-					err = tt.args.tracker.Untrack(context.Background(), tt.args.c)
-					if err != nil {
-						t.Error()
-						return
-					}
-				}()
-				var ctx context.Context
-				switch trkr := tt.args.tracker.(type) {
-				case *stateless.Tracker:
-					ctx = trkr.OpContext(context.Background(), tt.args.c)
-				}
-				select {
-				case <-ctx.Done():
-					return
-				case <-time.Tick(150 * time.Millisecond):
-					t.Errorf("operation context should have been canceled by now")
-				}
-			} else {
-				t.Error("slowPin should be pinning and is:", pInfo.Status)
-			}
-		})
-	}
-}
-
-func TestPinTracker_RemoteIgnoresError(t *testing.T) {
-	ctx := context.Background()
-	testF := func(t *testing.T, pt ipfscluster.PinTracker) {
-		remoteCid := test.Cid3
-
-		remote := api.PinWithOpts(remoteCid, pinOpts)
-		remote.Allocations = []peer.ID{test.PeerID2}
-		remote.ReplicationFactorMin = 1
-		remote.ReplicationFactorMax = 1
-
-		err := pt.Track(ctx, remote)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		pi := pt.Status(ctx, remoteCid)
-		if pi.Status != api.TrackerStatusRemote || pi.Error != "" {
-			t.Error("Remote pin should not be in error", pi.Status, pi.Error)
-		}
-	}
-
-	t.Run("stateless pintracker", func(t *testing.T) {
-		pt := testStatelessPinTracker(t)
-		testF(t, pt)
-	})
-}
diff --git a/packages/networking/ipfs-cluster/pintracker/stateless/config.go b/packages/networking/ipfs-cluster/pintracker/stateless/config.go
deleted file mode 100644
index a4d8036..0000000
--- a/packages/networking/ipfs-cluster/pintracker/stateless/config.go
+++ /dev/null
@@ -1,158 +0,0 @@
-package stateless
-
-import (
-	"encoding/json"
-	"errors"
-	"time"
-
-	"github.com/kelseyhightower/envconfig"
-
-	"github.com/ipfs-cluster/ipfs-cluster/config"
-)
-
-const configKey = "stateless"
-const envConfigKey = "cluster_stateless"
-
-// Default values for this Config.
-const (
-	DefaultMaxPinQueueSize       = 1000000
-	DefaultConcurrentPins        = 10
-	DefaultPriorityPinMaxAge     = 24 * time.Hour
-	DefaultPriorityPinMaxRetries = 5
-)
-
-// Config allows one to initialize the stateless PinTracker and customize
-// some parameters.
-type Config struct {
-	config.Saver
-
-	// If more requests than this are queued, they will automatically
-	// be marked with an error.
-	MaxPinQueueSize int
-	// ConcurrentPins specifies how many pin requests can be sent to the ipfs
-	// daemon in parallel. If the pinning method is "refs", it might increase
-	// speed. Unpin requests are always processed one by one.
-	ConcurrentPins int
-
-	// PriorityPinMaxAge specifies the maximum age that a pin can have
-	// since it was submitted to the cluster to be pinned
-	// preferentially (before pins that are older or have too many retries).
-	PriorityPinMaxAge time.Duration
-
-	// PriorityPinMaxRetries specifies the maximum number of retries that
-	// a pin can have before it is moved to a non-priority queue.
-	PriorityPinMaxRetries int
-}
-
-type jsonConfig struct {
-	MaxPinQueueSize       int    `json:"max_pin_queue_size,omitempty"`
-	ConcurrentPins        int    `json:"concurrent_pins"`
-	PriorityPinMaxAge     string `json:"priority_pin_max_age"`
-	PriorityPinMaxRetries int    `json:"priority_pin_max_retries"`
-}
-
-// ConfigKey provides a human-friendly identifier for this type of Config.
-func (cfg *Config) ConfigKey() string {
-	return configKey
-}
-
-// Default sets the fields of this Config to sensible values.
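The ApplyEnvVars machinery that follows delegates to github.com/kelseyhightower/envconfig, which builds variable names by upper-casing "<prefix>_<field name>"; that is how CLUSTER_STATELESS_CONCURRENTPINS in the config tests further down reaches the ConcurrentPins field. A small sketch using a cut-down stand-in struct, not the real jsonConfig:

```go
package main

import (
	"fmt"
	"os"

	"github.com/kelseyhightower/envconfig"
)

// envDemo is a stand-in for jsonConfig with just two fields.
type envDemo struct {
	ConcurrentPins    int
	PriorityPinMaxAge string
}

func main() {
	// envconfig derives CLUSTER_STATELESS_CONCURRENTPINS from the
	// prefix "cluster_stateless" and the field name "ConcurrentPins".
	os.Setenv("CLUSTER_STATELESS_CONCURRENTPINS", "22")

	cfg := envDemo{ConcurrentPins: 10, PriorityPinMaxAge: "24h"}
	if err := envconfig.Process("cluster_stateless", &cfg); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(cfg.ConcurrentPins)    // 22: overridden from the environment
	fmt.Println(cfg.PriorityPinMaxAge) // 24h: untouched, no variable set
}
```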
-func (cfg *Config) Default() error { - cfg.MaxPinQueueSize = DefaultMaxPinQueueSize - cfg.ConcurrentPins = DefaultConcurrentPins - cfg.PriorityPinMaxAge = DefaultPriorityPinMaxAge - cfg.PriorityPinMaxRetries = DefaultPriorityPinMaxRetries - return nil -} - -// ApplyEnvVars fills in any Config fields found -// as environment variables. -func (cfg *Config) ApplyEnvVars() error { - jcfg := cfg.toJSONConfig() - - err := envconfig.Process(envConfigKey, jcfg) - if err != nil { - return err - } - - return cfg.applyJSONConfig(jcfg) -} - -// Validate checks that the fields of this Config have working values, -// at least in appearance. -func (cfg *Config) Validate() error { - if cfg.MaxPinQueueSize <= 0 { - return errors.New("statelesstracker.max_pin_queue_size too low") - } - - if cfg.ConcurrentPins <= 0 { - return errors.New("statelesstracker.concurrent_pins is too low") - } - - if cfg.PriorityPinMaxAge <= 0 { - return errors.New("statelesstracker.priority_pin_max_age is too low") - } - - if cfg.PriorityPinMaxRetries <= 0 { - return errors.New("statelesstracker.priority_pin_max_retries is too low") - } - - return nil -} - -// LoadJSON sets the fields of this Config to the values defined by the JSON -// representation of it, as generated by ToJSON. -func (cfg *Config) LoadJSON(raw []byte) error { - jcfg := &jsonConfig{} - err := json.Unmarshal(raw, jcfg) - if err != nil { - logger.Error("Error unmarshaling statelesstracker config") - return err - } - - cfg.Default() - - return cfg.applyJSONConfig(jcfg) -} - -func (cfg *Config) applyJSONConfig(jcfg *jsonConfig) error { - config.SetIfNotDefault(jcfg.MaxPinQueueSize, &cfg.MaxPinQueueSize) - config.SetIfNotDefault(jcfg.ConcurrentPins, &cfg.ConcurrentPins) - err := config.ParseDurations(cfg.ConfigKey(), - &config.DurationOpt{ - Duration: jcfg.PriorityPinMaxAge, - Dst: &cfg.PriorityPinMaxAge, - Name: "priority_pin_max_age", - }, - ) - if err != nil { - return err - } - - config.SetIfNotDefault(jcfg.PriorityPinMaxRetries, &cfg.PriorityPinMaxRetries) - - return cfg.Validate() -} - -// ToJSON generates a human-friendly JSON representation of this Config. -func (cfg *Config) ToJSON() ([]byte, error) { - jcfg := cfg.toJSONConfig() - - return config.DefaultJSONMarshal(jcfg) -} - -func (cfg *Config) toJSONConfig() *jsonConfig { - jCfg := &jsonConfig{ - ConcurrentPins: cfg.ConcurrentPins, - PriorityPinMaxAge: cfg.PriorityPinMaxAge.String(), - PriorityPinMaxRetries: cfg.PriorityPinMaxRetries, - } - if cfg.MaxPinQueueSize != DefaultMaxPinQueueSize { - jCfg.MaxPinQueueSize = cfg.MaxPinQueueSize - } - - return jCfg -} - -// ToDisplayJSON returns JSON config as a string. 
-func (cfg *Config) ToDisplayJSON() ([]byte, error) { - return config.DisplayJSON(cfg.toJSONConfig()) -} diff --git a/packages/networking/ipfs-cluster/pintracker/stateless/config_test.go b/packages/networking/ipfs-cluster/pintracker/stateless/config_test.go deleted file mode 100644 index 688a420..0000000 --- a/packages/networking/ipfs-cluster/pintracker/stateless/config_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package stateless - -import ( - "encoding/json" - "os" - "testing" - "time" -) - -var cfgJSON = []byte(` -{ - "max_pin_queue_size": 4092, - "concurrent_pins": 2, - "priority_pin_max_age": "240h", - "priority_pin_max_retries": 4 -} -`) - -func TestLoadJSON(t *testing.T) { - cfg := &Config{} - err := cfg.LoadJSON(cfgJSON) - if err != nil { - t.Fatal(err) - } - - j := &jsonConfig{} - - json.Unmarshal(cfgJSON, j) - j.ConcurrentPins = 10 - j.PriorityPinMaxAge = "216h" - j.PriorityPinMaxRetries = 2 - tst, _ := json.Marshal(j) - err = cfg.LoadJSON(tst) - if err != nil { - t.Error("did not expect an error") - } - if cfg.ConcurrentPins != 10 { - t.Error("expected 10 concurrent pins") - } - if cfg.PriorityPinMaxAge != 9*24*time.Hour { - t.Error("expected 9 days max age") - } - if cfg.PriorityPinMaxRetries != 2 { - t.Error("expected 2 max retries") - } -} - -func TestToJSON(t *testing.T) { - cfg := &Config{} - cfg.LoadJSON(cfgJSON) - newjson, err := cfg.ToJSON() - if err != nil { - t.Fatal(err) - } - cfg = &Config{} - err = cfg.LoadJSON(newjson) - if err != nil { - t.Fatal(err) - } -} - -func TestDefault(t *testing.T) { - cfg := &Config{} - cfg.Default() - if cfg.Validate() != nil { - t.Fatal("error validating") - } - - cfg.ConcurrentPins = -2 - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } - cfg.ConcurrentPins = 3 - cfg.PriorityPinMaxRetries = -1 - if cfg.Validate() == nil { - t.Fatal("expected error validating") - } -} - -func TestApplyEnvVars(t *testing.T) { - os.Setenv("CLUSTER_STATELESS_CONCURRENTPINS", "22") - os.Setenv("CLUSTER_STATELESS_PRIORITYPINMAXAGE", "72h") - cfg := &Config{} - cfg.ApplyEnvVars() - - if cfg.ConcurrentPins != 22 { - t.Fatal("failed to override concurrent_pins with env var") - } - - if cfg.PriorityPinMaxAge != 3*24*time.Hour { - t.Fatal("failed to override priority_pin_max_age with env var") - } -} diff --git a/packages/networking/ipfs-cluster/pintracker/stateless/stateless.go b/packages/networking/ipfs-cluster/pintracker/stateless/stateless.go deleted file mode 100644 index 8635f68..0000000 --- a/packages/networking/ipfs-cluster/pintracker/stateless/stateless.go +++ /dev/null @@ -1,691 +0,0 @@ -// Package stateless implements a PinTracker component for IPFS Cluster, which -// aims to reduce the memory footprint when handling really large cluster -// states. -package stateless - -import ( - "context" - "errors" - "fmt" - "sync" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/pintracker/optracker" - "github.com/ipfs-cluster/ipfs-cluster/state" - - logging "github.com/ipfs/go-log/v2" - peer "github.com/libp2p/go-libp2p/core/peer" - rpc "github.com/libp2p/go-libp2p-gorpc" - - "go.opencensus.io/trace" -) - -var logger = logging.Logger("pintracker") - -const pinsChannelSize = 1024 - -var ( - // ErrFullQueue is the error used when pin or unpin operation channel is full. - ErrFullQueue = errors.New("pin/unpin operation queue is full. 
Try increasing max_pin_queue_size") - - // items with this error should be recovered - errUnexpectedlyUnpinned = errors.New("the item should be pinned but it is not") -) - -// Tracker uses the optracker.OperationTracker to manage -// transitioning shared ipfs-cluster state (Pins) to the local IPFS node. -type Tracker struct { - config *Config - - optracker *optracker.OperationTracker - - peerID peer.ID - peerName string - - ctx context.Context - cancel func() - - getState func(ctx context.Context) (state.ReadOnly, error) - - rpcClient *rpc.Client - rpcReady chan struct{} - - priorityPinCh chan *optracker.Operation - pinCh chan *optracker.Operation - unpinCh chan *optracker.Operation - - shutdownMu sync.Mutex - shutdown bool - wg sync.WaitGroup -} - -// New creates a new StatelessPinTracker. -func New(cfg *Config, pid peer.ID, peerName string, getState func(ctx context.Context) (state.ReadOnly, error)) *Tracker { - ctx, cancel := context.WithCancel(context.Background()) - - spt := &Tracker{ - config: cfg, - peerID: pid, - peerName: peerName, - ctx: ctx, - cancel: cancel, - getState: getState, - optracker: optracker.NewOperationTracker(ctx, pid, peerName), - rpcReady: make(chan struct{}, 1), - priorityPinCh: make(chan *optracker.Operation, cfg.MaxPinQueueSize), - pinCh: make(chan *optracker.Operation, cfg.MaxPinQueueSize), - unpinCh: make(chan *optracker.Operation, cfg.MaxPinQueueSize), - } - - for i := 0; i < spt.config.ConcurrentPins; i++ { - go spt.opWorker(spt.pin, spt.priorityPinCh, spt.pinCh) - } - go spt.opWorker(spt.unpin, spt.unpinCh, nil) - - return spt -} - -// we can get our IPFS id from our own monitor ping metrics which -// are refreshed regularly. -func (spt *Tracker) getIPFSID(ctx context.Context) api.IPFSID { - // Wait until RPC is ready - <-spt.rpcReady - - var ipfsid api.IPFSID - err := spt.rpcClient.CallContext( - ctx, - "", - "Cluster", - "IPFSID", - peer.ID(""), // local peer - &ipfsid, - ) - if err != nil { - logger.Error(err) - } - return ipfsid -} - -// receives a pin Function (pin or unpin) and channels. Used for both pinning -// and unpinning. -func (spt *Tracker) opWorker(pinF func(*optracker.Operation) error, prioCh, normalCh chan *optracker.Operation) { - - var op *optracker.Operation - - for { - // Process the priority channel first. - select { - case op = <-prioCh: - goto APPLY_OP - case <-spt.ctx.Done(): - return - default: - } - - // Then process things on the other channels. - // Block if there are no things to process. - select { - case op = <-prioCh: - goto APPLY_OP - case op = <-normalCh: - goto APPLY_OP - case <-spt.ctx.Done(): - return - } - - // apply operations that came from some channel - APPLY_OP: - if clean := applyPinF(pinF, op); clean { - spt.optracker.Clean(op.Context(), op) - } - } -} - -// applyPinF returns true if the operation can be considered "DONE". -func applyPinF(pinF func(*optracker.Operation) error, op *optracker.Operation) bool { - if op.Canceled() { - // operation was canceled. Move on. - // This saves some time, but not 100% needed. - return false - } - op.SetPhase(optracker.PhaseInProgress) - op.IncAttempt() - err := pinF(op) // call pin/unpin - if err != nil { - if op.Canceled() { - // there was an error because - // we were canceled. Move on. - return false - } - op.SetError(err) - op.Cancel() - return false - } - op.SetPhase(optracker.PhaseDone) - op.Cancel() - return true // this tells the opWorker to clean the operation from the tracker. 
-}
-
-func (spt *Tracker) pin(op *optracker.Operation) error {
-	ctx, span := trace.StartSpan(op.Context(), "tracker/stateless/pin")
-	defer span.End()
-
-	logger.Debugf("issuing pin call for %s", op.Cid())
-	err := spt.rpcClient.CallContext(
-		ctx,
-		"",
-		"IPFSConnector",
-		"Pin",
-		op.Pin(),
-		&struct{}{},
-	)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-func (spt *Tracker) unpin(op *optracker.Operation) error {
-	ctx, span := trace.StartSpan(op.Context(), "tracker/stateless/unpin")
-	defer span.End()
-
-	logger.Debugf("issuing unpin call for %s", op.Cid())
-	err := spt.rpcClient.CallContext(
-		ctx,
-		"",
-		"IPFSConnector",
-		"Unpin",
-		op.Pin(),
-		&struct{}{},
-	)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-// enqueue puts a new operation on the queue, unless an ongoing one exists.
-func (spt *Tracker) enqueue(ctx context.Context, c api.Pin, typ optracker.OperationType) error {
-	ctx, span := trace.StartSpan(ctx, "tracker/stateless/enqueue")
-	defer span.End()
-
-	logger.Debugf("entering enqueue: pin: %+v", c)
-	op := spt.optracker.TrackNewOperation(ctx, c, typ, optracker.PhaseQueued)
-	if op == nil {
-		return nil // the operation exists and must be queued already.
-	}
-
-	var ch chan *optracker.Operation
-
-	switch typ {
-	case optracker.OperationPin:
-		isPriorityPin := time.Now().Before(c.Timestamp.Add(spt.config.PriorityPinMaxAge)) &&
-			op.AttemptCount() <= spt.config.PriorityPinMaxRetries
-		op.SetPriorityPin(isPriorityPin)
-
-		if isPriorityPin {
-			ch = spt.priorityPinCh
-		} else {
-			ch = spt.pinCh
-		}
-	case optracker.OperationUnpin:
-		ch = spt.unpinCh
-	}
-
-	select {
-	case ch <- op:
-	default:
-		err := ErrFullQueue
-		op.SetError(err)
-		op.Cancel()
-		logger.Error(err.Error())
-		return err
-	}
-	return nil
-}
-
-// SetClient makes the StatelessPinTracker ready to perform RPC requests to
-// other components.
-func (spt *Tracker) SetClient(c *rpc.Client) {
-	spt.rpcClient = c
-	close(spt.rpcReady)
-}
-
-// Shutdown finishes the services provided by the StatelessPinTracker
-// and cancels any active context.
-func (spt *Tracker) Shutdown(ctx context.Context) error {
-	ctx, span := trace.StartSpan(ctx, "tracker/stateless/Shutdown")
-	_ = ctx
-	defer span.End()
-
-	spt.shutdownMu.Lock()
-	defer spt.shutdownMu.Unlock()
-
-	if spt.shutdown {
-		logger.Debug("already shutdown")
-		return nil
-	}
-
-	logger.Info("stopping StatelessPinTracker")
-	spt.cancel()
-	spt.wg.Wait()
-	spt.shutdown = true
-	return nil
-}
-
-// Track tells the StatelessPinTracker to start managing a Cid,
-// possibly triggering Pin operations on the IPFS daemon.
-func (spt *Tracker) Track(ctx context.Context, c api.Pin) error {
-	ctx, span := trace.StartSpan(ctx, "tracker/stateless/Track")
-	defer span.End()
-
-	logger.Debugf("tracking %s", c.Cid)
-
-	// Sharded pins are never pinned. A sharded pin cannot turn into
-	// something else or vice versa, as happens with Remote pins, so
-	// we just ignore them.
-	if c.Type == api.MetaType {
-		return nil
-	}
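Together with opWorker further up, enqueue implements a two-tier queue: the send is non-blocking and fails fast with ErrFullQueue when the buffered channel is full, and each worker always drains the priority channel before the normal one. A runnable sketch of both halves under stand-in names:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

var errFullQueue = errors.New("operation queue is full")

// worker mirrors opWorker's two-step select: a non-blocking poll of the
// priority channel first, then a blocking wait on both channels.
func worker(ctx context.Context, prio, normal <-chan string, apply func(string)) {
	for {
		select {
		case item := <-prio:
			apply(item)
			continue
		case <-ctx.Done():
			return
		default:
		}
		select {
		case item := <-prio:
			apply(item)
		case item := <-normal:
			apply(item)
		case <-ctx.Done():
			return
		}
	}
}

// enqueue mirrors Tracker.enqueue's non-blocking send: when the buffered
// queue is full, fail fast instead of blocking the caller.
func enqueue(ch chan<- string, item string) error {
	select {
	case ch <- item:
		return nil
	default:
		return errFullQueue
	}
}

func main() {
	prio := make(chan string, 4)
	normal := make(chan string, 4)

	_ = enqueue(normal, "old-pin")
	_ = enqueue(prio, "fresh-pin")

	full := make(chan string)       // unbuffered and never drained
	fmt.Println(enqueue(full, "x")) // operation queue is full

	ctx, cancel := context.WithCancel(context.Background())
	processed := 0
	go worker(ctx, prio, normal, func(s string) {
		fmt.Println("pinning", s) // "fresh-pin" prints first: priority wins
		processed++
		if processed == 2 {
			cancel()
		}
	})
	<-ctx.Done()
}
```

Which tier an operation lands in is decided per pin in enqueue above: pins younger than PriorityPinMaxAge with few attempts go to the priority channel, everything else to the normal one.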
-
-	// Trigger unpin whenever something remote is tracked.
-	// Note, IPFSConn checks with pin/ls before triggering
-	// pin/rm.
-	if c.IsRemotePin(spt.peerID) {
-		op := spt.optracker.TrackNewOperation(ctx, c, optracker.OperationRemote, optracker.PhaseInProgress)
-		if op == nil {
-			return nil // ongoing unpin
-		}
-		err := spt.unpin(op)
-		op.Cancel()
-		if err != nil {
-			op.SetError(err)
-			return nil
-		}
-
-		op.SetPhase(optracker.PhaseDone)
-		spt.optracker.Clean(ctx, op)
-		return nil
-	}
-
-	return spt.enqueue(ctx, c, optracker.OperationPin)
-}
-
-// Untrack tells the StatelessPinTracker to stop managing a Cid.
-// If the Cid is pinned locally, it will be unpinned.
-func (spt *Tracker) Untrack(ctx context.Context, c api.Cid) error {
-	ctx, span := trace.StartSpan(ctx, "tracker/stateless/Untrack")
-	defer span.End()
-
-	logger.Debugf("untracking %s", c)
-	return spt.enqueue(ctx, api.PinCid(c), optracker.OperationUnpin)
-}
-
-// StatusAll returns information for all Cids pinned to the local IPFS node.
-func (spt *Tracker) StatusAll(ctx context.Context, filter api.TrackerStatus, out chan<- api.PinInfo) error {
-	ctx, span := trace.StartSpan(ctx, "tracker/stateless/StatusAll")
-	defer span.End()
-
-	ipfsid := spt.getIPFSID(ctx)
-
-	// Any other states are just operation-tracker states, so we just give
-	// those and return.
-	if !filter.Match(
-		api.TrackerStatusPinned | api.TrackerStatusUnexpectedlyUnpinned |
-			api.TrackerStatusSharded | api.TrackerStatusRemote) {
-		return spt.optracker.GetAllChannel(ctx, filter, ipfsid, out)
-	}
-
-	defer close(out)
-
-	// get global state - cluster pinset
-	st, err := spt.getState(ctx)
-	if err != nil {
-		logger.Error(err)
-		return err
-	}
-
-	var ipfsRecursivePins map[api.Cid]api.IPFSPinStatus
-	// Only query IPFS if we want the status for pinned items.
-	if filter.Match(api.TrackerStatusPinned | api.TrackerStatusUnexpectedlyUnpinned) {
-		ipfsRecursivePins = make(map[api.Cid]api.IPFSPinStatus)
-		// At some point we need a full map of what we have and what
-		// we don't. The IPFS pinset is the smallest thing we can keep
-		// in memory.
-		ipfsPinsCh, err := spt.ipfsPins(ctx)
-		if err != nil {
-			logger.Error(err)
-			return err
-		}
-		for ipfsPinInfo := range ipfsPinsCh {
-			ipfsRecursivePins[ipfsPinInfo.Cid] = ipfsPinInfo.Type
-		}
-	}
-
-	// Prepare pinset streaming
-	statePins := make(chan api.Pin, pinsChannelSize)
-	go func() {
-		err = st.List(ctx, statePins)
-		if err != nil {
-			logger.Error(err)
-		}
-	}()
-
-	// a shorthand for this select.
-	trySend := func(info api.PinInfo) bool {
-		select {
-		case <-ctx.Done():
-			return false
-		case out <- info:
-			return true
-		}
-	}
-
-	// For every item in the state.
-	for p := range statePins {
-		select {
-		case <-ctx.Done():
-		default:
-		}
-
-		// if there is an operation, issue that and move on
-		info, ok := spt.optracker.GetExists(ctx, p.Cid, ipfsid)
-		if ok && filter.Match(info.Status) {
-			if !trySend(info) {
-				return fmt.Errorf("error issuing PinInfo: %w", ctx.Err())
-			}
-			continue // next pin
-		}
-
-		// Preliminary PinInfo for this Pin.
- info = api.PinInfo{ - Cid: p.Cid, - Name: p.Name, - Peer: spt.peerID, - Allocations: p.Allocations, - Origins: p.Origins, - Created: p.Timestamp, - Metadata: p.Metadata, - - PinInfoShort: api.PinInfoShort{ - PeerName: spt.peerName, - IPFS: ipfsid.ID, - IPFSAddresses: ipfsid.Addresses, - Status: api.TrackerStatusUndefined, // TBD - TS: p.Timestamp, - Error: "", - AttemptCount: 0, - PriorityPin: false, - }, - } - - ipfsStatus, pinnedInIpfs := ipfsRecursivePins[api.Cid(p.Cid)] - - switch { - case p.Type == api.MetaType: - info.Status = api.TrackerStatusSharded - case p.IsRemotePin(spt.peerID): - info.Status = api.TrackerStatusRemote - case pinnedInIpfs: - // No need to filter. pinnedInIpfs is false - // unless the filter is Pinned | - // UnexpectedlyUnpinned. We filter at the end. - info.Status = ipfsStatus.ToTrackerStatus() - default: - // Not on an operation - // Not a meta pin - // Not a remote pin - // Not a pin on ipfs - - // We understand that this is something that - // should be pinned on IPFS and it is not. - info.Status = api.TrackerStatusUnexpectedlyUnpinned - info.Error = errUnexpectedlyUnpinned.Error() - } - if !filter.Match(info.Status) { - continue - } - - if !trySend(info) { - return fmt.Errorf("error issuing PinInfo: %w", ctx.Err()) - } - } - return nil -} - -// Status returns information for a Cid pinned to the local IPFS node. -func (spt *Tracker) Status(ctx context.Context, c api.Cid) api.PinInfo { - ctx, span := trace.StartSpan(ctx, "tracker/stateless/Status") - defer span.End() - - ipfsid := spt.getIPFSID(ctx) - - // check if c has an inflight operation or errorred operation in optracker - if oppi, ok := spt.optracker.GetExists(ctx, c, ipfsid); ok { - return oppi - } - - pinInfo := api.PinInfo{ - Cid: c, - Peer: spt.peerID, - Name: "", // etc to be filled later - PinInfoShort: api.PinInfoShort{ - PeerName: spt.peerName, - IPFS: ipfsid.ID, - IPFSAddresses: ipfsid.Addresses, - TS: time.Now(), - AttemptCount: 0, - PriorityPin: false, - }, - } - - // check global state to see if cluster should even be caring about - // the provided cid - var gpin api.Pin - st, err := spt.getState(ctx) - if err != nil { - logger.Error(err) - addError(&pinInfo, err) - return pinInfo - } - - gpin, err = st.Get(ctx, c) - if err == state.ErrNotFound { - pinInfo.Status = api.TrackerStatusUnpinned - return pinInfo - } - if err != nil { - logger.Error(err) - addError(&pinInfo, err) - return pinInfo - } - // The pin IS in the state. - pinInfo.Name = gpin.Name - pinInfo.TS = gpin.Timestamp - pinInfo.Allocations = gpin.Allocations - pinInfo.Origins = gpin.Origins - pinInfo.Created = gpin.Timestamp - pinInfo.Metadata = gpin.Metadata - - // check if pin is a meta pin - if gpin.Type == api.MetaType { - pinInfo.Status = api.TrackerStatusSharded - return pinInfo - } - - // check if pin is a remote pin - if gpin.IsRemotePin(spt.peerID) { - pinInfo.Status = api.TrackerStatusRemote - return pinInfo - } - - // else attempt to get status from ipfs node - var ips api.IPFSPinStatus - err = spt.rpcClient.CallContext( - ctx, - "", - "IPFSConnector", - "PinLsCid", - gpin, - &ips, - ) - if err != nil { - logger.Error(err) - addError(&pinInfo, err) - return pinInfo - } - - ipfsStatus := ips.ToTrackerStatus() - switch ipfsStatus { - case api.TrackerStatusUnpinned: - // The item is in the state but not in IPFS: - // PinError. Should be pinned. 
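-		// It is reported as unexpectedly_unpinned so that Recover()
-		// can re-queue a pin operation for it.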
- pinInfo.Status = api.TrackerStatusUnexpectedlyUnpinned - pinInfo.Error = errUnexpectedlyUnpinned.Error() - default: - pinInfo.Status = ipfsStatus - } - return pinInfo -} - -// RecoverAll attempts to recover all items tracked by this peer. It returns -// any errors or when it is done re-tracking. -func (spt *Tracker) RecoverAll(ctx context.Context, out chan<- api.PinInfo) error { - defer close(out) - - ctx, span := trace.StartSpan(ctx, "tracker/stateless/RecoverAll") - defer span.End() - - statusesCh := make(chan api.PinInfo, 1024) - go func() { - err := spt.StatusAll(ctx, api.TrackerStatusUndefined, statusesCh) - if err != nil { - logger.Error(err) - } - }() - - for st := range statusesCh { - // Break out if we shutdown. We might be going through - // a very long list of statuses. - select { - case <-spt.ctx.Done(): - err := fmt.Errorf("RecoverAll aborted: %w", ctx.Err()) - logger.Error(err) - return err - default: - p, err := spt.recoverWithPinInfo(ctx, st) - if err != nil { - err = fmt.Errorf("RecoverAll error: %w", err) - logger.Error(err) - return err - } - if p.Defined() { - select { - case <-ctx.Done(): - err = fmt.Errorf("RecoverAll aborted: %w", ctx.Err()) - logger.Error(err) - return err - case out <- p: - } - } - } - } - return nil -} - -// Recover will trigger pinning or unpinning for items in -// PinError or UnpinError states. -func (spt *Tracker) Recover(ctx context.Context, c api.Cid) (api.PinInfo, error) { - ctx, span := trace.StartSpan(ctx, "tracker/stateless/Recover") - defer span.End() - - pi := spt.Status(ctx, c) - - recPi, err := spt.recoverWithPinInfo(ctx, pi) - // if it was not enqueued, no updated pin-info is returned. - // Use the one we had. - if !recPi.Defined() { - recPi = pi - } - return recPi, err -} - -func (spt *Tracker) recoverWithPinInfo(ctx context.Context, pi api.PinInfo) (api.PinInfo, error) { - st, err := spt.getState(ctx) - if err != nil { - logger.Error(err) - return api.PinInfo{}, err - } - - var pin api.Pin - - switch pi.Status { - case api.TrackerStatusPinError, api.TrackerStatusUnexpectedlyUnpinned: - pin, err = st.Get(ctx, pi.Cid) - if err != nil { // ignore error - in case pin was removed while recovering - logger.Warn(err) - return spt.Status(ctx, pi.Cid), nil - } - logger.Infof("Restarting pin operation for %s", pi.Cid) - err = spt.enqueue(ctx, pin, optracker.OperationPin) - case api.TrackerStatusUnpinError: - logger.Infof("Restarting unpin operation for %s", pi.Cid) - err = spt.enqueue(ctx, api.PinCid(pi.Cid), optracker.OperationUnpin) - default: - // We do not return any information when recover was a no-op - return api.PinInfo{}, nil - } - if err != nil { - return spt.Status(ctx, pi.Cid), err - } - - // This status call should be cheap as it would normally come from the - // optracker and does not need to hit ipfs. - return spt.Status(ctx, pi.Cid), nil -} - -func (spt *Tracker) ipfsPins(ctx context.Context) (<-chan api.IPFSPinInfo, error) { - ctx, span := trace.StartSpan(ctx, "tracker/stateless/ipfsStatusAll") - defer span.End() - - in := make(chan []string, 1) // type filter. - in <- []string{"recursive", "direct"} - close(in) - out := make(chan api.IPFSPinInfo, pinsChannelSize) - - go func() { - err := spt.rpcClient.Stream( - ctx, - "", - "IPFSConnector", - "PinLs", - in, - out, - ) - if err != nil { - logger.Error(err) - } - }() - return out, nil -} - -// PinQueueSize returns the current size of the pinning queue. 
-func (spt *Tracker) PinQueueSize(ctx context.Context) (int64, error) { - return spt.optracker.PinQueueSize(), nil -} - -// func (spt *Tracker) getErrorsAll(ctx context.Context) []api.PinInfo { -// return spt.optracker.Filter(ctx, optracker.PhaseError) -// } - -// OpContext exports the internal optracker's OpContext method. -// For testing purposes only. -func (spt *Tracker) OpContext(ctx context.Context, c api.Cid) context.Context { - return spt.optracker.OpContext(ctx, c) -} - -func addError(pinInfo *api.PinInfo, err error) { - pinInfo.Error = err.Error() - pinInfo.Status = api.TrackerStatusClusterError -} diff --git a/packages/networking/ipfs-cluster/pintracker/stateless/stateless_test.go b/packages/networking/ipfs-cluster/pintracker/stateless/stateless_test.go deleted file mode 100644 index f31f271..0000000 --- a/packages/networking/ipfs-cluster/pintracker/stateless/stateless_test.go +++ /dev/null @@ -1,575 +0,0 @@ -package stateless - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/api" - "github.com/ipfs-cluster/ipfs-cluster/datastore/inmem" - "github.com/ipfs-cluster/ipfs-cluster/state" - "github.com/ipfs-cluster/ipfs-cluster/state/dsstate" - "github.com/ipfs-cluster/ipfs-cluster/test" - - peer "github.com/libp2p/go-libp2p/core/peer" - rpc "github.com/libp2p/go-libp2p-gorpc" -) - -var ( - pinCancelCid = test.Cid3 - unpinCancelCid = test.Cid2 - pinErrCid = test.ErrorCid - errPinCancelCid = errors.New("should not have received rpc.IPFSPin operation") - errUnpinCancelCid = errors.New("should not have received rpc.IPFSUnpin operation") - pinOpts = api.PinOptions{ - ReplicationFactorMax: -1, - ReplicationFactorMin: -1, - } -) - -// func TestMain(m *testing.M) { -// logging.SetLogLevel("pintracker", "debug") - -// os.Exit(m.Run()) -// } - -// Overwrite Pin and Unpin methods on the normal mock in order to return -// special errors when unwanted operations have been triggered. 
-type mockIPFS struct{} - -func (mock *mockIPFS) Pin(ctx context.Context, in api.Pin, out *struct{}) error { - switch in.Cid { - case pinCancelCid: - return errPinCancelCid - case test.SlowCid1: - time.Sleep(time.Second) - case pinErrCid: - return errors.New("error pinning") - } - return nil -} - -func (mock *mockIPFS) Unpin(ctx context.Context, in api.Pin, out *struct{}) error { - switch in.Cid { - case unpinCancelCid: - return errUnpinCancelCid - case test.SlowCid1: - time.Sleep(time.Second) - case pinErrCid: - return errors.New("error unpinning") - } - - return nil -} - -func (mock *mockIPFS) PinLs(ctx context.Context, in <-chan []string, out chan<- api.IPFSPinInfo) error { - out <- api.IPFSPinInfo{ - Cid: api.Cid(test.Cid1), - Type: api.IPFSPinStatusRecursive, - } - - out <- api.IPFSPinInfo{ - Cid: api.Cid(test.Cid2), - Type: api.IPFSPinStatusRecursive, - } - close(out) - return nil -} - -func (mock *mockIPFS) PinLsCid(ctx context.Context, in api.Pin, out *api.IPFSPinStatus) error { - switch in.Cid { - case test.Cid1, test.Cid2: - *out = api.IPFSPinStatusRecursive - default: - *out = api.IPFSPinStatusUnpinned - return nil - } - return nil -} - -type mockCluster struct{} - -func (mock *mockCluster) IPFSID(ctx context.Context, in peer.ID, out *api.IPFSID) error { - addr, _ := api.NewMultiaddr("/ip4/127.0.0.1/tcp/4001/p2p/" + test.PeerID1.Pretty()) - *out = api.IPFSID{ - ID: test.PeerID1, - Addresses: []api.Multiaddr{addr}, - } - return nil -} - -func mockRPCClient(t testing.TB) *rpc.Client { - t.Helper() - - s := rpc.NewServer(nil, "mock") - c := rpc.NewClientWithServer(nil, "mock", s) - - err := s.RegisterName("IPFSConnector", &mockIPFS{}) - if err != nil { - t.Fatal(err) - } - - err = s.RegisterName("Cluster", &mockCluster{}) - if err != nil { - t.Fatal(err) - } - return c -} - -func getStateFunc(t testing.TB, items ...api.Pin) func(context.Context) (state.ReadOnly, error) { - t.Helper() - ctx := context.Background() - - st, err := dsstate.New(ctx, inmem.New(), "", dsstate.DefaultHandle()) - if err != nil { - t.Fatal(err) - } - - for _, item := range items { - err := st.Add(ctx, item) - if err != nil { - t.Fatal(err) - } - } - return func(ctx context.Context) (state.ReadOnly, error) { - return st, nil - } - -} - -func testStatelessPinTracker(t testing.TB, pins ...api.Pin) *Tracker { - t.Helper() - - cfg := &Config{} - cfg.Default() - cfg.ConcurrentPins = 1 - cfg.PriorityPinMaxAge = 10 * time.Second - cfg.PriorityPinMaxRetries = 1 - spt := New(cfg, test.PeerID1, test.PeerName1, getStateFunc(t, pins...)) - spt.SetClient(mockRPCClient(t)) - return spt -} - -func TestStatelessPinTracker_New(t *testing.T) { - ctx := context.Background() - spt := testStatelessPinTracker(t) - defer spt.Shutdown(ctx) -} - -func TestStatelessPinTracker_Shutdown(t *testing.T) { - ctx := context.Background() - spt := testStatelessPinTracker(t) - err := spt.Shutdown(ctx) - if err != nil { - t.Fatal(err) - } - err = spt.Shutdown(ctx) - if err != nil { - t.Fatal(err) - } -} - -func TestUntrackTrack(t *testing.T) { - ctx := context.Background() - spt := testStatelessPinTracker(t) - defer spt.Shutdown(ctx) - - h1 := test.Cid1 - - // LocalPin - c := api.PinWithOpts(h1, pinOpts) - - err := spt.Track(context.Background(), c) - if err != nil { - t.Fatal(err) - } - - time.Sleep(time.Second / 2) - - err = spt.Untrack(context.Background(), h1) - if err != nil { - t.Fatal(err) - } -} - -func TestTrackUntrackWithCancel(t *testing.T) { - ctx := context.Background() - spt := testStatelessPinTracker(t) - defer spt.Shutdown(ctx) - 
- slowPinCid := test.SlowCid1 - - // LocalPin - slowPin := api.PinWithOpts(slowPinCid, pinOpts) - - err := spt.Track(ctx, slowPin) - if err != nil { - t.Fatal(err) - } - - time.Sleep(100 * time.Millisecond) // let pinning start - - pInfo := spt.optracker.Get(ctx, slowPin.Cid, api.IPFSID{}) - if pInfo.Status == api.TrackerStatusUnpinned { - t.Fatal("slowPin should be tracked") - } - - if pInfo.Status == api.TrackerStatusPinning { - go func() { - err = spt.Untrack(ctx, slowPinCid) - if err != nil { - t.Error(err) - return - } - }() - select { - case <-spt.optracker.OpContext(ctx, slowPinCid).Done(): - return - case <-time.Tick(100 * time.Millisecond): - t.Errorf("operation context should have been canceled by now") - } - } else { - t.Error("slowPin should be pinning and is:", pInfo.Status) - } -} - -// This tracks a slow CID and then tracks a fast/normal one. -// Because we are pinning the slow CID, the fast one will stay -// queued. We proceed to untrack it then. Since it was never -// "pinning", it should simply be unqueued (or ignored), and no -// canceling of the pinning operation happens (unlike on WithCancel). -func TestTrackUntrackWithNoCancel(t *testing.T) { - ctx := context.Background() - spt := testStatelessPinTracker(t) - defer spt.Shutdown(ctx) - - slowPinCid := test.SlowCid1 - fastPinCid := pinCancelCid - - // SlowLocalPin - slowPin := api.PinWithOpts(slowPinCid, pinOpts) - - // LocalPin - fastPin := api.PinWithOpts(fastPinCid, pinOpts) - - err := spt.Track(ctx, slowPin) - if err != nil { - t.Fatal(err) - } - - // Otherwise fails when running with -race - time.Sleep(300 * time.Millisecond) - - err = spt.Track(ctx, fastPin) - if err != nil { - t.Fatal(err) - } - - // fastPin should be queued because slow pin is pinning - fastPInfo := spt.optracker.Get(ctx, fastPin.Cid, api.IPFSID{}) - if fastPInfo.Status == api.TrackerStatusUnpinned { - t.Fatal("fastPin should be tracked") - } - if fastPInfo.Status == api.TrackerStatusPinQueued { - err = spt.Untrack(ctx, fastPinCid) - if err != nil { - t.Fatal(err) - } - // pi := spt.get(fastPinCid) - // if pi.Error == ErrPinCancelCid.Error() { - // t.Fatal(ErrPinCancelCid) - // } - } else { - t.Errorf("fastPin should be queued to pin but is %s", fastPInfo.Status) - } - - pi := spt.optracker.Get(ctx, fastPin.Cid, api.IPFSID{}) - if !pi.Cid.Defined() { - t.Error("fastPin should have been removed from tracker") - } -} - -func TestUntrackTrackWithCancel(t *testing.T) { - ctx := context.Background() - spt := testStatelessPinTracker(t) - defer spt.Shutdown(ctx) - - slowPinCid := test.SlowCid1 - - // LocalPin - slowPin := api.PinWithOpts(slowPinCid, pinOpts) - - err := spt.Track(ctx, slowPin) - if err != nil { - t.Fatal(err) - } - - time.Sleep(time.Second / 2) - - // Untrack should cancel the ongoing request - // and unpin right away - err = spt.Untrack(ctx, slowPinCid) - if err != nil { - t.Fatal(err) - } - - time.Sleep(100 * time.Millisecond) - - pi := spt.optracker.Get(ctx, slowPin.Cid, api.IPFSID{}) - if !pi.Cid.Defined() { - t.Fatal("expected slowPin to be tracked") - } - - if pi.Status == api.TrackerStatusUnpinning { - go func() { - err = spt.Track(ctx, slowPin) - if err != nil { - t.Error(err) - return - } - }() - select { - case <-spt.optracker.OpContext(ctx, slowPinCid).Done(): - return - case <-time.Tick(100 * time.Millisecond): - t.Errorf("operation context should have been canceled by now") - } - } else { - t.Error("slowPin should be in unpinning") - } - -} - -func TestUntrackTrackWithNoCancel(t *testing.T) { - ctx := context.Background() - 
spt := testStatelessPinTracker(t) - defer spt.Shutdown(ctx) - - slowPinCid := test.SlowCid1 - fastPinCid := unpinCancelCid - - // SlowLocalPin - slowPin := api.PinWithOpts(slowPinCid, pinOpts) - - // LocalPin - fastPin := api.PinWithOpts(fastPinCid, pinOpts) - - err := spt.Track(ctx, slowPin) - if err != nil { - t.Fatal(err) - } - - err = spt.Track(ctx, fastPin) - if err != nil { - t.Fatal(err) - } - - time.Sleep(3 * time.Second) - - err = spt.Untrack(ctx, slowPin.Cid) - if err != nil { - t.Fatal(err) - } - - err = spt.Untrack(ctx, fastPin.Cid) - if err != nil { - t.Fatal(err) - } - - pi := spt.optracker.Get(ctx, fastPin.Cid, api.IPFSID{}) - if !pi.Cid.Defined() { - t.Fatal("c untrack operation should be tracked") - } - - if pi.Status == api.TrackerStatusUnpinQueued { - err = spt.Track(ctx, fastPin) - if err != nil { - t.Fatal(err) - } - - // pi := spt.get(fastPinCid) - // if pi.Error == ErrUnpinCancelCid.Error() { - // t.Fatal(ErrUnpinCancelCid) - // } - } else { - t.Error("c should be queued to unpin") - } -} - -// TestStatusAll checks that StatusAll correctly reports tracked -// items and mismatches between what's on IPFS and on the state. -func TestStatusAll(t *testing.T) { - ctx := context.Background() - - normalPin := api.PinWithOpts(test.Cid1, pinOpts) - normalPin2 := api.PinWithOpts(test.Cid4, pinOpts) - - // - Build a state with one pins (Cid1,Cid4) - // - The IPFS Mock reports Cid1 and Cid2 - // - Track a SlowCid additionally - slowPin := api.PinWithOpts(test.SlowCid1, pinOpts) - spt := testStatelessPinTracker(t, normalPin, normalPin2, slowPin) - defer spt.Shutdown(ctx) - - err := spt.Track(ctx, slowPin) - if err != nil { - t.Fatal(err) - } - - time.Sleep(time.Second / 2) - - // Needs to return: - // * A slow CID pinning - // * Cid1 is pinned - // * Cid4 should be in PinError (it's in the state but not on IPFS) - stAll := make(chan api.PinInfo, 10) - err = spt.StatusAll(ctx, api.TrackerStatusUndefined, stAll) - if err != nil { - t.Fatal(err) - } - - n := 0 - for pi := range stAll { - n++ - switch pi.Cid { - case test.Cid1: - if pi.Status != api.TrackerStatusPinned { - t.Error(test.Cid1, " should be pinned") - } - case test.Cid4: - if pi.Status != api.TrackerStatusUnexpectedlyUnpinned { - t.Error(test.Cid2, " should be in unexpectedly_unpinned status") - } - case test.SlowCid1: - if pi.Status != api.TrackerStatusPinning { - t.Error("slowCid1 should be pinning") - } - default: - t.Error("Unexpected pin:", pi.Cid) - } - if pi.IPFS == "" { - t.Error("IPFS field should be set") - } - } - if n != 3 { - t.Errorf("wrong status length. Expected 3, got: %d", n) - } -} - -// TestStatus checks that the Status calls correctly reports tracked -// items and mismatches between what's on IPFS and on the state. 
-func TestStatus(t *testing.T) { - ctx := context.Background() - - normalPin := api.PinWithOpts(test.Cid1, pinOpts) - normalPin2 := api.PinWithOpts(test.Cid4, pinOpts) - - // - Build a state with one pins (Cid1,Cid4) - // - The IPFS Mock reports Cid1 and Cid2 - // - Track a SlowCid additionally - - spt := testStatelessPinTracker(t, normalPin, normalPin2) - defer spt.Shutdown(ctx) - - slowPin := api.PinWithOpts(test.SlowCid1, pinOpts) - err := spt.Track(ctx, slowPin) - if err != nil { - t.Fatal(err) - } - - time.Sleep(time.Second / 2) - - // Status needs to return: - // * For slowCid1: A slow CID pinning - // * For Cid1: pinned - // * For Cid4: unexpectedly_unpinned - - st := spt.Status(ctx, test.Cid1) - if st.Status != api.TrackerStatusPinned { - t.Error("cid1 should be pinned") - } - - st = spt.Status(ctx, test.Cid4) - if st.Status != api.TrackerStatusUnexpectedlyUnpinned { - t.Error("cid2 should be in unexpectedly_unpinned status") - } - - st = spt.Status(ctx, test.SlowCid1) - if st.Status != api.TrackerStatusPinning { - t.Error("slowCid1 should be pinning") - } - - if st.IPFS == "" { - t.Error("IPFS field should be set") - } -} - -// Test -func TestAttemptCountAndPriority(t *testing.T) { - ctx := context.Background() - - normalPin := api.PinWithOpts(test.Cid1, pinOpts) - normalPin2 := api.PinWithOpts(test.Cid4, pinOpts) - errPin := api.PinWithOpts(pinErrCid, pinOpts) - - spt := testStatelessPinTracker(t, normalPin, normalPin2, errPin) - defer spt.Shutdown(ctx) - - st := spt.Status(ctx, test.Cid1) - if st.AttemptCount != 0 { - t.Errorf("errPin should have 0 attempts as it was already pinned: %+v", st) - } - - err := spt.Track(ctx, errPin) - if err != nil { - t.Fatal(err) - } - time.Sleep(200 * time.Millisecond) // let the pin be applied - st = spt.Status(ctx, pinErrCid) - if st.AttemptCount != 1 { - t.Errorf("errPin should have 1 attempt count: %+v", st) - } - - // Retry 1 - _, err = spt.Recover(ctx, pinErrCid) - if err != nil { - t.Fatal(err) - } - time.Sleep(200 * time.Millisecond) // let the pin be applied - st = spt.Status(ctx, pinErrCid) - if st.AttemptCount != 2 || !st.PriorityPin { - t.Errorf("errPin should have 2 attempt counts and be priority: %+v", st) - } - - // Retry 2 - _, err = spt.Recover(ctx, pinErrCid) - if err != nil { - t.Fatal(err) - } - time.Sleep(200 * time.Millisecond) // let the pin be applied - st = spt.Status(ctx, pinErrCid) - if st.AttemptCount != 3 || st.PriorityPin { - t.Errorf("errPin should have 3 attempts and not be priority: %+v", st) - } - - err = spt.Untrack(ctx, pinErrCid) - time.Sleep(200 * time.Millisecond) // let the pin be applied - if err != nil { - t.Fatal(err) - } - st = spt.Status(ctx, pinErrCid) - if st.AttemptCount != 1 { - t.Errorf("errPin should have 1 attempt count to unpin: %+v", st) - } - - err = spt.Untrack(ctx, pinErrCid) - time.Sleep(200 * time.Millisecond) // let the pin be applied - if err != nil { - t.Fatal(err) - } - st = spt.Status(ctx, pinErrCid) - if st.AttemptCount != 2 { - t.Errorf("errPin should have 2 attempt counts to unpin: %+v", st) - } -} diff --git a/packages/networking/ipfs-cluster/pnet_test.go b/packages/networking/ipfs-cluster/pnet_test.go deleted file mode 100644 index d08c581..0000000 --- a/packages/networking/ipfs-cluster/pnet_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package ipfscluster - -import ( - "context" - "testing" -) - -func TestClusterSecretFormat(t *testing.T) { - goodSecret := "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" - emptySecret := "" - tooShort := "0123456789abcdef" - 
tooLong := "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0"
-	unsupportedChars := "0123456789abcdef0123456789!!!!!!0123456789abcdef0123456789abcdef"
-
-	_, err := DecodeClusterSecret(goodSecret)
-	if err != nil {
-		t.Fatal("Failed to decode well-formatted secret.")
-	}
-	decodedEmptySecret, err := DecodeClusterSecret(emptySecret)
-	if decodedEmptySecret != nil || err != nil {
-		t.Fatal("Unexpected output of decoding empty secret.")
-	}
-	_, err = DecodeClusterSecret(tooShort)
-	if err == nil {
-		t.Fatal("Successfully decoded secret that should have failed (too short).")
-	}
-	_, err = DecodeClusterSecret(tooLong)
-	if err == nil {
-		t.Fatal("Successfully decoded secret that should have failed (too long).")
-	}
-	_, err = DecodeClusterSecret(unsupportedChars)
-	if err == nil {
-		t.Fatal("Successfully decoded secret that should have failed (unsupported chars).")
-	}
-}
-
-func TestSimplePNet(t *testing.T) {
-	ctx := context.Background()
-	clusters, mocks, boot := peerManagerClusters(t)
-	defer cleanState()
-	defer shutdownClusters(t, clusters, mocks)
-	defer boot.Close()
-
-	if len(clusters) < 2 {
-		t.Skip("need at least 2 nodes for this test")
-	}
-
-	_, err := clusters[0].PeerAdd(ctx, clusters[1].id)
-	if err != nil {
-		t.Fatal(err)
-	}
-	ttlDelay()
-
-	if len(peers(ctx, t, clusters[0])) != len(peers(ctx, t, clusters[1])) {
-		t.Fatal("Expected same number of peers")
-	}
-	if len(peers(ctx, t, clusters[0])) < 2 {
-		// crdt mode has auto-discovered all peers at this point.
-		// Raft mode has 2 peers only.
-		t.Fatal("Expected at least 2 peers")
-	}
-}
-
-// // Adds one minute to tests. Disabled for the moment.
-// func TestClusterSecretRequired(t *testing.T) {
-// 	cl1Secret, err := pnet.GenerateV1Bytes()
-// 	if err != nil {
-// 		t.Fatal("Unable to generate cluster secret.")
-// 	}
-// 	cl1, _ := createOnePeerCluster(t, 1, (*cl1Secret)[:])
-// 	cl2, _ := createOnePeerCluster(t, 2, testingClusterSecret)
-// 	defer cleanState()
-// 	defer cl1.Shutdown()
-// 	defer cl2.Shutdown()
-// 	peers1 := cl1.Peers()
-// 	peers2 := cl2.Peers()
-//
-// 	_, err = cl1.PeerAdd(clusterAddr(cl2))
-// 	if err == nil {
-// 		t.Fatal("Peer entered private cluster without key.")
-// 	}
-
-// 	if len(peers1) != len(peers2) {
-// 		t.Fatal("Expected same number of peers")
-// 	}
-// 	if len(peers1) != 1 {
-// 		t.Fatal("Expected no peers other than self")
-// 	}
-// }
diff --git a/packages/networking/ipfs-cluster/project.nix b/packages/networking/ipfs-cluster/project.nix
deleted file mode 100644
index be54698..0000000
--- a/packages/networking/ipfs-cluster/project.nix
+++ /dev/null
@@ -1,59 +0,0 @@
-{ inputs, ... }:
-{
-  perSystem = { lib, pkgs, ... 
}: { - projectShells.ipfs-cluster = { - tools = [ - pkgs.go_1_19 - pkgs.gnumake - pkgs.gcc - ]; - env.GOPATH.eval = "$REPO_DATA_DIR/go"; - }; - packages.ipfs-cluster = with pkgs; buildGo119Module { - pname = "ipfs-cluster"; - version = "1.0.2"; - - src = with inputs.nix-filter.lib; filter { - root = ./.; - include = [ - "go.mod" - "go.sum" - (matchExt "go") - ] ++ (map inDirectory [ - "adder" - "allocator" - "api" - "cmd" - "cmdutils" - "config" - "consensus" - "datastore" - "docker" - "informer" - "ipfsconn" - "monitor" - "observations" - "pintracker" - "pstoremgr" - "rpcutil" - "sharness" - "state" - "test" - "version" - ]); - }; - - vendorHash = "sha256-EpZQ7br+ChoAGIj0g6pdpWvFeOFOn2i+6YRBgtzoO+A="; - - doCheck = false; - - meta = with lib; { - description = "Allocate, replicate, and track Pins across a cluster of IPFS daemons"; - homepage = "https://ipfscluster.io"; - license = licenses.mit; - platforms = platforms.unix; - maintainers = with maintainers; [ Luflosi jglukasik ]; - }; - }; - }; -} diff --git a/packages/networking/ipfs-cluster/pstoremgr/pstoremgr.go b/packages/networking/ipfs-cluster/pstoremgr/pstoremgr.go deleted file mode 100644 index 0f809dd..0000000 --- a/packages/networking/ipfs-cluster/pstoremgr/pstoremgr.go +++ /dev/null @@ -1,404 +0,0 @@ -// Package pstoremgr provides a Manager that simplifies handling -// addition, listing and removal of cluster peer multiaddresses from -// the libp2p Host. This includes resolving DNS addresses, decapsulating -// and encapsulating the /p2p/ (/ipfs/) protocol as needed, listing, saving -// and loading addresses. -package pstoremgr - -import ( - "bufio" - "context" - "fmt" - "os" - "sort" - "sync" - "time" - - logging "github.com/ipfs/go-log/v2" - host "github.com/libp2p/go-libp2p/core/host" - net "github.com/libp2p/go-libp2p/core/network" - peer "github.com/libp2p/go-libp2p/core/peer" - peerstore "github.com/libp2p/go-libp2p/core/peerstore" - pstoreutil "github.com/libp2p/go-libp2p/p2p/host/peerstore" - ma "github.com/multiformats/go-multiaddr" - madns "github.com/multiformats/go-multiaddr-dns" -) - -var logger = logging.Logger("pstoremgr") - -// PriorityTag is used to attach metadata to peers in the peerstore -// so they can be sorted. -var PriorityTag = "cluster" - -// Timeouts for network operations triggered by the Manager. -var ( - DNSTimeout = 5 * time.Second - ConnectTimeout = 5 * time.Second -) - -// Manager provides utilities for handling cluster peer addresses -// and storing them in a libp2p Host peerstore. -type Manager struct { - ctx context.Context - host host.Host - peerstoreLock sync.Mutex - peerstorePath string -} - -// New creates a Manager with the given libp2p Host and peerstorePath. -// The path indicates the place to persist and read peer addresses from. -// If empty, these operations (LoadPeerstore, SavePeerstore) will no-op. -func New(ctx context.Context, h host.Host, peerstorePath string) *Manager { - return &Manager{ - ctx: ctx, - host: h, - peerstorePath: peerstorePath, - } -} - -// ImportPeer adds a new peer address to the host's peerstore, optionally -// dialing to it. The address is expected to include the /p2p/ -// protocol part or to be a /dnsaddr/multiaddress -// Peers are added with the given ttl. 
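-//
-// A minimal usage sketch (address and TTL are illustrative):
-//
-//	addr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/9096/p2p/" + pid.String())
-//	id, err := mgr.ImportPeer(addr, true, peerstore.PermanentAddrTTL)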
-func (pm *Manager) ImportPeer(addr ma.Multiaddr, connect bool, ttl time.Duration) (peer.ID, error) { - if pm.host == nil { - return "", nil - } - - protos := addr.Protocols() - if len(protos) > 0 && protos[0].Code == ma.P_DNSADDR { - // We need to pre-resolve this - logger.Debugf("resolving %s", addr) - ctx, cancel := context.WithTimeout(pm.ctx, DNSTimeout) - defer cancel() - - resolvedAddrs, err := madns.Resolve(ctx, addr) - if err != nil { - return "", err - } - if len(resolvedAddrs) == 0 { - return "", fmt.Errorf("%s: no resolved addresses", addr) - } - var pid peer.ID - for _, add := range resolvedAddrs { - pid, err = pm.ImportPeer(add, connect, ttl) - if err != nil { - return "", err - } - } - return pid, nil // returns the last peer ID - } - - pinfo, err := peer.AddrInfoFromP2pAddr(addr) - if err != nil { - return "", err - } - - // Do not add ourselves - if pinfo.ID == pm.host.ID() { - return pinfo.ID, nil - } - - logger.Debugf("adding peer address %s", addr) - pm.host.Peerstore().AddAddrs(pinfo.ID, pinfo.Addrs, ttl) - - if connect { - go func() { - ctx := net.WithDialPeerTimeout(pm.ctx, ConnectTimeout) - pm.host.Connect(ctx, *pinfo) - }() - } - return pinfo.ID, nil -} - -// RmPeer clear all addresses for a given peer ID from the host's peerstore. -func (pm *Manager) RmPeer(pid peer.ID) error { - if pm.host == nil { - return nil - } - - logger.Debugf("forgetting peer %s", pid.Pretty()) - pm.host.Peerstore().ClearAddrs(pid) - return nil -} - -// if the peer has dns addresses, return only those, otherwise -// return all. -func (pm *Manager) filteredPeerAddrs(p peer.ID) []ma.Multiaddr { - all := pm.host.Peerstore().Addrs(p) - peerAddrs := []ma.Multiaddr{} - peerDNSAddrs := []ma.Multiaddr{} - - for _, a := range all { - if madns.Matches(a) { - peerDNSAddrs = append(peerDNSAddrs, a) - } else { - peerAddrs = append(peerAddrs, a) - } - } - - if len(peerDNSAddrs) > 0 { - return peerDNSAddrs - } - - sort.Sort(byString(peerAddrs)) - return peerAddrs -} - -// PeerInfos returns a slice of peerinfos for the given set of peers in order -// of priority. For peers for which we know DNS -// multiaddresses, we only include those. Otherwise, the AddrInfo includes all -// the multiaddresses known for that peer. Peers without addresses are not -// included. -func (pm *Manager) PeerInfos(peers []peer.ID) []peer.AddrInfo { - if pm.host == nil { - return nil - } - - if peers == nil { - return nil - } - - var pinfos []peer.AddrInfo - for _, p := range peers { - if p == pm.host.ID() { - continue - } - pinfo := peer.AddrInfo{ - ID: p, - Addrs: pm.filteredPeerAddrs(p), - } - if len(pinfo.Addrs) > 0 { - pinfos = append(pinfos, pinfo) - } - } - - toSort := &peerSort{ - pinfos: pinfos, - pstore: pm.host.Peerstore(), - } - // Sort from highest to lowest priority - sort.Sort(toSort) - - return toSort.pinfos -} - -// ImportPeers calls ImportPeer for every address in the given slice, using the -// given connect parameter. Peers are tagged with priority as given -// by their position in the list. -func (pm *Manager) ImportPeers(addrs []ma.Multiaddr, connect bool, ttl time.Duration) error { - for i, a := range addrs { - pid, err := pm.ImportPeer(a, connect, ttl) - if err == nil { - pm.SetPriority(pid, i) - } - } - return nil -} - -// ImportPeersFromPeerstore reads the peerstore file and calls ImportPeers with -// the addresses obtained from it. 
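-// Note that ImportPeers swallows per-address import errors, so this always
-// returns nil.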
-func (pm *Manager) ImportPeersFromPeerstore(connect bool, ttl time.Duration) error { - return pm.ImportPeers(pm.LoadPeerstore(), connect, ttl) -} - -// LoadPeerstore parses the peerstore file and returns the list -// of addresses read from it. -func (pm *Manager) LoadPeerstore() (addrs []ma.Multiaddr) { - if pm.peerstorePath == "" { - return - } - pm.peerstoreLock.Lock() - defer pm.peerstoreLock.Unlock() - - f, err := os.Open(pm.peerstorePath) - if err != nil { - return // nothing to load - } - - defer f.Close() - - scanner := bufio.NewScanner(f) - for scanner.Scan() { - addrStr := scanner.Text() - if len(addrStr) == 0 || addrStr[0] != '/' { - // skip anything that is not going to be a multiaddress - continue - } - addr, err := ma.NewMultiaddr(addrStr) - if err != nil { - logger.Errorf( - "error parsing multiaddress from %s: %s", - pm.peerstorePath, - err, - ) - } - addrs = append(addrs, addr) - } - if err := scanner.Err(); err != nil { - logger.Errorf("reading %s: %s", pm.peerstorePath, err) - } - return addrs -} - -// SavePeerstore stores a slice of multiaddresses in the peerstore file, one -// per line. -func (pm *Manager) SavePeerstore(pinfos []peer.AddrInfo) error { - if pm.peerstorePath == "" { - return nil - } - - pm.peerstoreLock.Lock() - defer pm.peerstoreLock.Unlock() - - f, err := os.Create(pm.peerstorePath) - if err != nil { - logger.Errorf( - "could not save peer addresses to %s: %s", - pm.peerstorePath, - err, - ) - return err - } - defer f.Close() - - for _, pinfo := range pinfos { - if len(pinfo.Addrs) == 0 { - logger.Warn("address info does not have any multiaddresses") - continue - } - - addrs, err := peer.AddrInfoToP2pAddrs(&pinfo) - if err != nil { - logger.Warn(err) - continue - } - for _, a := range addrs { - _, err = f.Write([]byte(fmt.Sprintf("%s\n", a.String()))) - if err != nil { - return err - } - } - } - return nil -} - -// SavePeerstoreForPeers calls PeerInfos and then saves the peerstore -// file using the result. -func (pm *Manager) SavePeerstoreForPeers(peers []peer.ID) error { - return pm.SavePeerstore(pm.PeerInfos(peers)) -} - -// Bootstrap attempts to get up to "count" connected peers by trying those -// in the peerstore in priority order. It returns the list of peers it managed -// to connect to. -func (pm *Manager) Bootstrap(count int) []peer.ID { - knownPeers := pm.host.Peerstore().PeersWithAddrs() - toSort := &peerSort{ - pinfos: pstoreutil.PeerInfos(pm.host.Peerstore(), knownPeers), - pstore: pm.host.Peerstore(), - } - - // Sort from highest to lowest priority - sort.Sort(toSort) - - pinfos := toSort.pinfos - lenKnown := len(pinfos) - totalConns := 0 - connectedPeers := []peer.ID{} - - // keep conecting while we have peers in the store - // and we have not reached count. - for i := 0; i < lenKnown && totalConns < count; i++ { - pinfo := pinfos[i] - ctx, cancel := context.WithTimeout(pm.ctx, ConnectTimeout) - defer cancel() - - if pm.host.Network().Connectedness(pinfo.ID) == net.Connected { - // We are connected, assume success and do not try - // to re-connect - totalConns++ - continue - } - - logger.Debugf("connecting to %s", pinfo.ID) - err := pm.host.Connect(ctx, pinfo) - if err != nil { - logger.Debug(err) - pm.SetPriority(pinfo.ID, 9999) - continue - } - logger.Debugf("connected to %s", pinfo.ID) - totalConns++ - connectedPeers = append(connectedPeers, pinfo.ID) - } - return connectedPeers -} - -// SetPriority attaches a priority to a peer. 0 means more priority than -// 1. 1 means more priority than 2 etc. 
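-// The priority is stored under PriorityTag in the peerstore; peerSort and
-// Bootstrap read it back when ordering peers.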
-func (pm *Manager) SetPriority(pid peer.ID, prio int) error { - return pm.host.Peerstore().Put(pid, PriorityTag, prio) -} - -// HandlePeerFound implements the Notifee interface for discovery (mdns). -func (pm *Manager) HandlePeerFound(p peer.AddrInfo) { - addrs, err := peer.AddrInfoToP2pAddrs(&p) - if err != nil { - logger.Error(err) - return - } - // actually mdns returns a single address but let's do things - // as if there were several - for _, a := range addrs { - _, err = pm.ImportPeer(a, true, peerstore.ConnectedAddrTTL) - if err != nil { - logger.Error(err) - } - } -} - -// peerSort is used to sort a slice of PinInfos given the PriorityTag in the -// peerstore, from the lowest tag value (0 is the highest priority) to the -// highest, Peers without a valid priority tag are considered as having a tag -// with value 0, so they will be among the first elements in the resulting -// slice. -type peerSort struct { - pinfos []peer.AddrInfo - pstore peerstore.Peerstore -} - -func (ps *peerSort) Len() int { - return len(ps.pinfos) -} - -func (ps *peerSort) Less(i, j int) bool { - pinfo1 := ps.pinfos[i] - pinfo2 := ps.pinfos[j] - - var prio1, prio2 int - - prio1iface, err := ps.pstore.Get(pinfo1.ID, PriorityTag) - if err == nil { - prio1 = prio1iface.(int) - } - prio2iface, err := ps.pstore.Get(pinfo2.ID, PriorityTag) - if err == nil { - prio2 = prio2iface.(int) - } - return prio1 < prio2 -} - -func (ps *peerSort) Swap(i, j int) { - pinfo1 := ps.pinfos[i] - pinfo2 := ps.pinfos[j] - ps.pinfos[i] = pinfo2 - ps.pinfos[j] = pinfo1 -} - -// byString can sort multiaddresses by its string -type byString []ma.Multiaddr - -func (m byString) Len() int { return len(m) } -func (m byString) Swap(i, j int) { m[i], m[j] = m[j], m[i] } -func (m byString) Less(i, j int) bool { return m[i].String() < m[j].String() } diff --git a/packages/networking/ipfs-cluster/pstoremgr/pstoremgr_test.go b/packages/networking/ipfs-cluster/pstoremgr/pstoremgr_test.go deleted file mode 100644 index 9cc39cf..0000000 --- a/packages/networking/ipfs-cluster/pstoremgr/pstoremgr_test.go +++ /dev/null @@ -1,201 +0,0 @@ -package pstoremgr - -import ( - "context" - "os" - "testing" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/test" - - libp2p "github.com/libp2p/go-libp2p" - peer "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" -) - -func makeMgr(t *testing.T) *Manager { - h, err := libp2p.New() - if err != nil { - t.Fatal(err) - } - return New(context.Background(), h, "peerstore") -} - -func clean(pm *Manager) { - if path := pm.peerstorePath; path != "" { - os.RemoveAll(path) - } -} - -func testAddr(loc string, pid peer.ID) ma.Multiaddr { - m, _ := ma.NewMultiaddr(loc + "/p2p/" + pid.String()) - return m -} - -func TestManager(t *testing.T) { - pm := makeMgr(t) - defer clean(pm) - - loc := "/ip4/127.0.0.1/tcp/1234" - testAddr := testAddr(loc, test.PeerID1) - - _, err := pm.ImportPeer(testAddr, false, time.Minute) - if err != nil { - t.Fatal(err) - } - - peers := []peer.ID{test.PeerID1, pm.host.ID()} - pinfos := pm.PeerInfos(peers) - if len(pinfos) != 1 { - t.Fatal("expected 1 peerinfo") - } - - if pinfos[0].ID != test.PeerID1 { - t.Error("expected same peer as added") - } - - if len(pinfos[0].Addrs) != 1 { - t.Fatal("expected an address") - } - - if pinfos[0].Addrs[0].String() != loc { - t.Error("expected same address as added") - } - - pm.RmPeer(peers[0]) - pinfos = pm.PeerInfos(peers) - if len(pinfos) != 0 { - t.Fatal("expected 0 pinfos") - } -} - -func TestManagerDNS(t *testing.T) { - pm := 
makeMgr(t) - defer clean(pm) - - loc1 := "/ip4/127.0.0.1/tcp/1234" - testAddr1 := testAddr(loc1, test.PeerID1) - loc2 := "/dns4/localhost/tcp/1235" - testAddr2 := testAddr(loc2, test.PeerID1) - - err := pm.ImportPeers([]ma.Multiaddr{testAddr1, testAddr2}, false, time.Minute) - if err != nil { - t.Fatal(err) - } - - pinfos := pm.PeerInfos([]peer.ID{test.PeerID1}) - if len(pinfos) != 1 { - t.Fatal("expected 1 pinfo") - } - - if len(pinfos[0].Addrs) != 1 { - t.Error("expected a single address") - } - - if pinfos[0].Addrs[0].String() != "/dns4/localhost/tcp/1235" { - t.Error("expected the dns address") - } -} - -func TestPeerstore(t *testing.T) { - pm := makeMgr(t) - defer clean(pm) - - loc1 := "/ip4/127.0.0.1/tcp/1234" - testAddr1 := testAddr(loc1, test.PeerID1) - loc2 := "/ip4/127.0.0.1/tcp/1235" - testAddr2 := testAddr(loc2, test.PeerID1) - - err := pm.ImportPeers([]ma.Multiaddr{testAddr1, testAddr2}, false, time.Minute) - if err != nil { - t.Fatal(err) - } - - err = pm.SavePeerstoreForPeers([]peer.ID{test.PeerID1}) - if err != nil { - t.Error(err) - } - - pm2 := makeMgr(t) - defer clean(pm2) - - err = pm2.ImportPeersFromPeerstore(false, time.Minute) - if err != nil { - t.Fatal(err) - } - - pinfos := pm2.PeerInfos([]peer.ID{test.PeerID1}) - if len(pinfos) != 1 { - t.Fatal("expected 1 peer in the peerstore") - } - - if len(pinfos[0].Addrs) != 2 { - t.Error("expected 2 addresses") - } -} - -func TestPriority(t *testing.T) { - pm := makeMgr(t) - defer clean(pm) - - loc1 := "/ip4/127.0.0.1/tcp/1234" - testAddr1 := testAddr(loc1, test.PeerID1) - loc2 := "/ip4/127.0.0.2/tcp/1235" - testAddr2 := testAddr(loc2, test.PeerID2) - loc3 := "/ip4/127.0.0.3/tcp/1234" - testAddr3 := testAddr(loc3, test.PeerID3) - loc4 := "/ip4/127.0.0.4/tcp/1235" - testAddr4 := testAddr(loc4, test.PeerID4) - - err := pm.ImportPeers([]ma.Multiaddr{testAddr1, testAddr2, testAddr3, testAddr4}, false, time.Minute) - if err != nil { - t.Fatal(err) - } - - pinfos := pm.PeerInfos([]peer.ID{test.PeerID4, test.PeerID2, test.PeerID3, test.PeerID1}) - if len(pinfos) != 4 { - t.Fatal("expected 4 pinfos") - } - - if pinfos[0].ID != test.PeerID1 || - pinfos[1].ID != test.PeerID2 || - pinfos[2].ID != test.PeerID3 || - pinfos[3].ID != test.PeerID4 { - t.Error("wrong order of peerinfos") - } - - pm.SetPriority(test.PeerID1, 100) - - pinfos = pm.PeerInfos([]peer.ID{test.PeerID4, test.PeerID2, test.PeerID3, test.PeerID1}) - if len(pinfos) != 4 { - t.Fatal("expected 4 pinfos") - } - - if pinfos[3].ID != test.PeerID1 { - t.Fatal("PeerID1 should be last in the list") - } - - err = pm.SavePeerstoreForPeers([]peer.ID{test.PeerID4, test.PeerID2, test.PeerID3, test.PeerID1}) - if err != nil { - t.Error(err) - } - - pm2 := makeMgr(t) - defer clean(pm2) - - err = pm2.ImportPeersFromPeerstore(false, time.Minute) - if err != nil { - t.Fatal(err) - } - pinfos = pm2.PeerInfos([]peer.ID{test.PeerID4, test.PeerID2, test.PeerID3, test.PeerID1}) - if len(pinfos) != 4 { - t.Fatal("expected 4 pinfos") - } - - if pinfos[0].ID != test.PeerID2 || - pinfos[1].ID != test.PeerID3 || - pinfos[2].ID != test.PeerID4 || - pinfos[3].ID != test.PeerID1 { - t.Error("wrong order of peerinfos") - } -} diff --git a/packages/networking/ipfs-cluster/rpc_api.go b/packages/networking/ipfs-cluster/rpc_api.go deleted file mode 100644 index a008592..0000000 --- a/packages/networking/ipfs-cluster/rpc_api.go +++ /dev/null @@ -1,661 +0,0 @@ -package ipfscluster - -import ( - "context" - "errors" - - "github.com/ipfs-cluster/ipfs-cluster/api" - 
"github.com/ipfs-cluster/ipfs-cluster/state" - "github.com/ipfs-cluster/ipfs-cluster/version" - - peer "github.com/libp2p/go-libp2p/core/peer" - rpc "github.com/libp2p/go-libp2p-gorpc" - - ocgorpc "github.com/lanzafame/go-libp2p-ocgorpc" - "go.opencensus.io/trace" -) - -// RPC endpoint types w.r.t. trust level -const ( - // RPCClosed endpoints can only be called by the local cluster peer - // on itself. - RPCClosed RPCEndpointType = iota - // RPCTrusted endpoints can be called by "trusted" peers. - // It depends which peers are considered trusted. For example, - // in "raft" mode, Cluster will all peers as "trusted". In "crdt" mode, - // trusted peers are those specified in the configuration. - RPCTrusted - // RPCOpen endpoints can be called by any peer in the Cluster swarm. - RPCOpen -) - -// RPCEndpointType controls how access is granted to an RPC endpoint -type RPCEndpointType int - -const rpcStreamBufferSize = 1024 - -// A trick to find where something is used (i.e. Cluster.Pin): -// grep -R -B 3 '"Pin"' | grep -C 1 '"Cluster"'. -// This does not cover globalPinInfo*(...) broadcasts nor redirects to leader -// in Raft. - -// newRPCServer returns a new RPC Server for Cluster. -func newRPCServer(c *Cluster) (*rpc.Server, error) { - var s *rpc.Server - - authF := func(pid peer.ID, svc, method string) bool { - endpointType, ok := c.config.RPCPolicy[svc+"."+method] - if !ok { - return false - } - - switch endpointType { - case RPCTrusted: - return c.consensus.IsTrustedPeer(c.ctx, pid) - case RPCOpen: - return true - default: - return false - } - } - - if c.config.Tracing { - s = rpc.NewServer( - c.host, - version.RPCProtocol, - rpc.WithServerStatsHandler(&ocgorpc.ServerHandler{}), - rpc.WithAuthorizeFunc(authF), - rpc.WithStreamBufferSize(rpcStreamBufferSize), - ) - } else { - s = rpc.NewServer(c.host, version.RPCProtocol, rpc.WithAuthorizeFunc(authF)) - } - - cl := &ClusterRPCAPI{c} - err := s.RegisterName(RPCServiceID(cl), cl) - if err != nil { - return nil, err - } - pt := &PinTrackerRPCAPI{c.tracker} - err = s.RegisterName(RPCServiceID(pt), pt) - if err != nil { - return nil, err - } - ic := &IPFSConnectorRPCAPI{c.ipfs} - err = s.RegisterName(RPCServiceID(ic), ic) - if err != nil { - return nil, err - } - cons := &ConsensusRPCAPI{c.consensus} - err = s.RegisterName(RPCServiceID(cons), cons) - if err != nil { - return nil, err - } - pm := &PeerMonitorRPCAPI{mon: c.monitor, pid: c.id} - err = s.RegisterName(RPCServiceID(pm), pm) - if err != nil { - return nil, err - } - return s, nil -} - -// RPCServiceID returns the Service ID for the given RPCAPI object. -func RPCServiceID(rpcSvc interface{}) string { - switch rpcSvc.(type) { - case *ClusterRPCAPI: - return "Cluster" - case *PinTrackerRPCAPI: - return "PinTracker" - case *IPFSConnectorRPCAPI: - return "IPFSConnector" - case *ConsensusRPCAPI: - return "Consensus" - case *PeerMonitorRPCAPI: - return "PeerMonitor" - default: - return "" - } -} - -// ClusterRPCAPI is a go-libp2p-gorpc service which provides the internal peer -// API for the main cluster component. -type ClusterRPCAPI struct { - c *Cluster -} - -// PinTrackerRPCAPI is a go-libp2p-gorpc service which provides the internal -// peer API for the PinTracker component. -type PinTrackerRPCAPI struct { - tracker PinTracker -} - -// IPFSConnectorRPCAPI is a go-libp2p-gorpc service which provides the -// internal peer API for the IPFSConnector component. 
-type IPFSConnectorRPCAPI struct { - ipfs IPFSConnector -} - -// ConsensusRPCAPI is a go-libp2p-gorpc service which provides the -// internal peer API for the Consensus component. -type ConsensusRPCAPI struct { - cons Consensus -} - -// PeerMonitorRPCAPI is a go-libp2p-gorpc service which provides the -// internal peer API for the PeerMonitor component. -type PeerMonitorRPCAPI struct { - mon PeerMonitor - pid peer.ID -} - -/* - Cluster component methods -*/ - -// ID runs Cluster.ID() -func (rpcapi *ClusterRPCAPI) ID(ctx context.Context, in struct{}, out *api.ID) error { - id := rpcapi.c.ID(ctx) - *out = id - return nil -} - -// IDStream runs Cluster.ID() but in streaming form. -func (rpcapi *ClusterRPCAPI) IDStream(ctx context.Context, in <-chan struct{}, out chan<- api.ID) error { - defer close(out) - id := rpcapi.c.ID(ctx) - select { - case <-ctx.Done(): - return ctx.Err() - case out <- id: - } - return nil -} - -// Pin runs Cluster.pin(). -func (rpcapi *ClusterRPCAPI) Pin(ctx context.Context, in api.Pin, out *api.Pin) error { - // we do not call the Pin method directly since that method does not - // allow to pin other than regular DataType pins. The adder will - // however send Meta, Shard and ClusterDAG pins. - pin, _, err := rpcapi.c.pin(ctx, in, []peer.ID{}) - if err != nil { - return err - } - *out = pin - return nil -} - -// Unpin runs Cluster.Unpin(). -func (rpcapi *ClusterRPCAPI) Unpin(ctx context.Context, in api.Pin, out *api.Pin) error { - pin, err := rpcapi.c.Unpin(ctx, in.Cid) - if err != nil { - return err - } - *out = pin - return nil -} - -// PinPath resolves path into a cid and runs Cluster.Pin(). -func (rpcapi *ClusterRPCAPI) PinPath(ctx context.Context, in api.PinPath, out *api.Pin) error { - pin, err := rpcapi.c.PinPath(ctx, in.Path, in.PinOptions) - if err != nil { - return err - } - *out = pin - return nil -} - -// UnpinPath resolves path into a cid and runs Cluster.Unpin(). -func (rpcapi *ClusterRPCAPI) UnpinPath(ctx context.Context, in api.PinPath, out *api.Pin) error { - pin, err := rpcapi.c.UnpinPath(ctx, in.Path) - if err != nil { - return err - } - *out = pin - return nil -} - -// Pins runs Cluster.Pins(). -func (rpcapi *ClusterRPCAPI) Pins(ctx context.Context, in <-chan struct{}, out chan<- api.Pin) error { - return rpcapi.c.Pins(ctx, out) -} - -// PinGet runs Cluster.PinGet(). -func (rpcapi *ClusterRPCAPI) PinGet(ctx context.Context, in api.Cid, out *api.Pin) error { - pin, err := rpcapi.c.PinGet(ctx, in) - if err != nil { - return err - } - *out = pin - return nil -} - -// Version runs Cluster.Version(). -func (rpcapi *ClusterRPCAPI) Version(ctx context.Context, in struct{}, out *api.Version) error { - *out = api.Version{ - Version: rpcapi.c.Version(), - } - return nil -} - -// Peers runs Cluster.Peers(). -func (rpcapi *ClusterRPCAPI) Peers(ctx context.Context, in <-chan struct{}, out chan<- api.ID) error { - rpcapi.c.Peers(ctx, out) - return nil -} - -// PeersWithFilter runs Cluster.peersWithFilter(). -func (rpcapi *ClusterRPCAPI) PeersWithFilter(ctx context.Context, in <-chan []peer.ID, out chan<- api.ID) error { - peers := <-in - rpcapi.c.peersWithFilter(ctx, peers, out) - return nil -} - -// PeerAdd runs Cluster.PeerAdd(). -func (rpcapi *ClusterRPCAPI) PeerAdd(ctx context.Context, in peer.ID, out *api.ID) error { - id, err := rpcapi.c.PeerAdd(ctx, in) - if err != nil { - return err - } - *out = *id - return nil -} - -// ConnectGraph runs Cluster.GetConnectGraph(). 
-func (rpcapi *ClusterRPCAPI) ConnectGraph(ctx context.Context, in struct{}, out *api.ConnectGraph) error { - graph, err := rpcapi.c.ConnectGraph() - if err != nil { - return err - } - *out = graph - return nil -} - -// PeerRemove runs Cluster.PeerRm(). -func (rpcapi *ClusterRPCAPI) PeerRemove(ctx context.Context, in peer.ID, out *struct{}) error { - return rpcapi.c.PeerRemove(ctx, in) -} - -// Join runs Cluster.Join(). -func (rpcapi *ClusterRPCAPI) Join(ctx context.Context, in api.Multiaddr, out *struct{}) error { - return rpcapi.c.Join(ctx, in.Value()) -} - -// StatusAll runs Cluster.StatusAll(). -func (rpcapi *ClusterRPCAPI) StatusAll(ctx context.Context, in <-chan api.TrackerStatus, out chan<- api.GlobalPinInfo) error { - filter := <-in - return rpcapi.c.StatusAll(ctx, filter, out) -} - -// StatusAllLocal runs Cluster.StatusAllLocal(). -func (rpcapi *ClusterRPCAPI) StatusAllLocal(ctx context.Context, in <-chan api.TrackerStatus, out chan<- api.PinInfo) error { - filter := <-in - return rpcapi.c.StatusAllLocal(ctx, filter, out) -} - -// Status runs Cluster.Status(). -func (rpcapi *ClusterRPCAPI) Status(ctx context.Context, in api.Cid, out *api.GlobalPinInfo) error { - pinfo, err := rpcapi.c.Status(ctx, in) - if err != nil { - return err - } - *out = pinfo - return nil -} - -// StatusLocal runs Cluster.StatusLocal(). -func (rpcapi *ClusterRPCAPI) StatusLocal(ctx context.Context, in api.Cid, out *api.PinInfo) error { - pinfo := rpcapi.c.StatusLocal(ctx, in) - *out = pinfo - return nil -} - -// RecoverAll runs Cluster.RecoverAll(). -func (rpcapi *ClusterRPCAPI) RecoverAll(ctx context.Context, in <-chan struct{}, out chan<- api.GlobalPinInfo) error { - return rpcapi.c.RecoverAll(ctx, out) -} - -// RecoverAllLocal runs Cluster.RecoverAllLocal(). -func (rpcapi *ClusterRPCAPI) RecoverAllLocal(ctx context.Context, in <-chan struct{}, out chan<- api.PinInfo) error { - return rpcapi.c.RecoverAllLocal(ctx, out) -} - -// Recover runs Cluster.Recover(). -func (rpcapi *ClusterRPCAPI) Recover(ctx context.Context, in api.Cid, out *api.GlobalPinInfo) error { - pinfo, err := rpcapi.c.Recover(ctx, in) - if err != nil { - return err - } - *out = pinfo - return nil -} - -// RecoverLocal runs Cluster.RecoverLocal(). -func (rpcapi *ClusterRPCAPI) RecoverLocal(ctx context.Context, in api.Cid, out *api.PinInfo) error { - pinfo, err := rpcapi.c.RecoverLocal(ctx, in) - if err != nil { - return err - } - *out = pinfo - return nil -} - -// BlockAllocate returns allocations for blocks. This is used in the adders. -// It's different from pin allocations when ReplicationFactor < 0. -func (rpcapi *ClusterRPCAPI) BlockAllocate(ctx context.Context, in api.Pin, out *[]peer.ID) error { - if rpcapi.c.config.FollowerMode { - return errFollowerMode - } - - // Allocating for a existing pin. Usually the adder calls this with - // cid.Undef. - existing, err := rpcapi.c.PinGet(ctx, in.Cid) - if err != nil && err != state.ErrNotFound { - return err - } - - in, err = rpcapi.c.setupPin(ctx, in, existing) - if err != nil { - return err - } - - // Return the current peer list. - if in.ReplicationFactorMin < 0 { - // Returned metrics are Valid and belong to current - // Cluster peers. 
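-		// With ReplicationFactorMin < 0 ("pin everywhere"), every
-		// peer currently emitting ping metrics is an allocation
-		// target.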
- metrics := rpcapi.c.monitor.LatestMetrics(ctx, pingMetricName) - peers := make([]peer.ID, len(metrics)) - for i, m := range metrics { - peers[i] = m.Peer - } - - *out = peers - return nil - } - - allocs, err := rpcapi.c.allocate( - ctx, - in.Cid, - existing, - in.ReplicationFactorMin, - in.ReplicationFactorMax, - []peer.ID{}, // blacklist - in.UserAllocations, // prio list - ) - - if err != nil { - return err - } - - *out = allocs - return nil -} - -// RepoGC performs garbage collection sweep on all peers' repos. -func (rpcapi *ClusterRPCAPI) RepoGC(ctx context.Context, in struct{}, out *api.GlobalRepoGC) error { - res, err := rpcapi.c.RepoGC(ctx) - if err != nil { - return err - } - *out = res - return nil -} - -// RepoGCLocal performs garbage collection sweep only on the local peer's IPFS daemon. -func (rpcapi *ClusterRPCAPI) RepoGCLocal(ctx context.Context, in struct{}, out *api.RepoGC) error { - res, err := rpcapi.c.RepoGCLocal(ctx) - if err != nil { - return err - } - *out = res - return nil -} - -// SendInformerMetrics runs Cluster.sendInformerMetric(). -func (rpcapi *ClusterRPCAPI) SendInformerMetrics(ctx context.Context, in struct{}, out *struct{}) error { - return rpcapi.c.sendInformersMetrics(ctx) -} - -// SendInformersMetrics runs Cluster.sendInformerMetric() on all informers. -func (rpcapi *ClusterRPCAPI) SendInformersMetrics(ctx context.Context, in struct{}, out *struct{}) error { - return rpcapi.c.sendInformersMetrics(ctx) -} - -// Alerts runs Cluster.Alerts(). -func (rpcapi *ClusterRPCAPI) Alerts(ctx context.Context, in struct{}, out *[]api.Alert) error { - alerts := rpcapi.c.Alerts() - *out = alerts - return nil -} - -// IPFSID returns the current cached IPFS ID for a peer. -func (rpcapi *ClusterRPCAPI) IPFSID(ctx context.Context, in peer.ID, out *api.IPFSID) error { - if in == "" { - in = rpcapi.c.host.ID() - } - pingVal := pingValueFromMetric(rpcapi.c.monitor.LatestForPeer(ctx, pingMetricName, in)) - i := api.IPFSID{ - ID: pingVal.IPFSID, - Addresses: pingVal.IPFSAddresses, - } - *out = i - return nil -} - -/* - Tracker component methods -*/ - -// Track runs PinTracker.Track(). -func (rpcapi *PinTrackerRPCAPI) Track(ctx context.Context, in api.Pin, out *struct{}) error { - ctx, span := trace.StartSpan(ctx, "rpc/tracker/Track") - defer span.End() - return rpcapi.tracker.Track(ctx, in) -} - -// Untrack runs PinTracker.Untrack(). -func (rpcapi *PinTrackerRPCAPI) Untrack(ctx context.Context, in api.Pin, out *struct{}) error { - ctx, span := trace.StartSpan(ctx, "rpc/tracker/Untrack") - defer span.End() - return rpcapi.tracker.Untrack(ctx, in.Cid) -} - -// StatusAll runs PinTracker.StatusAll(). -func (rpcapi *PinTrackerRPCAPI) StatusAll(ctx context.Context, in <-chan api.TrackerStatus, out chan<- api.PinInfo) error { - ctx, span := trace.StartSpan(ctx, "rpc/tracker/StatusAll") - defer span.End() - - select { - case <-ctx.Done(): - close(out) - return ctx.Err() - case filter := <-in: - return rpcapi.tracker.StatusAll(ctx, filter, out) - } -} - -// Status runs PinTracker.Status(). 
-func (rpcapi *PinTrackerRPCAPI) Status(ctx context.Context, in api.Cid, out *api.PinInfo) error { - ctx, span := trace.StartSpan(ctx, "rpc/tracker/Status") - defer span.End() - pinfo := rpcapi.tracker.Status(ctx, in) - *out = pinfo - return nil -} - -// RecoverAll runs PinTracker.RecoverAll().f -func (rpcapi *PinTrackerRPCAPI) RecoverAll(ctx context.Context, in <-chan struct{}, out chan<- api.PinInfo) error { - ctx, span := trace.StartSpan(ctx, "rpc/tracker/RecoverAll") - defer span.End() - return rpcapi.tracker.RecoverAll(ctx, out) -} - -// Recover runs PinTracker.Recover(). -func (rpcapi *PinTrackerRPCAPI) Recover(ctx context.Context, in api.Cid, out *api.PinInfo) error { - ctx, span := trace.StartSpan(ctx, "rpc/tracker/Recover") - defer span.End() - pinfo, err := rpcapi.tracker.Recover(ctx, in) - *out = pinfo - return err -} - -// PinQueueSize runs PinTracker.PinQueueSize(). -func (rpcapi *PinTrackerRPCAPI) PinQueueSize(ctx context.Context, in struct{}, out *int64) error { - size, err := rpcapi.tracker.PinQueueSize(ctx) - *out = size - return err -} - -/* - IPFS Connector component methods -*/ - -// Pin runs IPFSConnector.Pin(). -func (rpcapi *IPFSConnectorRPCAPI) Pin(ctx context.Context, in api.Pin, out *struct{}) error { - ctx, span := trace.StartSpan(ctx, "rpc/ipfsconn/IPFSPin") - defer span.End() - return rpcapi.ipfs.Pin(ctx, in) -} - -// Unpin runs IPFSConnector.Unpin(). -func (rpcapi *IPFSConnectorRPCAPI) Unpin(ctx context.Context, in api.Pin, out *struct{}) error { - return rpcapi.ipfs.Unpin(ctx, in.Cid) -} - -// PinLsCid runs IPFSConnector.PinLsCid(). -func (rpcapi *IPFSConnectorRPCAPI) PinLsCid(ctx context.Context, in api.Pin, out *api.IPFSPinStatus) error { - b, err := rpcapi.ipfs.PinLsCid(ctx, in) - if err != nil { - return err - } - *out = b - return nil -} - -// PinLs runs IPFSConnector.PinLs(). -func (rpcapi *IPFSConnectorRPCAPI) PinLs(ctx context.Context, in <-chan []string, out chan<- api.IPFSPinInfo) error { - select { - case <-ctx.Done(): - close(out) - return ctx.Err() - case pinTypes, ok := <-in: - if !ok { - close(out) - return errors.New("no pinType provided for pin/ls") - } - return rpcapi.ipfs.PinLs(ctx, pinTypes, out) - } -} - -// ConfigKey runs IPFSConnector.ConfigKey(). -func (rpcapi *IPFSConnectorRPCAPI) ConfigKey(ctx context.Context, in string, out *interface{}) error { - res, err := rpcapi.ipfs.ConfigKey(in) - if err != nil { - return err - } - *out = res - return nil -} - -// RepoStat runs IPFSConnector.RepoStat(). -func (rpcapi *IPFSConnectorRPCAPI) RepoStat(ctx context.Context, in struct{}, out *api.IPFSRepoStat) error { - res, err := rpcapi.ipfs.RepoStat(ctx) - if err != nil { - return err - } - *out = res - return err -} - -// SwarmPeers runs IPFSConnector.SwarmPeers(). -func (rpcapi *IPFSConnectorRPCAPI) SwarmPeers(ctx context.Context, in struct{}, out *[]peer.ID) error { - res, err := rpcapi.ipfs.SwarmPeers(ctx) - if err != nil { - return err - } - *out = res - return nil -} - -// BlockStream runs IPFSConnector.BlockStream(). -func (rpcapi *IPFSConnectorRPCAPI) BlockStream(ctx context.Context, in <-chan api.NodeWithMeta, out chan<- struct{}) error { - defer close(out) // very important to do at the end - return rpcapi.ipfs.BlockStream(ctx, in) -} - -// BlockGet runs IPFSConnector.BlockGet(). -func (rpcapi *IPFSConnectorRPCAPI) BlockGet(ctx context.Context, in api.Cid, out *[]byte) error { - res, err := rpcapi.ipfs.BlockGet(ctx, in) - if err != nil { - return err - } - *out = res - return nil -} - -// Resolve runs IPFSConnector.Resolve(). 
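-// The input is an IPFS path such as "/ipfs/<cid>/sub/path"; the output is
-// the CID it resolves to.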
-func (rpcapi *IPFSConnectorRPCAPI) Resolve(ctx context.Context, in string, out *api.Cid) error { - c, err := rpcapi.ipfs.Resolve(ctx, in) - if err != nil { - return err - } - *out = c - return nil -} - -/* - Consensus component methods -*/ - -// LogPin runs Consensus.LogPin(). -func (rpcapi *ConsensusRPCAPI) LogPin(ctx context.Context, in api.Pin, out *struct{}) error { - ctx, span := trace.StartSpan(ctx, "rpc/consensus/LogPin") - defer span.End() - return rpcapi.cons.LogPin(ctx, in) -} - -// LogUnpin runs Consensus.LogUnpin(). -func (rpcapi *ConsensusRPCAPI) LogUnpin(ctx context.Context, in api.Pin, out *struct{}) error { - ctx, span := trace.StartSpan(ctx, "rpc/consensus/LogUnpin") - defer span.End() - return rpcapi.cons.LogUnpin(ctx, in) -} - -// AddPeer runs Consensus.AddPeer(). -func (rpcapi *ConsensusRPCAPI) AddPeer(ctx context.Context, in peer.ID, out *struct{}) error { - ctx, span := trace.StartSpan(ctx, "rpc/consensus/AddPeer") - defer span.End() - return rpcapi.cons.AddPeer(ctx, in) -} - -// RmPeer runs Consensus.RmPeer(). -func (rpcapi *ConsensusRPCAPI) RmPeer(ctx context.Context, in peer.ID, out *struct{}) error { - ctx, span := trace.StartSpan(ctx, "rpc/consensus/RmPeer") - defer span.End() - return rpcapi.cons.RmPeer(ctx, in) -} - -// Peers runs Consensus.Peers(). -func (rpcapi *ConsensusRPCAPI) Peers(ctx context.Context, in struct{}, out *[]peer.ID) error { - peers, err := rpcapi.cons.Peers(ctx) - if err != nil { - return err - } - *out = peers - return nil -} - -/* - PeerMonitor -*/ - -// LatestMetrics runs PeerMonitor.LatestMetrics(). -func (rpcapi *PeerMonitorRPCAPI) LatestMetrics(ctx context.Context, in string, out *[]api.Metric) error { - *out = rpcapi.mon.LatestMetrics(ctx, in) - return nil -} - -// MetricNames runs PeerMonitor.MetricNames(). -func (rpcapi *PeerMonitorRPCAPI) MetricNames(ctx context.Context, in struct{}, out *[]string) error { - *out = rpcapi.mon.MetricNames(ctx) - return nil -} diff --git a/packages/networking/ipfs-cluster/rpc_policy.go b/packages/networking/ipfs-cluster/rpc_policy.go deleted file mode 100644 index cb93594..0000000 --- a/packages/networking/ipfs-cluster/rpc_policy.go +++ /dev/null @@ -1,72 +0,0 @@ -package ipfscluster - -// This file can be generated with rpcutil/policygen. - -// DefaultRPCPolicy associates all rpc endpoints offered by cluster peers to an -// endpoint type. See rpcutil/policygen.go as a quick way to generate this -// without missing any endpoint. 
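A note on the three values used in the map below, since the legend lives elsewhere in the codebase: as documented upstream, an RPCOpen endpoint may be called by any peer, an RPCTrusted endpoint only by peers the receiver considers trusted, and an RPCClosed endpoint only by the local peer itself. A minimal sketch of how a policy map like this one could gate an incoming call; the authorize helper is hypothetical, not the library's actual authorizer:

package main

import "fmt"

// RPCEndpointType mirrors the three levels used by DefaultRPCPolicy below.
type RPCEndpointType int

const (
    RPCClosed  RPCEndpointType = iota // callable only by the local peer
    RPCTrusted                        // callable by the local peer and trusted peers
    RPCOpen                           // callable by any peer
)

// authorize is a hypothetical check: given who is calling and the policy
// for the endpoint, decide whether the call may proceed.
func authorize(policy map[string]RPCEndpointType, endpoint string, local, trusted bool) bool {
    switch policy[endpoint] { // missing endpoints get the zero value, RPCClosed
    case RPCOpen:
        return true
    case RPCTrusted:
        return local || trusted
    default: // RPCClosed
        return local
    }
}

func main() {
    policy := map[string]RPCEndpointType{
        "Cluster.ID":     RPCOpen,
        "Cluster.Peers":  RPCTrusted,
        "Cluster.Status": RPCClosed,
    }
    fmt.Println(authorize(policy, "Cluster.ID", false, false))    // true: open to anyone
    fmt.Println(authorize(policy, "Cluster.Peers", false, true))  // true: caller is trusted
    fmt.Println(authorize(policy, "Cluster.Status", false, true)) // false: local-only
}

The full deleted policy map follows.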
-var DefaultRPCPolicy = map[string]RPCEndpointType{ - // Cluster methods - "Cluster.Alerts": RPCClosed, - "Cluster.BlockAllocate": RPCClosed, - "Cluster.ConnectGraph": RPCClosed, - "Cluster.ID": RPCOpen, - "Cluster.IDStream": RPCOpen, - "Cluster.IPFSID": RPCClosed, - "Cluster.Join": RPCClosed, - "Cluster.PeerAdd": RPCOpen, // Used by Join() - "Cluster.PeerRemove": RPCTrusted, - "Cluster.Peers": RPCTrusted, // Used by ConnectGraph() - "Cluster.PeersWithFilter": RPCClosed, - "Cluster.Pin": RPCClosed, - "Cluster.PinGet": RPCClosed, - "Cluster.PinPath": RPCClosed, - "Cluster.Pins": RPCClosed, // Used in stateless tracker, ipfsproxy, restapi - "Cluster.Recover": RPCClosed, - "Cluster.RecoverAll": RPCClosed, - "Cluster.RecoverAllLocal": RPCTrusted, - "Cluster.RecoverLocal": RPCTrusted, - "Cluster.RepoGC": RPCClosed, - "Cluster.RepoGCLocal": RPCTrusted, - "Cluster.SendInformerMetrics": RPCClosed, - "Cluster.SendInformersMetrics": RPCClosed, - "Cluster.Status": RPCClosed, - "Cluster.StatusAll": RPCClosed, - "Cluster.StatusAllLocal": RPCClosed, - "Cluster.StatusLocal": RPCClosed, - "Cluster.Unpin": RPCClosed, - "Cluster.UnpinPath": RPCClosed, - "Cluster.Version": RPCOpen, - - // PinTracker methods - "PinTracker.PinQueueSize": RPCClosed, - "PinTracker.Recover": RPCTrusted, // Called in broadcast from Recover() - "PinTracker.RecoverAll": RPCClosed, // Broadcast in RecoverAll unimplemented - "PinTracker.Status": RPCTrusted, - "PinTracker.StatusAll": RPCTrusted, - "PinTracker.Track": RPCClosed, - "PinTracker.Untrack": RPCClosed, - - // IPFSConnector methods - "IPFSConnector.BlockGet": RPCClosed, - "IPFSConnector.BlockStream": RPCTrusted, // Called by adders - "IPFSConnector.ConfigKey": RPCClosed, - "IPFSConnector.Pin": RPCClosed, - "IPFSConnector.PinLs": RPCClosed, - "IPFSConnector.PinLsCid": RPCClosed, - "IPFSConnector.RepoStat": RPCTrusted, // Called in broadcast from proxy/repo/stat - "IPFSConnector.Resolve": RPCClosed, - "IPFSConnector.SwarmPeers": RPCTrusted, // Called in ConnectGraph - "IPFSConnector.Unpin": RPCClosed, - - // Consensus methods - "Consensus.AddPeer": RPCTrusted, // Called by Raft/redirect to leader - "Consensus.LogPin": RPCTrusted, // Called by Raft/redirect to leader - "Consensus.LogUnpin": RPCTrusted, // Called by Raft/redirect to leader - "Consensus.Peers": RPCClosed, - "Consensus.RmPeer": RPCTrusted, // Called by Raft/redirect to leader - - // PeerMonitor methods - "PeerMonitor.LatestMetrics": RPCClosed, - "PeerMonitor.MetricNames": RPCClosed, -} diff --git a/packages/networking/ipfs-cluster/rpcutil/policygen/policygen.go b/packages/networking/ipfs-cluster/rpcutil/policygen/policygen.go deleted file mode 100644 index 6f0944d..0000000 --- a/packages/networking/ipfs-cluster/rpcutil/policygen/policygen.go +++ /dev/null @@ -1,101 +0,0 @@ -package main - -import ( - "fmt" - "go/format" - "os" - "reflect" - "strings" - - cluster "github.com/ipfs-cluster/ipfs-cluster" -) - -func rpcTypeStr(t cluster.RPCEndpointType) string { - switch t { - case cluster.RPCClosed: - return "RPCClosed" - case cluster.RPCTrusted: - return "RPCTrusted" - case cluster.RPCOpen: - return "RPCOpen" - default: - return "ERROR" - } -} - -var comments = map[string]string{ - "Cluster.PeerAdd": "Used by Join()", - "Cluster.Peers": "Used by ConnectGraph()", - "Cluster.Pins": "Used in stateless tracker, ipfsproxy, restapi", - "PinTracker.Recover": "Called in broadcast from Recover()", - "PinTracker.RecoverAll": "Broadcast in RecoverAll unimplemented", - "Pintracker.Status": "Called in broadcast from Status()", - 
"Pintracker.StatusAll": "Called in broadcast from StatusAll()", - "IPFSConnector.BlockStream": "Called by adders", - "IPFSConnector.RepoStat": "Called in broadcast from proxy/repo/stat", - "IPFSConnector.SwarmPeers": "Called in ConnectGraph", - "Consensus.AddPeer": "Called by Raft/redirect to leader", - "Consensus.LogPin": "Called by Raft/redirect to leader", - "Consensus.LogUnpin": "Called by Raft/redirect to leader", - "Consensus.RmPeer": "Called by Raft/redirect to leader", -} - -func main() { - rpcComponents := []interface{}{ - &cluster.ClusterRPCAPI{}, - &cluster.PinTrackerRPCAPI{}, - &cluster.IPFSConnectorRPCAPI{}, - &cluster.ConsensusRPCAPI{}, - &cluster.PeerMonitorRPCAPI{}, - } - - fmt.Fprintln(os.Stderr, ` -// The below generated policy keeps the endpoint types -// from the existing one, marking new endpoints as NEW. Redirect stdout -// into ../../rpc_policy.go and set the NEW endpoints to their correct -// type (make sure you have recompiled this binary with the current version -// of the code). If you are redirecting already, and things went fine, you -// should only see this message. -============================================================================`) - fmt.Fprintln(os.Stderr) - - var rpcPolicyDotGo strings.Builder - - rpcPolicyDotGo.WriteString("package ipfscluster\n\n") - rpcPolicyDotGo.WriteString("// This file can be generated with rpcutil/policygen.\n\n") - rpcPolicyDotGo.WriteString(` -// DefaultRPCPolicy associates all rpc endpoints offered by cluster peers to an -// endpoint type. See rpcutil/policygen.go as a quick way to generate this -// without missing any endpoint.`) - rpcPolicyDotGo.WriteString("\nvar DefaultRPCPolicy = map[string]RPCEndpointType{\n") - - for _, c := range rpcComponents { - t := reflect.TypeOf(c) - - rpcPolicyDotGo.WriteString("// " + cluster.RPCServiceID(c) + " methods\n") - for i := 0; i < t.NumMethod(); i++ { - method := t.Method(i) - name := cluster.RPCServiceID(c) + "." + method.Name - rpcT, ok := cluster.DefaultRPCPolicy[name] - rpcTStr := "NEW" - if ok { - rpcTStr = rpcTypeStr(rpcT) - } - comment, ok := comments[name] - if ok { - comment = "// " + comment - } - - fmt.Fprintf(&rpcPolicyDotGo, "\"%s\": %s, %s\n", name, rpcTStr, comment) - } - rpcPolicyDotGo.WriteString("\n") - } - - rpcPolicyDotGo.WriteString("}\n") - src, err := format.Source([]byte(rpcPolicyDotGo.String())) - if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - fmt.Println(string(src)) -} diff --git a/packages/networking/ipfs-cluster/rpcutil/rpcutil.go b/packages/networking/ipfs-cluster/rpcutil/rpcutil.go deleted file mode 100644 index dababc1..0000000 --- a/packages/networking/ipfs-cluster/rpcutil/rpcutil.go +++ /dev/null @@ -1,162 +0,0 @@ -// Package rpcutil provides utility methods to perform go-libp2p-gorpc calls, -// particularly gorpc.MultiCall(). -package rpcutil - -import ( - "context" - "errors" - "fmt" - "time" - - "github.com/ipfs-cluster/ipfs-cluster/api" -) - -// CtxsWithTimeout returns n contexts, derived from the given parent -// using the given timeout. -func CtxsWithTimeout( - parent context.Context, - n int, - timeout time.Duration, -) ([]context.Context, []context.CancelFunc) { - - ctxs := make([]context.Context, n) - cancels := make([]context.CancelFunc, n) - for i := 0; i < n; i++ { - ctx, cancel := context.WithTimeout(parent, timeout) - ctxs[i] = ctx - cancels[i] = cancel - } - return ctxs, cancels -} - -// CtxsWithCancel returns n cancellable contexts, derived from the given parent. 
-func CtxsWithCancel( - parent context.Context, - n int, -) ([]context.Context, []context.CancelFunc) { - - ctxs := make([]context.Context, n) - cancels := make([]context.CancelFunc, n) - for i := 0; i < n; i++ { - ctx, cancel := context.WithCancel(parent) - ctxs[i] = ctx - cancels[i] = cancel - } - return ctxs, cancels -} - -// MultiCancel calls all the provided CancelFuncs. It -// is useful with "defer MultiCancel()" -func MultiCancel(cancels []context.CancelFunc) { - for _, cancel := range cancels { - cancel() - } -} - -// The copy functions below are used in calls to Cluster.multiRPC() - -// // CopyPIDsToIfaces converts a peer.ID slice to an empty interface -// // slice using pointers to each element of the original slice. -// // Useful to handle gorpc.MultiCall() replies. -// func CopyPIDsToIfaces(in []peer.ID) []interface{} { -// ifaces := make([]interface{}, len(in)) -// for i := range in { -// ifaces[i] = &in[i] -// } -// return ifaces -// } - -// CopyIDsToIfaces converts an api.ID slice to an empty interface -// slice using pointers to each element of the original slice. -// Useful to handle gorpc.MultiCall() replies. -func CopyIDsToIfaces(in []api.ID) []interface{} { - ifaces := make([]interface{}, len(in)) - for i := range in { - in[i] = api.ID{} - ifaces[i] = &(in[i]) - } - return ifaces -} - -// CopyIDSliceToIfaces converts an api.ID slice of slices -// to an empty interface slice using pointers to each element of the -// original slice. Useful to handle gorpc.MultiCall() replies. -func CopyIDSliceToIfaces(in [][]api.ID) []interface{} { - ifaces := make([]interface{}, len(in)) - for i := range in { - ifaces[i] = &(in[i]) - } - return ifaces -} - -// CopyPinInfoToIfaces converts an api.PinInfo slice to -// an empty interface slice using pointers to each element of -// the original slice. Useful to handle gorpc.MultiCall() replies. -func CopyPinInfoToIfaces(in []api.PinInfo) []interface{} { - ifaces := make([]interface{}, len(in)) - for i := range in { - in[i] = api.PinInfo{} - ifaces[i] = &(in[i]) - } - return ifaces -} - -// CopyPinInfoSliceToIfaces converts an api.PinInfo slice of slices -// to an empty interface slice using pointers to each element of the original -// slice. Useful to handle gorpc.MultiCall() replies. -func CopyPinInfoSliceToIfaces(in [][]api.PinInfo) []interface{} { - ifaces := make([]interface{}, len(in)) - for i := range in { - ifaces[i] = &(in[i]) - } - return ifaces -} - -// CopyRepoGCSliceToIfaces converts an api.RepoGC slice to -// an empty interface slice using pointers to each element of -// the original slice. Useful to handle gorpc.MultiCall() replies. -func CopyRepoGCSliceToIfaces(in []api.RepoGC) []interface{} { - ifaces := make([]interface{}, len(in)) - for i := range in { - in[i] = api.RepoGC{} - ifaces[i] = &(in[i]) - } - return ifaces -} - -// CopyEmptyStructToIfaces converts an empty struct slice to an empty interface -// slice using pointers to each element of the original slice. -// Useful to handle gorpc.MultiCall() replies. -func CopyEmptyStructToIfaces(in []struct{}) []interface{} { - ifaces := make([]interface{}, len(in)) - for i := range in { - ifaces[i] = &(in[i]) - } - return ifaces -} - -// RPCDiscardReplies returns a []interface{} slice made from a []struct{} -// slice of the given length. Useful for RPC methods which have no response -// types (so they use empty structs).
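All of the Copy*ToIfaces helpers above exist for one mechanical reason: gorpc.MultiCall takes its replies as a []interface{} of pointers, one slot per destination peer, so each response can be decoded in place into a typed backing slice. A self-contained sketch of that mechanic, with a fake multiCall standing in for the real gorpc client and an ID type standing in for api.ID:

package main

import "fmt"

type ID struct{ Peername string }

// copyIDsToIfaces mirrors rpcutil.CopyIDsToIfaces: reset each element and
// collect a pointer to it, so a caller can write replies in place.
func copyIDsToIfaces(in []ID) []interface{} {
    ifaces := make([]interface{}, len(in))
    for i := range in {
        in[i] = ID{}
        ifaces[i] = &in[i]
    }
    return ifaces
}

// multiCall is a stand-in for gorpc.MultiCall: it fills each reply pointer
// as if one response had arrived per destination peer.
func multiCall(dests []string, replies []interface{}) []error {
    errs := make([]error, len(dests))
    for i, d := range dests {
        if p, ok := replies[i].(*ID); ok {
            p.Peername = "reply-from-" + d
        }
    }
    return errs
}

func main() {
    dests := []string{"peerA", "peerB"}
    ids := make([]ID, len(dests))
    errs := multiCall(dests, copyIDsToIfaces(ids))
    for i := range ids {
        // The replies landed in the typed backing slice, not in copies.
        fmt.Println(ids[i].Peername, errs[i])
    }
}

RPCDiscardReplies, whose removed body follows, is the degenerate case: pointers into a throwaway []struct{} for calls whose replies nobody reads.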
-func RPCDiscardReplies(n int) []interface{} { - replies := make([]struct{}, n) - return CopyEmptyStructToIfaces(replies) -} - -// CheckErrs returns nil if all the errors in a slice are nil, otherwise -// it returns a single error formed by joining the error messages existing -// in the slice with a line-break. -func CheckErrs(errs []error) error { - errMsg := "" - - for _, e := range errs { - if e != nil { - errMsg += fmt.Sprintf("%s\n", e.Error()) - } - } - - if len(errMsg) > 0 { - return errors.New(errMsg) - } - return nil -} diff --git a/packages/networking/ipfs-cluster/sharness/config/basic_auth/identity.json b/packages/networking/ipfs-cluster/sharness/config/basic_auth/identity.json deleted file mode 100644 index 9dd7207..0000000 --- a/packages/networking/ipfs-cluster/sharness/config/basic_auth/identity.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "id": "QmdEtBsfumeH2V6dnx1fgn8zuW7XYjWdgJF4NEYpEBcTsg", - "private_key": "CAASqAkwggSkAgEAAoIBAQC/ZmfWDbwyI0nJdRxgHcTdEaBFQo8sky9E+OOvtwZa5WKoLdHyHOLWxCAdpIHUBbhxz5rkMEWLwPI6ykqLIJToMPO8lJbKVzphOjv4JwpiAPdmeSiYMKLjx5V8MpqU2rwj/Uf3sRL8Gg9/Tei3PZ8cftxN1rkQQeeaOtk0CBxUFZSHEsyut1fbgIeL7TAY+4vCmXW0DBr4wh9fnoES/YivOvSiN9rScgWg6N65LfkI78hzaOJ4Nok2S4vYFCxjTAI9NWFUbhP5eJIFzTU+bZuQZxOn2qsoyw8pNZwuF+JClA/RcgBcCvVZcDH2ueVq/zT++bGCN+EWsAEdvJqJ5bsjAgMBAAECggEAaGDUZ6t94mnUJ4UyQEh7v4OJP7wYkFqEAL0qjfzl/lPyBX1XbQ3Ltwul6AR6uMGV4JszARZCFwDWGLGRDWZrTmTDxyfRQ+9l6vfzFFVWGDQmtz+Dn9uGOWnyX5TJMDxJNec+hBmRHOKpaOd37dYxGz0jr19V9UO7piRJp1J1AHUCypUGv5x1IekioSCu5fEyc7dyWwnmITHBjD08st+bCcjrIUFeXSdJKC8SymYeXdaVE3xH3zVEISKnrfT7bhuKZY1iibZIlXbVLNpyX36LkYJOiCqsMum3u70LH0VvTypkqiDbD4S6qfJ4vvUakpmKpOPutikiP7jkSP+AkaO0AQKBgQDkTuhnDK6+Y0a/HgpHJisji0coO+g2gsIszargHk8nNY2AB8t+EUn7C+Qu8cmrem5V8EXcdxS6z7iAXpJmY1Xepnsz+JP7Q91Lgt3OoqK5EybzUXXKkmNCD65n70Xxn2fEFzm6+GJP3c/HymlDKU2KBCYIyuUeaREjT0Fu3v6tgQKBgQDWnXppJwn4LJHhzFOCeO4zomDJDbLTZCabdKZoFP9r+vtEHAnclDDKx4AYbomSqgERe+DX6HR/tPHRVizP63RYPf7al2mJmPzt1nTkoc1/q5hQoD+oE154dADsW1pUp7AQjwCtys4iq5S0qAwIDpuY8M8bOHwZ+QmBvHYAigJCowKBgQC3HH6TX/2rH463bE2MARXqXSPGJj45sigwrQfW1xhe9zm1LQtN4mn2mvP5nt1D1l82OA6gIzYSGtX8x10eF5/ggqAf78goZ6bOkHh76b8fNzgvQO97eGt5qYAVRjhP8azU/lfEGMEpE1s5/6LrRe41utwSg0C+YkBnlIKDfQDAgQKBgDoBTCF5hK9H1JHzuKpt5uubuo78ndWWnvyrNYKyEirsJddNwLiWcO2NqChyT8qNGkbQdX/Fex89F5KduPTlTYfAEc6g18xxxgK+UM+uj60vArbf6PSTb5gculcnha2VuPdwvx050Cb8uu9s7/uJfzKB+2f/B0O51ID1H+ubYWsDAoGBAKrwGKHyqFTHSPg3XuRA1FgDAoOsfzP9ZJvMEXUWyu/VxjNt+0mRlyGeZ5qb9UZG+K/In4FbC/ux2P/PucCUIbgy/XGPtPXVavMwNbx0MquAcU0FihKXP0CUpi8zwiYc42MF7n/SztQnismxigBMSuJEDurcXXazjfcSRTypduNn" -} \ No newline at end of file diff --git a/packages/networking/ipfs-cluster/sharness/config/basic_auth/service.json b/packages/networking/ipfs-cluster/sharness/config/basic_auth/service.json deleted file mode 100644 index 2bd9f07..0000000 --- a/packages/networking/ipfs-cluster/sharness/config/basic_auth/service.json +++ /dev/null @@ -1,95 +0,0 @@ -{ - "cluster": { - "peername": "testname", - "secret": "84399cd0be811c2ca372d6ca473ffd73c09034f991c5e306fe9ada6c5fcfb641", - "leave_on_shutdown": false, - "listen_multiaddress": [ - "/ip4/0.0.0.0/tcp/9096", - "/ip6/::/tcp/9096" - ], - "state_sync_interval": "1m0s", - "replication_factor": -1, - "monitor_ping_interval": "15s" - }, - "consensus": { - "raft": { - "heartbeat_timeout": "1s", - "election_timeout": "1s", - "commit_timeout": "50ms", - "max_append_entries": 64, - "trailing_logs": 10240, - "snapshot_interval": "2m0s", - "snapshot_threshold": 8192, - "leader_lease_timeout": "500ms" - } - }, - "api": { - "ipfsproxy": { - "listen_multiaddress": 
"/ip4/127.0.0.1/tcp/9095", - "node_multiaddress": "/ip4/127.0.0.1/tcp/5001", - "read_timeout": "10m0s", - "read_header_timeout": "5s", - "write_timeout": "10m0s", - "idle_timeout": "1m0s" - }, - "restapi": { - "ssl_cert_file": "", - "ssl_key_file": "", - "http_listen_multiaddress": "/ip4/127.0.0.1/tcp/9094", - "read_timeout": "30s", - "read_header_timeout": "5s", - "write_timeout": "1m0s", - "idle_timeout": "2m0s", - "basic_auth_credentials": { - "testuser": "testpass" - }, - "cors_allowed_origins": [ - "*" - ], - "cors_allowed_methods": [ - "GET" - ], - "cors_allowed_headers": [], - "cors_exposed_headers": [ - "Content-Type", - "X-Stream-Output", - "X-Chunked-Output", - "X-Content-Length" - ], - "cors_allow_credentials": true, - "cors_max_age": "0s" - } - }, - "ipfs_connector": { - "ipfshttp": { - "node_multiaddress": "/ip4/127.0.0.1/tcp/5001", - "connect_swarms_delay": "30s", - "ipfs_request_timeout": "5m0s", - "pin_timeout": "0h2m0s", - "unpin_timeout": "3h0m0s" - } - }, - "pin_tracker": { - "stateless": { - "max_pin_queue_size": 50000, - "concurrent_pins": 10 - } - }, - "monitor": { - "monbasic": { - "check_interval": "15s" - }, - "pubsubmon": { - "check_interval": "15s" - } - }, - "informer": { - "disk": { - "metric_ttl": "30s", - "metric_type": "reposize" - }, - "numpin": { - "metric_ttl": "10s" - } - } -} diff --git a/packages/networking/ipfs-cluster/sharness/config/ssl-basic_auth/identity.json b/packages/networking/ipfs-cluster/sharness/config/ssl-basic_auth/identity.json deleted file mode 100644 index 9dd7207..0000000 --- a/packages/networking/ipfs-cluster/sharness/config/ssl-basic_auth/identity.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "id": "QmdEtBsfumeH2V6dnx1fgn8zuW7XYjWdgJF4NEYpEBcTsg", - "private_key": "CAASqAkwggSkAgEAAoIBAQC/ZmfWDbwyI0nJdRxgHcTdEaBFQo8sky9E+OOvtwZa5WKoLdHyHOLWxCAdpIHUBbhxz5rkMEWLwPI6ykqLIJToMPO8lJbKVzphOjv4JwpiAPdmeSiYMKLjx5V8MpqU2rwj/Uf3sRL8Gg9/Tei3PZ8cftxN1rkQQeeaOtk0CBxUFZSHEsyut1fbgIeL7TAY+4vCmXW0DBr4wh9fnoES/YivOvSiN9rScgWg6N65LfkI78hzaOJ4Nok2S4vYFCxjTAI9NWFUbhP5eJIFzTU+bZuQZxOn2qsoyw8pNZwuF+JClA/RcgBcCvVZcDH2ueVq/zT++bGCN+EWsAEdvJqJ5bsjAgMBAAECggEAaGDUZ6t94mnUJ4UyQEh7v4OJP7wYkFqEAL0qjfzl/lPyBX1XbQ3Ltwul6AR6uMGV4JszARZCFwDWGLGRDWZrTmTDxyfRQ+9l6vfzFFVWGDQmtz+Dn9uGOWnyX5TJMDxJNec+hBmRHOKpaOd37dYxGz0jr19V9UO7piRJp1J1AHUCypUGv5x1IekioSCu5fEyc7dyWwnmITHBjD08st+bCcjrIUFeXSdJKC8SymYeXdaVE3xH3zVEISKnrfT7bhuKZY1iibZIlXbVLNpyX36LkYJOiCqsMum3u70LH0VvTypkqiDbD4S6qfJ4vvUakpmKpOPutikiP7jkSP+AkaO0AQKBgQDkTuhnDK6+Y0a/HgpHJisji0coO+g2gsIszargHk8nNY2AB8t+EUn7C+Qu8cmrem5V8EXcdxS6z7iAXpJmY1Xepnsz+JP7Q91Lgt3OoqK5EybzUXXKkmNCD65n70Xxn2fEFzm6+GJP3c/HymlDKU2KBCYIyuUeaREjT0Fu3v6tgQKBgQDWnXppJwn4LJHhzFOCeO4zomDJDbLTZCabdKZoFP9r+vtEHAnclDDKx4AYbomSqgERe+DX6HR/tPHRVizP63RYPf7al2mJmPzt1nTkoc1/q5hQoD+oE154dADsW1pUp7AQjwCtys4iq5S0qAwIDpuY8M8bOHwZ+QmBvHYAigJCowKBgQC3HH6TX/2rH463bE2MARXqXSPGJj45sigwrQfW1xhe9zm1LQtN4mn2mvP5nt1D1l82OA6gIzYSGtX8x10eF5/ggqAf78goZ6bOkHh76b8fNzgvQO97eGt5qYAVRjhP8azU/lfEGMEpE1s5/6LrRe41utwSg0C+YkBnlIKDfQDAgQKBgDoBTCF5hK9H1JHzuKpt5uubuo78ndWWnvyrNYKyEirsJddNwLiWcO2NqChyT8qNGkbQdX/Fex89F5KduPTlTYfAEc6g18xxxgK+UM+uj60vArbf6PSTb5gculcnha2VuPdwvx050Cb8uu9s7/uJfzKB+2f/B0O51ID1H+ubYWsDAoGBAKrwGKHyqFTHSPg3XuRA1FgDAoOsfzP9ZJvMEXUWyu/VxjNt+0mRlyGeZ5qb9UZG+K/In4FbC/ux2P/PucCUIbgy/XGPtPXVavMwNbx0MquAcU0FihKXP0CUpi8zwiYc42MF7n/SztQnismxigBMSuJEDurcXXazjfcSRTypduNn" -} \ No newline at end of file diff --git a/packages/networking/ipfs-cluster/sharness/config/ssl-basic_auth/server.crt b/packages/networking/ipfs-cluster/sharness/config/ssl-basic_auth/server.crt deleted file 
mode 100644 index b4f82ce..0000000 --- a/packages/networking/ipfs-cluster/sharness/config/ssl-basic_auth/server.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID7TCCAtWgAwIBAgIJAMqpHdKRMzMLMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD -VQQGEwJVUzERMA8GA1UECAwIQ29sb3JhZG8xDzANBgNVBAcMBmdvbGRlbjEMMAoG -A1UECgwDQ1NNMREwDwYDVQQLDAhTZWN0b3IgNzEMMAoGA1UEAwwDQm9iMSAwHgYJ -KoZIhvcNAQkBFhFtaW5pc3RlckBtb3N3Lm9yZzAeFw0xNzA3MjExNjA5NTlaFw0y -NzA3MTkxNjA5NTlaMIGCMQswCQYDVQQGEwJVUzERMA8GA1UECAwIQ29sb3JhZG8x -DzANBgNVBAcMBmdvbGRlbjEMMAoGA1UECgwDQ1NNMREwDwYDVQQLDAhTZWN0b3Ig -NzEMMAoGA1UEAwwDQm9iMSAwHgYJKoZIhvcNAQkBFhFtaW5pc3RlckBtb3N3Lm9y -ZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALuoP8PehGItmKPi3+8S -IV1qz8C3FiK85X/INxYLjyuzvpmDROtlkOvdmPCJrveKDZF7ECQpwIGApFbnKCCW -3zdOPQmAVzm4N8bvnzFtM9mTm8qKb9SwRi6ZLZ/qXo98t8C7CV6FaNKUkIw0lUes -ZiXEcmknrlPy3svaDQVoSOH8L38d0g4geqiNrMmZDaGe8FAYdpCoeYDIm/u0Ag9y -G3+XAbETxWhkfTyH3XcQ/Izg0wG9zFY8y/fyYwC+C7+xF75x4gbIzHAY2iFS2ua7 -GTKa2GZhOXtMuzJ6cf+TZW460Z+O+PkA1aH01WrGL7iCW/6Cn9gPRKL+IP6iyDnh -9HMCAwEAAaNkMGIwDwYDVR0RBAgwBocEfwAAATAdBgNVHQ4EFgQU9mXv8mv/LlAa -jwr8X9hzk52cBagwHwYDVR0jBBgwFoAU9mXv8mv/LlAajwr8X9hzk52cBagwDwYD -VR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAIxqpKYzF6A9RlLso0lkF -nYfcyeVAvi03IBdiTNnpOe6ROa4gNwKH/JUJMCRDPzm/x78+srCmrcCCAJJTcqgi -b84vq3DegGPg2NXbn9qVUA1SdiXFelqMFwLitDn2KKizihEN4L5PEArHuDaNvLI+ -kMr+yZSALWTdtfydj211c7hTBvFqO8l5MYDXCmfoS9sqniorlNHIaBim/SNfDsi6 -8hAhvfRvk3e6dPjAPrIZYdQR5ROGewtD4F/anXgKY2BmBtWwd6gbGeMnnVi1SGRP -0UHc4O9aq9HrAOFL/72WVk/kyyPyJ/GtSaPYL1OFS12R/l0hNi+pER7xDtLOVHO2 -iw== ------END CERTIFICATE----- diff --git a/packages/networking/ipfs-cluster/sharness/config/ssl-basic_auth/server.key b/packages/networking/ipfs-cluster/sharness/config/ssl-basic_auth/server.key deleted file mode 100644 index 28da7be..0000000 --- a/packages/networking/ipfs-cluster/sharness/config/ssl-basic_auth/server.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpQIBAAKCAQEAu6g/w96EYi2Yo+Lf7xIhXWrPwLcWIrzlf8g3FguPK7O+mYNE -62WQ692Y8Imu94oNkXsQJCnAgYCkVucoIJbfN049CYBXObg3xu+fMW0z2ZObyopv -1LBGLpktn+pej3y3wLsJXoVo0pSQjDSVR6xmJcRyaSeuU/Ley9oNBWhI4fwvfx3S -DiB6qI2syZkNoZ7wUBh2kKh5gMib+7QCD3Ibf5cBsRPFaGR9PIfddxD8jODTAb3M -VjzL9/JjAL4Lv7EXvnHiBsjMcBjaIVLa5rsZMprYZmE5e0y7Mnpx/5NlbjrRn474 -+QDVofTVasYvuIJb/oKf2A9Eov4g/qLIOeH0cwIDAQABAoIBAAOYreArG45mIU7C -wlfqmQkZSvH+kEYKKLvSMnwRrKTBxR1cDq4UPDrI/G1ftiK4Wpo3KZAH3NCejoe7 -1mEJgy2kKjdMZl+M0ETXws1Hsn6w/YNcM9h3qGCsPtuZukY1ta/T5dIR7HhcsIh/ -WX0OKMcAhNDPGeAx/2MYwrcf0IXELx0+eP1fuBllkajH14J8+ZkVrBMDhqppn8Iq -f9poVNQliJtN7VkL6lJ60HwoVNGEhFaOYphn3CR/sCc6xl+/CzV4h6c5X/RIUfDs -kjgl9mlPFuWq9S19Z+XVfLSE+sYd6LDrh0IZEx9s0OfOjucH2bUAuKNDnCq0wW70 -FzH6KoECgYEA4ZOcAMgujk8goL8nleNjuEq7d8pThAsuAy5vq9oyol8oe+p1pXHR -SHP6wHyhXeTS5g1Ej+QV6f0v9gVFS2pFqTXymc9Gxald3trcnheodZXx63YbxHm2 -H7mYWyZvq05A0qRLmmqCoSRJHUOkH2wVqgj9KsVYP1anIhdykbycansCgYEA1Pdp -uAfWt/GLZ7B0q3JPlVvusf97wBIUcoaxLHGKopvfsaFp0EY3NRxLSTaZ0NPOxTHh -W6xaIlBmKllyt6q8W609A8hrXayV1yYnVE44b5UEMhVlfRFeEdf9Sp4YdQJ8r1J0 -QA89jHCjf8VocP5pSJz5tXvWHhmaotXBthFgWGkCgYEAiy7dwenCOBKAqk5n6Wb9 -X3fVBguzzjRrtpDPXHTsax1VyGeZIXUB0bemD2CW3G1U55dmJ3ZvQwnyrtT/tZGj -280qnFa1bz6aaegW2gD082CKfWNJrMgAZMDKTeuAWW2WN6Ih9+wiH7VY25Kh0LWL -BHg5ZUuQsLwRscpP6bY7uMMCgYEAwY23hK2DJZyfEXcbIjL7R4jNMPM82nzUHp5x -6i2rTUyTitJj5Anc5SU4+2pnc5b9RtWltva22Jbvs6+mBm1jUYLqgESn5/QSHv8r -IYER47+wl4BAw+GD+H2wVB/JpJbFEWbEBvCTBM/emSKmYIOo1njsrlfFa4fjtfjG -XJ4ATXkCgYEAzeSrCCVrfPMLCmOijIYD1F7TMFthosW2JJie3bcHZMu2QEM8EIif -YzkUvMaDAXJ4VniTHkDf3ubRoUi3DwLbvJIPnoOlx3jmzz6KYiEd+uXx40Yrebb0 
-V9GB2S2q1RY7wsFoCqT/mq8usQkjr3ulYMJqeIWnCTWgajXWqAHH/Mw= ------END RSA PRIVATE KEY----- diff --git a/packages/networking/ipfs-cluster/sharness/config/ssl-basic_auth/service.json b/packages/networking/ipfs-cluster/sharness/config/ssl-basic_auth/service.json deleted file mode 100644 index eaa4707..0000000 --- a/packages/networking/ipfs-cluster/sharness/config/ssl-basic_auth/service.json +++ /dev/null @@ -1,93 +0,0 @@ -{ - "cluster": { - "peername": "testname", - "secret": "84399cd0be811c2ca372d6ca473ffd73c09034f991c5e306fe9ada6c5fcfb641", - "leave_on_shutdown": false, - "listen_multiaddress": [ - "/ip4/0.0.0.0/tcp/9096", - "/ip6/::/tcp/9096" - ], - "state_sync_interval": "1m0s", - "replication_factor": -1, - "monitor_ping_interval": "15s" - }, - "consensus": { - "raft": { - "heartbeat_timeout": "1s", - "election_timeout": "1s", - "commit_timeout": "50ms", - "max_append_entries": 64, - "trailing_logs": 10240, - "snapshot_interval": "2m0s", - "snapshot_threshold": 8192, - "leader_lease_timeout": "500ms" - } - }, - "api": { - "ipfsproxy": { - "listen_multiaddress": "/ip4/127.0.0.1/tcp/9095", - "node_multiaddress": "/ip4/127.0.0.1/tcp/5001", - "read_timeout": "10m0s", - "read_header_timeout": "5s", - "write_timeout": "10m0s", - "idle_timeout": "1m0s" - }, - "restapi": { - "ssl_cert_file": "server.crt", - "ssl_key_file": "server.key", - "http_listen_multiaddress": "/ip4/127.0.0.1/tcp/9094", - "read_timeout": "30s", - "read_header_timeout": "5s", - "write_timeout": "1m0s", - "idle_timeout": "2m0s", - "basic_auth_credentials": { - "testuser": "testpass", - "userwithoutpass": "" - }, - "cors_allowed_origins": [ - "*" - ], - "cors_allowed_methods": [ - "GET" - ], - "cors_allowed_headers": [], - "cors_exposed_headers": [ - "Content-Type", - "X-Stream-Output", - "X-Chunked-Output", - "X-Content-Length" - ], - "cors_allow_credentials": true, - "cors_max_age": "0s" - } - }, - "ipfs_connector": { - "ipfshttp": { - "node_multiaddress": "/ip4/127.0.0.1/tcp/5001", - "connect_swarms_delay": "30s", - "ipfs_request_timeout": "5m0s", - "pin_timeout": "0h2m0s", - "unpin_timeout": "3h0m0s" - } - }, - "pin_tracker": { - "stateless": { - "max_pin_queue_size": 50000, - "concurrent_pins": 10 - } - }, - "monitor": { - "pubsubmon": { - "check_interval": "15s" - } - }, - "informer": { - "disk": { - "metric_ttl": "30s", - "metric_type": "reposize" - }, - "numpin": { - "metric_ttl": "10s" - } - } -} diff --git a/packages/networking/ipfs-cluster/sharness/config/ssl/identity.json b/packages/networking/ipfs-cluster/sharness/config/ssl/identity.json deleted file mode 100644 index 9dd7207..0000000 --- a/packages/networking/ipfs-cluster/sharness/config/ssl/identity.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "id": "QmdEtBsfumeH2V6dnx1fgn8zuW7XYjWdgJF4NEYpEBcTsg", - "private_key": 
"CAASqAkwggSkAgEAAoIBAQC/ZmfWDbwyI0nJdRxgHcTdEaBFQo8sky9E+OOvtwZa5WKoLdHyHOLWxCAdpIHUBbhxz5rkMEWLwPI6ykqLIJToMPO8lJbKVzphOjv4JwpiAPdmeSiYMKLjx5V8MpqU2rwj/Uf3sRL8Gg9/Tei3PZ8cftxN1rkQQeeaOtk0CBxUFZSHEsyut1fbgIeL7TAY+4vCmXW0DBr4wh9fnoES/YivOvSiN9rScgWg6N65LfkI78hzaOJ4Nok2S4vYFCxjTAI9NWFUbhP5eJIFzTU+bZuQZxOn2qsoyw8pNZwuF+JClA/RcgBcCvVZcDH2ueVq/zT++bGCN+EWsAEdvJqJ5bsjAgMBAAECggEAaGDUZ6t94mnUJ4UyQEh7v4OJP7wYkFqEAL0qjfzl/lPyBX1XbQ3Ltwul6AR6uMGV4JszARZCFwDWGLGRDWZrTmTDxyfRQ+9l6vfzFFVWGDQmtz+Dn9uGOWnyX5TJMDxJNec+hBmRHOKpaOd37dYxGz0jr19V9UO7piRJp1J1AHUCypUGv5x1IekioSCu5fEyc7dyWwnmITHBjD08st+bCcjrIUFeXSdJKC8SymYeXdaVE3xH3zVEISKnrfT7bhuKZY1iibZIlXbVLNpyX36LkYJOiCqsMum3u70LH0VvTypkqiDbD4S6qfJ4vvUakpmKpOPutikiP7jkSP+AkaO0AQKBgQDkTuhnDK6+Y0a/HgpHJisji0coO+g2gsIszargHk8nNY2AB8t+EUn7C+Qu8cmrem5V8EXcdxS6z7iAXpJmY1Xepnsz+JP7Q91Lgt3OoqK5EybzUXXKkmNCD65n70Xxn2fEFzm6+GJP3c/HymlDKU2KBCYIyuUeaREjT0Fu3v6tgQKBgQDWnXppJwn4LJHhzFOCeO4zomDJDbLTZCabdKZoFP9r+vtEHAnclDDKx4AYbomSqgERe+DX6HR/tPHRVizP63RYPf7al2mJmPzt1nTkoc1/q5hQoD+oE154dADsW1pUp7AQjwCtys4iq5S0qAwIDpuY8M8bOHwZ+QmBvHYAigJCowKBgQC3HH6TX/2rH463bE2MARXqXSPGJj45sigwrQfW1xhe9zm1LQtN4mn2mvP5nt1D1l82OA6gIzYSGtX8x10eF5/ggqAf78goZ6bOkHh76b8fNzgvQO97eGt5qYAVRjhP8azU/lfEGMEpE1s5/6LrRe41utwSg0C+YkBnlIKDfQDAgQKBgDoBTCF5hK9H1JHzuKpt5uubuo78ndWWnvyrNYKyEirsJddNwLiWcO2NqChyT8qNGkbQdX/Fex89F5KduPTlTYfAEc6g18xxxgK+UM+uj60vArbf6PSTb5gculcnha2VuPdwvx050Cb8uu9s7/uJfzKB+2f/B0O51ID1H+ubYWsDAoGBAKrwGKHyqFTHSPg3XuRA1FgDAoOsfzP9ZJvMEXUWyu/VxjNt+0mRlyGeZ5qb9UZG+K/In4FbC/ux2P/PucCUIbgy/XGPtPXVavMwNbx0MquAcU0FihKXP0CUpi8zwiYc42MF7n/SztQnismxigBMSuJEDurcXXazjfcSRTypduNn" -} \ No newline at end of file diff --git a/packages/networking/ipfs-cluster/sharness/config/ssl/server.crt b/packages/networking/ipfs-cluster/sharness/config/ssl/server.crt deleted file mode 100644 index b4f82ce..0000000 --- a/packages/networking/ipfs-cluster/sharness/config/ssl/server.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID7TCCAtWgAwIBAgIJAMqpHdKRMzMLMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD -VQQGEwJVUzERMA8GA1UECAwIQ29sb3JhZG8xDzANBgNVBAcMBmdvbGRlbjEMMAoG -A1UECgwDQ1NNMREwDwYDVQQLDAhTZWN0b3IgNzEMMAoGA1UEAwwDQm9iMSAwHgYJ -KoZIhvcNAQkBFhFtaW5pc3RlckBtb3N3Lm9yZzAeFw0xNzA3MjExNjA5NTlaFw0y -NzA3MTkxNjA5NTlaMIGCMQswCQYDVQQGEwJVUzERMA8GA1UECAwIQ29sb3JhZG8x -DzANBgNVBAcMBmdvbGRlbjEMMAoGA1UECgwDQ1NNMREwDwYDVQQLDAhTZWN0b3Ig -NzEMMAoGA1UEAwwDQm9iMSAwHgYJKoZIhvcNAQkBFhFtaW5pc3RlckBtb3N3Lm9y -ZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALuoP8PehGItmKPi3+8S -IV1qz8C3FiK85X/INxYLjyuzvpmDROtlkOvdmPCJrveKDZF7ECQpwIGApFbnKCCW -3zdOPQmAVzm4N8bvnzFtM9mTm8qKb9SwRi6ZLZ/qXo98t8C7CV6FaNKUkIw0lUes -ZiXEcmknrlPy3svaDQVoSOH8L38d0g4geqiNrMmZDaGe8FAYdpCoeYDIm/u0Ag9y -G3+XAbETxWhkfTyH3XcQ/Izg0wG9zFY8y/fyYwC+C7+xF75x4gbIzHAY2iFS2ua7 -GTKa2GZhOXtMuzJ6cf+TZW460Z+O+PkA1aH01WrGL7iCW/6Cn9gPRKL+IP6iyDnh -9HMCAwEAAaNkMGIwDwYDVR0RBAgwBocEfwAAATAdBgNVHQ4EFgQU9mXv8mv/LlAa -jwr8X9hzk52cBagwHwYDVR0jBBgwFoAU9mXv8mv/LlAajwr8X9hzk52cBagwDwYD -VR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAIxqpKYzF6A9RlLso0lkF -nYfcyeVAvi03IBdiTNnpOe6ROa4gNwKH/JUJMCRDPzm/x78+srCmrcCCAJJTcqgi -b84vq3DegGPg2NXbn9qVUA1SdiXFelqMFwLitDn2KKizihEN4L5PEArHuDaNvLI+ -kMr+yZSALWTdtfydj211c7hTBvFqO8l5MYDXCmfoS9sqniorlNHIaBim/SNfDsi6 -8hAhvfRvk3e6dPjAPrIZYdQR5ROGewtD4F/anXgKY2BmBtWwd6gbGeMnnVi1SGRP -0UHc4O9aq9HrAOFL/72WVk/kyyPyJ/GtSaPYL1OFS12R/l0hNi+pER7xDtLOVHO2 -iw== ------END CERTIFICATE----- diff --git a/packages/networking/ipfs-cluster/sharness/config/ssl/server.key b/packages/networking/ipfs-cluster/sharness/config/ssl/server.key deleted file mode 100644 index 
28da7be..0000000 --- a/packages/networking/ipfs-cluster/sharness/config/ssl/server.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpQIBAAKCAQEAu6g/w96EYi2Yo+Lf7xIhXWrPwLcWIrzlf8g3FguPK7O+mYNE -62WQ692Y8Imu94oNkXsQJCnAgYCkVucoIJbfN049CYBXObg3xu+fMW0z2ZObyopv -1LBGLpktn+pej3y3wLsJXoVo0pSQjDSVR6xmJcRyaSeuU/Ley9oNBWhI4fwvfx3S -DiB6qI2syZkNoZ7wUBh2kKh5gMib+7QCD3Ibf5cBsRPFaGR9PIfddxD8jODTAb3M -VjzL9/JjAL4Lv7EXvnHiBsjMcBjaIVLa5rsZMprYZmE5e0y7Mnpx/5NlbjrRn474 -+QDVofTVasYvuIJb/oKf2A9Eov4g/qLIOeH0cwIDAQABAoIBAAOYreArG45mIU7C -wlfqmQkZSvH+kEYKKLvSMnwRrKTBxR1cDq4UPDrI/G1ftiK4Wpo3KZAH3NCejoe7 -1mEJgy2kKjdMZl+M0ETXws1Hsn6w/YNcM9h3qGCsPtuZukY1ta/T5dIR7HhcsIh/ -WX0OKMcAhNDPGeAx/2MYwrcf0IXELx0+eP1fuBllkajH14J8+ZkVrBMDhqppn8Iq -f9poVNQliJtN7VkL6lJ60HwoVNGEhFaOYphn3CR/sCc6xl+/CzV4h6c5X/RIUfDs -kjgl9mlPFuWq9S19Z+XVfLSE+sYd6LDrh0IZEx9s0OfOjucH2bUAuKNDnCq0wW70 -FzH6KoECgYEA4ZOcAMgujk8goL8nleNjuEq7d8pThAsuAy5vq9oyol8oe+p1pXHR -SHP6wHyhXeTS5g1Ej+QV6f0v9gVFS2pFqTXymc9Gxald3trcnheodZXx63YbxHm2 -H7mYWyZvq05A0qRLmmqCoSRJHUOkH2wVqgj9KsVYP1anIhdykbycansCgYEA1Pdp -uAfWt/GLZ7B0q3JPlVvusf97wBIUcoaxLHGKopvfsaFp0EY3NRxLSTaZ0NPOxTHh -W6xaIlBmKllyt6q8W609A8hrXayV1yYnVE44b5UEMhVlfRFeEdf9Sp4YdQJ8r1J0 -QA89jHCjf8VocP5pSJz5tXvWHhmaotXBthFgWGkCgYEAiy7dwenCOBKAqk5n6Wb9 -X3fVBguzzjRrtpDPXHTsax1VyGeZIXUB0bemD2CW3G1U55dmJ3ZvQwnyrtT/tZGj -280qnFa1bz6aaegW2gD082CKfWNJrMgAZMDKTeuAWW2WN6Ih9+wiH7VY25Kh0LWL -BHg5ZUuQsLwRscpP6bY7uMMCgYEAwY23hK2DJZyfEXcbIjL7R4jNMPM82nzUHp5x -6i2rTUyTitJj5Anc5SU4+2pnc5b9RtWltva22Jbvs6+mBm1jUYLqgESn5/QSHv8r -IYER47+wl4BAw+GD+H2wVB/JpJbFEWbEBvCTBM/emSKmYIOo1njsrlfFa4fjtfjG -XJ4ATXkCgYEAzeSrCCVrfPMLCmOijIYD1F7TMFthosW2JJie3bcHZMu2QEM8EIif -YzkUvMaDAXJ4VniTHkDf3ubRoUi3DwLbvJIPnoOlx3jmzz6KYiEd+uXx40Yrebb0 -V9GB2S2q1RY7wsFoCqT/mq8usQkjr3ulYMJqeIWnCTWgajXWqAHH/Mw= ------END RSA PRIVATE KEY----- diff --git a/packages/networking/ipfs-cluster/sharness/config/ssl/service.json b/packages/networking/ipfs-cluster/sharness/config/ssl/service.json deleted file mode 100644 index 0b1447b..0000000 --- a/packages/networking/ipfs-cluster/sharness/config/ssl/service.json +++ /dev/null @@ -1,92 +0,0 @@ -{ - "cluster": { - "id": "QmdEtBsfumeH2V6dnx1fgn8zuW7XYjWdgJF4NEYpEBcTsg", - "peername": "testname", - "private_key": 
"CAASqAkwggSkAgEAAoIBAQC/ZmfWDbwyI0nJdRxgHcTdEaBFQo8sky9E+OOvtwZa5WKoLdHyHOLWxCAdpIHUBbhxz5rkMEWLwPI6ykqLIJToMPO8lJbKVzphOjv4JwpiAPdmeSiYMKLjx5V8MpqU2rwj/Uf3sRL8Gg9/Tei3PZ8cftxN1rkQQeeaOtk0CBxUFZSHEsyut1fbgIeL7TAY+4vCmXW0DBr4wh9fnoES/YivOvSiN9rScgWg6N65LfkI78hzaOJ4Nok2S4vYFCxjTAI9NWFUbhP5eJIFzTU+bZuQZxOn2qsoyw8pNZwuF+JClA/RcgBcCvVZcDH2ueVq/zT++bGCN+EWsAEdvJqJ5bsjAgMBAAECggEAaGDUZ6t94mnUJ4UyQEh7v4OJP7wYkFqEAL0qjfzl/lPyBX1XbQ3Ltwul6AR6uMGV4JszARZCFwDWGLGRDWZrTmTDxyfRQ+9l6vfzFFVWGDQmtz+Dn9uGOWnyX5TJMDxJNec+hBmRHOKpaOd37dYxGz0jr19V9UO7piRJp1J1AHUCypUGv5x1IekioSCu5fEyc7dyWwnmITHBjD08st+bCcjrIUFeXSdJKC8SymYeXdaVE3xH3zVEISKnrfT7bhuKZY1iibZIlXbVLNpyX36LkYJOiCqsMum3u70LH0VvTypkqiDbD4S6qfJ4vvUakpmKpOPutikiP7jkSP+AkaO0AQKBgQDkTuhnDK6+Y0a/HgpHJisji0coO+g2gsIszargHk8nNY2AB8t+EUn7C+Qu8cmrem5V8EXcdxS6z7iAXpJmY1Xepnsz+JP7Q91Lgt3OoqK5EybzUXXKkmNCD65n70Xxn2fEFzm6+GJP3c/HymlDKU2KBCYIyuUeaREjT0Fu3v6tgQKBgQDWnXppJwn4LJHhzFOCeO4zomDJDbLTZCabdKZoFP9r+vtEHAnclDDKx4AYbomSqgERe+DX6HR/tPHRVizP63RYPf7al2mJmPzt1nTkoc1/q5hQoD+oE154dADsW1pUp7AQjwCtys4iq5S0qAwIDpuY8M8bOHwZ+QmBvHYAigJCowKBgQC3HH6TX/2rH463bE2MARXqXSPGJj45sigwrQfW1xhe9zm1LQtN4mn2mvP5nt1D1l82OA6gIzYSGtX8x10eF5/ggqAf78goZ6bOkHh76b8fNzgvQO97eGt5qYAVRjhP8azU/lfEGMEpE1s5/6LrRe41utwSg0C+YkBnlIKDfQDAgQKBgDoBTCF5hK9H1JHzuKpt5uubuo78ndWWnvyrNYKyEirsJddNwLiWcO2NqChyT8qNGkbQdX/Fex89F5KduPTlTYfAEc6g18xxxgK+UM+uj60vArbf6PSTb5gculcnha2VuPdwvx050Cb8uu9s7/uJfzKB+2f/B0O51ID1H+ubYWsDAoGBAKrwGKHyqFTHSPg3XuRA1FgDAoOsfzP9ZJvMEXUWyu/VxjNt+0mRlyGeZ5qb9UZG+K/In4FbC/ux2P/PucCUIbgy/XGPtPXVavMwNbx0MquAcU0FihKXP0CUpi8zwiYc42MF7n/SztQnismxigBMSuJEDurcXXazjfcSRTypduNn", - "secret": "84399cd0be811c2ca372d6ca473ffd73c09034f991c5e306fe9ada6c5fcfb641", - "leave_on_shutdown": false, - "listen_multiaddress": [ - "/ip4/0.0.0.0/tcp/9096", - "/ip6/::/tcp/9096" - ], - "state_sync_interval": "1m0s", - "replication_factor": -1, - "monitor_ping_interval": "15s" - }, - "consensus": { - "raft": { - "heartbeat_timeout": "1s", - "election_timeout": "1s", - "commit_timeout": "50ms", - "max_append_entries": 64, - "trailing_logs": 10240, - "snapshot_interval": "2m0s", - "snapshot_threshold": 8192, - "leader_lease_timeout": "500ms" - } - }, - "api": { - "ipfsproxy": { - "listen_multiaddress": "/ip4/127.0.0.1/tcp/9095", - "node_multiaddress": "/ip4/127.0.0.1/tcp/5001", - "read_timeout": "10m0s", - "read_header_timeout": "5s", - "write_timeout": "10m0s", - "idle_timeout": "1m0s" - }, - "restapi": { - "ssl_cert_file": "server.crt", - "ssl_key_file": "server.key", - "http_listen_multiaddress": "/ip4/127.0.0.1/tcp/9094", - "read_timeout": "30s", - "read_header_timeout": "5s", - "write_timeout": "1m0s", - "idle_timeout": "2m0s", - "basic_auth_credentials": null, - "cors_allowed_origins": [ - "*" - ], - "cors_allowed_methods": [ - "GET" - ], - "cors_allowed_headers": [], - "cors_exposed_headers": [ - "Content-Type", - "X-Stream-Output", - "X-Chunked-Output", - "X-Content-Length" - ], - "cors_allow_credentials": true, - "cors_max_age": "0s" - } - }, - "ipfs_connector": { - "ipfshttp": { - "node_multiaddress": "/ip4/127.0.0.1/tcp/5001", - "connect_swarms_delay": "30s", - "ipfs_request_timeout": "5m0s", - "pin_timeout": "0h2m0s", - "unpin_timeout": "3h0m0s" - } - }, - "pin_tracker": { - "stateless": { - "max_pin_queue_size": 50000, - "concurrent_pins": 10 - } - }, - "monitor": { - "pubsubmon": { - "check_interval": "15s" - } - }, - "informer": { - "disk": { - "metric_ttl": "30s", - "metric_type": "reposize" - }, - "numpin": { - "metric_ttl": "10s" - } - } -} diff --git 
a/packages/networking/ipfs-cluster/sharness/lib/test-lib.sh b/packages/networking/ipfs-cluster/sharness/lib/test-lib.sh deleted file mode 100755 index 4061834..0000000 --- a/packages/networking/ipfs-cluster/sharness/lib/test-lib.sh +++ /dev/null @@ -1,148 +0,0 @@ -#!/bin/bash - -# Sharness test framework for ipfs-cluster -# -# We are using sharness (https://github.com/chriscool/sharness) -# which was extracted from the Git test framework. - -SHARNESS_TEST_SRCDIR="lib/sharness/test" -SHARNESS_LIB="lib/sharness/sharness.sh" - -# Daemons output will be redirected to... -IPFS_OUTPUT="/dev/null" -#IPFS_OUTPUT="/dev/stderr" # uncomment for debugging - -. "$SHARNESS_LIB" || { - echo >&2 "Cannot source: $SHARNESS_LIB" - echo >&2 "Please check Sharness installation." - exit 1 -} - -which jq >/dev/null 2>&1 -if [ $? -eq 0 ]; then - test_set_prereq JQ -fi - -# Set prereqs -test_ipfs_init() { - which docker >/dev/null 2>&1 - if [ $? -ne 0 ]; then - echo "Docker not found" - exit 1 - fi - if docker ps --format '{{.Names}}' | egrep -q '^ipfs$'; then - echo "ipfs container already running" - else - docker run --name ipfs -d -p 127.0.0.1:5001:5001 ipfs/go-ipfs > /dev/null 2>&1 - if [ $? -ne 0 ]; then - echo "IPFS init FAIL: Error running go-ipfs in docker." - exit 1 - fi - while ! curl -s "localhost:5001/api/v0/version" > /dev/null; do - sleep 0.2 - done - sleep 2 - fi - test_set_prereq IPFS -} - -ipfsCmd() { - docker exec -i ipfs ipfs $@ -} - -test_ipfs_running() { - if curl -s "localhost:5001/api/v0/version" > /dev/null; then - test_set_prereq IPFS - else - echo "IPFS is not running" - exit 1 - fi -} - -test_cluster_init() { - custom_config_files="$1" - consensus="$2" - if [ -z "$consensus" ]; then - consensus="crdt" - fi - - which ipfs-cluster-service >/dev/null 2>&1 - if [ $? -ne 0 ]; then - echo "cluster init FAIL: ipfs-cluster-service not found" - exit 1 - fi - which ipfs-cluster-ctl >/dev/null 2>&1 - if [ $? -ne 0 ]; then - echo "cluster init FAIL: ipfs-cluster-ctl not found" - exit 1 - fi - ipfs-cluster-service --config "test-config" init --force --consensus "$consensus" >"$IPFS_OUTPUT" 2>&1 - if [ $? -ne 0 ]; then - echo "cluster init FAIL: error on ipfs cluster init" - exit 1 - fi - rm -rf "test-config/raft" - if [ -n "$custom_config_files" ]; then - cp -f ${custom_config_files}/* "test-config" - fi - cluster_start -} - -test_cluster_config() { - # export CLUSTER_CONFIG_PATH="test-config/service.json" - export CLUSTER_IDENTITY_PATH="test-config/identity.json" - export CLUSTER_IDENTITY_ID=`jq --raw-output ".id" $CLUSTER_IDENTITY_PATH` - export CLUSTER_IDENTITY_PK=`jq --raw-output ".private_key" $CLUSTER_IDENTITY_PATH` - [ "$CLUSTER_IDENTITY_ID" != "null" ] && [ "$CLUSTER_IDENTITY_PK" != "null" ] -} - -cluster_id() { - jq --raw-output ".id" test-config/identity.json -} - -test_confirm_v1State() { - V1_SNAP_PATH="../test_data/v1State" - V1_CRC_PATH="../test_data/v1Crc" - if [ -f $V1_SNAP_PATH ] && [ -f $V1_CRC_PATH ]; then - export V1_CRC=$(cat ../test_data/v1Crc) - cp $V1_SNAP_PATH v1State - test_set_prereq V1STATE - fi -} - -test_confirm_importState() { - IMP_STATE_PATH="../test_data/importState" - if [ -f $IMP_STATE_PATH ]; then - cp $IMP_STATE_PATH importState - test_set_prereq IMPORTSTATE - fi -} - -cluster_kill(){ - pkill -1 -f ipfs-cluster-service - while pgrep ipfs-cluster-service >/dev/null; do - sleep 0.2 - done -} - -cluster_start(){ - ipfs-cluster-service --config "test-config" daemon >"$IPFS_OUTPUT" 2>&1 & - while ! 
curl -s 'localhost:9095/api/v0/version' >/dev/null; do - sleep 0.2 - done - sleep 5 # wait for leader election - test_set_prereq CLUSTER -} - - -# Cleanup functions -test_clean_ipfs(){ - docker kill ipfs >/dev/null - docker rm ipfs >/dev/null - sleep 1 -} - -test_clean_cluster(){ - cluster_kill - rm -rf 'test-config' -} diff --git a/packages/networking/ipfs-cluster/sharness/run-sharness-tests.sh b/packages/networking/ipfs-cluster/sharness/run-sharness-tests.sh deleted file mode 100755 index df8405f..0000000 --- a/packages/networking/ipfs-cluster/sharness/run-sharness-tests.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -# Run tests -cd "$(dirname "$0")" -statuses=0 -for i in t0*.sh; -do - echo "*** $i ***" - ./$i - status=$? - statuses=$((statuses + $status)) - if [ $status -ne 0 ]; then - echo "Test $i failed" - fi -done - -# Aggregate Results -echo "Aggregating..." -for f in test-results/*.counts; do - echo "$f"; -done | bash lib/sharness/aggregate-results.sh - -# Cleanup results -rm -rf test-results - -# Exit with error if any test has failed -if [ $statuses -gt 0 ]; then - echo $statuses - exit 1 -fi -exit 0 diff --git a/packages/networking/ipfs-cluster/sharness/t0010-ctl-basic-commands.sh b/packages/networking/ipfs-cluster/sharness/t0010-ctl-basic-commands.sh deleted file mode 100755 index d4aeea6..0000000 --- a/packages/networking/ipfs-cluster/sharness/t0010-ctl-basic-commands.sh +++ /dev/null @@ -1,62 +0,0 @@ -#!/bin/bash - -test_description="Test ctl installation and some basic commands" - -. lib/test-lib.sh - - -test_expect_success "current dir is writeable" ' - echo "Writability check" >test.txt && - test_when_finished "rm test.txt" -' - -test_expect_success "cluster-ctl --version succeeds" ' - ipfs-cluster-ctl --version -' - -test_expect_success "cluster-ctl help commands succeed" ' - ipfs-cluster-ctl --help && - ipfs-cluster-ctl -h && - ipfs-cluster-ctl h && - ipfs-cluster-ctl help -' - -test_expect_success "cluster-ctl help has 120 char limits" ' - ipfs-cluster-ctl --help >help.txt && - test_when_finished "rm help.txt" && - LENGTH="$(cat help.txt | awk '"'"'{print length }'"'"' | sort -nr | head -n 1)" && - [ ! 
"$LENGTH" -gt 120 ] -' - -test_expect_success "cluster-ctl help output looks good" ' - ipfs-cluster-ctl --help | egrep -q -i "^(Usage|Commands|Global options)" -' - -test_expect_success "cluster-ctl commands output looks good" ' - ipfs-cluster-ctl commands > commands.txt && - test_when_finished "rm commands.txt" && - egrep -q "ipfs-cluster-ctl id" commands.txt && - egrep -q "ipfs-cluster-ctl peers" commands.txt && - egrep -q "ipfs-cluster-ctl pin" commands.txt && - egrep -q "ipfs-cluster-ctl status" commands.txt && - egrep -q "ipfs-cluster-ctl recover" commands.txt && - egrep -q "ipfs-cluster-ctl version" commands.txt && - egrep -q "ipfs-cluster-ctl commands" commands.txt -' - -test_expect_success "All cluster-ctl command docs are 120 columns or less" ' - export failure="0" && - ipfs-cluster-ctl commands | awk "NF" >commands.txt && - test_when_finished "rm commands.txt" && - while read cmd - do - LENGTH="$($cmd --help | awk "{ print length }" | sort -nr | head -n 1)" - [ "$LENGTH" -gt 120 ] && - { echo "$cmd" help text is longer than 119 chars "($LENGTH)"; export failure="1"; } - done expected_mode && - cid=`docker exec ipfs sh -c "echo test-pin-direct | ipfs add -q -pin=false"` && - echo "$cid direct" > expected_pin_ls && - ipfs-cluster-ctl pin add --mode direct "$cid" && - ipfs-cluster-ctl pin ls "$cid" | grep -q "PIN-DIRECT" && - docker exec ipfs sh -c "ipfs pin ls --type direct $cid" > actual_pin_ls && - ipfs-cluster-ctl --enc=json pin ls "$cid" | jq -r .mode > actual_mode && - test_cmp expected_mode actual_mode && - test_cmp expected_pin_ls actual_pin_ls -' - -test_clean_ipfs -test_clean_cluster - -test_done diff --git a/packages/networking/ipfs-cluster/sharness/t0031-ctl-add.sh b/packages/networking/ipfs-cluster/sharness/t0031-ctl-add.sh deleted file mode 100755 index d4580e8..0000000 --- a/packages/networking/ipfs-cluster/sharness/t0031-ctl-add.sh +++ /dev/null @@ -1,103 +0,0 @@ -#!/bin/bash - -test_description="Test cluster-ctl's add functionality" - -. 
lib/test-lib.sh - -test_ipfs_init -test_cluster_init - -test_expect_success IPFS,CLUSTER "add files locally and compare with ipfs" ' - dd if=/dev/urandom bs=1M count=20 of=bigfile.bin - dd if=/dev/urandom bs=1 count=500 of=smallfile.bin - mkdir -p testFolder/subfolder - echo "abc" > testFolder/abc.txt - cp bigfile.bin testFolder/subfolder/bigfile.bin - cp smallfile.bin testFolder/smallfile.bin - - docker cp bigfile.bin ipfs:/tmp/bigfile.bin - docker cp smallfile.bin ipfs:/tmp/smallfile.bin - docker cp testFolder ipfs:/tmp/testFolder - - ipfs-cluster-ctl add smallfile.bin > cidscluster.txt - ipfs-cluster-ctl add -w smallfile.bin >> cidscluster.txt - - ipfs-cluster-ctl add --raw-leaves -w smallfile.bin >> cidscluster.txt - ipfs-cluster-ctl add --raw-leaves smallfile.bin >> cidscluster.txt - - ipfs-cluster-ctl add bigfile.bin >> cidscluster.txt - ipfs-cluster-ctl add --layout trickle bigfile.bin >> cidscluster.txt - ipfs-cluster-ctl add -w bigfile.bin >> cidscluster.txt - ipfs-cluster-ctl add --raw-leaves -w bigfile.bin >> cidscluster.txt - ipfs-cluster-ctl add --raw-leaves bigfile.bin >> cidscluster.txt - - ipfs-cluster-ctl add -r testFolder >> cidscluster.txt - ipfs-cluster-ctl add -r -w testFolder >> cidscluster.txt - - ipfs-cluster-ctl add --cid-version 1 -r testFolder >> cidscluster.txt - ipfs-cluster-ctl add --hash sha3-512 -r testFolder >> cidscluster.txt - - ipfsCmd add /tmp/smallfile.bin > cidsipfs.txt - ipfsCmd add -w /tmp/smallfile.bin >> cidsipfs.txt - - ipfsCmd add --raw-leaves -w /tmp/smallfile.bin >> cidsipfs.txt - ipfsCmd add --raw-leaves /tmp/smallfile.bin >> cidsipfs.txt - - ipfsCmd add /tmp/bigfile.bin >> cidsipfs.txt - ipfsCmd add --trickle /tmp/bigfile.bin >> cidsipfs.txt - ipfsCmd add -w /tmp/bigfile.bin >> cidsipfs.txt - ipfsCmd add --raw-leaves -w /tmp/bigfile.bin >> cidsipfs.txt - ipfsCmd add --raw-leaves /tmp/bigfile.bin >> cidsipfs.txt - - ipfsCmd add -r /tmp/testFolder >> cidsipfs.txt - ipfsCmd add -r -w /tmp/testFolder >> cidsipfs.txt - - ipfsCmd add --cid-version 1 -r /tmp/testFolder >> cidsipfs.txt - ipfsCmd add --hash sha3-512 -r /tmp/testFolder >> cidsipfs.txt - - test_cmp cidscluster.txt cidsipfs.txt -' - -test_expect_success IPFS,CLUSTER "add CAR file" ' - mkdir testFolderCar - echo "abc" > testFolderCar/abc.txt - docker cp testFolderCar ipfs:/tmp/testFolderCar - - ipfsCmd add -Q -w -r /tmp/testFolderCar >> caripfs.txt - ipfsCmd dag export `cat caripfs.txt` > test.car - docker cp ipfs:/tmp/test.car test.car - ipfs-cluster-ctl add --format car -Q test.car >> carcluster.txt - test_cmp carcluster.txt caripfs.txt -' - -# Adding a folder with a single file is the same as adding the file -# and wrapping it. 
-test_expect_success IPFS,CLUSTER "check add folders" ' - mkdir testFolder2 - echo "abc" > testFolder2/abc.txt - ipfs-cluster-ctl add --quieter -w testFolder2/abc.txt > wrapped.txt - ipfs-cluster-ctl add --quieter -r testFolder2 > folder.txt - test_cmp wrapped.txt folder.txt -' - -test_expect_success IPFS,CLUSTER "check pin after locally added" ' - mkdir testFolder3 - echo "abc" > testFolder3/abc.txt - cid=`ipfs-cluster-ctl add -r --quieter testFolder3` - ipfs-cluster-ctl pin ls | grep -q -i "$cid" -' - -test_expect_success IPFS,CLUSTER "add with metadata" ' - echo "test1" > test1.txt - cid1=`ipfs-cluster-ctl add --quieter --metadata kind=text test1.txt` - echo "test2" > test2.txt - cid2=`ipfs-cluster-ctl add --quieter test2.txt` - ipfs-cluster-ctl pin ls "$cid1" | grep -q "Metadata: yes" && - ipfs-cluster-ctl --enc=json pin ls "$cid1" | jq .metadata | grep -q "\"kind\": \"text\"" && - ipfs-cluster-ctl pin ls "$cid2" | grep -q "Metadata: no" -' - -test_clean_ipfs -test_clean_cluster - -test_done diff --git a/packages/networking/ipfs-cluster/sharness/t0032-ctl-health.sh b/packages/networking/ipfs-cluster/sharness/t0032-ctl-health.sh deleted file mode 100755 index 0c3921d..0000000 --- a/packages/networking/ipfs-cluster/sharness/t0032-ctl-health.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -test_description="Test cluster-ctl's information monitoring functionality" - -. lib/test-lib.sh - -test_ipfs_init -test_cluster_init - -test_expect_success IPFS,CLUSTER "health graph succeeds and prints as expected" ' - ipfs-cluster-ctl health graph | grep -q "C0 -> I0" -' - -test_expect_success IPFS,CLUSTER "health metrics with metric name must succeed" ' - ipfs-cluster-ctl health metrics ping && - ipfs-cluster-ctl health metrics freespace -' - -test_expect_success IPFS,CLUSTER "health metrics without metric name doesn't fail" ' - ipfs-cluster-ctl health metrics -' - -test_expect_success IPFS,CLUSTER "list latest metrics logged by this peer" ' - pid=`ipfs-cluster-ctl --enc=json id | jq -r ".id"` - ipfs-cluster-ctl health metrics freespace | grep -q -E "(^$pid \| freespace: [0-9]+ (G|M|K)B \| Expires in: [0-9]+ seconds from now)" -' - -test_expect_success IPFS,CLUSTER "alerts must succeed" ' - ipfs-cluster-ctl health alerts -' - -test_clean_ipfs -test_clean_cluster - -test_done diff --git a/packages/networking/ipfs-cluster/sharness/t0040-ssl-simple-exchange.sh b/packages/networking/ipfs-cluster/sharness/t0040-ssl-simple-exchange.sh deleted file mode 100755 index bcc89c9..0000000 --- a/packages/networking/ipfs-cluster/sharness/t0040-ssl-simple-exchange.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -test_description="Test service + ctl SSL interaction" - -ssl_config="`pwd`/config/ssl" - -. lib/test-lib.sh - -test_ipfs_init - -test_cluster_init "$ssl_config" -cleanup test_clean_cluster - -test_expect_success "prerequisites" ' - test_have_prereq IPFS && test_have_prereq CLUSTER -' - -test_expect_success "ssl interaction succeeds" ' - id=`cluster_id` - ipfs-cluster-ctl --https --no-check-certificate id | egrep -q "$id" -' - -test_clean_ipfs - -test_done diff --git a/packages/networking/ipfs-cluster/sharness/t0041-ssl-enforcement.sh b/packages/networking/ipfs-cluster/sharness/t0041-ssl-enforcement.sh deleted file mode 100755 index ef55cc5..0000000 --- a/packages/networking/ipfs-cluster/sharness/t0041-ssl-enforcement.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash - -test_description="Test failure when server not using SSL but client requests it" - -. 
lib/test-lib.sh - -test_ipfs_init -test_cluster_init - -test_expect_success "prerequisites" ' - test_have_prereq IPFS && test_have_prereq CLUSTER -' - -test_expect_success "ssl enforced by client" ' - id=`cluster_id` - test_must_fail ipfs-cluster-ctl --https --no-check-certificate id -' - -test_clean_ipfs -test_clean_cluster - -test_done diff --git a/packages/networking/ipfs-cluster/sharness/t0042-basic-auth.sh b/packages/networking/ipfs-cluster/sharness/t0042-basic-auth.sh deleted file mode 100755 index fb90255..0000000 --- a/packages/networking/ipfs-cluster/sharness/t0042-basic-auth.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash - -test_description="Test service + ctl SSL interaction" - -config="`pwd`/config/basic_auth" - -. lib/test-lib.sh - -test_ipfs_init -test_cluster_init "$config" - -test_expect_success "prerequisites" ' - test_have_prereq IPFS && test_have_prereq CLUSTER -' - -test_expect_success "BasicAuth fails without credentials" ' - id=`cluster_id` - { test_must_fail ipfs-cluster-ctl id; } | grep -A1 "401" | grep -i "unauthorized" -' - -test_expect_success "BasicAuth fails with bad credentials" ' - id=`cluster_id` - { test_must_fail ipfs-cluster-ctl --basic-auth "testuser" --force-http id; } | grep -A1 "401" | grep -i "unauthorized" && - { test_must_fail ipfs-cluster-ctl --basic-auth "testuser:badpass" --force-http id; } | grep -A1 "401" | grep -i "unauthorized" && - { test_must_fail ipfs-cluster-ctl --basic-auth "baduser:testpass" --force-http id; } | grep -A1 "401" | grep -i "unauthorized" && - { test_must_fail ipfs-cluster-ctl --basic-auth "baduser:badpass" --force-http id; } | grep -A1 "401" | grep -i "unauthorized" -' - -test_expect_success "BasicAuth over HTTP succeeds with CLI flag credentials" ' - id=`cluster_id` - ipfs-cluster-ctl --basic-auth "testuser:testpass" --force-http id | grep -q "$id" -' - -test_expect_success "BasicAuth succeeds with env var credentials" ' - id=`cluster_id` - export CLUSTER_CREDENTIALS="testuser:testpass" - ipfs-cluster-ctl --force-http id | egrep -q "$id" -' - -test_clean_ipfs -test_clean_cluster - -test_done diff --git a/packages/networking/ipfs-cluster/sharness/t0043-ssl-basic-auth.sh b/packages/networking/ipfs-cluster/sharness/t0043-ssl-basic-auth.sh deleted file mode 100755 index 8d21e0f..0000000 --- a/packages/networking/ipfs-cluster/sharness/t0043-ssl-basic-auth.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -test_description="Test service + ctl SSL interaction" - -config="`pwd`/config/ssl-basic_auth" - -. lib/test-lib.sh - -test_ipfs_init -test_cluster_init "$config" - -test_expect_success "prerequisites" ' - test_have_prereq IPFS && test_have_prereq CLUSTER -' - -test_expect_success "ssl interaction fails with bad credentials" ' - id=`cluster_id` - { test_must_fail ipfs-cluster-ctl --no-check-certificate --basic-auth "testuser:badpass" id; } | grep -A1 "401" | grep -i "unauthorized" -' - -test_expect_success "ssl interaction succeeds" ' - id=`cluster_id` - ipfs-cluster-ctl --no-check-certificate --basic-auth "testuser:testpass" id | egrep -q "$id" -' - -test_clean_ipfs -test_clean_cluster - -test_done diff --git a/packages/networking/ipfs-cluster/sharness/t0052-service-state-export.sh b/packages/networking/ipfs-cluster/sharness/t0052-service-state-export.sh deleted file mode 100755 index 6070037..0000000 --- a/packages/networking/ipfs-cluster/sharness/t0052-service-state-export.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash - -test_description="Test service state export" - -. 
lib/test-lib.sh
-
-test_ipfs_init
-test_cluster_init "" crdt
-
-test_expect_success IPFS,CLUSTER,JQ "state export saves the correct state to expected file (crdt)" '
-    cid=`docker exec ipfs sh -c "echo test_52-1 | ipfs add -q"` &&
-    ipfs-cluster-ctl pin add "$cid" &&
-    sleep 5 &&
-    cluster_kill && sleep 5 &&
-    ipfs-cluster-service --debug --config "test-config" state export -f export.json &&
-    [ -f export.json ] &&
-    jq -r ".cid" export.json | grep -q "$cid"
-'
-
-cluster_kill
-sleep 5
-test_cluster_init "" raft
-
-test_expect_success IPFS,CLUSTER,JQ "state export saves the correct state to expected file (raft)" '
-    cid=`docker exec ipfs sh -c "echo test_52-2 | ipfs add -q"` &&
-    ipfs-cluster-ctl pin add "$cid" &&
-    sleep 5 &&
-    cluster_kill && sleep 5 &&
-    ipfs-cluster-service --debug --config "test-config" state export -f export.json &&
-    [ -f export.json ] &&
-    jq -r ".cid" export.json | grep -q "$cid"
-'
-
-test_clean_ipfs
-test_clean_cluster
-
-test_done
diff --git a/packages/networking/ipfs-cluster/sharness/t0053-service-state-import.sh b/packages/networking/ipfs-cluster/sharness/t0053-service-state-import.sh
deleted file mode 100755
index 68e4a72..0000000
--- a/packages/networking/ipfs-cluster/sharness/t0053-service-state-import.sh
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/bin/bash
-
-test_description="Test service state import"
-
-. lib/test-lib.sh
-
-test_ipfs_init
-test_cluster_init
-test_confirm_importState
-
-# Kill cluster daemon but keep data folder
-cluster_kill
-
-
-# WARNING: Updating the added content needs updating the importState file.
-
-test_expect_success IPFS,CLUSTER "state import fails on incorrect format (crdt)" '
-    sleep 5 &&
-    echo "not exactly json" > badImportFile &&
-    test_expect_code 1 ipfs-cluster-service --config "test-config" state import -f badImportFile
-'
-
-test_expect_success IPFS,CLUSTER,IMPORTSTATE "state import succeeds on correct format (crdt)" '
-    sleep 5
-    cid=`docker exec ipfs sh -c "echo test_53 | ipfs add -q"` &&
-    ipfs-cluster-service --config "test-config" state import -f importState &&
-    cluster_start &&
-    sleep 5 &&
-    ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" &&
-    ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED"
-'
-
-# Kill cluster daemon but keep data folder
-cluster_kill
-sleep 5
-
-test_expect_success IPFS,CLUSTER "state import fails on incorrect format (raft)" '
-    ipfs-cluster-service --config "test-config" init --force --consensus raft &&
-    echo "not exactly json" > badImportFile &&
-    test_expect_code 1 ipfs-cluster-service --config "test-config" state import -f badImportFile
-'
-
-test_expect_success IPFS,CLUSTER,IMPORTSTATE "state import succeeds on correct format (raft)" '
-    sleep 5
-    cid=`docker exec ipfs sh -c "echo test_53 | ipfs add -q"` &&
-    ipfs-cluster-service --config "test-config" state import -f importState &&
-    cluster_start &&
-    sleep 5 &&
-    ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" &&
-    ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED"
-'
-
-test_clean_ipfs
-test_clean_cluster
-
-test_done
diff --git a/packages/networking/ipfs-cluster/sharness/t0054-service-state-clean.sh b/packages/networking/ipfs-cluster/sharness/t0054-service-state-clean.sh
deleted file mode 100755
index 90ac021..0000000
--- a/packages/networking/ipfs-cluster/sharness/t0054-service-state-clean.sh
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/bin/bash
-
-test_description="Test service state clean"
-
-. lib/test-lib.sh
-
-test_ipfs_init
-test_cluster_init
-
-test_expect_success IPFS,CLUSTER "state cleanup refreshes state on restart (crdt)" '
-    cid=`docker exec ipfs sh -c "echo test_54 | ipfs add -q"` &&
-    ipfs-cluster-ctl pin add "$cid" && sleep 5 &&
-    ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" &&
-    ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED" &&
-    [ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq -n "[inputs] | length")" ] &&
-    cluster_kill && sleep 5 &&
-    ipfs-cluster-service --config "test-config" state cleanup -f &&
-    cluster_start && sleep 5 &&
-    [ 0 -eq "$(ipfs-cluster-ctl --enc=json status | jq -n "[inputs] | length")" ]
-'
-
-test_expect_success IPFS,CLUSTER "export + cleanup + import == noop (crdt)" '
-    cid=`docker exec ipfs sh -c "echo test_54 | ipfs add -q"` &&
-    ipfs-cluster-ctl pin add "$cid" && sleep 5 &&
-    [ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq -n "[inputs] | length")" ] &&
-    cluster_kill && sleep 5 &&
-    ipfs-cluster-service --config "test-config" state export -f import.json &&
-    ipfs-cluster-service --config "test-config" state cleanup -f &&
-    ipfs-cluster-service --config "test-config" state import -f import.json &&
-    cluster_start && sleep 5 &&
-    ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" &&
-    ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED" &&
-    [ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq -n "[inputs] | length")" ]
-'
-
-cluster_kill
-sleep 5
-test_cluster_init "" raft
-
-test_expect_success IPFS,CLUSTER "state cleanup refreshes state on restart (raft)" '
-    cid=`docker exec ipfs sh -c "echo test_54 | ipfs add -q"` &&
-    ipfs-cluster-ctl pin add "$cid" && sleep 5 &&
-    ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" &&
-    ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED" &&
-    [ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq -n "[inputs] | length")" ] &&
-    cluster_kill && sleep 5 &&
-    ipfs-cluster-service --config "test-config" state cleanup -f &&
-    cluster_start && sleep 5 &&
-    [ 0 -eq "$(ipfs-cluster-ctl --enc=json status | jq -n "[inputs] | length")" ]
-'
-
-test_expect_success IPFS,CLUSTER "export + cleanup + import == noop (raft)" '
-    cid=`docker exec ipfs sh -c "echo test_54 | ipfs add -q"` &&
-    ipfs-cluster-ctl pin add "$cid" && sleep 5 &&
-    [ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq -n "[inputs] | length")" ] &&
-    cluster_kill && sleep 5 &&
-    ipfs-cluster-service --config "test-config" state export -f import.json &&
-    ipfs-cluster-service --config "test-config" state cleanup -f &&
-    ipfs-cluster-service --config "test-config" state import -f import.json &&
-    cluster_start && sleep 5 &&
-    ipfs-cluster-ctl pin ls "$cid" | grep -q "$cid" &&
-    ipfs-cluster-ctl status "$cid" | grep -q -i "PINNED" &&
-    [ 1 -eq "$(ipfs-cluster-ctl --enc=json status | jq -n "[inputs] | length")" ]
-'
-
-
-test_clean_ipfs
-test_clean_cluster
-
-test_done
diff --git a/packages/networking/ipfs-cluster/sharness/test_data/importState b/packages/networking/ipfs-cluster/sharness/test_data/importState
deleted file mode 100644
index 5719dd0..0000000
--- a/packages/networking/ipfs-cluster/sharness/test_data/importState
+++ /dev/null
@@ -1,8 +0,0 @@
-{
-  "cid": "QmbrCtydGyPeHiLURSPMqrvE5mCgMCwFYq3UD4XLCeAYw6",
-  "name": "",
-  "allocations": [],
-  "replication_factor_min": -1,
-  "replication_factor_max": -1
-}
-
diff --git a/packages/networking/ipfs-cluster/sharness/test_data/small_file b/packages/networking/ipfs-cluster/sharness/test_data/small_file
deleted file mode 100644
index acd2e9b..0000000
--- a/packages/networking/ipfs-cluster/sharness/test_data/small_file
+++ /dev/null
@@ -1 +0,0 @@
-small file
diff --git a/packages/networking/ipfs-cluster/sharness/test_data/v1Crc b/packages/networking/ipfs-cluster/sharness/test_data/v1Crc
deleted file mode 100644
index 26695be..0000000
--- a/packages/networking/ipfs-cluster/sharness/test_data/v1Crc
+++ /dev/null
@@ -1 +0,0 @@
-y8SrOIoXJo4=
diff --git a/packages/networking/ipfs-cluster/state/dsstate/datastore.go b/packages/networking/ipfs-cluster/state/dsstate/datastore.go
deleted file mode 100644
index aab985e..0000000
--- a/packages/networking/ipfs-cluster/state/dsstate/datastore.go
+++ /dev/null
@@ -1,365 +0,0 @@
-// Package dsstate implements the IPFS Cluster state interface using
-// an underlying go-datastore.
-package dsstate
-
-import (
-	"context"
-	"fmt"
-	"io"
-	"sync/atomic"
-
-	"github.com/ipfs-cluster/ipfs-cluster/api"
-	"github.com/ipfs-cluster/ipfs-cluster/observations"
-	"github.com/ipfs-cluster/ipfs-cluster/state"
-
-	ds "github.com/ipfs/go-datastore"
-	query "github.com/ipfs/go-datastore/query"
-	dshelp "github.com/ipfs/go-ipfs-ds-help"
-	logging "github.com/ipfs/go-log/v2"
-	codec "github.com/ugorji/go/codec"
-
-	"go.opencensus.io/stats"
-	trace "go.opencensus.io/trace"
-)
-
-var _ state.State = (*State)(nil)
-var _ state.BatchingState = (*BatchingState)(nil)
-
-var logger = logging.Logger("dsstate")
-
-// State implements the IPFS Cluster "state" interface by wrapping
-// a go-datastore and choosing how api.Pin objects are stored
-// in it. It also provides serialization methods for the whole
-// state which are datastore-independent.
-type State struct {
-	dsRead      ds.Read
-	dsWrite     ds.Write
-	codecHandle codec.Handle
-	namespace   ds.Key
-	// version int
-
-	totalPins int64
-}
-
-// DefaultHandle returns the codec handler of choice (Msgpack).
-func DefaultHandle() codec.Handle {
-	h := &codec.MsgpackHandle{}
-	return h
-}
-
-// New returns a new state using the given datastore.
-//
-// All keys are namespaced with the given string when written. Thus the same
-// go-datastore can be sharded for different uses.
-//
-// The Handle controls options for the serialization of the full state
-// (marshaling/unmarshaling).
-func New(ctx context.Context, dstore ds.Datastore, namespace string, handle codec.Handle) (*State, error) {
-	if handle == nil {
-		handle = DefaultHandle()
-	}
-
-	st := &State{
-		dsRead:      dstore,
-		dsWrite:     dstore,
-		codecHandle: handle,
-		namespace:   ds.NewKey(namespace),
-		totalPins:   0,
-	}
-
-	stats.Record(ctx, observations.Pins.M(0))
-
-	return st, nil
-}
-
-// Add adds a new Pin or replaces an existing one.
-func (st *State) Add(ctx context.Context, c api.Pin) (err error) {
-	_, span := trace.StartSpan(ctx, "state/dsstate/Add")
-	defer span.End()
-
-	ps, err := st.serializePin(c)
-	if err != nil {
-		return
-	}
-
-	has, _ := st.Has(ctx, c.Cid)
-	defer func() {
-		if !has && err == nil {
-			total := atomic.AddInt64(&st.totalPins, 1)
-			stats.Record(ctx, observations.Pins.M(total))
-		}
-	}()
-
-	err = st.dsWrite.Put(ctx, st.key(c.Cid), ps)
-	return
-}
-
-// Rm removes an existing Pin. It is a no-op when the
-// item does not exist.
-func (st *State) Rm(ctx context.Context, c api.Cid) error {
-	_, span := trace.StartSpan(ctx, "state/dsstate/Rm")
-	defer span.End()
-
-	err := st.dsWrite.Delete(ctx, st.key(c))
-	if err == ds.ErrNotFound {
-		return nil
-	}
-	if err == nil {
-		total := atomic.AddInt64(&st.totalPins, -1)
-		stats.Record(ctx, observations.Pins.M(total))
-	}
-
-	return err
-}
-
-// Get returns a Pin from the store and whether it
-// was present. When not present, a default pin
-// is returned.
-func (st *State) Get(ctx context.Context, c api.Cid) (api.Pin, error) {
-	_, span := trace.StartSpan(ctx, "state/dsstate/Get")
-	defer span.End()
-
-	v, err := st.dsRead.Get(ctx, st.key(c))
-	if err != nil {
-		if err == ds.ErrNotFound {
-			return api.Pin{}, state.ErrNotFound
-		}
-		return api.Pin{}, err
-	}
-	p, err := st.deserializePin(c, v)
-	if err != nil {
-		return api.Pin{}, err
-	}
-	return p, nil
-}
-
-// Has returns whether a Cid is stored.
-func (st *State) Has(ctx context.Context, c api.Cid) (bool, error) {
-	_, span := trace.StartSpan(ctx, "state/dsstate/Has")
-	defer span.End()
-
-	ok, err := st.dsRead.Has(ctx, st.key(c))
-	if err != nil {
-		return false, err
-	}
-	return ok, nil
-}
-
-// List sends all the pins on the pinset on the given channel.
-// Returns and closes channel when done.
-func (st *State) List(ctx context.Context, out chan<- api.Pin) error {
-	defer close(out)
-
-	_, span := trace.StartSpan(ctx, "state/dsstate/List")
-	defer span.End()
-
-	q := query.Query{
-		Prefix: st.namespace.String(),
-	}
-
-	results, err := st.dsRead.Query(ctx, q)
-	if err != nil {
-		return err
-	}
-	defer results.Close()
-
-	var total int64
-	for r := range results.Next() {
-		// Abort if we shutdown.
-		select {
-		case <-ctx.Done():
-			err = fmt.Errorf("full pinset listing aborted: %w", ctx.Err())
-			logger.Warning(err)
-			return err
-		default:
-		}
-		if r.Error != nil {
-			err := fmt.Errorf("error in query result: %w", r.Error)
-			logger.Error(err)
-			return err
-		}
-		k := ds.NewKey(r.Key)
-		ci, err := st.unkey(k)
-		if err != nil {
-			logger.Warn("bad key (ignoring). key: ", k, "error: ", err)
-			continue
-		}
-
-		p, err := st.deserializePin(ci, r.Value)
-		if err != nil {
-			logger.Errorf("error deserializing pin (%s): %s", r.Key, err)
-			continue
-		}
-		out <- p
-
-		if total > 0 && total%500000 == 0 {
-			logger.Infof("Full pinset listing in progress: %d pins so far", total)
-		}
-		total++
-	}
-	if total >= 500000 {
-		logger.Infof("Full pinset listing finished: %d pins", total)
-	}
-	atomic.StoreInt64(&st.totalPins, total)
-	stats.Record(ctx, observations.Pins.M(total))
-	return nil
-}
-
-// Migrate migrates an older state version to the current one.
-// This is a no-op for now.
-func (st *State) Migrate(ctx context.Context, r io.Reader) error {
-	return nil
-}
-
-type serialEntry struct {
-	Key   string `codec:"k"`
-	Value []byte `codec:"v"`
-}
-
-// Marshal dumps the state to a writer. It does this by encoding every
-// key/value in the store. The keys are stored without the namespace part to
-// reduce the size of the snapshot.
-func (st *State) Marshal(w io.Writer) error {
-	q := query.Query{
-		Prefix: st.namespace.String(),
-	}
-
-	results, err := st.dsRead.Query(context.Background(), q)
-	if err != nil {
-		return err
-	}
-	defer results.Close()
-
-	enc := codec.NewEncoder(w, st.codecHandle)
-
-	for r := range results.Next() {
-		if r.Error != nil {
-			logger.Errorf("error in query result: %s", r.Error)
-			return r.Error
-		}
-
-		k := ds.NewKey(r.Key)
-		// reduce snapshot size by not storing the prefix
-		err := enc.Encode(serialEntry{
-			Key:   k.BaseNamespace(),
-			Value: r.Value,
-		})
-		if err != nil {
-			logger.Error(err)
-			return err
-		}
-	}
-	return nil
-}
-
-// Unmarshal reads and parses a previous dump of the state.
-// All the parsed key/values are added to the store. As of now,
-// Unmarshal does not empty the existing store from any values
-// before unmarshaling from the given reader.
-func (st *State) Unmarshal(r io.Reader) error {
-	dec := codec.NewDecoder(r, st.codecHandle)
-	for {
-		var entry serialEntry
-		if err := dec.Decode(&entry); err == io.EOF {
-			break
-		} else if err != nil {
-			return err
-		}
-		k := st.namespace.Child(ds.NewKey(entry.Key))
-		err := st.dsWrite.Put(context.Background(), k, entry.Value)
-		if err != nil {
-			logger.Error("error adding unmarshaled key to datastore:", err)
-			return err
-		}
-	}
-
-	return nil
-}
-
-// used to be on go-ipfs-ds-help
-func cidToDsKey(c api.Cid) ds.Key {
-	return dshelp.NewKeyFromBinary(c.Bytes())
-}
-
-// used to be on go-ipfs-ds-help
-func dsKeyToCid(k ds.Key) (api.Cid, error) {
-	kb, err := dshelp.BinaryFromDsKey(k)
-	if err != nil {
-		return api.CidUndef, err
-	}
-	c, err := api.CastCid(kb)
-	return c, err
-}
-
-// convert Cid to /namespace/cid1Key
-func (st *State) key(c api.Cid) ds.Key {
-	k := cidToDsKey(c)
-	return st.namespace.Child(k)
-}
-
-// convert /namespace/cidKey to Cid
-func (st *State) unkey(k ds.Key) (api.Cid, error) {
-	return dsKeyToCid(ds.NewKey(k.BaseNamespace()))
-}
-
-// this decides how a Pin object is serialized to be stored in the
-// datastore. Changing this may require a migration!
-func (st *State) serializePin(c api.Pin) ([]byte, error) {
-	return c.ProtoMarshal()
-}
-
-// this deserializes a Pin object from the datastore. It should be
-// the exact opposite from serializePin.
-func (st *State) deserializePin(c api.Cid, buf []byte) (api.Pin, error) {
-	p := api.Pin{}
-	err := p.ProtoUnmarshal(buf)
-	p.Cid = c
-	return p, err
-}
-
-// BatchingState implements the IPFS Cluster "state" interface by wrapping a
-// batching go-datastore. All writes are batched and only written to disk
-// when Commit() is called.
-type BatchingState struct {
-	*State
-	batch ds.Batch
-}
-
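[Editor's note: the snapshot format that the deleted `Marshal`/`Unmarshal` implement is easy to misread in diff form: it is nothing more than a bare concatenation of msgpack-encoded key/value entries, with no header or length prefix. A minimal, self-contained sketch of that round-trip, assuming only the `github.com/ugorji/go/codec` dependency the deleted file already imports; the sample keys and values are made up:]

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	codec "github.com/ugorji/go/codec"
)

// serialEntry mirrors the type in the deleted datastore.go: the snapshot is
// a stream of these, with the datastore namespace prefix stripped from keys.
type serialEntry struct {
	Key   string `codec:"k"`
	Value []byte `codec:"v"`
}

func main() {
	h := &codec.MsgpackHandle{}
	var buf bytes.Buffer

	// Encode entries back to back, as Marshal does per query result.
	enc := codec.NewEncoder(&buf, h)
	for _, e := range []serialEntry{
		{Key: "cid-key-1", Value: []byte("serialized-pin-1")},
		{Key: "cid-key-2", Value: []byte("serialized-pin-2")},
	} {
		if err := enc.Encode(e); err != nil {
			panic(err)
		}
	}

	// Decode until io.EOF, exactly the loop shape Unmarshal uses.
	dec := codec.NewDecoder(&buf, h)
	for {
		var e serialEntry
		if err := dec.Decode(&e); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Printf("%s => %s\n", e.Key, e.Value)
	}
}
```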
-// NewBatching returns a new batching state using the given datastore.
-//
-// All keys are namespaced with the given string when written. Thus the same
-// go-datastore can be sharded for different uses.
-//
-// The Handle controls options for the serialization of the full state
-// (marshaling/unmarshaling).
-func NewBatching(ctx context.Context, dstore ds.Batching, namespace string, handle codec.Handle) (*BatchingState, error) {
-	if handle == nil {
-		handle = DefaultHandle()
-	}
-
-	batch, err := dstore.Batch(context.Background())
-	if err != nil {
-		return nil, err
-	}
-
-	st := &State{
-		dsRead:      dstore,
-		dsWrite:     batch,
-		codecHandle: handle,
-		namespace:   ds.NewKey(namespace),
-	}
-
-	bst := &BatchingState{}
-	bst.State = st
-	bst.batch = batch
-
-	stats.Record(ctx, observations.Pins.M(0))
-	return bst, nil
-}
-
-// Commit persists the batched write operations.
-func (bst *BatchingState) Commit(ctx context.Context) error {
-	_, span := trace.StartSpan(ctx, "state/dsstate/Commit")
-	defer span.End()
-	return bst.batch.Commit(ctx)
-}
diff --git a/packages/networking/ipfs-cluster/state/dsstate/datastore_test.go b/packages/networking/ipfs-cluster/state/dsstate/datastore_test.go
deleted file mode 100644
index 6c88d3a..0000000
--- a/packages/networking/ipfs-cluster/state/dsstate/datastore_test.go
+++ /dev/null
@@ -1,153 +0,0 @@
-package dsstate
-
-import (
-	"bytes"
-	"context"
-	"testing"
-	"time"
-
-	"github.com/ipfs-cluster/ipfs-cluster/api"
-	"github.com/ipfs-cluster/ipfs-cluster/datastore/inmem"
-
-	peer "github.com/libp2p/go-libp2p/core/peer"
-)
-
-var testCid1, _ = api.DecodeCid("QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmq")
-var testPeerID1, _ = peer.Decode("QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabc")
-
-var c = api.Pin{
-	Cid:         testCid1,
-	Type:        api.DataType,
-	Allocations: []peer.ID{testPeerID1},
-	MaxDepth:    -1,
-	PinOptions: api.PinOptions{
-		ReplicationFactorMax: -1,
-		ReplicationFactorMin: -1,
-		Name:                 "test",
-	},
-}
-
-func newState(t *testing.T) *State {
-	store := inmem.New()
-	ds, err := New(context.Background(), store, "", DefaultHandle())
-	if err != nil {
-		t.Fatal(err)
-	}
-	return ds
-}
-
-func TestAdd(t *testing.T) {
-	ctx := context.Background()
-	st := newState(t)
-	st.Add(ctx, c)
-	if ok, err := st.Has(ctx, c.Cid); !ok || err != nil {
-		t.Error("should have added it")
-	}
-}
-
-func TestRm(t *testing.T) {
-	ctx := context.Background()
-	st := newState(t)
-	st.Add(ctx, c)
-	st.Rm(ctx, c.Cid)
-	if ok, err := st.Has(ctx, c.Cid); ok || err != nil {
-		t.Error("should have removed it")
-	}
-}
-
-func TestGet(t *testing.T) {
-	ctx := context.Background()
-	defer func() {
-		if r := recover(); r != nil {
-			t.Fatal("panicked")
-		}
-	}()
-	st := newState(t)
-	st.Add(ctx, c)
-	get, err := st.Get(ctx, c.Cid)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if get.Cid.String() != c.Cid.String() {
-		t.Error("bad cid decoding: ", get.Cid)
-	}
-
-	if get.Allocations[0] != c.Allocations[0] {
-		t.Error("bad allocations decoding:", get.Allocations)
-	}
-
-	if get.ReplicationFactorMax != c.ReplicationFactorMax ||
-		get.ReplicationFactorMin != c.ReplicationFactorMin {
-		t.Error("bad replication factors decoding")
-	}
-}
-
-func TestList(t *testing.T) {
-	ctx := context.Background()
-	defer func() {
-		if r := recover(); r != nil {
-			t.Fatal("panicked")
-		}
-	}()
-	st := newState(t)
-	st.Add(ctx, c)
-	out := make(chan api.Pin)
-	go func() {
-		err := st.List(ctx, out)
-		if err != nil {
-			t.Error(err)
-		}
-	}()
-
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
-	defer cancel()
-
-	var list0 api.Pin
-	for {
-		select {
-		case p, ok := <-out:
-			if !ok && !list0.Cid.Defined() {
-				t.Fatal("should have read list0 first")
-			}
-			if !ok {
-				return
-			}
-			list0 = p
-			if !p.Equals(c) {
-				t.Error("returned something different")
-			}
-		case <-ctx.Done():
-			t.Error("should have read from channel")
-			return
-		}
-	}
-
-}
-
-func TestMarshalUnmarshal(t *testing.T) {
-	ctx := context.Background()
-	st := newState(t)
-	st.Add(ctx, c)
-	buf := new(bytes.Buffer)
-	err := st.Marshal(buf)
-	if err != nil {
-		t.Fatal(err)
-	}
-	st2 := newState(t)
-	err = st2.Unmarshal(buf)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	get, err := st2.Get(ctx, c.Cid)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if get.Allocations[0] != testPeerID1 {
-		t.Error("expected different peer id")
-	}
-	if !get.Cid.Equals(c.Cid) {
-		t.Error("expected different cid")
-	}
-}
diff --git a/packages/networking/ipfs-cluster/state/empty.go b/packages/networking/ipfs-cluster/state/empty.go
deleted file mode 100644
index 5af5d87..0000000
--- a/packages/networking/ipfs-cluster/state/empty.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package state
-
-import (
-	"context"
-
-	"github.com/ipfs-cluster/ipfs-cluster/api"
-)
-
-type empty struct{}
-
-func (e *empty) List(ctx context.Context, out chan<- api.Pin) error {
-	close(out)
-	return nil
-}
-
-func (e *empty) Has(ctx context.Context, c api.Cid) (bool, error) {
-	return false, nil
-}
-
-func (e *empty) Get(ctx context.Context, c api.Cid) (api.Pin, error) {
-	return api.Pin{}, ErrNotFound
-}
-
-// Empty returns an empty read-only state.
-func Empty() ReadOnly {
-	return &empty{}
-}
diff --git a/packages/networking/ipfs-cluster/state/interface.go b/packages/networking/ipfs-cluster/state/interface.go
deleted file mode 100644
index 7cc4f05..0000000
--- a/packages/networking/ipfs-cluster/state/interface.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Package state holds the interface that any state implementation for
-// IPFS Cluster must satisfy.
-package state
-
-// State represents the shared state of the cluster
-import (
-	"context"
-	"errors"
-	"io"
-
-	"github.com/ipfs-cluster/ipfs-cluster/api"
-)
-
-// ErrNotFound should be returned when a pin is not part of the state.
-var ErrNotFound = errors.New("pin is not part of the pinset")
-
-// State is a wrapper to the Cluster shared state so that Pin objects can
-// be easily read, written and queried. The state can be marshaled and
-// unmarshaled. Implementation should be thread-safe.
-type State interface {
-	ReadOnly
-	WriteOnly
-	// Migrate restores the serialized format of an outdated state to the
-	// current version.
-	Migrate(ctx context.Context, r io.Reader) error
-	// Marshal serializes the state to a byte slice.
-	Marshal(io.Writer) error
-	// Unmarshal deserializes the state from marshaled bytes.
-	Unmarshal(io.Reader) error
-}
-
-// ReadOnly represents the read side of a State.
-type ReadOnly interface {
-	// List lists all the pins in the state.
-	List(context.Context, chan<- api.Pin) error
-	// Has returns true if the state is holding information for a Cid.
-	Has(context.Context, api.Cid) (bool, error)
-	// Get returns the information attached to this pin, if any. If the
-	// pin is not part of the state, it should return ErrNotFound.
-	Get(context.Context, api.Cid) (api.Pin, error)
-}
-
-// WriteOnly represents the write side of a State.
-type WriteOnly interface {
-	// Add adds a pin to the State
-	Add(context.Context, api.Pin) error
-	// Rm removes a pin from the State.
-	Rm(context.Context, api.Cid) error
-}
-
-// BatchingState represents a state which batches write operations.
-type BatchingState interface {
-	State
-	// Commit writes any batched operations.
-	Commit(context.Context) error
-}
diff --git a/packages/networking/ipfs-cluster/test/cids.go b/packages/networking/ipfs-cluster/test/cids.go
deleted file mode 100644
index f1fd697..0000000
--- a/packages/networking/ipfs-cluster/test/cids.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package test
-
-import (
-	"github.com/ipfs-cluster/ipfs-cluster/api"
-	peer "github.com/libp2p/go-libp2p/core/peer"
-)
-
-// Common variables used all around tests.
-var (
-	Cid1, _  = api.DecodeCid("QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmq")
-	Cid2, _  = api.DecodeCid("QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmma")
-	Cid3, _  = api.DecodeCid("QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmb")
-	Cid4Data = "Cid4Data"
-	// Cid resulting from block put using blake2b-256 and raw format
-	Cid4, _ = api.DecodeCid("bafk2bzaceawsyhsnrwwy5mtit2emnjfalkxsyq2p2ptd6fuliolzwwjbs42fq")
-
-	// Cid resulting from block put using format "v0" defaults
-	Cid5, _        = api.DecodeCid("QmbgmXgsFjxAJ7cEaziL2NDSptHAkPwkEGMmKMpfyYeFXL")
-	Cid5Data       = "Cid5Data"
-	SlowCid1, _    = api.DecodeCid("QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmd")
-	CidResolved, _ = api.DecodeCid("zb2rhiKhUepkTMw7oFfBUnChAN7ABAvg2hXUwmTBtZ6yxuabc")
-	// ErrorCid is meant to be used as a Cid which causes errors. i.e. the
-	// ipfs mock fails when pinning this CID.
-	ErrorCid, _ = api.DecodeCid("QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmc")
-	// NotFoundCid is meant to be used as a CID that doesn't exist in the
-	// pinset.
-	NotFoundCid, _ = api.DecodeCid("bafyreiay3jpjk74dkckv2r74eyvf3lfnxujefay2rtuluintasq2zlapv4")
-	PeerID1, _     = peer.Decode("QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabc")
-	PeerID2, _     = peer.Decode("QmUZ13osndQ5uL4tPWHXe3iBgBgq9gfewcBMSCAuMBsDJ6")
-	PeerID3, _     = peer.Decode("QmPGDFvBkgWhvzEK9qaTWrWurSwqXNmhnK3hgELPdZZNPa")
-	PeerID4, _     = peer.Decode("QmZ8naDy5mEz4GLuQwjWt9MPYqHTBbsm8tQBrNSjiq6zBc")
-	PeerID5, _     = peer.Decode("QmZVAo3wd8s5eTTy2kPYs34J9PvfxpKPuYsePPYGjgRRjg")
-	PeerID6, _     = peer.Decode("QmR8Vu6kZk7JvAN2rWVWgiduHatgBq2bb15Yyq8RRhYSbx")
-	PeerID7, _     = peer.Decode("12D3KooWGHTKzeT4KaLGLrbKKyT8zKrBPXAUBRzCAN6ZMDMo4M6M")
-	PeerID8, _     = peer.Decode("12D3KooWFBFCDQzAkQSwPZLV883pKdsmb6urQ3sMjfJHUxn5GCVv")
-	PeerID9, _     = peer.Decode("12D3KooWKuJ8LPTyHbyX4nt4C7uWmUobzFsiceTVoFw7HpmoNakM")
-
-	PeerName1 = "TestPeer1"
-	PeerName2 = "TestPeer2"
-	PeerName3 = "TestPeer3"
-	PeerName4 = "TestPeer4"
-	PeerName5 = "TestPeer5"
-	PeerName6 = "TestPeer6"
-
-	PathIPFS1 = "/ipfs/QmaNJ5acV31sx8jq626qTpAWW4DXKw34aGhx53dECLvXbY"
-	PathIPFS2 = "/ipfs/QmbUNM297ZwxB8CfFAznK7H9YMesDoY6Tt5bPgt5MSCB2u/im.gif"
-	PathIPFS3 = "/ipfs/QmbUNM297ZwxB8CfFAznK7H9YMesDoY6Tt5bPgt5MSCB2u/im.gif/"
-	PathIPNS1 = "/ipns/QmbmSAQNnfGcBAB8M8AsSPxd1TY7cpT9hZ398kXAScn2Ka"
-	PathIPNS2 = "/ipns/QmbmSAQNnfGcBAB8M8AsSPxd1TY7cpT9hZ398kXAScn2Ka/"
-	PathIPLD1 = "/ipld/QmaNJ5acV31sx8jq626qTpAWW4DXKw34aGhx53dECLvXbY"
-	PathIPLD2 = "/ipld/QmaNJ5acV31sx8jq626qTpAWW4DXKw34aGhx53dECLvXbY/"
-
-	// NotFoundPath is meant to be used as a path that resolves into a CID that doesn't exist in the
-	// pinset.
-	NotFoundPath = "/ipfs/bafyreiay3jpjk74dkckv2r74eyvf3lfnxujefay2rtuluintasq2zlapv4"
-	InvalidPath1 = "/invalidkeytype/QmaNJ5acV31sx8jq626qTpAWW4DXKw34aGhx53dECLvXbY/"
-	InvalidPath2 = "/ipfs/invalidhash"
-	InvalidPath3 = "/ipfs/"
-)
diff --git a/packages/networking/ipfs-cluster/test/ipfs_mock.go b/packages/networking/ipfs-cluster/test/ipfs_mock.go
deleted file mode 100644
index bebbb2a..0000000
--- a/packages/networking/ipfs-cluster/test/ipfs_mock.go
+++ /dev/null
@@ -1,557 +0,0 @@
-package test
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-	"io"
-	"net/http"
-	"net/http/httptest"
-	"net/url"
-	"strconv"
-	"strings"
-	"sync"
-	"testing"
-	"time"
-
-	"github.com/ipfs-cluster/ipfs-cluster/api"
-	"github.com/ipfs-cluster/ipfs-cluster/datastore/inmem"
-	"github.com/ipfs-cluster/ipfs-cluster/state"
-	"github.com/ipfs-cluster/ipfs-cluster/state/dsstate"
-	"github.com/multiformats/go-multicodec"
-	"github.com/multiformats/go-multihash"
-
-	cid "github.com/ipfs/go-cid"
-	cors "github.com/rs/cors"
-)
-
-// Some values used by the ipfs mock
-const (
-	IpfsCustomHeaderName  = "X-Custom-Header"
-	IpfsTimeHeaderName    = "X-Time-Now"
-	IpfsCustomHeaderValue = "42"
-	IpfsACAOrigin         = "myorigin"
-	IpfsErrFromNotPinned  = "'from' cid was not recursively pinned already"
-)
-
-// IpfsMock is an ipfs daemon mock which should sustain the functionality used by ipfscluster.
-type IpfsMock struct {
-	server     *httptest.Server
-	Addr       string
-	Port       int
-	pinMap     state.State
-	BlockStore map[string][]byte
-	reqCounter chan string
-
-	reqCountsMux sync.Mutex // guards access to reqCounts
-	reqCounts    map[string]int
-
-	closeMux sync.Mutex
-	closed   bool
-}
-
-type mockPinResp struct {
-	Pins     []string
-	Progress int `json:",omitempty"`
-}
-
-type mockPinType struct {
-	Type string
-}
-
-type mockPinLsAllResp struct {
-	Keys map[string]mockPinType
-}
-
-type ipfsErr struct {
-	Code    int
-	Message string
-}
-
-type mockIDResp struct {
-	ID        string
-	Addresses []string
-}
-
-type mockRepoStatResp struct {
-	RepoSize   uint64
-	NumObjects uint64
-	StorageMax uint64
-}
-
-type mockConfigResp struct {
-	Datastore struct {
-		StorageMax string
-	}
-}
-
-type mockRefsResp struct {
-	Ref string
-	Err string
-}
-
-type mockSwarmPeersResp struct {
-	Peers []mockIpfsPeer
-}
-
-type mockIpfsPeer struct {
-	Peer string
-}
-
-type mockBlockPutResp struct {
-	Key string
-}
-
-type mockRepoGCResp struct {
-	Key   cid.Cid `json:",omitempty"`
-	Error string  `json:",omitempty"`
-}
-
-// NewIpfsMock returns a new mock.
-func NewIpfsMock(t *testing.T) *IpfsMock {
-	store := inmem.New()
-	st, err := dsstate.New(context.Background(), store, "", dsstate.DefaultHandle())
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	m := &IpfsMock{
-		pinMap:     st,
-		BlockStore: make(map[string][]byte),
-		reqCounts:  make(map[string]int),
-		reqCounter: make(chan string, 100),
-	}
-
-	go m.countRequests()
-
-	mux := http.NewServeMux()
-	mux.HandleFunc("/", m.handler)
-
-	c := cors.New(cors.Options{
-		AllowedOrigins:   []string{IpfsACAOrigin},
-		AllowedMethods:   []string{"POST"},
-		ExposedHeaders:   []string{"X-Stream-Output", "X-Chunked-Output", "X-Content-Length"},
-		AllowCredentials: true, // because IPFS does it, even if for no reason.
-	})
-	corsHandler := c.Handler(mux)
-
-	ts := httptest.NewServer(corsHandler)
-	m.server = ts
-
-	url, _ := url.Parse(ts.URL)
-	h := strings.Split(url.Host, ":")
-	i, _ := strconv.Atoi(h[1])
-
-	m.Port = i
-	m.Addr = h[0]
-	return m
-}
-
-func (m *IpfsMock) countRequests() {
-	for str := range m.reqCounter {
-		m.reqCountsMux.Lock()
-		m.reqCounts[str]++
-		m.reqCountsMux.Unlock()
-	}
-}
-
-// GetCount returns the number of times an endpoint was called.
-func (m *IpfsMock) GetCount(path string) int {
-	m.reqCountsMux.Lock()
-	defer m.reqCountsMux.Unlock()
-	return m.reqCounts[path]
-}
-
-// FIXME: what if IPFS API changes?
-func (m *IpfsMock) handler(w http.ResponseWriter, r *http.Request) {
-	ctx := context.Background()
-	p := r.URL.Path
-	w.Header().Set(IpfsCustomHeaderName, IpfsCustomHeaderValue)
-	w.Header().Set("Server", "ipfs-mock")
-	w.Header().Set(IpfsTimeHeaderName, fmt.Sprintf("%d", time.Now().Unix()))
-	endp := strings.TrimPrefix(p, "/api/v0/")
-
-	m.reqCounter <- endp
-
-	switch endp {
-	case "id":
-		resp := mockIDResp{
-			ID: PeerID1.Pretty(),
-			Addresses: []string{
-				"/ip4/0.0.0.0/tcp/1234",
-				"/ip6/::/tcp/1234",
-			},
-		}
-		j, _ := json.Marshal(resp)
-		w.Write(j)
-	case "pin/add":
-		arg, ok := extractCid(r.URL)
-		if !ok {
-			goto ERROR
-		}
-		if arg == ErrorCid.String() {
-			goto ERROR
-		}
-		c, err := api.DecodeCid(arg)
-		if err != nil {
-			goto ERROR
-		}
-		mode := extractMode(r.URL)
-		opts := api.PinOptions{
-			Mode: mode,
-		}
-		pinObj := api.PinWithOpts(c, opts)
-		m.pinMap.Add(ctx, pinObj)
-		resp := mockPinResp{
-			Pins: []string{arg},
-		}
-
-		if c.Equals(SlowCid1) {
-			for i := 0; i <= 10; i++ {
-				time.Sleep(1 * time.Second)
-				resp.Progress = i
-				j, _ := json.Marshal(resp)
-				w.Write(j)
-			}
-		} else {
-			j, _ := json.Marshal(resp)
-			w.Write(j)
-		}
-	case "pin/rm":
-		arg, ok := extractCid(r.URL)
-		if !ok {
-			goto ERROR
-		}
-		c, err := api.DecodeCid(arg)
-		if err != nil {
-			goto ERROR
-		}
-		m.pinMap.Rm(ctx, c)
-		resp := mockPinResp{
-			Pins: []string{arg},
-		}
-		j, _ := json.Marshal(resp)
-		w.Write(j)
-	case "pin/update":
-		args := r.URL.Query()["arg"]
-		if len(args) != 2 {
-			goto ERROR
-		}
-		fromStr := args[0]
-		toStr := args[1]
-		from, err := api.DecodeCid(fromStr)
-		if err != nil {
-			goto ERROR
-		}
-		to, err := api.DecodeCid(toStr)
-		if err != nil {
-			goto ERROR
-		}
-
-		pin, err := m.pinMap.Get(ctx, from)
-		if err != nil {
-			w.WriteHeader(http.StatusInternalServerError)
-			resp := ipfsErr{0, IpfsErrFromNotPinned}
-			j, _ := json.Marshal(resp)
-			w.Write(j)
-			return
-		}
-		pin.Cid = to
-		err = m.pinMap.Add(ctx, pin)
-		if err != nil {
-			goto ERROR
-		}
-
-		resp := mockPinResp{
-			Pins: []string{from.String(), to.String()},
-		}
-
-		j, _ := json.Marshal(resp)
-		w.Write(j)
-	case "pin/ls":
-		query := r.URL.Query()
-		stream := query.Get("stream") == "true"
-
-		arg, ok := extractCid(r.URL)
-		if !ok {
-			pins := make(chan api.Pin, 10)
-
-			go func() {
-				m.pinMap.List(ctx, pins)
-			}()
-
-			if stream {
-				for p := range pins {
-					j, _ := json.Marshal(api.IPFSPinInfo{
-						Cid:  api.Cid(p.Cid),
-						Type: p.Mode.ToIPFSPinStatus(),
-					})
-					w.Write(j)
-				}
-				break
-			} else {
-				rMap := make(map[string]mockPinType)
-				for p := range pins {
-					rMap[p.Cid.String()] = mockPinType{p.Mode.String()}
-				}
-				j, _ := json.Marshal(mockPinLsAllResp{rMap})
-				w.Write(j)
-				break
-			}
-		}
-
-		cidStr := arg
-		c, err := api.DecodeCid(cidStr)
-		if err != nil {
-			goto ERROR
-		}
-
-		pinObj, err := m.pinMap.Get(ctx, c)
-		if err != nil && err != state.ErrNotFound {
-			goto ERROR
-		}
-		if err == state.ErrNotFound {
-			w.WriteHeader(http.StatusInternalServerError)
-			resp := ipfsErr{0, fmt.Sprintf("Path '%s' is not pinned", cidStr)}
-			j, _ := json.Marshal(resp)
-			w.Write(j)
-			return
-		}
-
-		if stream {
-			if c.Equals(Cid4) {
-				// this is a v1 cid. Do not return default-base32 but base58btc encoding of it
-				w.Write([]byte(`{ "Cid": "zCT5htkdztJi3x4zBNHo8TRvGHPLTdHUdCLKgTGMgQcRKSLoWxK1", "Type": "recursive" }`))
-				break
-			}
-			j, _ := json.Marshal(api.IPFSPinInfo{
-				Cid:  api.Cid(pinObj.Cid),
-				Type: pinObj.Mode.ToIPFSPinStatus(),
-			})
-			w.Write(j)
-		} else {
-			if c.Equals(Cid4) {
-				// this is a v1 cid. Do not return default-base32 but base58btc encoding of it
-				w.Write([]byte(`{ "Keys": { "zCT5htkdztJi3x4zBNHo8TRvGHPLTdHUdCLKgTGMgQcRKSLoWxK1": { "Type": "recursive" }}}`))
-				break
-			}
-			rMap := make(map[string]mockPinType)
-			rMap[cidStr] = mockPinType{pinObj.Mode.String()}
-			j, _ := json.Marshal(mockPinLsAllResp{rMap})
-			w.Write(j)
-		}
-	case "swarm/connect":
-		arg, ok := extractCid(r.URL)
-		if !ok {
-			goto ERROR
-		}
-		addr := arg
-		splits := strings.Split(addr, "/")
-		pid := splits[len(splits)-1]
-		resp := struct {
-			Strings []string
-		}{
-			Strings: []string{fmt.Sprintf("connect %s success", pid)},
-		}
-		j, _ := json.Marshal(resp)
-		w.Write(j)
-	case "swarm/peers":
-		peer1 := mockIpfsPeer{
-			Peer: PeerID4.Pretty(),
-		}
-		peer2 := mockIpfsPeer{
-			Peer: PeerID5.Pretty(),
-		}
-		resp := mockSwarmPeersResp{
-			Peers: []mockIpfsPeer{peer1, peer2},
-		}
-		j, _ := json.Marshal(resp)
-		w.Write(j)
-	case "block/put":
-		w.Header().Set("Trailer", "X-Stream-Error")
-
-		query := r.URL.Query()
-		codecStr := query.Get("cid-codec")
-		var mc multicodec.Code
-		mc.Set(codecStr)
-		mhType := multihash.Names[query.Get("mhtype")]
-		mhLen, _ := strconv.Atoi(query.Get("mhLen"))
-
-		// Get the data and return the hash
-		mpr, err := r.MultipartReader()
-		if err != nil {
-			goto ERROR
-		}
-
-		w.WriteHeader(http.StatusOK)
-
-		for {
-			part, err := mpr.NextPart()
-			if err == io.EOF {
-				return
-			}
-			if err != nil {
-				w.Header().Set("X-Stream-Error", err.Error())
-				return
-			}
-			data, err := io.ReadAll(part)
-			if err != nil {
-				w.Header().Set("X-Stream-Error", err.Error())
-				return
-			}
-			// Parse cid from data and format and add to mock block-store
-			builder := cid.V1Builder{
-				Codec:    uint64(mc),
-				MhType:   mhType,
-				MhLength: mhLen,
-			}
-
-			c, err := builder.Sum(data)
-			if err != nil {
-				w.Header().Set("X-Stream-Error", err.Error())
-				return
-			}
-			m.BlockStore[c.String()] = data
-
-			resp := mockBlockPutResp{
-				Key: c.String(),
-			}
-			j, _ := json.Marshal(resp)
-			w.Write(j)
-		}
-	case "block/get":
-		query := r.URL.Query()
-		arg, ok := query["arg"]
-		if !ok {
-			goto ERROR
-		}
-		if len(arg) != 1 {
-			goto ERROR
-		}
-		data, ok := m.BlockStore[arg[0]]
-		if !ok {
-			goto ERROR
-		}
-		w.Write(data)
-	case "repo/gc":
-		// It assumes `/repo/gc` with parameter `stream-errors=true`
-		enc := json.NewEncoder(w)
-		resp := []mockRepoGCResp{
-			{
-				Key: Cid1.Cid,
-			},
-			{
-				Key: Cid2.Cid,
-			},
-			{
-				Key: Cid3.Cid,
-			},
-			{
-				Key: Cid4.Cid,
-			},
-			{
-				Error: "no link by that name",
-			},
-		}
-
-		for _, r := range resp {
-			if err := enc.Encode(&r); err != nil {
-				goto ERROR
-			}
-		}
-
-	case "repo/stat":
-		sizeOnly := r.URL.Query().Get("size-only")
-		pinsCh := make(chan api.Pin, 10)
-		go func() {
-			m.pinMap.List(ctx, pinsCh)
-		}()
-
-		var pins []api.Pin
-		for p := range pinsCh {
-			pins = append(pins, p)
-		}
-
-		len := len(pins)
-		numObjs := uint64(len)
-		if sizeOnly == "true" {
-			numObjs = 0
-		}
-		resp := mockRepoStatResp{
-			RepoSize:   uint64(len) * 1000,
-			NumObjects: numObjs,
-			StorageMax: 10000000000, // 10 GB
-		}
-		j, _ := json.Marshal(resp)
-		w.Write(j)
-	case "resolve":
-		w.Write([]byte("{\"Path\":\"" + "/ipfs/" + CidResolved.String() + "\"}"))
-	case "config/show":
-		resp := mockConfigResp{
-			Datastore: struct {
-				StorageMax string
-			}{
-				StorageMax: "10G",
-			},
-		}
-		j, _ := json.Marshal(resp)
-		w.Write(j)
-	case "refs":
-		arg, ok := extractCid(r.URL)
-		if !ok {
-			goto ERROR
-		}
-		resp := mockRefsResp{
-			Ref: arg,
-		}
-		j, _ := json.Marshal(resp)
-		if arg == SlowCid1.String() {
-			for i := 0; i <= 5; i++ {
-				time.Sleep(2 * time.Second)
-				w.Write(j)
-			}
-		} else {
-			w.Write(j)
-		}
-	case "version":
-		w.Write([]byte("{\"Version\":\"m.o.c.k\"}"))
-	default:
-		w.WriteHeader(http.StatusNotFound)
-	}
-	return
-ERROR:
-	w.WriteHeader(http.StatusInternalServerError)
-}
-
-// Close closes the mock server. It's important to call after each test or
-// the listeners are left hanging around.
-func (m *IpfsMock) Close() {
-	m.closeMux.Lock()
-	defer m.closeMux.Unlock()
-	if !m.closed {
-		m.closed = true
-		m.server.Close()
-		close(m.reqCounter)
-	}
-}
-
-// extractCid extracts the cid argument from a url.URL, either via
-// the query string parameters or from the url path itself.
-func extractCid(u *url.URL) (string, bool) {
-	arg := u.Query().Get("arg")
-	if arg != "" {
-		return arg, true
-	}
-
-	p := strings.TrimPrefix(u.Path, "/api/v0/")
-	segs := strings.Split(p, "/")
-
-	if len(segs) > 2 {
-		return segs[len(segs)-1], true
-	}
-	return "", false
-}
-
-func extractMode(u *url.URL) api.PinMode {
-	return api.PinModeFromString(u.Query().Get("type"))
-}
diff --git a/packages/networking/ipfs-cluster/test/rpc_api_mock.go b/packages/networking/ipfs-cluster/test/rpc_api_mock.go
deleted file mode 100644
index c3028aa..0000000
--- a/packages/networking/ipfs-cluster/test/rpc_api_mock.go
+++ /dev/null
@@ -1,618 +0,0 @@
-package test
-
-import (
-	"context"
-	"errors"
-	"strings"
-	"testing"
-	"time"
-
-	"github.com/ipfs-cluster/ipfs-cluster/api"
-	"github.com/ipfs-cluster/ipfs-cluster/state"
-
-	gopath "github.com/ipfs/go-path"
-	rpc "github.com/libp2p/go-libp2p-gorpc"
-	host "github.com/libp2p/go-libp2p/core/host"
-	peer "github.com/libp2p/go-libp2p/core/peer"
-)
-
-var (
-	// ErrBadCid is returned when using ErrorCid. Operations with that CID always
-	// fail.
-	ErrBadCid = errors.New("this is an expected error when using ErrorCid")
-	// ErrLinkNotFound is the error returned when no link is found
-	ErrLinkNotFound = errors.New("no link by that name")
-)
-
-// NewMockRPCClient creates a mock ipfs-cluster RPC server and returns
-// a client to it.
-func NewMockRPCClient(t testing.TB) *rpc.Client {
-	return NewMockRPCClientWithHost(t, nil)
-}
-
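[Editor's note: a sketch of how a test could drive this mock, assuming go-libp2p-gorpc's `Client.Call(dest, service, method, args, reply)` signature and the local-dispatch behavior of a client created with `NewClientWithServer`; the test name and assertions here are hypothetical, not part of the deleted suite:]

```go
package test

import (
	"testing"

	"github.com/ipfs-cluster/ipfs-cluster/api"
)

// TestMockClusterPin (hypothetical) exercises the mocked Cluster.Pin method
// the same way cluster components do: via the gorpc client returned above.
func TestMockClusterPin(t *testing.T) {
	c := NewMockRPCClient(t)

	in := api.PinCid(Cid1) // Cid1 is one of the shared testing CIDs
	var out api.Pin

	// An empty destination peer ID dispatches to the in-process server.
	if err := c.Call("", "Cluster", "Pin", in, &out); err != nil {
		t.Fatal(err)
	}
	if !out.Cid.Equals(Cid1) {
		t.Fatalf("unexpected pin returned: %s", out.Cid)
	}
	// The mock normalizes zero replication factors to -1 (pin everywhere).
	if out.ReplicationFactorMin != -1 || out.ReplicationFactorMax != -1 {
		t.Error("expected replication factors to be normalized to -1")
	}
}
```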
-// NewMockRPCClientWithHost returns a mock ipfs-cluster RPC server
-// initialized with a given host.
-func NewMockRPCClientWithHost(t testing.TB, h host.Host) *rpc.Client {
-	s := rpc.NewServer(h, "mock", rpc.WithStreamBufferSize(1024))
-	c := rpc.NewClientWithServer(h, "mock", s, rpc.WithMultiStreamBufferSize(1024))
-	err := s.RegisterName("Cluster", &mockCluster{})
-	if err != nil {
-		t.Fatal(err)
-	}
-	err = s.RegisterName("PinTracker", &mockPinTracker{})
-	if err != nil {
-		t.Fatal(err)
-	}
-	err = s.RegisterName("IPFSConnector", &mockIPFSConnector{})
-	if err != nil {
-		t.Fatal(err)
-	}
-	err = s.RegisterName("Consensus", &mockConsensus{})
-	if err != nil {
-		t.Fatal(err)
-	}
-	err = s.RegisterName("PeerMonitor", &mockPeerMonitor{})
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	return c
-}
-
-type mockCluster struct{}
-type mockPinTracker struct{}
-type mockIPFSConnector struct{}
-type mockConsensus struct{}
-type mockPeerMonitor struct{}
-
-func (mock *mockCluster) Pin(ctx context.Context, in api.Pin, out *api.Pin) error {
-	if in.Cid.Equals(ErrorCid) {
-		return ErrBadCid
-	}
-
-	// a pin is never returned with the replication factors set to 0.
-	if in.ReplicationFactorMin == 0 {
-		in.ReplicationFactorMin = -1
-	}
-	if in.ReplicationFactorMax == 0 {
-		in.ReplicationFactorMax = -1
-	}
-	*out = in
-	return nil
-}
-
-func (mock *mockCluster) Unpin(ctx context.Context, in api.Pin, out *api.Pin) error {
-	if in.Cid.Equals(ErrorCid) {
-		return ErrBadCid
-	}
-	if in.Cid.Equals(NotFoundCid) {
-		return state.ErrNotFound
-	}
-	*out = in
-	return nil
-}
-
-func (mock *mockCluster) PinPath(ctx context.Context, in api.PinPath, out *api.Pin) error {
-	p, err := gopath.ParsePath(in.Path)
-	if err != nil {
-		return err
-	}
-
-	var pin api.Pin
-	if p.IsJustAKey() && !strings.HasPrefix(in.Path, "/ipns") {
-		c, _, err := gopath.SplitAbsPath(p)
-		if err != nil {
-			return err
-		}
-		cc := api.NewCid(c)
-		if cc.Equals(ErrorCid) {
-			return ErrBadCid
-		}
-		pin = api.PinWithOpts(cc, in.PinOptions)
-	} else {
-		pin = api.PinWithOpts(CidResolved, in.PinOptions)
-	}
-
-	*out = pin
-	return nil
-}
-
-func (mock *mockCluster) UnpinPath(ctx context.Context, in api.PinPath, out *api.Pin) error {
-	if in.Path == NotFoundPath {
-		return state.ErrNotFound
-	}
-
-	// Mock-Unpin behaves like pin (doing nothing).
-	return mock.PinPath(ctx, in, out)
-}
-
-func (mock *mockCluster) Pins(ctx context.Context, in <-chan struct{}, out chan<- api.Pin) error {
-	opts := api.PinOptions{
-		ReplicationFactorMin: -1,
-		ReplicationFactorMax: -1,
-	}
-
-	out <- api.PinWithOpts(Cid1, opts)
-	out <- api.PinCid(Cid2)
-	out <- api.PinWithOpts(Cid3, opts)
-	close(out)
-	return nil
-}
-
-func (mock *mockCluster) PinGet(ctx context.Context, in api.Cid, out *api.Pin) error {
-	switch in.String() {
-	case ErrorCid.String():
-		return errors.New("this is an expected error when using ErrorCid")
-	case Cid1.String(), Cid3.String():
-		p := api.PinCid(in)
-		p.ReplicationFactorMin = -1
-		p.ReplicationFactorMax = -1
-		*out = p
-		return nil
-	case Cid2.String(): // This is a remote pin
-		p := api.PinCid(in)
-		p.ReplicationFactorMin = 1
-		p.ReplicationFactorMax = 1
-		*out = p
-	default:
-		return state.ErrNotFound
-	}
-	return nil
-}
-
-func (mock *mockCluster) ID(ctx context.Context, in struct{}, out *api.ID) error {
-	//_, pubkey, _ := crypto.GenerateKeyPair(
-	//	DefaultConfigCrypto,
-	//	DefaultConfigKeyLength)
-
-	addr, _ := api.NewMultiaddr("/ip4/127.0.0.1/tcp/4001/p2p/" + PeerID1.Pretty())
-	*out = api.ID{
-		ID: PeerID1,
-		//PublicKey: pubkey,
-		Version: "0.0.mock",
-		IPFS: api.IPFSID{
-			ID:        PeerID1,
-			Addresses: []api.Multiaddr{addr},
-		},
-	}
-	return nil
-}
-
-func (mock *mockCluster) IDStream(ctx context.Context, in <-chan struct{}, out chan<- api.ID) error {
-	defer close(out)
-	var id api.ID
-	mock.ID(ctx, struct{}{}, &id)
-	select {
-	case <-ctx.Done():
-		return ctx.Err()
-	case out <- id:
-	}
-	return nil
-}
-
-func (mock *mockCluster) Version(ctx context.Context, in struct{}, out *api.Version) error {
-	*out = api.Version{
-		Version: "0.0.mock",
-	}
-	return nil
-}
-
-func (mock *mockCluster) Peers(ctx context.Context, in <-chan struct{}, out chan<- api.ID) error {
-	id := api.ID{}
-	mock.ID(ctx, struct{}{}, &id)
-	out <- id
-	close(out)
-	return nil
-}
-
-func (mock *mockCluster) PeersWithFilter(ctx context.Context, in <-chan []peer.ID, out chan<- api.ID) error {
-	inCh := make(chan struct{})
-	close(inCh)
-	return mock.Peers(ctx, inCh, out)
-}
-
-func (mock *mockCluster) PeerAdd(ctx context.Context, in peer.ID, out *api.ID) error {
-	id := api.ID{}
-	mock.ID(ctx, struct{}{}, &id)
-	*out = id
-	return nil
-}
-
-func (mock *mockCluster) PeerRemove(ctx context.Context, in peer.ID, out *struct{}) error {
-	return nil
-}
-
-func (mock *mockCluster) ConnectGraph(ctx context.Context, in struct{}, out *api.ConnectGraph) error {
-	*out = api.ConnectGraph{
-		ClusterID: PeerID1,
-		IPFSLinks: map[string][]peer.ID{
-			PeerID4.String(): {PeerID5, PeerID6},
-			PeerID5.String(): {PeerID4, PeerID6},
-			PeerID6.String(): {PeerID4, PeerID5},
-		},
-		ClusterLinks: map[string][]peer.ID{
-			PeerID1.String(): {PeerID2, PeerID3},
-			PeerID2.String(): {PeerID1, PeerID3},
-			PeerID3.String(): {PeerID1, PeerID2},
-		},
-		ClustertoIPFS: map[string]peer.ID{
-			PeerID1.String(): PeerID4,
-			PeerID2.String(): PeerID5,
-			PeerID3.String(): PeerID6,
-		},
-	}
-	return nil
-}
-
-func (mock *mockCluster) StatusAll(ctx context.Context, in <-chan api.TrackerStatus, out chan<- api.GlobalPinInfo) error {
-	defer close(out)
-	filter := <-in
-
-	pid := PeerID1.String()
-	gPinInfos := []api.GlobalPinInfo{
-		{
-			Cid:  Cid1,
-			Name: "aaa",
-			PeerMap: map[string]api.PinInfoShort{
-				pid: {
-					Status: api.TrackerStatusPinned,
-					TS:     time.Now(),
-				},
-			},
-		},
-		{
-			Cid:  Cid2,
-			Name: "bbb",
-			PeerMap: map[string]api.PinInfoShort{
-				pid: {
-					Status: api.TrackerStatusPinning,
-					TS:     time.Now(),
-				},
-			},
-		},
-		{
-			Cid:  Cid3,
-			Name: "ccc",
-			Metadata: map[string]string{
-				"ccc": "3c",
-			},
-			PeerMap: map[string]api.PinInfoShort{
-				pid: {
-					Status: api.TrackerStatusPinError,
-					TS:     time.Now(),
-				},
-			},
-		},
-	}
-	// If there is no filter match, we will not return that status and we
-	// will not have an entry for that peer in the peerMap. In turn, when
-	// there is a single peer, we will not have an entry for the cid at all.
-	for _, gpi := range gPinInfos {
-		for id, pi := range gpi.PeerMap {
-			if !filter.Match(pi.Status) {
-				delete(gpi.PeerMap, id)
-			}
-		}
-	}
-	for _, gpi := range gPinInfos {
-		if len(gpi.PeerMap) > 0 {
-			out <- gpi
-		}
-	}
-
-	return nil
-}
-
-func (mock *mockCluster) StatusAllLocal(ctx context.Context, in <-chan api.TrackerStatus, out chan<- api.PinInfo) error {
-	return (&mockPinTracker{}).StatusAll(ctx, in, out)
-}
-
-func (mock *mockCluster) Status(ctx context.Context, in api.Cid, out *api.GlobalPinInfo) error {
-	if in.Equals(ErrorCid) {
-		return ErrBadCid
-	}
-	ma, _ := api.NewMultiaddr("/ip4/1.2.3.4/ipfs/" + PeerID3.String())
-
-	*out = api.GlobalPinInfo{
-		Cid:         in,
-		Name:        "test",
-		Allocations: nil,
-		Origins:     nil,
-		Metadata: map[string]string{
-			"meta": "data",
-		},
-
-		PeerMap: map[string]api.PinInfoShort{
-			PeerID1.String(): {
-				PeerName:      PeerName3,
-				IPFS:          PeerID3,
-				IPFSAddresses: []api.Multiaddr{ma},
-				Status:        api.TrackerStatusPinned,
-				TS:            time.Now(),
-			},
-		},
-	}
-	return nil
-}
-
-func (mock *mockCluster) StatusLocal(ctx context.Context, in api.Cid, out *api.PinInfo) error {
-	return (&mockPinTracker{}).Status(ctx, in, out)
-}
-
-func (mock *mockCluster) RecoverAll(ctx context.Context, in <-chan struct{}, out chan<- api.GlobalPinInfo) error {
-	f := make(chan api.TrackerStatus, 1)
-	f <- api.TrackerStatusUndefined
-	close(f)
-	return mock.StatusAll(ctx, f, out)
-}
-
-func (mock *mockCluster) RecoverAllLocal(ctx context.Context, in <-chan struct{}, out chan<- api.PinInfo) error {
-	return (&mockPinTracker{}).RecoverAll(ctx, in, out)
-}
-
-func (mock *mockCluster) Recover(ctx context.Context, in api.Cid, out *api.GlobalPinInfo) error {
-	return mock.Status(ctx, in, out)
-}
-
-func (mock *mockCluster) RecoverLocal(ctx context.Context, in api.Cid, out *api.PinInfo) error {
-	return (&mockPinTracker{}).Recover(ctx, in, out)
-}
-
-func (mock *mockCluster) BlockAllocate(ctx context.Context, in api.Pin, out *[]peer.ID) error {
-	if in.ReplicationFactorMin > 1 {
-		return errors.New("replMin too high: can only mock-allocate to 1")
-	}
-	*out = []peer.ID{""} // allocate to local peer
-	return nil
-}
-
-func (mock *mockCluster) RepoGC(ctx context.Context, in struct{}, out *api.GlobalRepoGC) error {
-	localrepoGC := api.RepoGC{}
-	_ = mock.RepoGCLocal(ctx, struct{}{}, &localrepoGC)
-	*out = api.GlobalRepoGC{
-		PeerMap: map[string]api.RepoGC{
-			PeerID1.String(): localrepoGC,
-		},
-	}
-	return nil
-}
-
-func (mock *mockCluster) RepoGCLocal(ctx context.Context, in struct{}, out *api.RepoGC) error {
-	*out = api.RepoGC{
-		Peer: PeerID1,
-		Keys: []api.IPFSRepoGC{
-			{
-				Key: Cid1,
-			},
-			{
-				Key: Cid2,
-			},
-			{
-				Key: Cid3,
-			},
-			{
-				Key: Cid4,
-			},
-			{
-				Error: ErrLinkNotFound.Error(),
-			},
-		},
-	}
-
-	return nil
-}
-
-func (mock *mockCluster) SendInformerMetrics(ctx context.Context, in struct{}, out *struct{}) error {
-	return nil
-}
-
-func (mock *mockCluster) Alerts(ctx context.Context, in struct{}, out *[]api.Alert) error {
-	*out = []api.Alert{
-		{
-			Metric: api.Metric{
-				Name:       "ping",
-				Peer:       PeerID2,
-				Expire:     time.Now().Add(-30 * time.Second).UnixNano(),
-				Valid:      true,
-				ReceivedAt: time.Now().Add(-60 * time.Second).UnixNano(),
-			},
-			TriggeredAt: time.Now(),
-		},
-	}
-	return nil
-}
-
-func (mock *mockCluster) IPFSID(ctx context.Context, in peer.ID, out *api.IPFSID) error {
-	var id api.ID
-	_ = mock.ID(ctx, struct{}{}, &id)
-	*out = id.IPFS
-	return nil
-}
-
-/* Tracker methods */
-
-func (mock *mockPinTracker) Track(ctx context.Context, in api.Pin, out *struct{}) error {
-	return nil
-}
-
-func (mock *mockPinTracker) Untrack(ctx context.Context, in api.Pin, out *struct{}) error {
-	return nil
-}
-
-func (mock *mockPinTracker) StatusAll(ctx context.Context, in <-chan api.TrackerStatus, out chan<- api.PinInfo) error {
-	defer close(out)
-	filter := <-in
-
-	pinInfos := []api.PinInfo{
-		{
-			Cid:  Cid1,
-			Peer: PeerID1,
-			PinInfoShort: api.PinInfoShort{
-				Status: api.TrackerStatusPinned,
-				TS:     time.Now(),
-			},
-		},
-		{
-			Cid:  Cid3,
-			Peer: PeerID1,
-			PinInfoShort: api.PinInfoShort{
-				Status: api.TrackerStatusPinError,
-				TS:     time.Now(),
-			},
-		},
-	}
-	for _, pi := range pinInfos {
-		if filter.Match(pi.Status) {
-			out <- pi
-		}
-	}
-	return nil
-}
-
-func (mock *mockPinTracker) Status(ctx context.Context, in api.Cid, out *api.PinInfo) error {
-	if in.Equals(ErrorCid) {
-		return ErrBadCid
-	}
-
-	*out = api.PinInfo{
-		Cid:  in,
-		Peer: PeerID2,
-		PinInfoShort: api.PinInfoShort{
-			Status: api.TrackerStatusPinned,
-			TS:     time.Now(),
-		},
-	}
-	return nil
-}
-
-func (mock *mockPinTracker) RecoverAll(ctx context.Context, in <-chan struct{}, out chan<- api.PinInfo) error {
-	close(out)
-	return nil
-}
-
-func (mock *mockPinTracker) Recover(ctx context.Context, in api.Cid, out *api.PinInfo) error {
-	*out = api.PinInfo{
-		Cid:  in,
-		Peer: PeerID1,
-		PinInfoShort: api.PinInfoShort{
-			Status: api.TrackerStatusPinned,
-			TS:     time.Now(),
-		},
-	}
-	return nil
-}
-
-func (mock *mockPinTracker) PinQueueSize(ctx context.Context, in struct{}, out *int64) error {
-	*out = 10
-	return nil
-}
-
-/* PeerMonitor methods */
-
-// LatestMetrics runs PeerMonitor.LatestMetrics().
-func (mock *mockPeerMonitor) LatestMetrics(ctx context.Context, in string, out *[]api.Metric) error {
-	m := api.Metric{
-		Name:  "test",
-		Peer:  PeerID1,
-		Value: "0",
-		Valid: true,
-	}
-	m.SetTTL(2 * time.Second)
-	last := []api.Metric{m}
-	*out = last
-	return nil
-}
-
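[Editor's note: `LatestMetrics` above relies on `api.Metric`'s TTL bookkeeping (`SetTTL`, plus the raw `Expire` UnixNano field visible in the `Alerts` mock). A self-contained sketch of those mechanics using a hypothetical stand-in type — this is not the real `api.Metric`:]

```go
package main

import (
	"fmt"
	"time"
)

// metric is a stripped-down, hypothetical stand-in for api.Metric showing
// only the TTL mechanics the mocks exercise.
type metric struct {
	Name   string
	Value  string
	Expire int64 // expiry as UnixNano, as in the Alerts mock above
}

// SetTTL records an absolute expiry time d from now.
func (m *metric) SetTTL(d time.Duration) {
	m.Expire = time.Now().Add(d).UnixNano()
}

// Expired reports whether the expiry time has passed.
func (m *metric) Expired() bool {
	return time.Now().UnixNano() > m.Expire
}

func main() {
	m := &metric{Name: "test", Value: "0"}
	m.SetTTL(2 * time.Second) // the same TTL the LatestMetrics mock uses
	fmt.Println("expired right away?", m.Expired())

	time.Sleep(3 * time.Second)
	fmt.Println("expired after 3s?", m.Expired())
}
```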
-// MetricNames runs PeerMonitor.MetricNames().
-func (mock *mockPeerMonitor) MetricNames(ctx context.Context, in struct{}, out *[]string) error {
-	k := []string{"ping", "freespace"}
-	*out = k
-	return nil
-}
-
-/* IPFSConnector methods */
-
-func (mock *mockIPFSConnector) Pin(ctx context.Context, in api.Pin, out *struct{}) error {
-	switch in.Cid {
-	case SlowCid1:
-		time.Sleep(2 * time.Second)
-	}
-	return nil
-}
-
-func (mock *mockIPFSConnector) Unpin(ctx context.Context, in api.Pin, out *struct{}) error {
-	switch in.Cid {
-	case SlowCid1:
-		time.Sleep(2 * time.Second)
-	}
-	return nil
-}
-
-func (mock *mockIPFSConnector) PinLsCid(ctx context.Context, in api.Pin, out *api.IPFSPinStatus) error {
-	if in.Cid.Equals(Cid1) || in.Cid.Equals(Cid3) {
-		*out = api.IPFSPinStatusRecursive
-	} else {
-		*out = api.IPFSPinStatusUnpinned
-	}
-	return nil
-}
-
-func (mock *mockIPFSConnector) PinLs(ctx context.Context, in <-chan []string, out chan<- api.IPFSPinInfo) error {
-	out <- api.IPFSPinInfo{Cid: api.Cid(Cid1), Type: api.IPFSPinStatusRecursive}
-	out <- api.IPFSPinInfo{Cid: api.Cid(Cid3), Type: api.IPFSPinStatusRecursive}
-	close(out)
-	return nil
-}
-
-func (mock *mockIPFSConnector) SwarmPeers(ctx context.Context, in struct{}, out *[]peer.ID) error {
-	*out = []peer.ID{PeerID2, PeerID3}
-	return nil
-}
-
-func (mock *mockIPFSConnector) ConfigKey(ctx context.Context, in string, out *interface{}) error {
-	switch in {
-	case "Datastore/StorageMax":
-		*out = "100KB"
-	default:
-		return errors.New("configuration key not found")
-	}
-	return nil
-}
-
-func (mock *mockIPFSConnector) RepoStat(ctx context.Context, in struct{}, out *api.IPFSRepoStat) error {
-	// since we have two pins. Assume each is 1000B.
-	stat := api.IPFSRepoStat{
-		StorageMax: 100000,
-		RepoSize:   2000,
-	}
-	*out = stat
-	return nil
-}
-
-func (mock *mockIPFSConnector) BlockStream(ctx context.Context, in <-chan api.NodeWithMeta, out chan<- struct{}) error {
-	close(out)
-	return nil
-}
-
-func (mock *mockIPFSConnector) Resolve(ctx context.Context, in string, out *api.Cid) error {
-	switch in {
-	case ErrorCid.String(), "/ipfs/" + ErrorCid.String():
-		*out = ErrorCid
-	default:
-		*out = Cid2
-	}
-	return nil
-}
-
-func (mock *mockConsensus) AddPeer(ctx context.Context, in peer.ID, out *struct{}) error {
-	return errors.New("mock rpc cannot redirect")
-}
-
-func (mock *mockConsensus) RmPeer(ctx context.Context, in peer.ID, out *struct{}) error {
-	return errors.New("mock rpc cannot redirect")
-}
-
-func (mock *mockConsensus) Peers(ctx context.Context, in struct{}, out *[]peer.ID) error {
-	*out = []peer.ID{PeerID1, PeerID2, PeerID3}
-	return nil
-}
diff --git a/packages/networking/ipfs-cluster/test/sharding.go b/packages/networking/ipfs-cluster/test/sharding.go
deleted file mode 100644
index 0b157ce..0000000
--- a/packages/networking/ipfs-cluster/test/sharding.go
+++ /dev/null
@@ -1,374 +0,0 @@
-package test
-
-import (
-	"context"
-	"encoding/hex"
-	"errors"
-	"io"
-	"math/rand"
-	"os"
-	"path/filepath"
-	"sync"
-	"testing"
-
-	"github.com/ipfs-cluster/ipfs-cluster/api"
-	files "github.com/ipfs/go-ipfs-files"
-	format "github.com/ipfs/go-ipld-format"
-
-	cid "github.com/ipfs/go-cid"
-)
-
-const shardingTestDir = "shardTesting"
-const shardingTestTree = "testTree"
-const shardingTestFile = "testFile"
-
-// Variables related to adding the testing directory generated by tests
-var (
-	ShardingDirBalancedRootCID        = "QmdHXJgxeCFf6qDZqYYmMesV2DbZCVPEdEhj2oVTxP1y7Y"
-	ShardingDirBalancedRootCIDWrapped = "QmbfGRPTUd7L1xsAZZ1A3kUFP1zkEZ9kHdb6AGaajBzGGX"
-	ShardingDirTrickleRootCID         = "QmYMbx56GFNBDAaAMchtjmWjDTdqNKCSGuFxtRosiPgJL6"
-	// These hashes should match all the blocks produced when adding
-	// the files resulting from GetShardingDir*
-	// They have been obtained by adding the "shardTesting" folder
-	// to go-ipfs (with default parameters). Then doing
-	// `refs -r` on the result. It contains the folder hash.
-	ShardingDirCids = [28]string{
-		"QmdHXJgxeCFf6qDZqYYmMesV2DbZCVPEdEhj2oVTxP1y7Y",
-		"QmSpZcKTgfsxyL7nyjzTNB1gAWmGYC2t8kRPpZSG1ZbTkY",
-		"QmSijPKAE61CUs57wWU2M4YxkSaRogQxYRtHoEzP2uRaQt",
-		"QmYr6r514Pt8HbsFjwompLrHMyZEYg6aXfsv59Ys8uzLpr",
-		"QmfEeHL3iwDE8XjeFq9HDu2B8Dfu8L94y7HUB5sh5vN9TB",
-		"QmTz2gUzUNQnH3i818MAJPMLjBfRXZxoZbdNYT1K66LnZN",
-		"QmPZLJ3CZYgxH4K1w5jdbAdxJynXn5TCB4kHy7u8uHC3fy",
-		"QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn",
-		"QmY6PArrjY66Nb4qEKWF7RUHCToRFyTsrM6cH8D6vJMSnk",
-		"QmYXgh47x4gr1iL6YRqAA8RcE3XNWPfB5VJTt9dBfRnRHX",
-		"QmXqkKUxgWsgXEUsxDJcs2hUrSrFnPkKyGnGdxpm1cb2me",
-		"Qmbne4XHMAiZwoFYdnGrdcW3UBYA7UnFE9WoDwEjG3deZH",
-		"Qmdz4kLZUjfGBSvfMxTQpcxjz2aZqupnF9KjKGpAuaZ4nT",
-		"QmavW3cdGuSfYMEQiBDfobwVtPEjUnML2Ry1q8w8X3Q8Wj",
-		"QmfPHRbeerRWgbu5BzxwK7UhmJGqGvZNxuFoMCUFTuhG3H",
-		"QmaYNfhw7L7KWX7LYpwWt1bh6Gq2p7z1tic35PnDRnqyBf",
-		"QmWWwH1GKMh6GmFQunjq7CHjr4g4z6Q4xHyDVfuZGX7MyU",
-		"QmVpHQGMF5PLsvfgj8bGo9q2YyLRPMvfu1uTb3DgREFtUc",
-		"QmUrdAn4Mx4kNioX9juLgwQotwFfxeo5doUNnLJrQynBEN",
-		"QmdJ86B7J8mfGq6SjQy8Jz7r5x1cLcXc9M2a7T7NmSMVZx",
-		"QmS77cTMdyx8P7rP2Gij6azgYPpjp2J34EVYuhB6mfjrQh",
-		"QmbsBsDspFcqi7xJ4xPxcNYnduzQ5UQDw9y6trQWZGoEHq",
-		"QmakAXHMeyE6fHHaeqicSKVMM2QyuGbS2g8dgUA7ns8gSY",
-		"QmTC6vGbH9ABkpXfrMmYkXbxEqH12jEVGpvGzibGZEDVHK",
-		"QmebQW6nfE5cPb85ZUGrSyqbFsVYwfuKsX8Ur3NWwfmnYk",
-		"QmSCcsb4mNMz3CXvVjPdc7kxrx4PbitrcRN8ocmyg62oit",
-		"QmZ2iUT3W7jh8QNnpWSiMZ1QYgpommCSQFZiPY5VdoCHyv",
-		"QmdmUbN9JS3BK3nvcycyzFUBJqXip5zf7bdKbYM3p14e9h",
-	}
-
-	// Used for testing blockput/blockget
-	ShardCid, _  = api.DecodeCid("zdpuAoiNm1ntWx6jpgcReTiCWFHJSTpvTw4bAAn9p6yDnznqh")
-	ShardData, _ = hex.DecodeString("a16130d82a58230012209273fd63ec94bed5abb219b2d9cb010cabe4af7b0177292d4335eff50464060a")
-)
-
-// ShardingTestHelper helps generate files and folders to test adding and
-// sharding in IPFS Cluster.
-type ShardingTestHelper struct {
-	randSrc *rand.Rand
-}
-
-// NewShardingTestHelper returns a new helper.
-func NewShardingTestHelper() *ShardingTestHelper {
-	return &ShardingTestHelper{
-		randSrc: rand.New(rand.NewSource(1)),
-	}
-}
-
-// GetTreeMultiReader creates and returns a MultiFileReader for a testing
-// directory tree. Files are pseudo-randomly generated and are always the same.
-// Directory structure:
-//   - testingTree
-//     - A
-//       - alpha
-//         * small_file_0 (< 5 kB)
-//       - beta
-//         * small_file_1 (< 5 kB)
-//       - delta
-//         - empty
-//       * small_file_2 (< 5 kB)
-//       - gamma
-//         * small_file_3 (< 5 kB)
-//     - B
-//       * medium_file (~.3 MB)
-//       * big_file (3 MB)
-//
-// The total size in ext4 is ~3420160 Bytes = ~3340 kB = ~3.4MB
-func (sth *ShardingTestHelper) GetTreeMultiReader(t *testing.T) (*files.MultiFileReader, io.Closer) {
-	sf := sth.GetTreeSerialFile(t)
-
-	mapDir := files.NewMapDirectory(map[string]files.Node{
-		shardingTestTree: sf,
-	})
-
-	return files.NewMultiFileReader(mapDir, true), sf
-}
-
-// GetTreeSerialFile returns a files.Directory pointing to the testing
-// directory tree (see GetTreeMultiReader).
-func (sth *ShardingTestHelper) GetTreeSerialFile(t *testing.T) files.Directory {
-	st := sth.makeTree(t)
-	sf, err := files.NewSerialFile(sth.path(shardingTestTree), false, st)
-
-	if err != nil {
-		t.Fatal(err)
-	}
-	return sf.(files.Directory)
-}
-
-// GetRandFileMultiReader creates and returns a MultiFileReader for
-// a testing random file of the given size (in kbs). The random
-// file is different every time.
-func (sth *ShardingTestHelper) GetRandFileMultiReader(t *testing.T, kbs int) (*files.MultiFileReader, io.Closer) {
-	slf, sf := sth.GetRandFileReader(t, kbs)
-	return files.NewMultiFileReader(slf, true), sf
-}
-
-// GetRandFileReader creates and returns a directory containing a testing
-// random file of the given size (in kbs)
-func (sth *ShardingTestHelper) GetRandFileReader(t *testing.T, kbs int) (files.Directory, io.Closer) {
-	st := sth.makeRandFile(t, kbs)
-	sf, err := files.NewSerialFile(sth.path(shardingTestFile), false, st)
-	if err != nil {
-		t.Fatal(err)
-	}
-	mapDir := files.NewMapDirectory(
-		map[string]files.Node{"randomfile": sf},
-	)
-	return mapDir, sf
-}
-
-// Clean deletes any folder and file generated by this helper.
-func (sth *ShardingTestHelper) Clean(t *testing.T) {
-	err := os.RemoveAll(shardingTestDir)
-	if err != nil {
-		t.Fatal(err)
-	}
-}
-
-func folderExists(t *testing.T, path string) bool {
-	if st, err := os.Stat(path); os.IsNotExist(err) {
-		return false
-	} else if err != nil {
-		t.Fatal(err)
-	} else if !st.IsDir() {
-		t.Fatalf("%s is not a directory", path)
-	}
-	return true
-}
-
-func makeDir(t *testing.T, path string) {
-	if !folderExists(t, path) {
-		err := os.MkdirAll(path, os.ModePerm)
-		if err != nil {
-			t.Fatal(err)
-		}
-	}
-}
-
-// see GetTreeMultiReader
-func (sth *ShardingTestHelper) makeTestFolder(t *testing.T) {
-	makeDir(t, shardingTestDir)
-}
-
-// This produces this:
-//   - shardTesting
-//     - testTree
-//       - A
-//         - alpha
-//           * small_file_0 (< 5 kB)
-//         - beta
-//           * small_file_1 (< 5 kB)
-//         - delta
-//           - empty
-//         * small_file_2 (< 5 kB)
-//         - gamma
-//           * small_file_3 (< 5 kB)
-//       - B
-//         * medium_file (~.3 MB)
-//         * big_file (3 MB)
-//
-// Take special care when modifying this function. File data depends on order
-// and each file size. If this changes then hashes above
-// recording the ipfs import hash tree must be updated manually.
-func (sth *ShardingTestHelper) makeTree(t *testing.T) os.FileInfo {
-	sth.makeTestFolder(t)
-	basepath := sth.path(shardingTestTree)
-
-	// do not re-create
-	if folderExists(t, basepath) {
-		st, _ := os.Stat(basepath)
-		return st
-	}
-
-	p0 := shardingTestTree
-	paths := [][]string{
-		{p0, "A", "alpha"},
-		{p0, "A", "beta"},
-		{p0, "A", "delta", "empty"},
-		{p0, "A", "gamma"},
-		{p0, "B"},
-	}
-	for _, p := range paths {
-		makeDir(t, sth.path(p...))
-	}
-
-	files := [][]string{
-		{p0, "A", "alpha", "small_file_0"},
-		{p0, "A", "beta", "small_file_1"},
-		{p0, "A", "small_file_2"},
-		{p0, "A", "gamma", "small_file_3"},
-		{p0, "B", "medium_file"},
-		{p0, "B", "big_file"},
-	}
-
-	fileSizes := []int{5, 5, 5, 5, 300, 3000}
-	for i, fpath := range files {
-		path := sth.path(fpath...)
-		f, err := os.Create(path)
-		if err != nil {
-			t.Fatal(err)
-		}
-		sth.randFile(t, f, fileSizes[i])
-		f.Sync()
-		f.Close()
-	}
-
-	st, err := os.Stat(basepath)
-	if err != nil {
-		t.Fatal(err)
-	}
-	return st
-}
-
-func (sth *ShardingTestHelper) path(p ...string) string {
-	paths := append([]string{shardingTestDir}, p...)
-	return filepath.Join(paths...)
-}
-
-// Writes randomness to a writer up to the given size (in kBs)
-func (sth *ShardingTestHelper) randFile(t *testing.T, w io.Writer, kbs int) {
-	buf := make([]byte, 1024)
-	for i := 0; i < kbs; i++ {
-		sth.randSrc.Read(buf) // read 1 kb
-		if _, err := w.Write(buf); err != nil {
-			t.Fatal(err)
-		}
-	}
-}
-
-// this creates shardingTestFile in the testFolder. It recreates it every
-// time.
-func (sth *ShardingTestHelper) makeRandFile(t *testing.T, kbs int) os.FileInfo {
-	sth.makeTestFolder(t)
-	path := sth.path(shardingTestFile)
-	f, err := os.Create(path)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer f.Close()
-	defer f.Sync()
-	sth.randFile(t, f, kbs)
-	st, err := f.Stat()
-	if err != nil {
-		t.Fatal(err)
-	}
-	return st
-
-}
-
-// MockDAGService implements an in-memory DAGService. The stored nodes are
-// inspectable via the Nodes map.
-type MockDAGService struct {
-	mu    sync.Mutex
-	Nodes map[cid.Cid]format.Node
-
-	writeOnly bool
-}
-
-// NewMockDAGService returns an in-memory DAG Service.
-func NewMockDAGService(writeOnly bool) *MockDAGService {
-	return &MockDAGService{
-		Nodes:     make(map[cid.Cid]format.Node),
-		writeOnly: writeOnly,
-	}
-}
-
-// Get reads a node.
-func (d *MockDAGService) Get(ctx context.Context, cid cid.Cid) (format.Node, error) {
-	if d.writeOnly {
-		return nil, errors.New("dagservice: block not found")
-	}
-
-	d.mu.Lock()
-	defer d.mu.Unlock()
-	if n, ok := d.Nodes[cid]; ok {
-		return n, nil
-	}
-	return nil, format.ErrNotFound{Cid: cid}
-}
-
-// GetMany reads many nodes.
-func (d *MockDAGService) GetMany(ctx context.Context, cids []cid.Cid) <-chan *format.NodeOption {
-	if d.writeOnly {
-		out := make(chan *format.NodeOption, 1)
-		out <- &format.NodeOption{Err: errors.New("failed to fetch all nodes")}
-		close(out)
-		return out
-	}
-
-	d.mu.Lock()
-	defer d.mu.Unlock()
-	out := make(chan *format.NodeOption, len(cids))
-	for _, c := range cids {
-		if n, ok := d.Nodes[c]; ok {
-			out <- &format.NodeOption{Node: n}
-		} else {
-			out <- &format.NodeOption{Err: format.ErrNotFound{Cid: c}}
-		}
-	}
-	close(out)
-	return out
-}
-
-// Add adds a node.
-func (d *MockDAGService) Add(ctx context.Context, node format.Node) error {
-	d.mu.Lock()
-	defer d.mu.Unlock()
-	d.Nodes[node.Cid()] = node
-	return nil
-}
-
-// AddMany adds many nodes.
-func (d *MockDAGService) AddMany(ctx context.Context, nodes []format.Node) error {
-	d.mu.Lock()
-	defer d.mu.Unlock()
-	for _, n := range nodes {
-		d.Nodes[n.Cid()] = n
-	}
-	return nil
-}
-
-// Remove deletes a node.
-func (d *MockDAGService) Remove(ctx context.Context, c cid.Cid) error {
-	d.mu.Lock()
-	defer d.mu.Unlock()
-	delete(d.Nodes, c)
-	return nil
-}
-
-// RemoveMany removes many nodes.
-func (d *MockDAGService) RemoveMany(ctx context.Context, cids []cid.Cid) error {
-	d.mu.Lock()
-	defer d.mu.Unlock()
-	for _, c := range cids {
-		delete(d.Nodes, c)
-	}
-	return nil
-}
diff --git a/packages/networking/ipfs-cluster/test/test.go b/packages/networking/ipfs-cluster/test/test.go
deleted file mode 100644
index d291d6c..0000000
--- a/packages/networking/ipfs-cluster/test/test.go
+++ /dev/null
@@ -1,3 +0,0 @@
-// Package test offers testing utilities for all the IPFS Cluster
-// codebase, like IPFS daemon and RPC mocks and pre-defined testing CIDs.
-package test diff --git a/packages/networking/ipfs-cluster/test/test_test.go b/packages/networking/ipfs-cluster/test/test_test.go deleted file mode 100644 index 8df9ba3..0000000 --- a/packages/networking/ipfs-cluster/test/test_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package test - -import ( - "reflect" - "testing" - - ipfscluster "github.com/ipfs-cluster/ipfs-cluster" -) - -func TestIpfsMock(t *testing.T) { - ipfsmock := NewIpfsMock(t) - defer ipfsmock.Close() -} - -// Test that our RPC mock resembles the original -func TestRPCMockValid(t *testing.T) { - type tc struct { - mock reflect.Type - real reflect.Type - } - - tcs := []tc{ - { - real: reflect.TypeOf(&ipfscluster.ClusterRPCAPI{}), - mock: reflect.TypeOf(&mockCluster{}), - }, - { - real: reflect.TypeOf(&ipfscluster.PinTrackerRPCAPI{}), - mock: reflect.TypeOf(&mockPinTracker{}), - }, - { - real: reflect.TypeOf(&ipfscluster.IPFSConnectorRPCAPI{}), - mock: reflect.TypeOf(&mockIPFSConnector{}), - }, - { - real: reflect.TypeOf(&ipfscluster.ConsensusRPCAPI{}), - mock: reflect.TypeOf(&mockConsensus{}), - }, - { - real: reflect.TypeOf(&ipfscluster.PeerMonitorRPCAPI{}), - mock: reflect.TypeOf(&mockPeerMonitor{}), - }, - } - - for _, tc := range tcs { - realT := tc.real - mockT := tc.mock - - // Make sure all the methods we have match the original - for i := 0; i < mockT.NumMethod(); i++ { - method := mockT.Method(i) - name := method.Name - origMethod, ok := realT.MethodByName(name) - if !ok { - t.Fatalf("%s method not found in real RPC", name) - } - - mType := method.Type - oType := origMethod.Type - - if nout := mType.NumOut(); nout != 1 || nout != oType.NumOut() { - t.Errorf("%s: more than 1 out parameter", name) - } - - if mType.Out(0).Name() != "error" { - t.Errorf("%s out param should be an error", name) - } - - if nin := mType.NumIn(); nin != oType.NumIn() || nin != 4 { - t.Fatalf("%s: num in parameter mismatch: %d vs. %d", name, nin, oType.NumIn()) - } - - for j := 1; j < 4; j++ { - mn := mType.In(j).String() - on := oType.In(j).String() - if mn != on { - t.Errorf("%s: name mismatch: %s vs %s", name, mn, on) - } - } - } - - for i := 0; i < realT.NumMethod(); i++ { - name := realT.Method(i).Name - _, ok := mockT.MethodByName(name) - if !ok { - t.Logf("Warning: %s: unimplemented in mock rpc", name) - } - } - } -} - -// Test that testing directory is created without error -func TestGenerateTestDirs(t *testing.T) { - sth := NewShardingTestHelper() - defer sth.Clean(t) - _, closer := sth.GetTreeMultiReader(t) - closer.Close() - _, closer = sth.GetRandFileMultiReader(t, 2) - closer.Close() -} diff --git a/packages/networking/ipfs-cluster/util.go b/packages/networking/ipfs-cluster/util.go deleted file mode 100644 index e039c6b..0000000 --- a/packages/networking/ipfs-cluster/util.go +++ /dev/null @@ -1,211 +0,0 @@ -package ipfscluster - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "net" - - blake2b "golang.org/x/crypto/blake2b" - - "github.com/ipfs-cluster/ipfs-cluster/api" - peer "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" - madns "github.com/multiformats/go-multiaddr-dns" -) - -// PeersFromMultiaddrs returns all the different peers in the given addresses. -// each peer only will appear once in the result, even if several -// multiaddresses for it are provided. 
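// Editorial sketch, not part of the original file; the addresses and the
// repeated peer ID <peerA> below are hypothetical:
//
//	a1, _ := ma.NewMultiaddr("/ip4/10.0.0.1/tcp/4001/p2p/<peerA>")
//	a2, _ := ma.NewMultiaddr("/ip4/10.0.0.2/tcp/4001/p2p/<peerA>")
//	ids := PeersFromMultiaddrs([]ma.Multiaddr{a1, a2})
//	// len(ids) == 1: both addresses carry the same peer, so it appears once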
-func PeersFromMultiaddrs(addrs []ma.Multiaddr) []peer.ID { - var pids []peer.ID - pm := make(map[peer.ID]struct{}) - for _, addr := range addrs { - pinfo, err := peer.AddrInfoFromP2pAddr(addr) - if err != nil { - continue - } - _, ok := pm[pinfo.ID] - if !ok { - pm[pinfo.ID] = struct{}{} - pids = append(pids, pinfo.ID) - } - } - return pids -} - -// // connect to a peer ID. -// func connectToPeer(ctx context.Context, h host.Host, id peer.ID, addr ma.Multiaddr) error { -// err := h.Connect(ctx, peerstore.PeerInfo{ -// ID: id, -// Addrs: []ma.Multiaddr{addr}, -// }) -// return err -// } - -// // return the local multiaddresses used to communicate to a peer. -// func localMultiaddrsTo(h host.Host, pid peer.ID) []ma.Multiaddr { -// var addrs []ma.Multiaddr -// conns := h.Network().ConnsToPeer(pid) -// logger.Debugf("conns to %s are: %s", pid, conns) -// for _, conn := range conns { -// addrs = append(addrs, multiaddrJoin(conn.LocalMultiaddr(), h.ID())) -// } -// return addrs -// } - -func logError(fmtstr string, args ...interface{}) error { - msg := fmt.Sprintf(fmtstr, args...) - logger.Error(msg) - return errors.New(msg) -} - -func containsPeer(list []peer.ID, peer peer.ID) bool { - for _, p := range list { - if p == peer { - return true - } - } - return false -} - -func minInt(x, y int) int { - if x < y { - return x - } - return y -} - -// // updatePinParents modifies the api.Pin input to give it the correct parents -// // so that previous additions to the pins parents are maintained after this -// // pin is committed to consensus. If this pin carries new parents they are -// // merged with those already existing for this CID. -// func updatePinParents(pin *api.Pin, existing *api.Pin) { -// // no existing parents this pin is up to date -// if existing.Parents == nil || len(existing.Parents.Keys()) == 0 { -// return -// } -// for _, c := range existing.Parents.Keys() { -// pin.Parents.Add(c) -// } -// } - -type distance [blake2b.Size256]byte - -type distanceChecker struct { - local peer.ID - otherPeers []peer.ID - cache map[peer.ID]distance -} - -func (dc distanceChecker) isClosest(ci api.Cid) bool { - ciHash := convertKey(ci.KeyString()) - localPeerHash := dc.convertPeerID(dc.local) - myDistance := xor(ciHash, localPeerHash) - - for _, p := range dc.otherPeers { - peerHash := dc.convertPeerID(p) - distance := xor(peerHash, ciHash) - - // if myDistance is larger than for other peers... - if bytes.Compare(myDistance[:], distance[:]) > 0 { - return false - } - } - return true -} - -// convertPeerID hashes a Peer ID (Multihash). -func (dc distanceChecker) convertPeerID(id peer.ID) distance { - hash, ok := dc.cache[id] - if ok { - return hash - } - - hashBytes := convertKey(string(id)) - dc.cache[id] = hashBytes - return hashBytes -} - -// convertKey hashes a key. -func convertKey(id string) distance { - return blake2b.Sum256([]byte(id)) -} - -func xor(a, b distance) distance { - var c distance - for i := 0; i < len(c); i++ { - c[i] = a[i] ^ b[i] - } - return c -} - -// peersSubtract subtracts peers ID slice b from peers ID slice a. 
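// Editorial sketch with hypothetical IDs p1, p2, p3:
// peersSubtract([]peer.ID{p1, p2, p3}, []peer.ID{p2}) == [p1, p3];
// entries of a that also appear in b are dropped, and a's order is kept.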
-func peersSubtract(a []peer.ID, b []peer.ID) []peer.ID { - var result []peer.ID - bMap := make(map[peer.ID]struct{}, len(b)) - - for _, p := range b { - bMap[p] = struct{}{} - } - - for _, p := range a { - _, ok := bMap[p] - if ok { - continue - } - result = append(result, p) - } - - return result -} - -// pingValue describes the value carried by ping metrics -type pingValue struct { - Peername string `json:"peer_name,omitempty"` - IPFSID peer.ID `json:"ipfs_id,omitempty"` - IPFSAddresses []api.Multiaddr `json:"ipfs_addresses,omitempty"` -} - -// Valid returns true if the PingValue has IPFSID set. -func (pv pingValue) Valid() bool { - return pv.IPFSID != "" -} - -// PingValue from metric parses a ping value from the value of a given metric, -// if possible. -func pingValueFromMetric(m api.Metric) (pv pingValue) { - json.Unmarshal([]byte(m.Value), &pv) - return -} - -func publicIPFSAddresses(in []api.Multiaddr) []api.Multiaddr { - var out []api.Multiaddr - for _, maddr := range in { - if madns.Matches(maddr.Value()) { // a dns multiaddress: take it - out = append(out, maddr) - continue - } - - ip, err := maddr.ValueForProtocol(ma.P_IP4) - if err != nil { - ip, err = maddr.ValueForProtocol(ma.P_IP6) - if err != nil { - continue - } - } - // We have an IP in the multiaddress. Only include - // global unicast. - netip := net.ParseIP(ip) - if netip == nil { - continue - } - - if !netip.IsGlobalUnicast() { - continue - } - out = append(out, maddr) - } - return out -} diff --git a/packages/networking/ipfs-cluster/version/version.go b/packages/networking/ipfs-cluster/version/version.go deleted file mode 100644 index 3c163f0..0000000 --- a/packages/networking/ipfs-cluster/version/version.go +++ /dev/null @@ -1,20 +0,0 @@ -// Package version stores version information for IPFS Cluster. -package version - -import ( - semver "github.com/blang/semver" - protocol "github.com/libp2p/go-libp2p/core/protocol" -) - -// Version is the current cluster version. -var Version = semver.MustParse("1.0.2") - -// RPCProtocol is protocol handler used to send libp2p-rpc messages between -// cluster peers. All peers in the cluster need to speak the same protocol -// version. -// -// The RPC Protocol is not linked to the IPFS Cluster version (though it once -// was). The protocol version will be updated as needed when breaking changes -// are introduced, though at this point we aim to minimize those as much as -// possible. 
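// Editorial sketch (typical libp2p usage, not part of the original file):
// every peer registers a stream handler under this exact ID, so changing the
// string below breaks RPC with all peers that have not been upgraded:
//
//	h.SetStreamHandler(RPCProtocol, func(s network.Stream) { /* serve RPC */ })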
-var RPCProtocol = protocol.ID("/ipfscluster/1.0/rpc") diff --git a/packages/patched-derivations.nix b/packages/patched-derivations.nix index 3e39e40..2ef672f 100644 --- a/packages/patched-derivations.nix +++ b/packages/patched-derivations.nix @@ -38,7 +38,7 @@ super: rec { kanidm = patch super.kanidm "patches/base/kanidm"; - keycloak = super.callPackage ./security/keycloak { + keycloak = super.keycloak.override { jre = jre17_standard; }; @@ -48,6 +48,7 @@ super: rec { s3ql = (patch super.s3ql "patches/base/s3ql").overrideAttrs (old: { propagatedBuildInputs = old.propagatedBuildInputs ++ [ + super.python3Packages.packaging super.python3Packages.systemd ]; }); diff --git a/packages/projects.nix b/packages/projects.nix index 621a362..3328dbb 100644 --- a/packages/projects.nix +++ b/packages/projects.nix @@ -6,7 +6,6 @@ ./modules/devshell.nix ./build-support - ./networking/ipfs-cluster/project.nix ./servers/reflex-cache/project.nix ./websites/landing/project.nix ./websites/stop-using-nix-env/project.nix @@ -22,6 +21,8 @@ cinny = pkgs.callPackage ./web-apps/cinny { inherit pins; }; + consul = pkgs.callPackage ./servers/consul { }; + excalidraw = pkgs.callPackage ./web-apps/excalidraw { inherit pins; }; graf = pkgs.callPackage ./tools/graf { }; diff --git a/packages/security/keycloak/all-plugins.nix b/packages/security/keycloak/all-plugins.nix deleted file mode 100644 index f2f1117..0000000 --- a/packages/security/keycloak/all-plugins.nix +++ /dev/null @@ -1,9 +0,0 @@ -{ callPackage }: - -{ - scim-for-keycloak = callPackage ./scim-for-keycloak {}; - scim-keycloak-user-storage-spi = callPackage ./scim-keycloak-user-storage-spi {}; - keycloak-discord = callPackage ./keycloak-discord {}; - keycloak-metrics-spi = callPackage ./keycloak-metrics-spi {}; - keycloak-restrict-client-auth = callPackage ./keycloak-restrict-client-auth {}; -} diff --git a/packages/security/keycloak/config_vars.patch b/packages/security/keycloak/config_vars.patch deleted file mode 100644 index be2d547..0000000 --- a/packages/security/keycloak/config_vars.patch +++ /dev/null @@ -1,15 +0,0 @@ -diff --git a/quarkus/dist/src/main/content/bin/kc.sh b/quarkus/dist/src/main/content/bin/kc.sh -index d7be862cde..16f9aa78e0 100644 ---- a/bin/kc.sh -+++ b/bin/kc.sh -@@ -32,8 +32,8 @@ abs_path () { - fi - } - --SERVER_OPTS="-Dkc.home.dir='$(abs_path '..')'" --SERVER_OPTS="$SERVER_OPTS -Djboss.server.config.dir='$(abs_path '../conf')'" -+SERVER_OPTS="-Dkc.home.dir=$KC_HOME_DIR" -+SERVER_OPTS="$SERVER_OPTS -Djboss.server.config.dir=$KC_CONF_DIR" - SERVER_OPTS="$SERVER_OPTS -Djava.util.logging.manager=org.jboss.logmanager.LogManager" - SERVER_OPTS="$SERVER_OPTS -Dquarkus-log-max-startup-records=10000" - CLASSPATH_OPTS="'$(abs_path "../lib/quarkus-run.jar"):$(abs_path "../lib/bootstrap/*")'" diff --git a/packages/security/keycloak/default.nix b/packages/security/keycloak/default.nix deleted file mode 100644 index 1792b51..0000000 --- a/packages/security/keycloak/default.nix +++ /dev/null @@ -1,90 +0,0 @@ -{ stdenv -, lib -, fetchzip -, makeWrapper -, jre -, nixosTests -, callPackage -, confFile ? null -, plugins ? [ ] -, extraFeatures ? [ ] -, disabledFeatures ? 
[ ] -}: - -let - featuresSubcommand = '' - ${lib.optionalString (extraFeatures != [ ]) "--features=${lib.concatStringsSep "," extraFeatures}"} \ - ${lib.optionalString (disabledFeatures != [ ]) "--features-disabled=${lib.concatStringsSep "," disabledFeatures}"} - ''; -in stdenv.mkDerivation rec { - pname = "keycloak"; - version = "24.0.4"; - - src = fetchzip { - url = "https://github.com/keycloak/keycloak/releases/download/${version}/keycloak-${version}.zip"; - hash = "sha256-tqY3rYFRsRpbvms8DVtCp8nXl0hlX1CzuOVFCE+23o4="; - }; - - nativeBuildInputs = [ makeWrapper jre ]; - - patches = [ - # Make home.dir and config.dir configurable through the - # KC_HOME_DIR and KC_CONF_DIR environment variables. - ./config_vars.patch - ]; - - buildPhase = '' - runHook preBuild - '' + lib.optionalString (confFile != null) '' - install -m 0600 ${confFile} conf/keycloak.conf - '' + '' - install_plugin() { - if [ -d "$1" ]; then - find "$1" -type f \( -iname \*.ear -o -iname \*.jar \) -exec install -m 0500 "{}" "providers/" \; - else - install -m 0500 "$1" "providers/" - fi - } - ${lib.concatMapStringsSep "\n" (pl: "install_plugin ${lib.escapeShellArg pl}") plugins} - '' + '' - patchShebangs bin/kc.sh - export KC_HOME_DIR=$(pwd) - export KC_CONF_DIR=$(pwd)/conf - bin/kc.sh build ${featuresSubcommand} - - runHook postBuild - ''; - - installPhase = '' - runHook preInstall - - mkdir $out - cp -r * $out - - rm $out/bin/*.{ps1,bat} - - runHook postInstall - ''; - - postFixup = '' - for script in $(find $out/bin -type f -executable); do - wrapProgram "$script" --set JAVA_HOME ${jre} --prefix PATH : ${jre}/bin - done - ''; - - passthru = { - tests = nixosTests.keycloak; - plugins = callPackage ./all-plugins.nix { }; - enabledPlugins = plugins; - }; - - meta = with lib; { - homepage = "https://www.keycloak.org/"; - description = "Identity and access management for modern applications and services"; - sourceProvenance = with sourceTypes; [ binaryBytecode ]; - license = licenses.asl20; - platforms = jre.meta.platforms; - maintainers = with maintainers; [ ngerstle talyz nickcao ]; - }; - -} diff --git a/packages/security/keycloak/keycloak-discord/default.nix b/packages/security/keycloak/keycloak-discord/default.nix deleted file mode 100644 index 9f00a29..0000000 --- a/packages/security/keycloak/keycloak-discord/default.nix +++ /dev/null @@ -1,31 +0,0 @@ -{ stdenv -, lib -, fetchurl -}: - -stdenv.mkDerivation rec { - pname = "keycloak-discord"; - version = "0.5.0"; - - src = fetchurl { - url = "https://github.com/wadahiro/keycloak-discord/releases/download/v${version}/keycloak-discord-${version}.jar"; - hash = "sha256-radvUu2a6t0lbo5f/ADqy7+I/ONXB7/8pk2d1BtYzQA="; - }; - - dontUnpack = true; - dontBuild = true; - - installPhase = '' - runHook preInstall - install -Dm444 "$src" "$out/keycloak-discord-$version.jar" - runHook postInstall - ''; - - meta = with lib; { - homepage = "https://github.com/wadahiro/keycloak-discord"; - description = "Keycloak Social Login extension for Discord"; - license = licenses.asl20; - maintainers = with maintainers; [ mkg20001 ]; - sourceProvenance = with sourceTypes; [ binaryBytecode ]; - }; -} diff --git a/packages/security/keycloak/keycloak-metrics-spi/default.nix b/packages/security/keycloak/keycloak-metrics-spi/default.nix deleted file mode 100644 index 82e6163..0000000 --- a/packages/security/keycloak/keycloak-metrics-spi/default.nix +++ /dev/null @@ -1,33 +0,0 @@ -{ maven, stdenv, lib, fetchFromGitHub }: - -maven.buildMavenPackage rec { - pname = "keycloak-metrics-spi"; - version = 
"5.0.0"; - - src = fetchFromGitHub { - owner = "aerogear"; - repo = pname; - rev = "refs/tags/${version}"; - hash = "sha256-iagXbsKsU4vNP9eg05bwXEo67iij3N2FF0BW50MjRGE="; - }; - - mvnHash = { - aarch64-linux = "sha256-zO79pRrY8TqrSK4bB8l4pl6834aFX2pidyk1j9Itz1E=`"; - x86_64-linux = "sha256-+ySBrQ9yQ5ZxuVUh/mnHNEmugru3n8x5VR/RYEDCLAo="; - }.${stdenv.hostPlatform.system} or (throw "Unsupported system ${stdenv.hostPlatform.system} for ${pname}"); - - - installPhase = '' - runHook preInstall - install -Dm444 -t "$out" target/keycloak-metrics-spi-*.jar - runHook postInstall - ''; - - meta = with lib; { - homepage = "https://github.com/aerogear/keycloak-metrics-spi"; - description = "Keycloak Service Provider that adds a metrics endpoint"; - license = licenses.asl20; - maintainers = with maintainers; [ benley ]; - platforms = [ "aarch64-linux" "x86_64-linux" ]; - }; -} diff --git a/packages/security/keycloak/keycloak-restrict-client-auth/default.nix b/packages/security/keycloak/keycloak-restrict-client-auth/default.nix deleted file mode 100644 index 16d3761..0000000 --- a/packages/security/keycloak/keycloak-restrict-client-auth/default.nix +++ /dev/null @@ -1,28 +0,0 @@ -{ maven, lib, fetchFromGitHub }: - -maven.buildMavenPackage rec { - pname = "keycloak-restrict-client-auth"; - version = "24.0.0"; - - src = fetchFromGitHub { - owner = "sventorben"; - repo = "keycloak-restrict-client-auth"; - rev = "v${version}"; - hash = "sha256-Pk0tj8cTHSBwVIzINE7GLA5b/eI97wuOTvO7UoXBStM="; - }; - - mvnHash = "sha256-Pk2yYuBqGs4k1KwaU06RQe1LpohZu0VI1pHEUBU3EUE="; - - installPhase = '' - runHook preInstall - install -Dm444 -t "$out" target/keycloak-restrict-client-auth.jar - runHook postInstall - ''; - - meta = with lib; { - homepage = "https://github.com/sventorben/keycloak-restrict-client-auth"; - description = "A Keycloak authenticator to restrict authorization on clients"; - license = licenses.mit; - maintainers = with maintainers; [ leona ]; - }; -} diff --git a/packages/security/keycloak/scim-for-keycloak/default.nix b/packages/security/keycloak/scim-for-keycloak/default.nix deleted file mode 100644 index 81686d2..0000000 --- a/packages/security/keycloak/scim-for-keycloak/default.nix +++ /dev/null @@ -1,33 +0,0 @@ -{ lib -, fetchFromGitHub -, maven -}: - -maven.buildMavenPackage rec { - pname = "scim-for-keycloak"; - version = "kc-20-b1"; # When updating also update mvnHash - - src = fetchFromGitHub { - owner = "Captain-P-Goldfish"; - repo = "scim-for-keycloak"; - rev = version; - hash = "sha256-kHjCVkcD8C0tIaMExDlyQmcWMhypisR1nyG93laB8WU="; - }; - - mvnHash = "sha256-cOuJSU57OuP+U7lI+pDD7g9HPIfZAoDPYLf+eO+XuF4="; - - installPhase = '' - install -D "scim-for-keycloak-server/target/scim-for-keycloak-${version}.jar" "$out/scim-for-keycloak-${version}.jar" - ''; - - meta = with lib; { - homepage = "https://github.com/Captain-P-Goldfish/scim-for-keycloak"; - description = "A third party module that extends Keycloak with SCIM functionality"; - sourceProvenance = with sourceTypes; [ - fromSource - binaryBytecode # dependencies - ]; - license = licenses.bsd3; - maintainers = with maintainers; [ mkg20001 ]; - }; -} diff --git a/packages/security/keycloak/scim-keycloak-user-storage-spi/default.nix b/packages/security/keycloak/scim-keycloak-user-storage-spi/default.nix deleted file mode 100644 index 6ecd386..0000000 --- a/packages/security/keycloak/scim-keycloak-user-storage-spi/default.nix +++ /dev/null @@ -1,32 +0,0 @@ -{ lib -, fetchFromGitHub -, maven -}: - -maven.buildMavenPackage { - pname = 
"scim-keycloak-user-storage-spi"; - version = "unstable-2024-02-14"; - - src = fetchFromGitHub { - owner = "justin-stephenson"; - repo = "scim-keycloak-user-storage-spi"; - rev = "6c59915836d9a559983326bbb87f895324bb75e4"; - hash = "sha256-BSso9lU542Aroxu0RIX6NARc10lGZ04A/WIWOVtdxHw="; - }; - - mvnHash = "sha256-xbGlVZl3YtbF372kCDh+UdK5pLe6C6WnGgbEXahlyLw="; - - installPhase = '' - install -D "target/scim-user-spi-0.0.1-SNAPSHOT.jar" "$out/scim-user-spi-0.0.1-SNAPSHOT.jar" - ''; - - meta = with lib; { - homepage = "https://github.com/justin-stephenson/scim-keycloak-user-storage-spi"; - description = "A third party module that extends Keycloak, allow for user storage in an external scimv2 server"; - sourceProvenance = with sourceTypes; [ - fromSource - ]; - license = licenses.mit; - maintainers = with maintainers; [ s1341 ]; - }; -} diff --git a/packages/servers/consul/default.nix b/packages/servers/consul/default.nix new file mode 100644 index 0000000..44cbfbd --- /dev/null +++ b/packages/servers/consul/default.nix @@ -0,0 +1,46 @@ +{ lib, buildGoModule, fetchFromGitHub, nixosTests }: + +buildGoModule rec { + pname = "consul"; + version = "1.16.4"; + rev = "v${version}"; + + # Note: Currently only release tags are supported, because they have the Consul UI + # vendored. See + # https://github.com/NixOS/nixpkgs/pull/48714#issuecomment-433454834 + # If you want to use a non-release commit as `src`, you probably want to improve + # this derivation so that it can build the UI's JavaScript from source. + # See https://github.com/NixOS/nixpkgs/pull/49082 for something like that. + # Or, if you want to patch something that doesn't touch the UI, you may want + # to apply your changes as patches on top of a release commit. + src = fetchFromGitHub { + owner = "hashicorp"; + repo = pname; + inherit rev; + hash = "sha256-49SGkRdFSQRhe6G8cuTTJKqg3bigoB10QvZcGvoWuGg="; + }; + + passthru.tests.consul = nixosTests.consul; + + # This corresponds to paths with package main - normally unneeded but consul + # has a split module structure in one repo + subPackages = ["." 
"connect/certgen"]; + + vendorHash = "sha256-cUfXSks49IlQ5BeZHxrzs7u4ikS4bcT68oiwe+wrpRk="; + + doCheck = false; + + ldflags = [ + "-X github.com/hashicorp/consul/version.GitDescribe=v${version}" + "-X github.com/hashicorp/consul/version.Version=${version}" + "-X github.com/hashicorp/consul/version.VersionPrerelease=" + ]; + + meta = with lib; { + description = "Tool for service discovery, monitoring and configuration"; + homepage = "https://www.consul.io/"; + platforms = platforms.linux ++ platforms.darwin; + license = licenses.mpl20; + mainProgram = "consul"; + }; +} diff --git a/patches/base/forgejo/oauth2-secret-from-env.patch b/patches/base/forgejo/oauth2-secret-from-env.patch index aa8b5ca..809f359 100644 --- a/patches/base/forgejo/oauth2-secret-from-env.patch +++ b/patches/base/forgejo/oauth2-secret-from-env.patch @@ -1,12 +1,12 @@ -diff --git a/cmd/admin.go b/cmd/admin.go -index f9fb1b6c6..2725b1f1c 100644 ---- a/cmd/admin.go -+++ b/cmd/admin.go -@@ -151,6 +151,7 @@ var ( +diff --git a/cmd/admin_auth_oauth.go b/cmd/admin_auth_oauth.go +index c151c0af27..e8a4f34707 100644 +--- a/cmd/admin_auth_oauth.go ++++ b/cmd/admin_auth_oauth.go +@@ -34,6 +34,7 @@ var ( Name: "secret", Value: "", Usage: "Client Secret", -+ EnvVar: "FORGEJO_ADMIN_OAUTH2_SECRET", ++ EnvVars: []string{"FORGEJO_ADMIN_OAUTH2_SECRET"}, }, - cli.StringFlag{ + &cli.StringFlag{ Name: "auto-discover-url", diff --git a/patches/base/kanidm/unixd-authenticated.patch b/patches/base/kanidm/unixd-authenticated.patch index 3fb2c44..d02e122 100644 --- a/patches/base/kanidm/unixd-authenticated.patch +++ b/patches/base/kanidm/unixd-authenticated.patch @@ -1,16 +1,16 @@ diff --git a/unix_integration/src/idprovider/kanidm.rs b/unix_integration/src/idprovider/kanidm.rs -index d1b02de0f..599dec6d5 100644 +index 6fc015756..31593f03e 100644 --- a/unix_integration/src/idprovider/kanidm.rs +++ b/unix_integration/src/idprovider/kanidm.rs -@@ -2,6 +2,7 @@ use async_trait::async_trait; - use kanidm_client::{ClientError, KanidmClient, StatusCode}; - use kanidm_proto::v1::{OperationError, UnixGroupToken, UnixUserToken}; - use tokio::sync::RwLock; +@@ -4,6 +4,7 @@ use kanidm_client::{ClientError, KanidmClient, StatusCode}; + use kanidm_proto::internal::OperationError; + use kanidm_proto::v1::{UnixGroupToken, UnixUserToken}; + use tokio::sync::{broadcast, RwLock}; +use std::env; use super::interface::{ - AuthCacheAction, AuthCredHandler, AuthRequest, AuthResult, GroupToken, Id, IdProvider, -@@ -11,12 +12,28 @@ use crate::unix_proto::PamAuthRequest; + // KeyStore, +@@ -25,12 +26,28 @@ const TAG_IDKEY: &str = "idkey"; pub struct KanidmProvider { client: RwLock, @@ -39,10 +39,10 @@ index d1b02de0f..599dec6d5 100644 } } } -@@ -73,7 +90,11 @@ impl From for GroupToken { - impl IdProvider for KanidmProvider { +@@ -118,7 +135,11 @@ impl IdProvider for KanidmProvider { + // Needs .read on all types except re-auth. 
- async fn provider_authenticate(&self) -> Result<(), IdpError> { + async fn provider_authenticate(&self, _tpm: &mut tpm::BoxedDynTpm) -> Result<(), IdpError> { - match self.client.write().await.auth_anonymous().await { + let auth_method = match (&self.auth_name, &self.auth_password) { + (Some(name), Some(password)) => self.client.write().await.auth_simple_password(name, password).await, diff --git a/patches/base/s3ql/s3v4.patch b/patches/base/s3ql/s3v4.patch index 9fdaf71..baab9b3 100644 --- a/patches/base/s3ql/s3v4.patch +++ b/patches/base/s3ql/s3v4.patch @@ -1,19 +1,200 @@ +From 11e3a9cea77cd8498d874f7fd69a938af4da68cd Mon Sep 17 00:00:00 2001 +From: xeji <36407913+xeji@users.noreply.github.com> +Date: Thu, 28 Mar 2024 22:19:11 +0100 +Subject: [PATCH] new backend s3c4: s3c with V4 request signatures (#349) + +--- + rst/backends.rst | 15 ++++ + src/s3ql/backends/__init__.py | 3 +- + src/s3ql/backends/s3.py | 100 ++---------------------- + src/s3ql/backends/s3c4.py | 140 ++++++++++++++++++++++++++++++++++ + src/s3ql/parse_args.py | 2 +- + tests/mock_server.py | 11 +++ + 6 files changed, 174 insertions(+), 97 deletions(-) + create mode 100644 src/s3ql/backends/s3c4.py + +diff --git a/rst/backends.rst b/rst/backends.rst +index 7220ee96..4bc68387 100644 +--- a/rst/backends.rst ++++ b/rst/backends.rst +@@ -341,6 +341,14 @@ can be an arbitrary prefix that will be prepended to all object names + used by S3QL. This allows you to store several S3QL file systems in + the same bucket. + ++`s3c://` authenticates API requests using AWS V2 signatures, which are ++deprecated by AWS but still accepted by many S3 compatible services. ++ ++`s3c4://` denotes a variant of this backend that works the same ++but uses AWS V4 signatures for request authentication instead: :: ++ ++ s3c4://:// ++ + The S3 compatible backend accepts the following backend options: + + .. option:: no-ssl +@@ -385,6 +393,13 @@ The S3 compatible backend accepts the following backend options: + necessary if your storage server does not return a valid response + body for a successful copy operation. + ++.. option:: sig-region= ++ ++ For `s3c4://` variant only: Region to use for calculating V4 ++ request signatures. Contrary to S3, the region is not a defined ++ part of the storage URL and must be specified separately. ++ Defaults to `us-east-1`. ++ + .. _`S3 COPY API`: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html + .. __: https://doc.s3.amazonaws.com/proposals/copy.html + +diff --git a/src/s3ql/backends/__init__.py b/src/s3ql/backends/__init__.py +index a1335762..442828cd 100644 +--- a/src/s3ql/backends/__init__.py ++++ b/src/s3ql/backends/__init__.py +@@ -6,7 +6,7 @@ + This work can be distributed under the terms of the GNU GPLv3. + ''' + +-from . import gs, local, rackspace, s3, s3c, swift, swiftks ++from . import gs, local, rackspace, s3, s3c, s3c4, swift, swiftks + from .b2.b2_backend import B2Backend + + #: Mapping from storage URL prefixes to backend classes +@@ -15,6 +15,7 @@ + 'local': local.Backend, + 'gs': gs.Backend, + 's3c': s3c.Backend, ++ 's3c4': s3c4.Backend, + 'swift': swift.Backend, + 'swiftks': swiftks.Backend, + 'rackspace': rackspace.Backend, diff --git a/src/s3ql/backends/s3.py b/src/s3ql/backends/s3.py -index d19b783..5b5831f 100644 +index e05a49ba..5548a855 100644 --- a/src/s3ql/backends/s3.py +++ b/src/s3ql/backends/s3.py -@@ -9,6 +9,7 @@ This work can be distributed under the terms of the GNU GPLv3. - from ..logging import logging, QuietError # Ensure use of custom logger class - from . 
import s3c - from .s3c import get_S3Error -+from .s3c import hmac_sha256 - from .common import NoSuchObject, retry - from ..inherit_docstrings import copy_ancestor_docstring +@@ -15,7 +15,7 @@ from xml.sax.saxutils import escape as xml_escape -@@ -236,10 +237,3 @@ class Backend(s3c.Backend): - signing_key = hmac_sha256(service_key, b'aws4_request') - self.signing_key = (signing_key, ymd) + from ..logging import QuietError +-from . import s3c ++from . import s3c4 + from .common import retry + from .s3c import get_S3Error + +@@ -28,22 +28,23 @@ + # pylint: disable=E1002,E1101 + + +-class Backend(s3c.Backend): ++class Backend(s3c4.Backend): + """A backend to store data in Amazon S3 + + This class uses standard HTTP connections to connect to S3. + """ + +- known_options = (s3c.Backend.known_options | {'sse', 'rrs', 'ia', 'oia', 'it'}) - { ++ known_options = (s3c4.Backend.known_options | {'sse', 'rrs', 'ia', 'oia', 'it'}) - { + 'dumb-copy', + 'disable-expect100', ++ 'sig-region', + } + + def __init__(self, options): + self.region = None +- self.signing_key = None + super().__init__(options) + self._set_storage_options(self._extra_put_headers) ++ self.sig_region = self.region + + def _parse_storage_url(self, storage_url, ssl_context): + hit = re.match(r'^s3s?://([^/]+)/([^/]+)(?:/(.*))?$', storage_url) +@@ -147,94 +148,3 @@ def _delete_multi(self, keys): + + except: + self.conn.discard() +- +- def _authorize_request(self, method, path, headers, subres, query_string): +- '''Add authorization information to *headers*''' +- +- # See http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html +- +- now = time.gmtime() +- # now = time.strptime('Fri, 24 May 2013 00:00:00 GMT', +- # '%a, %d %b %Y %H:%M:%S GMT') +- +- ymd = time.strftime('%Y%m%d', now) +- ymdhms = time.strftime('%Y%m%dT%H%M%SZ', now) +- +- headers['x-amz-date'] = ymdhms +- headers['x-amz-content-sha256'] = 'UNSIGNED-PAYLOAD' +- # headers['x-amz-content-sha256'] = hashlib.sha256(body).hexdigest() +- headers.pop('Authorization', None) +- +- auth_strs = [method] +- auth_strs.append(urllib.parse.quote(path)) +- +- if query_string: +- s = urllib.parse.urlencode( +- query_string, doseq=True, quote_via=urllib.parse.quote +- ).split('&') +- else: +- s = [] +- if subres: +- s.append(urllib.parse.quote(subres) + '=') +- if s: +- s = '&'.join(sorted(s)) +- else: +- s = '' +- auth_strs.append(s) +- +- # Headers +- sig_hdrs = sorted(x.lower() for x in headers.keys()) +- for hdr in sig_hdrs: +- auth_strs.append('%s:%s' % (hdr, headers[hdr].strip())) +- auth_strs.append('') +- auth_strs.append(';'.join(sig_hdrs)) +- auth_strs.append(headers['x-amz-content-sha256']) +- can_req = '\n'.join(auth_strs) +- # log.debug('canonical request: %s', can_req) +- +- can_req_hash = hashlib.sha256(can_req.encode()).hexdigest() +- str_to_sign = ( +- "AWS4-HMAC-SHA256\n" +- + ymdhms +- + '\n' +- + '%s/%s/s3/aws4_request\n' % (ymd, self.region) +- + can_req_hash +- ) +- # log.debug('string to sign: %s', str_to_sign) +- +- if self.signing_key is None or self.signing_key[1] != ymd: +- self.update_signing_key(ymd) +- signing_key = self.signing_key[0] +- +- sig = hmac_sha256(signing_key, str_to_sign.encode(), hex=True) +- +- cred = '%s/%04d%02d%02d/%s/s3/aws4_request' % ( +- self.login, +- now.tm_year, +- now.tm_mon, +- now.tm_mday, +- self.region, +- ) +- +- headers['Authorization'] = ( +- 'AWS4-HMAC-SHA256 ' +- 'Credential=%s,' +- 'SignedHeaders=%s,' +- 'Signature=%s' % (cred, ';'.join(sig_hdrs), sig) +- ) +- +- def update_signing_key(self, 
ymd): +- date_key = hmac_sha256(("AWS4" + self.password).encode(), ymd.encode()) +- region_key = hmac_sha256(date_key, self.region.encode()) +- service_key = hmac_sha256(region_key, b's3') +- signing_key = hmac_sha256(service_key, b'aws4_request') +- +- self.signing_key = (signing_key, ymd) +- - -def hmac_sha256(key, msg, hex=False): - d = hmac.new(key, msg, hashlib.sha256) @@ -21,76 +202,86 @@ index d19b783..5b5831f 100644 - return d.hexdigest() - else: - return d.digest() -diff --git a/src/s3ql/backends/s3c.py b/src/s3ql/backends/s3c.py -index 11687d5..05750b9 100644 ---- a/src/s3ql/backends/s3c.py -+++ b/src/s3ql/backends/s3c.py -@@ -78,6 +78,8 @@ class Backend(AbstractBackend, metaclass=ABCDocstMeta): - self.conn = self._get_conn() - self.password = options.backend_password - self.login = options.backend_login -+ self.region = "us-east-1" +diff --git a/src/s3ql/backends/s3c4.py b/src/s3ql/backends/s3c4.py +new file mode 100644 +index 00000000..37ff0b7a +--- /dev/null ++++ b/src/s3ql/backends/s3c4.py +@@ -0,0 +1,140 @@ ++''' ++s3c4.py - this file is part of S3QL. ++ ++Copyright © 2008 Nikolaus Rath ++ ++This work can be distributed under the terms of the GNU GPLv3. ++''' ++ ++import hashlib ++import hmac ++import logging ++import re ++import time ++import urllib.parse ++from xml.sax.saxutils import escape as xml_escape ++ ++from ..logging import QuietError ++from . import s3c ++from .common import retry ++from .s3c import get_S3Error ++ ++log = logging.getLogger(__name__) ++ ++# Maximum number of keys that can be deleted at once ++MAX_KEYS = 1000 ++ ++# Pylint goes berserk with false positives ++# pylint: disable=E1002,E1101 ++ ++ ++class Backend(s3c.Backend): ++ """A backend to stored data in some S3 compatible storage service. ++ ++ This classes uses AWS Signature V4 for authorization. 
++ """ ++ ++ known_options = s3c.Backend.known_options | {'sig-region'} ++ ++ def __init__(self, options): ++ self.sig_region = options.backend_options.get('sig-region', 'us-east-1') + self.signing_key = None - - @property - @copy_ancestor_docstring -@@ -597,43 +599,76 @@ class Backend(AbstractBackend, metaclass=ABCDocstMeta): - def _authorize_request(self, method, path, headers, subres, query_string): - '''Add authorization information to *headers*''' - -- # See http://docs.amazonwebservices.com/AmazonS3/latest/dev/RESTAuthentication.html ++ super().__init__(options) ++ ++ def __str__(self): ++ return 's3c4://%s/%s/%s' % (self.hostname, self.bucket_name, self.prefix) ++ ++ def _authorize_request(self, method, path, headers, subres, query_string): ++ '''Add authorization information to *headers*''' ++ + # See http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html - -- # Date, can't use strftime because it's locale dependent - now = time.gmtime() -- headers['Date'] = ('%s, %02d %s %04d %02d:%02d:%02d GMT' -- % (C_DAY_NAMES[now.tm_wday], -- now.tm_mday, -- C_MONTH_NAMES[now.tm_mon - 1], -- now.tm_year, now.tm_hour, -- now.tm_min, now.tm_sec)) -- -- auth_strs = [method, '\n'] -- -- for hdr in ('Content-MD5', 'Content-Type', 'Date'): -- if hdr in headers: -- auth_strs.append(headers[hdr]) -- auth_strs.append('\n') -- -- for hdr in sorted(x for x in headers if x.lower().startswith('x-amz-')): -- val = ' '.join(re.split(r'\s*\n\s*', headers[hdr].strip())) -- auth_strs.append('%s:%s\n' % (hdr, val)) -- -- # Always include bucket name in path for signing -- if self.hostname.startswith(self.bucket_name): -- path = '/%s%s' % (self.bucket_name, path) -- sign_path = urllib.parse.quote(path) -- auth_strs.append(sign_path) -- if subres: -- auth_strs.append('?%s' % subres) -+ #now = time.strptime('Fri, 24 May 2013 00:00:00 GMT', ++ ++ now = time.gmtime() ++ # now = time.strptime('Fri, 24 May 2013 00:00:00 GMT', + # '%a, %d %b %Y %H:%M:%S GMT') - -- # False positive, hashlib *does* have sha1 member -- #pylint: disable=E1101 -- auth_str = ''.join(auth_strs).encode() -- signature = b64encode(hmac.new(self.password.encode(), auth_str, -- hashlib.sha1).digest()).decode() ++ + ymd = time.strftime('%Y%m%d', now) + ymdhms = time.strftime('%Y%m%dT%H%M%SZ', now) - -- headers['Authorization'] = 'AWS %s:%s' % (self.login, signature) ++ ++ # add non-standard port to host header, needed for correct signature ++ if self.port != 443: ++ headers['host'] = '%s:%s' % (self.hostname, self.port) ++ + headers['x-amz-date'] = ymdhms + headers['x-amz-content-sha256'] = 'UNSIGNED-PAYLOAD' -+ #headers['x-amz-content-sha256'] = hashlib.sha256(body).hexdigest() ++ + headers.pop('Authorization', None) + + auth_strs = [method] + auth_strs.append(urllib.parse.quote(path)) + + if query_string: -+ s = urllib.parse.urlencode(query_string, doseq=True, -+ quote_via=urllib.parse.quote).split('&') ++ s = urllib.parse.urlencode( ++ query_string, doseq=True, quote_via=urllib.parse.quote ++ ).split('&') + else: + s = [] + if subres: @@ -102,20 +293,24 @@ index 11687d5..05750b9 100644 + auth_strs.append(s) + + # Headers -+ sig_hdrs = sorted(x for x in (x.lower() for x in headers.keys()) if x == "host" or x == "content-type" or x.startswith("x-amz-")) ++ sig_hdrs = sorted(x.lower() for x in headers.keys()) + for hdr in sig_hdrs: + auth_strs.append('%s:%s' % (hdr, headers[hdr].strip())) + auth_strs.append('') + auth_strs.append(';'.join(sig_hdrs)) + auth_strs.append(headers['x-amz-content-sha256']) + can_req = 
'\n'.join(auth_strs) -+ #log.debug('canonical request: %s', can_req) ++ # log.debug('canonical request: %s', can_req) + + can_req_hash = hashlib.sha256(can_req.encode()).hexdigest() -+ str_to_sign = ("AWS4-HMAC-SHA256\n" + ymdhms + '\n' + -+ '%s/%s/s3/aws4_request\n' % (ymd, self.region) + -+ can_req_hash) -+ #log.debug('string to sign: %s', str_to_sign) ++ str_to_sign = ( ++ "AWS4-HMAC-SHA256\n" ++ + ymdhms ++ + '\n' ++ + '%s/%s/s3/aws4_request\n' % (ymd, self.sig_region) ++ + can_req_hash ++ ) ++ # log.debug('string to sign: %s', str_to_sign) + + if self.signing_key is None or self.signing_key[1] != ymd: + self.update_signing_key(ymd) @@ -123,47 +318,75 @@ index 11687d5..05750b9 100644 + + sig = hmac_sha256(signing_key, str_to_sign.encode(), hex=True) + -+ cred = ('%s/%04d%02d%02d/%s/s3/aws4_request' -+ % (self.login, now.tm_year, now.tm_mon, now.tm_mday, -+ self.region)) ++ cred = '%s/%04d%02d%02d/%s/s3/aws4_request' % ( ++ self.login, ++ now.tm_year, ++ now.tm_mon, ++ now.tm_mday, ++ self.sig_region, ++ ) + + headers['Authorization'] = ( + 'AWS4-HMAC-SHA256 ' + 'Credential=%s,' + 'SignedHeaders=%s,' -+ 'Signature=%s' % (cred, ';'.join(sig_hdrs), sig)) ++ 'Signature=%s' % (cred, ';'.join(sig_hdrs), sig) ++ ) + + def update_signing_key(self, ymd): -+ date_key = hmac_sha256(("AWS4" + self.password).encode(), -+ ymd.encode()) -+ region_key = hmac_sha256(date_key, self.region.encode()) ++ date_key = hmac_sha256(("AWS4" + self.password).encode(), ymd.encode()) ++ region_key = hmac_sha256(date_key, self.sig_region.encode()) + service_key = hmac_sha256(region_key, b's3') + signing_key = hmac_sha256(service_key, b'aws4_request') + + self.signing_key = (signing_key, ymd) - - def _send_request(self, method, path, headers, subres=None, query_string=None, body=None): - '''Add authentication and send request -@@ -646,7 +681,7 @@ class Backend(AbstractBackend, metaclass=ABCDocstMeta): - - if not self.hostname.startswith(self.bucket_name): - path = '/%s%s' % (self.bucket_name, path) -- headers['host'] = self.hostname -+ headers['host'] = self.hostname if int(self.port) == 80 or int(self.port) == 443 else f"{self.hostname}:{self.port}" - - self._authorize_request(method, path, headers, subres, query_string) - -@@ -950,6 +985,13 @@ def md5sum_b64(buf): - - return b64encode(hashlib.md5(buf).digest()).decode('ascii') - ++ ++ +def hmac_sha256(key, msg, hex=False): + d = hmac.new(key, msg, hashlib.sha256) + if hex: + return d.hexdigest() + else: + return d.digest() -+ - def _parse_retry_after(header): - '''Parse headers for Retry-After value''' +diff --git a/src/s3ql/parse_args.py b/src/s3ql/parse_args.py +index 272e10c7..24ad50f4 100644 +--- a/src/s3ql/parse_args.py ++++ b/src/s3ql/parse_args.py +@@ -374,7 +374,7 @@ def storage_url_type(s): + # slash (even when using a prefix), but we can't do that now because it + # would make file systems created without trailing slash inaccessible. 
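# Editorial sketch (hypothetical host and bucket names) of what the extended
# match below accepts, mirroring the existing s3c handling:
#
#   s3c4://s3.example.com/bucket          -> trailing slash appended here
#   s3c4://s3.example.com/bucket/prefix/  -> already well-formed, left as-is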
+ if re.match(r'^(s3|gs)://[^/]+$', s) or re.match( +- r'^(s3c|swift(ks)?|rackspace)://[^/]+/[^/]+$', s ++ r'^(s3c|s3c4|swift(ks)?|rackspace)://[^/]+/[^/]+$', s + ): + s += '/' +diff --git a/tests/mock_server.py b/tests/mock_server.py +index b453e705..e3084065 100644 +--- a/tests/mock_server.py ++++ b/tests/mock_server.py +@@ -292,6 +292,16 @@ def send_error(self, status, message=None, code='', resource='', extra_headers=N + self.wfile.write(content) + + ++class S3C4RequestHandler(S3CRequestHandler): ++ '''Request Handler for s3c4 backend ++ ++ Currently identical to S3CRequestHandler since mock request handlers ++ do not check request signatures. ++ ''' ++ ++ pass ++ ++ + class BasicSwiftRequestHandler(S3CRequestHandler): + '''A request handler implementing a subset of the OpenStack Swift Interface + +@@ -569,6 +579,7 @@ def inline_error(http_status, body): + #: corresponding storage urls + handler_list = [ + (S3CRequestHandler, 's3c://%(host)s:%(port)d/s3ql_test'), ++ (S3C4RequestHandler, 's3c4://%(host)s:%(port)d/s3ql_test'), + # Special syntax only for testing against mock server + (BasicSwiftRequestHandler, 'swift://%(host)s:%(port)d/s3ql_test'), + (CopySwiftRequestHandler, 'swift://%(host)s:%(port)d/s3ql_test'),
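# Editorial sketch, standalone and not part of the patch above: the signing
# key that s3c4's update_signing_key() caches follows the standard AWS V4
# derivation chain. The secret, date, and region below are example values.

import hashlib
import hmac

def hmac_sha256(key: bytes, msg: bytes) -> bytes:
    return hmac.new(key, msg, hashlib.sha256).digest()

date_key = hmac_sha256(b"AWS4" + b"example-secret-key", b"20240328")
region_key = hmac_sha256(date_key, b"us-east-1")  # the sig-region option, default us-east-1
service_key = hmac_sha256(region_key, b"s3")
signing_key = hmac_sha256(service_key, b"aws4_request")  # cached per day as (key, ymd)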