Platform Unstable #97

Merged
max merged 34 commits from platform-unstable into master 2024-06-07 00:17:36 +03:00
233 changed files with 0 additions and 57530 deletions
Showing only changes of commit 88a7ec10dc

View file

@ -1,38 +0,0 @@
ratings:
paths:
- "**/*.go"
checks:
file-lines:
config:
threshold: 500
method-complexity:
config:
threshold: 15
method-lines:
config:
threshold: 80
similar-code:
enabled: false
return-statements:
config:
threshold: 10
argument-count:
config:
threshold: 6
engines:
fixme:
enabled: true
config:
strings:
- FIXME
- HACK
- XXX
- BUG
golint:
enabled: true
govet:
enabled: true
gofmt:
enabled: true

View file

@ -1,31 +0,0 @@
coverage:
status:
project:
default:
# basic
target: auto
threshold: 50
base: auto
# advanced
branches: null
if_no_uploads: error
if_not_found: success
if_ci_failed: error
only_pulls: false
flags: null
paths: null
patch:
default:
# basic
target: auto
threshold: 50
base: auto
# advanced
branches: null
if_no_uploads: error
if_not_found: success
if_ci_failed: error
only_pulls: false
flags: null
paths: null
comment: false

View file

@ -1,2 +0,0 @@
source ../../build-support/activate-shell
nix_direnv_watch_file project.nix

View file

@ -1,45 +0,0 @@
tag_annotation
coverage.out
cmd/ipfs-cluster-service/ipfs-cluster-service
cmd/ipfs-cluster-ctl/ipfs-cluster-ctl
cmd/ipfs-cluster-follow/ipfs-cluster-follow
sharness/lib/sharness
sharness/test-results
sharness/trash*
vendor/
raftFolderFromTest*
peerstore
shardTesting
compose
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
test/sharness/test-results
test/sharness/trash*
test/sharness/lib/sharness
test/sharness/.test_config
test/sharness/.test_ipfs
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof

File diff suppressed because it is too large

View file

@ -1,3 +0,0 @@
# Guidelines for contributing
Please see https://ipfscluster.io/developer/contribute .

View file

@ -1,3 +0,0 @@
Copyright 2019. Protocol Labs, Inc.
This library is dual-licensed under Apache 2.0 and MIT terms.

View file

@ -1,5 +0,0 @@
Dual-licensed under MIT and ASLv2, by way of the [Permissive License
Stack](https://protocol.ai/blog/announcing-the-permissive-license-stack/).
Apache-2.0: https://www.apache.org/licenses/license-2.0
MIT: https://www.opensource.org/licenses/mit

View file

@ -1,13 +0,0 @@
Copyright 2020. Protocol Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -1,19 +0,0 @@
Copyright 2020. Protocol Labs, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@ -1,82 +0,0 @@
sharness = sharness/lib/sharness
export GO111MODULE := on
all: build
clean: rwundo clean_sharness
$(MAKE) -C cmd/ipfs-cluster-service clean
$(MAKE) -C cmd/ipfs-cluster-ctl clean
$(MAKE) -C cmd/ipfs-cluster-follow clean
@rm -rf ./test/testingData
@rm -rf ./compose
install:
$(MAKE) -C cmd/ipfs-cluster-service install
$(MAKE) -C cmd/ipfs-cluster-ctl install
$(MAKE) -C cmd/ipfs-cluster-follow install
build:
$(MAKE) -C cmd/ipfs-cluster-service build
$(MAKE) -C cmd/ipfs-cluster-ctl build
$(MAKE) -C cmd/ipfs-cluster-follow build
service:
$(MAKE) -C cmd/ipfs-cluster-service ipfs-cluster-service
ctl:
$(MAKE) -C cmd/ipfs-cluster-ctl ipfs-cluster-ctl
follow:
$(MAKE) -C cmd/ipfs-cluster-follow ipfs-cluster-follow
check:
go vet ./...
staticcheck --checks all ./...
misspell -error -locale US .
test:
go test -v ./...
test_sharness: $(sharness)
@sh sharness/run-sharness-tests.sh
test_problem:
go test -timeout 20m -loglevel "DEBUG" -v -run $(problematic_test)
$(sharness):
@echo "Downloading sharness"
@curl -L -s -o sharness/lib/sharness.tar.gz http://github.com/chriscool/sharness/archive/28c7490f5cdf1e95a8ebebf8b06ed5588db13875.tar.gz
@cd sharness/lib; tar -zxf sharness.tar.gz; cd ../..
@mv sharness/lib/sharness-28c7490f5cdf1e95a8ebebf8b06ed5588db13875 sharness/lib/sharness
@rm sharness/lib/sharness.tar.gz
clean_sharness:
@rm -rf ./sharness/test-results
@rm -rf ./sharness/lib/sharness
@rm -rf sharness/trash\ directory*
docker:
docker build -t cluster-image -f Dockerfile .
docker run --name tmp-make-cluster -d --rm cluster-image && sleep 4
docker exec tmp-make-cluster sh -c "ipfs-cluster-ctl version"
docker exec tmp-make-cluster sh -c "ipfs-cluster-service -v"
docker kill tmp-make-cluster
docker build -t cluster-image-test -f Dockerfile-test .
docker run --name tmp-make-cluster-test -d --rm cluster-image && sleep 4
docker exec tmp-make-cluster-test sh -c "ipfs-cluster-ctl version"
docker exec tmp-make-cluster-test sh -c "ipfs-cluster-service -v"
docker kill tmp-make-cluster-test
docker-compose:
mkdir -p compose/ipfs0 compose/ipfs1 compose/cluster0 compose/cluster1
chmod -R 0777 compose
CLUSTER_SECRET=$(shell od -vN 32 -An -tx1 /dev/urandom | tr -d ' \n') docker-compose up -d
sleep 35
docker exec cluster0 ipfs-cluster-ctl peers ls
docker exec cluster1 ipfs-cluster-ctl peers ls
docker exec cluster0 ipfs-cluster-ctl peers ls | grep -o "Sees 2 other peers" | uniq -c | grep 3
docker exec cluster1 ipfs-cluster-ctl peers ls | grep -o "Sees 2 other peers" | uniq -c | grep 3
docker-compose down
prcheck: check service ctl follow test
.PHONY: all test test_sharness clean_sharness rw rwundo publish service ctl install clean docker

View file

@ -1,73 +0,0 @@
# IPFS Cluster
[![Made by](https://img.shields.io/badge/By-Protocol%20Labs-000000.svg?style=flat-square)](https://protocol.ai)
[![Main project](https://img.shields.io/badge/project-ipfs--cluster-ef5c43.svg?style=flat-square)](http://github.com/ipfs-cluster)
[![Discord](https://img.shields.io/badge/forum-discuss.ipfs.io-f9a035.svg?style=flat-square)](https://discuss.ipfs.io/c/help/help-ipfs-cluster/24)
[![Matrix channel](https://img.shields.io/badge/matrix-%23ipfs--cluster-3c8da0.svg?style=flat-square)](https://app.element.io/#/room/#ipfs-cluster:ipfs.io)
[![pkg.go.dev](https://pkg.go.dev/badge/github.com/ipfs-cluster/ipfs-cluster)](https://pkg.go.dev/github.com/ipfs-cluster/ipfs-cluster)
[![Go Report Card](https://goreportcard.com/badge/github.com/ipfs-cluster/ipfs-cluster)](https://goreportcard.com/report/github.com/ipfs-cluster/ipfs-cluster)
[![codecov](https://codecov.io/gh/ipfs-cluster/ipfs-cluster/branch/master/graph/badge.svg)](https://codecov.io/gh/ipfs-cluster/ipfs-cluster)
> Pinset orchestration for IPFS
<p align="center">
<img src="https://ipfscluster.io/cluster/png/IPFS_Cluster_color_no_text.png" alt="logo" width="300" height="300" />
</p>
[IPFS Cluster](https://ipfscluster.io) provides data orchestration across a swarm of IPFS daemons by allocating, replicating and tracking a global pinset distributed among multiple peers.
There are 3 different applications:
* A cluster peer application: `ipfs-cluster-service`, to be run along with `go-ipfs` as a sidecar.
* A client CLI application: `ipfs-cluster-ctl`, which allows easily interacting with the peer's HTTP API.
* An additional "follower" peer application: `ipfs-cluster-follow`, focused on simplifying the process of configuring and running follower peers.
---
### Are you using IPFS Cluster?
Please participate in the [IPFS Cluster user registry](https://docs.google.com/forms/d/e/1FAIpQLSdWF5aXNXrAK_sCyu1eVv2obTaKVO3Ac5dfgl2r5_IWcizGRg/viewform).
---
## Table of Contents
- [Documentation](#documentation)
- [News & Roadmap](#news--roadmap)
- [Install](#install)
- [Usage](#usage)
- [Contribute](#contribute)
- [License](#license)
## Documentation
Please visit https://ipfscluster.io/documentation/ to access user documentation, guides and any other resources, including detailed **download** and **usage** instructions.
## News & Roadmap
We regularly post project updates to https://ipfscluster.io/news/ .
The most up-to-date *Roadmap* is available at https://ipfscluster.io/roadmap/ .
## Install
Instructions for different installation methods (including from source) are available at https://ipfscluster.io/download .
## Usage
Extensive usage information is provided at https://ipfscluster.io/documentation/ , including:
* [Docs for `ipfs-cluster-service`](https://ipfscluster.io/documentation/reference/service/)
* [Docs for `ipfs-cluster-ctl`](https://ipfscluster.io/documentation/reference/ctl/)
* [Docs for `ipfs-cluster-follow`](https://ipfscluster.io/documentation/reference/follow/)
## Contribute
PRs accepted. As part of the IPFS project, we have some [contribution guidelines](https://ipfscluster.io/support/#contribution-guidelines).
## License
This library is dual-licensed under Apache 2.0 and MIT terms.
© 2022. Protocol Labs, Inc.

View file

@ -1,298 +0,0 @@
package ipfscluster
// This file has tests for Add* using multiple cluster peers.
import (
"context"
"mime/multipart"
"sync"
"testing"
"time"
files "github.com/ipfs/go-ipfs-files"
"github.com/ipfs-cluster/ipfs-cluster/adder"
"github.com/ipfs-cluster/ipfs-cluster/api"
"github.com/ipfs-cluster/ipfs-cluster/test"
peer "github.com/libp2p/go-libp2p/core/peer"
)
func TestAdd(t *testing.T) {
ctx := context.Background()
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
sth := test.NewShardingTestHelper()
defer sth.Clean(t)
waitForLeaderAndMetrics(t, clusters)
t.Run("default", func(t *testing.T) {
params := api.DefaultAddParams()
params.Shard = false
params.Name = "testlocal"
mfr, closer := sth.GetTreeMultiReader(t)
defer closer.Close()
r := multipart.NewReader(mfr, mfr.Boundary())
ci, err := clusters[0].AddFile(context.Background(), r, params)
if err != nil {
t.Fatal(err)
}
if ci.String() != test.ShardingDirBalancedRootCID {
t.Fatal("unexpected root CID for local add")
}
// We need to sleep a lot because it takes time to
// catch up on a first/single pin on crdts
time.Sleep(10 * time.Second)
f := func(t *testing.T, c *Cluster) {
pin := c.StatusLocal(ctx, ci)
if pin.Error != "" {
t.Error(pin.Error)
}
if pin.Status != api.TrackerStatusPinned {
t.Error("item should be pinned and is", pin.Status)
}
}
runF(t, clusters, f)
})
t.Run("local_one_allocation", func(t *testing.T) {
params := api.DefaultAddParams()
params.Shard = false
params.Name = "testlocal"
params.ReplicationFactorMin = 1
params.ReplicationFactorMax = 1
params.Local = true
mfr, closer := sth.GetTreeMultiReader(t)
defer closer.Close()
r := multipart.NewReader(mfr, mfr.Boundary())
ci, err := clusters[2].AddFile(context.Background(), r, params)
if err != nil {
t.Fatal(err)
}
if ci.String() != test.ShardingDirBalancedRootCID {
t.Fatal("unexpected root CID for local add")
}
// We need to sleep a lot because it takes time to
// catch up on a first/single pin on crdts
time.Sleep(10 * time.Second)
f := func(t *testing.T, c *Cluster) {
pin := c.StatusLocal(ctx, ci)
if pin.Error != "" {
t.Error(pin.Error)
}
switch c.id {
case clusters[2].id:
if pin.Status != api.TrackerStatusPinned {
t.Error("item should be pinned and is", pin.Status)
}
default:
if pin.Status != api.TrackerStatusRemote {
t.Errorf("item should only be allocated to cluster2")
}
}
}
runF(t, clusters, f)
})
}
func TestAddWithUserAllocations(t *testing.T) {
ctx := context.Background()
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
sth := test.NewShardingTestHelper()
defer sth.Clean(t)
waitForLeaderAndMetrics(t, clusters)
t.Run("local", func(t *testing.T) {
params := api.DefaultAddParams()
params.ReplicationFactorMin = 2
params.ReplicationFactorMax = 2
params.UserAllocations = []peer.ID{clusters[0].id, clusters[1].id}
params.Shard = false
params.Name = "testlocal"
mfr, closer := sth.GetTreeMultiReader(t)
defer closer.Close()
r := multipart.NewReader(mfr, mfr.Boundary())
ci, err := clusters[0].AddFile(context.Background(), r, params)
if err != nil {
t.Fatal(err)
}
pinDelay()
f := func(t *testing.T, c *Cluster) {
if c == clusters[0] || c == clusters[1] {
pin := c.StatusLocal(ctx, ci)
if pin.Error != "" {
t.Error(pin.Error)
}
if pin.Status != api.TrackerStatusPinned {
t.Error("item should be pinned and is", pin.Status)
}
} else {
pin := c.StatusLocal(ctx, ci)
if pin.Status != api.TrackerStatusRemote {
t.Error("expected tracker status remote")
}
}
}
runF(t, clusters, f)
})
}
func TestAddPeerDown(t *testing.T) {
ctx := context.Background()
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
sth := test.NewShardingTestHelper()
defer sth.Clean(t)
err := clusters[0].Shutdown(ctx)
if err != nil {
t.Fatal(err)
}
waitForLeaderAndMetrics(t, clusters)
t.Run("local", func(t *testing.T) {
params := api.DefaultAddParams()
params.Shard = false
params.Name = "testlocal"
mfr, closer := sth.GetTreeMultiReader(t)
defer closer.Close()
r := multipart.NewReader(mfr, mfr.Boundary())
ci, err := clusters[1].AddFile(context.Background(), r, params)
if err != nil {
t.Fatal(err)
}
if ci.String() != test.ShardingDirBalancedRootCID {
t.Fatal("unexpected root CID for local add")
}
// We need to sleep a lot because it takes time to
// catch up on a first/single pin on crdts
time.Sleep(10 * time.Second)
f := func(t *testing.T, c *Cluster) {
if c.id == clusters[0].id {
return
}
pin := c.StatusLocal(ctx, ci)
if pin.Error != "" {
t.Error(pin.Error)
}
if pin.Status != api.TrackerStatusPinned {
t.Error("item should be pinned")
}
}
runF(t, clusters, f)
})
}
func TestAddOnePeerFails(t *testing.T) {
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
sth := test.NewShardingTestHelper()
defer sth.Clean(t)
waitForLeaderAndMetrics(t, clusters)
t.Run("local", func(t *testing.T) {
params := api.DefaultAddParams()
params.Shard = false
params.Name = "testlocal"
lg, closer := sth.GetRandFileReader(t, 100000) // 100 MB
defer closer.Close()
mr := files.NewMultiFileReader(lg, true)
r := multipart.NewReader(mr, mr.Boundary())
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
_, err := clusters[0].AddFile(context.Background(), r, params)
if err != nil {
t.Error(err)
}
}()
// Disconnect one cluster peer (the last). Things should keep working.
// It is important that we close the hosts; otherwise the RPC
// servers keep working along with BlockPuts.
time.Sleep(100 * time.Millisecond)
c := clusters[nClusters-1]
c.Shutdown(context.Background())
c.dht.Close()
c.host.Close()
wg.Wait()
})
}
func TestAddAllPeersFail(t *testing.T) {
ctx := context.Background()
clusters, mock := createClusters(t)
defer shutdownClusters(t, clusters, mock)
sth := test.NewShardingTestHelper()
defer sth.Clean(t)
waitForLeaderAndMetrics(t, clusters)
t.Run("local", func(t *testing.T) {
// Prevent added content from being allocated to cluster 0
// as it is already going to have something.
_, err := clusters[0].Pin(ctx, test.Cid1, api.PinOptions{
ReplicationFactorMin: 1,
ReplicationFactorMax: 1,
UserAllocations: []peer.ID{clusters[0].host.ID()},
})
if err != nil {
t.Fatal(err)
}
ttlDelay()
params := api.DefaultAddParams()
params.Shard = false
params.Name = "testlocal"
// Allocate to every peer except 0 (which already has a pin)
params.PinOptions.ReplicationFactorMax = nClusters - 1
params.PinOptions.ReplicationFactorMin = nClusters - 1
lg, closer := sth.GetRandFileReader(t, 100000) // 100 MB
defer closer.Close()
mr := files.NewMultiFileReader(lg, true)
r := multipart.NewReader(mr, mr.Boundary())
// var cid cid.Cid
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
_, err := clusters[0].AddFile(context.Background(), r, params)
if err != adder.ErrBlockAdder {
t.Error("expected ErrBlockAdder. Got: ", err)
}
}()
time.Sleep(100 * time.Millisecond)
// Shut down all clusters except 0 to see the right error.
// It is important that we shut down the hosts; otherwise
// the RPC servers keep working along with BlockPuts.
// Note that this kills raft.
runF(t, clusters[1:], func(t *testing.T, c *Cluster) {
c.Shutdown(ctx)
c.dht.Close()
c.host.Close()
})
wg.Wait()
})
}

View file

@ -1,331 +0,0 @@
// Package adder implements functionality to add content to IPFS daemons
// managed by the Cluster.
package adder
import (
"context"
"errors"
"fmt"
"io"
"mime/multipart"
"strings"
"github.com/ipfs-cluster/ipfs-cluster/adder/ipfsadd"
"github.com/ipfs-cluster/ipfs-cluster/api"
"github.com/ipfs/go-unixfs"
"github.com/ipld/go-car"
peer "github.com/libp2p/go-libp2p/core/peer"
cid "github.com/ipfs/go-cid"
files "github.com/ipfs/go-ipfs-files"
cbor "github.com/ipfs/go-ipld-cbor"
ipld "github.com/ipfs/go-ipld-format"
logging "github.com/ipfs/go-log/v2"
merkledag "github.com/ipfs/go-merkledag"
multihash "github.com/multiformats/go-multihash"
)
var logger = logging.Logger("adder")
// go-merkledag does this, but it may be moved.
// We include it for explicitness.
func init() {
ipld.Register(cid.DagProtobuf, merkledag.DecodeProtobufBlock)
ipld.Register(cid.Raw, merkledag.DecodeRawBlock)
ipld.Register(cid.DagCBOR, cbor.DecodeBlock)
}
// ClusterDAGService is an implementation of ipld.DAGService plus a Finalize
// method. ClusterDAGServices can be used to provide Adders with a different
// add implementation.
type ClusterDAGService interface {
ipld.DAGService
// Finalize receives the IPFS content root CID as
// returned by the ipfs adder.
Finalize(ctx context.Context, ipfsRoot api.Cid) (api.Cid, error)
// Allocations returns the allocations made by the cluster DAG service
// for the added content.
Allocations() []peer.ID
}
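// Cluster ships two ClusterDAGService implementations: adder/single
// (content is added as a single pin to its allocated peers) and
// adder/sharding (content is split into shards tracked by a cluster
// DAG); adderutils selects between them based on AddParams.Shard.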
// A dagFormatter can create dags from files.Node. It can keep state
// to add several files to the same dag.
type dagFormatter interface {
Add(name string, f files.Node) (api.Cid, error)
}
// Adder is used to add content to IPFS Cluster using an implementation of
// ClusterDAGService.
type Adder struct {
ctx context.Context
cancel context.CancelFunc
dgs ClusterDAGService
params api.AddParams
// AddedOutput updates are placed on this channel
// whenever a block is processed. They contain information
// about the block, the CID, the Name etc. and are mostly
// meant to be streamed back to the user.
output chan api.AddedOutput
}
// New returns a new Adder with the given ClusterDAGService, add options and a
// channel to send updates during the adding process.
//
// An Adder may only be used once.
func New(ds ClusterDAGService, p api.AddParams, out chan api.AddedOutput) *Adder {
// Discard all progress update output as the caller has not provided
// a channel for them to listen on.
if out == nil {
out = make(chan api.AddedOutput, 100)
go func() {
for range out {
}
}()
}
return &Adder{
dgs: ds,
params: p,
output: out,
}
}
func (a *Adder) setContext(ctx context.Context) {
if a.ctx == nil { // only allows first context
ctxc, cancel := context.WithCancel(ctx)
a.ctx = ctxc
a.cancel = cancel
}
}
// FromMultipart adds content from a multipart.Reader. The adder will
// no longer be usable after calling this method.
func (a *Adder) FromMultipart(ctx context.Context, r *multipart.Reader) (api.Cid, error) {
logger.Debugf("adding from multipart with params: %+v", a.params)
f, err := files.NewFileFromPartReader(r, "multipart/form-data")
if err != nil {
return api.CidUndef, err
}
defer f.Close()
return a.FromFiles(ctx, f)
}
// FromFiles adds content from a files.Directory. The adder will no longer
// be usable after calling this method.
func (a *Adder) FromFiles(ctx context.Context, f files.Directory) (api.Cid, error) {
logger.Debug("adding from files")
a.setContext(ctx)
if a.ctx.Err() != nil { // don't allow running twice
return api.CidUndef, a.ctx.Err()
}
defer a.cancel()
defer close(a.output)
var dagFmtr dagFormatter
var err error
switch a.params.Format {
case "", "unixfs":
dagFmtr, err = newIpfsAdder(ctx, a.dgs, a.params, a.output)
case "car":
dagFmtr, err = newCarAdder(ctx, a.dgs, a.params, a.output)
default:
err = errors.New("bad dag formatter option")
}
if err != nil {
return api.CidUndef, err
}
// set up wrapping
if a.params.Wrap {
f = files.NewSliceDirectory(
[]files.DirEntry{files.FileEntry("", f)},
)
}
it := f.Entries()
var adderRoot api.Cid
for it.Next() {
select {
case <-a.ctx.Done():
return api.CidUndef, a.ctx.Err()
default:
logger.Debugf("ipfsAdder AddFile(%s)", it.Name())
adderRoot, err = dagFmtr.Add(it.Name(), it.Node())
if err != nil {
logger.Error("error adding to cluster: ", err)
return api.CidUndef, err
}
}
// TODO (hector): We can only add a single CAR file for the
// moment.
if a.params.Format == "car" {
break
}
}
if it.Err() != nil {
return api.CidUndef, it.Err()
}
clusterRoot, err := a.dgs.Finalize(a.ctx, adderRoot)
if err != nil {
logger.Error("error finalizing adder:", err)
return api.CidUndef, err
}
logger.Infof("%s successfully added to cluster", clusterRoot)
return clusterRoot, nil
}
// A wrapper around the ipfsadd.Adder to satisfy the dagFormatter interface.
type ipfsAdder struct {
*ipfsadd.Adder
}
func newIpfsAdder(ctx context.Context, dgs ClusterDAGService, params api.AddParams, out chan api.AddedOutput) (*ipfsAdder, error) {
iadder, err := ipfsadd.NewAdder(ctx, dgs, dgs.Allocations)
if err != nil {
logger.Error(err)
return nil, err
}
iadder.Trickle = params.Layout == "trickle"
iadder.RawLeaves = params.RawLeaves
iadder.Chunker = params.Chunker
iadder.Out = out
iadder.Progress = params.Progress
iadder.NoCopy = params.NoCopy
// Set up the CID prefix.
prefix, err := merkledag.PrefixForCidVersion(params.CidVersion)
if err != nil {
return nil, fmt.Errorf("bad CID Version: %s", err)
}
hashFunCode, ok := multihash.Names[strings.ToLower(params.HashFun)]
if !ok {
return nil, errors.New("hash function name not known")
}
prefix.MhType = hashFunCode
prefix.MhLength = -1
iadder.CidBuilder = &prefix
return &ipfsAdder{
Adder: iadder,
}, nil
}
func (ia *ipfsAdder) Add(name string, f files.Node) (api.Cid, error) {
// In order to set the AddedOutput names right, we use
// OutputPrefix:
//
// When adding a folder, this is the root folder name which is
// prepended to the addedpaths. When adding a single file,
// this is the name of the file which overrides the empty
// AddedOutput name.
//
// After coreunix/add.go was refactored in go-ipfs and we
// followed suit, it no longer receives the name of the
// file/folder being added and does not emit AddedOutput
// events with the right names. We addressed this by adding
// OutputPrefix to our version (go-ipfs modifies emitted
// events before sending them to the user).
ia.OutputPrefix = name
nd, err := ia.AddAllAndPin(f)
if err != nil {
return api.CidUndef, err
}
return api.NewCid(nd.Cid()), nil
}
// An adder to add CAR files. It is at the moment very basic, and can
// add a single CAR file with a single root. Ideally, it should be able to
// add more complex CARs, or several of them, by wrapping them with a single root.
// But for that we would need to keep state and track an MFS root similarly to
// what the ipfsadder does.
type carAdder struct {
ctx context.Context
dgs ClusterDAGService
params api.AddParams
output chan api.AddedOutput
}
func newCarAdder(ctx context.Context, dgs ClusterDAGService, params api.AddParams, out chan api.AddedOutput) (*carAdder, error) {
return &carAdder{
ctx: ctx,
dgs: dgs,
params: params,
output: out,
}, nil
}
// Add takes a node which should be a CAR file and nothing else and
// adds its blocks using the ClusterDAGService.
func (ca *carAdder) Add(name string, fn files.Node) (api.Cid, error) {
if ca.params.Wrap {
return api.CidUndef, errors.New("cannot wrap a CAR file upload")
}
f, ok := fn.(files.File)
if !ok {
return api.CidUndef, errors.New("expected CAR file is not of type file")
}
carReader, err := car.NewCarReader(f)
if err != nil {
return api.CidUndef, err
}
if len(carReader.Header.Roots) != 1 {
return api.CidUndef, errors.New("only CAR files with a single root are supported")
}
root := carReader.Header.Roots[0]
bytes := uint64(0)
size := uint64(0)
for {
block, err := carReader.Next()
if err != nil && err != io.EOF {
return api.CidUndef, err
} else if block == nil {
break
}
bytes += uint64(len(block.RawData()))
nd, err := ipld.Decode(block)
if err != nil {
return api.CidUndef, err
}
// If the root is in the CAR and the root is a UnixFS
// node, then set the size in the output object.
if nd.Cid().Equals(root) {
ufs, err := unixfs.ExtractFSNode(nd)
if err == nil {
size = ufs.FileSize()
}
}
err = ca.dgs.Add(ca.ctx, nd)
if err != nil {
return api.CidUndef, err
}
}
ca.output <- api.AddedOutput{
Name: name,
Cid: api.NewCid(root),
Bytes: bytes,
Size: size,
Allocations: ca.dgs.Allocations(),
}
return api.NewCid(root), nil
}
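As a reviewer aside (not part of this diff): a minimal, hypothetical sketch of driving the Adder above with a throwaway ClusterDAGService. noopDAGService is invented for the example and is backed by go-merkledag's in-memory test DAG service; real deployments use the single or sharding implementations instead.

package main

import (
	"context"
	"fmt"

	"github.com/ipfs-cluster/ipfs-cluster/adder"
	"github.com/ipfs-cluster/ipfs-cluster/api"
	files "github.com/ipfs/go-ipfs-files"
	ipld "github.com/ipfs/go-ipld-format"
	dagtest "github.com/ipfs/go-merkledag/test"
	peer "github.com/libp2p/go-libp2p/core/peer"
)

// noopDAGService is a hypothetical ClusterDAGService for this sketch:
// blocks land in an in-memory DAG service, Finalize returns the root
// untouched, and no allocations are reported.
type noopDAGService struct {
	ipld.DAGService
}

func (n *noopDAGService) Finalize(_ context.Context, root api.Cid) (api.Cid, error) {
	return root, nil
}

func (n *noopDAGService) Allocations() []peer.ID { return nil }

func main() {
	out := make(chan api.AddedOutput, 16)
	done := make(chan struct{})
	go func() {
		defer close(done)
		for o := range out { // the Adder closes the channel when done
			fmt.Printf("added %s (%s)\n", o.Name, o.Cid)
		}
	}()

	dgs := &noopDAGService{DAGService: dagtest.Mock()}
	a := adder.New(dgs, api.DefaultAddParams(), out)

	dir := files.NewMapDirectory(map[string]files.Node{
		"hello.txt": files.NewBytesFile([]byte("hello world")),
	})
	root, err := a.FromFiles(context.Background(), dir)
	if err != nil {
		panic(err)
	}
	<-done
	fmt.Println("root:", root)
}

Running it prints the AddedOutput events followed by the final root CID.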

View file

@ -1,227 +0,0 @@
package adder
import (
"bytes"
"context"
"fmt"
"mime/multipart"
"sync"
"testing"
"time"
"github.com/ipfs-cluster/ipfs-cluster/api"
"github.com/ipfs-cluster/ipfs-cluster/test"
"github.com/ipld/go-car"
peer "github.com/libp2p/go-libp2p/core/peer"
cid "github.com/ipfs/go-cid"
files "github.com/ipfs/go-ipfs-files"
)
type mockCDAGServ struct {
*test.MockDAGService
}
func newMockCDAGServ() *mockCDAGServ {
return &mockCDAGServ{
// write-only DAGs.
MockDAGService: test.NewMockDAGService(true),
}
}
func newReadableMockCDAGServ() *mockCDAGServ {
return &mockCDAGServ{
MockDAGService: test.NewMockDAGService(false),
}
}
// noop
func (dag *mockCDAGServ) Finalize(ctx context.Context, root api.Cid) (api.Cid, error) {
return root, nil
}
func (dag *mockCDAGServ) Allocations() []peer.ID {
return nil
}
func TestAdder(t *testing.T) {
sth := test.NewShardingTestHelper()
defer sth.Clean(t)
mr, closer := sth.GetTreeMultiReader(t)
defer closer.Close()
r := multipart.NewReader(mr, mr.Boundary())
p := api.DefaultAddParams()
expectedCids := test.ShardingDirCids[:]
dags := newMockCDAGServ()
adder := New(dags, p, nil)
root, err := adder.FromMultipart(context.Background(), r)
if err != nil {
t.Fatal(err)
}
if root.String() != test.ShardingDirBalancedRootCID {
t.Error("expected the right content root")
}
if len(expectedCids) != len(dags.Nodes) {
t.Fatal("unexpected number of blocks imported")
}
for _, c := range expectedCids {
ci, _ := cid.Decode(c)
_, ok := dags.Nodes[ci]
if !ok {
t.Fatal("unexpected block emitted:", c)
}
}
}
func TestAdder_DoubleStart(t *testing.T) {
sth := test.NewShardingTestHelper()
defer sth.Clean(t)
f := sth.GetTreeSerialFile(t)
p := api.DefaultAddParams()
dags := newMockCDAGServ()
adder := New(dags, p, nil)
_, err := adder.FromFiles(context.Background(), f)
f.Close()
if err != nil {
t.Fatal(err)
}
f = sth.GetTreeSerialFile(t)
_, err = adder.FromFiles(context.Background(), f)
f.Close()
if err == nil {
t.Fatal("expected an error: cannot run importer twice")
}
}
func TestAdder_ContextCancelled(t *testing.T) {
sth := test.NewShardingTestHelper()
defer sth.Clean(t)
lg, closer := sth.GetRandFileReader(t, 100000) // 100 MB
st := sth.GetTreeSerialFile(t)
defer closer.Close()
defer st.Close()
slf := files.NewMapDirectory(map[string]files.Node{
"a": lg,
"b": st,
})
mr := files.NewMultiFileReader(slf, true)
r := multipart.NewReader(mr, mr.Boundary())
p := api.DefaultAddParams()
dags := newMockCDAGServ()
ctx, cancel := context.WithCancel(context.Background())
adder := New(dags, p, nil)
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
_, err := adder.FromMultipart(ctx, r)
if err == nil {
t.Error("expected a context canceled error")
}
t.Log(err)
}()
// adder.FromMultipart will finish if we sleep longer before cancelling
time.Sleep(50 * time.Millisecond)
cancel()
wg.Wait()
}
func TestAdder_CAR(t *testing.T) {
// prepare a CAR file
ctx := context.Background()
sth := test.NewShardingTestHelper()
defer sth.Clean(t)
mr, closer := sth.GetTreeMultiReader(t)
defer closer.Close()
r := multipart.NewReader(mr, mr.Boundary())
p := api.DefaultAddParams()
dags := newReadableMockCDAGServ()
adder := New(dags, p, nil)
root, err := adder.FromMultipart(ctx, r)
if err != nil {
t.Fatal(err)
}
var carBuf bytes.Buffer
// Make a CAR out of the files we added.
err = car.WriteCar(ctx, dags, []cid.Cid{root.Cid}, &carBuf)
if err != nil {
t.Fatal(err)
}
// Make the CAR look like a multipart.
carFile := files.NewReaderFile(&carBuf)
carDir := files.NewMapDirectory(
map[string]files.Node{"": carFile},
)
carMf := files.NewMultiFileReader(carDir, true)
carMr := multipart.NewReader(carMf, carMf.Boundary())
// Add the car, discarding old dags.
dags = newMockCDAGServ()
p.Format = "car"
adder = New(dags, p, nil)
root2, err := adder.FromMultipart(ctx, carMr)
if err != nil {
t.Fatal(err)
}
if !root.Equals(root2) {
t.Error("Imported CAR file does not have expected root")
}
expectedCids := test.ShardingDirCids[:]
for _, c := range expectedCids {
ci, _ := cid.Decode(c)
_, ok := dags.Nodes[ci]
if !ok {
t.Fatal("unexpected block extracted from CAR:", c)
}
}
}
func TestAdder_LargeFolder(t *testing.T) {
items := 10000 // add 10000 items
sth := test.NewShardingTestHelper()
defer sth.Clean(t)
filesMap := make(map[string]files.Node)
for i := 0; i < items; i++ {
fstr := fmt.Sprintf("file%d", i)
f := files.NewBytesFile([]byte(fstr))
filesMap[fstr] = f
}
slf := files.NewMapDirectory(filesMap)
p := api.DefaultAddParams()
p.Wrap = true
dags := newMockCDAGServ()
adder := New(dags, p, nil)
_, err := adder.FromFiles(context.Background(), slf)
if err != nil {
t.Fatal(err)
}
}

View file

@ -1,135 +0,0 @@
// Package adderutils provides some utilities for adding content to cluster.
package adderutils
import (
"context"
"encoding/json"
"mime/multipart"
"net/http"
"sync"
"github.com/ipfs-cluster/ipfs-cluster/adder"
"github.com/ipfs-cluster/ipfs-cluster/adder/sharding"
"github.com/ipfs-cluster/ipfs-cluster/adder/single"
"github.com/ipfs-cluster/ipfs-cluster/api"
logging "github.com/ipfs/go-log/v2"
rpc "github.com/libp2p/go-libp2p-gorpc"
)
var logger = logging.Logger("adder")
// AddMultipartHTTPHandler is a helper function to add content
// uploaded using a multipart request. The outputTransform parameter
// allows customizing the HTTP response output format to something
// other than api.AddedOutput objects.
func AddMultipartHTTPHandler(
ctx context.Context,
rpc *rpc.Client,
params api.AddParams,
reader *multipart.Reader,
w http.ResponseWriter,
outputTransform func(api.AddedOutput) interface{},
) (api.Cid, error) {
var dags adder.ClusterDAGService
output := make(chan api.AddedOutput, 200)
if params.Shard {
dags = sharding.New(ctx, rpc, params, output)
} else {
dags = single.New(ctx, rpc, params, params.Local)
}
if outputTransform == nil {
outputTransform = func(in api.AddedOutput) interface{} { return in }
}
// This must be application/json, otherwise the go-ipfs client
// will break.
w.Header().Set("Content-Type", "application/json")
// Browsers should not cache these responses.
w.Header().Set("Cache-Control", "no-cache")
// We need to ask the clients to close the connection
// (no keep-alive) or things break badly when adding.
// https://github.com/ipfs/go-ipfs-cmds/pull/116
w.Header().Set("Connection", "close")
var wg sync.WaitGroup
if !params.StreamChannels {
// in this case we buffer responses in memory and
// return them as a valid JSON array.
wg.Add(1)
var bufOutput []interface{} // a slice of transformed AddedOutput
go func() {
defer wg.Done()
bufOutput = buildOutput(output, outputTransform)
}()
enc := json.NewEncoder(w)
add := adder.New(dags, params, output)
root, err := add.FromMultipart(ctx, reader)
if err != nil { // Send an error
logger.Error(err)
w.WriteHeader(http.StatusInternalServerError)
errorResp := api.Error{
Code: http.StatusInternalServerError,
Message: err.Error(),
}
if err := enc.Encode(errorResp); err != nil {
logger.Error(err)
}
wg.Wait()
return root, err
}
wg.Wait()
w.WriteHeader(http.StatusOK)
enc.Encode(bufOutput)
return root, err
}
// handle stream-adding. This should be the default.
// https://github.com/ipfs-shipyard/ipfs-companion/issues/600
w.Header().Set("X-Chunked-Output", "1")
// Used by go-ipfs to signal errors half-way through the stream.
w.Header().Set("Trailer", "X-Stream-Error")
w.WriteHeader(http.StatusOK)
wg.Add(1)
go func() {
defer wg.Done()
streamOutput(w, output, outputTransform)
}()
add := adder.New(dags, params, output)
root, err := add.FromMultipart(ctx, reader)
if err != nil {
logger.Error(err)
// Set trailer with error
w.Header().Set("X-Stream-Error", err.Error())
}
wg.Wait()
return root, err
}
func streamOutput(w http.ResponseWriter, output chan api.AddedOutput, transform func(api.AddedOutput) interface{}) {
flusher, flush := w.(http.Flusher)
enc := json.NewEncoder(w)
for v := range output {
err := enc.Encode(transform(v))
if err != nil {
logger.Error(err)
break
}
if flush {
flusher.Flush()
}
}
}
func buildOutput(output chan api.AddedOutput, transform func(api.AddedOutput) interface{}) []interface{} {
var finalOutput []interface{}
for v := range output {
finalOutput = append(finalOutput, transform(v))
}
return finalOutput
}
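For illustration only (not part of this changeset): a hypothetical wiring of AddMultipartHTTPHandler behind a standard net/http mux. The rpcClient here is an assumption; in cluster it is created over libp2p and registered against the cluster RPC services.

package main

import (
	"net/http"

	"github.com/ipfs-cluster/ipfs-cluster/adder/adderutils"
	"github.com/ipfs-cluster/ipfs-cluster/api"
	rpc "github.com/libp2p/go-libp2p-gorpc"
)

// addHandler adapts AddMultipartHTTPHandler to a net/http endpoint.
// Errors are already reported to the client by the helper itself.
func addHandler(rpcClient *rpc.Client) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		reader, err := r.MultipartReader()
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		// A nil outputTransform streams api.AddedOutput objects unchanged.
		_, _ = adderutils.AddMultipartHTTPHandler(
			r.Context(), rpcClient, api.DefaultAddParams(), reader, w, nil,
		)
	}
}

func main() {
	var rpcClient *rpc.Client // assumed: a real client wired to cluster RPC
	http.Handle("/add", addHandler(rpcClient))
	_ = http.ListenAndServe(":8080", nil)
}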

View file

@ -1,488 +0,0 @@
// Package ipfsadd is a simplified copy of go-ipfs/core/coreunix/add.go
package ipfsadd
import (
"context"
"errors"
"fmt"
"io"
gopath "path"
"path/filepath"
"github.com/ipfs-cluster/ipfs-cluster/api"
cid "github.com/ipfs/go-cid"
chunker "github.com/ipfs/go-ipfs-chunker"
files "github.com/ipfs/go-ipfs-files"
posinfo "github.com/ipfs/go-ipfs-posinfo"
ipld "github.com/ipfs/go-ipld-format"
logging "github.com/ipfs/go-log/v2"
dag "github.com/ipfs/go-merkledag"
mfs "github.com/ipfs/go-mfs"
unixfs "github.com/ipfs/go-unixfs"
balanced "github.com/ipfs/go-unixfs/importer/balanced"
ihelper "github.com/ipfs/go-unixfs/importer/helpers"
trickle "github.com/ipfs/go-unixfs/importer/trickle"
peer "github.com/libp2p/go-libp2p/core/peer"
)
var log = logging.Logger("coreunix")
// how many bytes of progress to wait before sending a progress update message
const progressReaderIncrement = 1024 * 256
var liveCacheSize = uint64(256 << 10)
// NewAdder returns a new Adder used for a file add operation.
func NewAdder(ctx context.Context, ds ipld.DAGService, allocs func() []peer.ID) (*Adder, error) {
// Cluster: we don't use pinner nor GCLocker.
return &Adder{
ctx: ctx,
dagService: ds,
allocsFun: allocs,
Progress: false,
Trickle: false,
Chunker: "",
}, nil
}
// Adder holds the switches passed to the `add` command.
type Adder struct {
ctx context.Context
dagService ipld.DAGService
allocsFun func() []peer.ID
Out chan api.AddedOutput
Progress bool
Trickle bool
RawLeaves bool
Silent bool
NoCopy bool
Chunker string
mroot *mfs.Root
tempRoot cid.Cid
CidBuilder cid.Builder
liveNodes uint64
lastFile mfs.FSNode
// Cluster: ipfs does a hack in commands/add.go to set the filenames
// in emitted events correctly. We carry a root folder name (or a
// filename in the case of single files) here and emit those events
// correctly from the beginning.
OutputPrefix string
}
func (adder *Adder) mfsRoot() (*mfs.Root, error) {
if adder.mroot != nil {
return adder.mroot, nil
}
rnode := unixfs.EmptyDirNode()
rnode.SetCidBuilder(adder.CidBuilder)
mr, err := mfs.NewRoot(adder.ctx, adder.dagService, rnode, nil)
if err != nil {
return nil, err
}
adder.mroot = mr
return adder.mroot, nil
}
// SetMfsRoot sets `r` as the root for Adder.
func (adder *Adder) SetMfsRoot(r *mfs.Root) {
adder.mroot = r
}
// Constructs a node from reader's data, and adds it. Doesn't pin.
func (adder *Adder) add(reader io.Reader) (ipld.Node, error) {
chnk, err := chunker.FromString(reader, adder.Chunker)
if err != nil {
return nil, err
}
// Cluster: we don't do batching/use BufferedDS.
params := ihelper.DagBuilderParams{
Dagserv: adder.dagService,
RawLeaves: adder.RawLeaves,
Maxlinks: ihelper.DefaultLinksPerBlock,
NoCopy: adder.NoCopy,
CidBuilder: adder.CidBuilder,
}
db, err := params.New(chnk)
if err != nil {
return nil, err
}
var nd ipld.Node
if adder.Trickle {
nd, err = trickle.Layout(db)
} else {
nd, err = balanced.Layout(db)
}
if err != nil {
return nil, err
}
return nd, nil
}
// Cluster: commented as it is unused
// // RootNode returns the mfs root node
// func (adder *Adder) curRootNode() (ipld.Node, error) {
// mr, err := adder.mfsRoot()
// if err != nil {
// return nil, err
// }
// root, err := mr.GetDirectory().GetNode()
// if err != nil {
// return nil, err
// }
// // if one root file, use that hash as root.
// if len(root.Links()) == 1 {
// nd, err := root.Links()[0].GetNode(adder.ctx, adder.dagService)
// if err != nil {
// return nil, err
// }
// root = nd
// }
// return root, err
// }
// PinRoot recursively pins the root node of Adder and
// writes the pin state to the backing datastore.
// Cluster: we don't pin. Former Finalize().
func (adder *Adder) PinRoot(root ipld.Node) error {
rnk := root.Cid()
err := adder.dagService.Add(adder.ctx, root)
if err != nil {
return err
}
if adder.tempRoot.Defined() {
adder.tempRoot = rnk
}
return nil
}
func (adder *Adder) outputDirs(path string, fsn mfs.FSNode) error {
switch fsn := fsn.(type) {
case *mfs.File:
return nil
case *mfs.Directory:
names, err := fsn.ListNames(adder.ctx)
if err != nil {
return err
}
for _, name := range names {
child, err := fsn.Child(name)
if err != nil {
// This fails when Child is of type *mfs.File
// because it tries to fetch it from the DAG
// service (which does not have it and returns
// a "not found" error). *mfs.Files are ignored
// in the recursive call anyway. For Cluster, we
// just ignore errors here.
continue
}
childpath := gopath.Join(path, name)
err = adder.outputDirs(childpath, child)
if err != nil {
return err
}
fsn.Uncache(name)
}
nd, err := fsn.GetNode()
if err != nil {
return err
}
return adder.outputDagnode(adder.Out, path, nd)
default:
return fmt.Errorf("unrecognized fsn type: %#v", fsn)
}
}
func (adder *Adder) addNode(node ipld.Node, path string) error {
// patch it into the root
outputName := path
if path == "" {
path = node.Cid().String()
outputName = ""
}
if pi, ok := node.(*posinfo.FilestoreNode); ok {
node = pi.Node
}
mr, err := adder.mfsRoot()
if err != nil {
return err
}
dir := gopath.Dir(path)
if dir != "." {
opts := mfs.MkdirOpts{
Mkparents: true,
Flush: false,
CidBuilder: adder.CidBuilder,
}
if err := mfs.Mkdir(mr, dir, opts); err != nil {
return err
}
}
if err := mfs.PutNode(mr, path, node); err != nil {
return err
}
// Cluster: cache the last file added.
// This avoids using the DAGService to get the first child
// of the MFS root when not wrapping.
lastFile, err := mfs.NewFile(path, node, nil, adder.dagService)
if err != nil {
return err
}
adder.lastFile = lastFile
if !adder.Silent {
return adder.outputDagnode(adder.Out, outputName, node)
}
return nil
}
// AddAllAndPin adds the given request's files and pins them.
// Cluster: we don't pin. Former AddFiles.
func (adder *Adder) AddAllAndPin(file files.Node) (ipld.Node, error) {
if err := adder.addFileNode("", file, true); err != nil {
return nil, err
}
// get root
mr, err := adder.mfsRoot()
if err != nil {
return nil, err
}
var root mfs.FSNode
rootdir := mr.GetDirectory()
root = rootdir
err = root.Flush()
if err != nil {
return nil, err
}
// if adding a file without wrapping, swap the root to it (when adding a
// directory, mfs root is the directory)
_, dir := file.(files.Directory)
var name string
if !dir {
children, err := rootdir.ListNames(adder.ctx)
if err != nil {
return nil, err
}
if len(children) == 0 {
return nil, fmt.Errorf("expected at least one child dir, got none")
}
// Replace root with the first child
name = children[0]
root, err = rootdir.Child(name)
if err != nil {
// Cluster: use the last file we added
// if we have one.
if adder.lastFile == nil {
return nil, err
}
root = adder.lastFile
}
}
err = mr.Close()
if err != nil {
return nil, err
}
nd, err := root.GetNode()
if err != nil {
return nil, err
}
// output directory events
err = adder.outputDirs(name, root)
if err != nil {
return nil, err
}
// Cluster: call PinRoot which adds the root cid to the DAGService.
// Unsure if this is a bug in IPFS when not pinning. Or it would get added
// twice.
return nd, adder.PinRoot(nd)
}
// Cluster: we don't Pause for GC
func (adder *Adder) addFileNode(path string, file files.Node, toplevel bool) error {
defer file.Close()
if adder.liveNodes >= liveCacheSize {
// TODO: A smarter cache that uses some sort of lru cache with an eviction handler
mr, err := adder.mfsRoot()
if err != nil {
return err
}
if err := mr.FlushMemFree(adder.ctx); err != nil {
return err
}
adder.liveNodes = 0
}
adder.liveNodes++
switch f := file.(type) {
case files.Directory:
return adder.addDir(path, f, toplevel)
case *files.Symlink:
return adder.addSymlink(path, f)
case files.File:
return adder.addFile(path, f)
default:
return errors.New("unknown file type")
}
}
func (adder *Adder) addSymlink(path string, l *files.Symlink) error {
sdata, err := unixfs.SymlinkData(l.Target)
if err != nil {
return err
}
dagnode := dag.NodeWithData(sdata)
dagnode.SetCidBuilder(adder.CidBuilder)
err = adder.dagService.Add(adder.ctx, dagnode)
if err != nil {
return err
}
return adder.addNode(dagnode, path)
}
func (adder *Adder) addFile(path string, file files.File) error {
// if the progress flag was specified, wrap the file so that we can send
// progress updates to the client (over the output channel)
var reader io.Reader = file
if adder.Progress {
rdr := &progressReader{file: reader, path: path, out: adder.Out}
if fi, ok := file.(files.FileInfo); ok {
reader = &progressReader2{rdr, fi}
} else {
reader = rdr
}
}
dagnode, err := adder.add(reader)
if err != nil {
return err
}
// patch it into the root
return adder.addNode(dagnode, path)
}
func (adder *Adder) addDir(path string, dir files.Directory, toplevel bool) error {
log.Infof("adding directory: %s", path)
if !(toplevel && path == "") {
mr, err := adder.mfsRoot()
if err != nil {
return err
}
err = mfs.Mkdir(mr, path, mfs.MkdirOpts{
Mkparents: true,
Flush: false,
CidBuilder: adder.CidBuilder,
})
if err != nil {
return err
}
}
it := dir.Entries()
for it.Next() {
fpath := gopath.Join(path, it.Name())
err := adder.addFileNode(fpath, it.Node(), false)
if err != nil {
return err
}
}
return it.Err()
}
// outputDagnode sends dagnode info over the output channel.
// Cluster: we use api.AddedOutput instead of coreiface events
// and make this an adder method to be able to prefix.
func (adder *Adder) outputDagnode(out chan api.AddedOutput, name string, dn ipld.Node) error {
if out == nil {
return nil
}
s, err := dn.Size()
if err != nil {
return err
}
// When adding things in a folder: "OutputPrefix/name"
// When adding a single file: "OutputPrefix" (name is unset)
// When adding a single thing with no name: ""
// Note: ipfs sets the name of files received on stdin to the CID,
// but cluster does not support stdin-adding so we do not
// account for this here.
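// For example, with OutputPrefix "photos" and name "a/b.jpg" the
// emitted name is "photos/a/b.jpg"; for a single file with an empty
// name it is just "photos".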
name = filepath.Join(adder.OutputPrefix, name)
out <- api.AddedOutput{
Cid: api.NewCid(dn.Cid()),
Name: name,
Size: s,
Allocations: adder.allocsFun(),
}
return nil
}
type progressReader struct {
file io.Reader
path string
out chan api.AddedOutput
bytes int64
lastProgress int64
}
func (i *progressReader) Read(p []byte) (int, error) {
n, err := i.file.Read(p)
i.bytes += int64(n)
if i.bytes-i.lastProgress >= progressReaderIncrement || err == io.EOF {
i.lastProgress = i.bytes
i.out <- api.AddedOutput{
Name: i.path,
Bytes: uint64(i.bytes),
}
}
return n, err
}
type progressReader2 struct {
*progressReader
files.FileInfo
}
func (i *progressReader2) Read(p []byte) (int, error) {
return i.progressReader.Read(p)
}

View file

@ -1,186 +0,0 @@
package sharding
// dag.go defines functions for constructing and parsing ipld-cbor nodes
// of the clusterDAG used to track sharded DAGs in ipfs-cluster
// Most logic goes into handling the edge cases in which clusterDAG
// metadata for a single shard cannot fit within a single shard node. We
// make the following simplifying assumption: a single shard will not track
// more than 35,808,256 links (~2^25). This is the limit at which the current
// shard node format would need 2 levels of indirect nodes to reference
// all of the links. Note that this limit is only reached at shard sizes 7
// times the size of the current default and then only when files are all
// 1 byte in size. In the future we may generalize the shard dag to multiple
// indirect nodes to accommodate much bigger shard sizes. Also note that the
// move to using the identity hash function in cids of very small data
// will improve link density in shard nodes and further reduce the need for
// multiple levels of indirection.
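// (For reference: with MaxLinks = 5984 links per shard node, as defined
// below, a single level of indirection covers 5984 * 5984 = 35,808,256
// links, which is where the figure above comes from.)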
import (
"context"
"fmt"
blocks "github.com/ipfs/go-block-format"
cid "github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
ipld "github.com/ipfs/go-ipld-format"
dag "github.com/ipfs/go-merkledag"
mh "github.com/multiformats/go-multihash"
)
// go-merkledag does this, but it may be moved.
// We include it for explicitness.
func init() {
ipld.Register(cid.DagProtobuf, dag.DecodeProtobufBlock)
ipld.Register(cid.Raw, dag.DecodeRawBlock)
ipld.Register(cid.DagCBOR, cbor.DecodeBlock)
}
// MaxLinks is the max number of links that, when serialized, fit into a block
const MaxLinks = 5984
const hashFn = mh.SHA2_256
// CborDataToNode parses cbor data into a clusterDAG node while making a few
// checks
func CborDataToNode(raw []byte, format string) (ipld.Node, error) {
if format != "cbor" {
return nil, fmt.Errorf("unexpected shard node format %s", format)
}
shardCid, err := cid.NewPrefixV1(cid.DagCBOR, hashFn).Sum(raw)
if err != nil {
return nil, err
}
shardBlk, err := blocks.NewBlockWithCid(raw, shardCid)
if err != nil {
return nil, err
}
shardNode, err := ipld.Decode(shardBlk)
if err != nil {
return nil, err
}
return shardNode, nil
}
func makeDAGSimple(ctx context.Context, dagObj map[string]cid.Cid) (ipld.Node, error) {
node, err := cbor.WrapObject(
dagObj,
hashFn, mh.DefaultLengths[hashFn],
)
if err != nil {
return nil, err
}
return node, err
}
// makeDAG parses a dagObj which stores all of the node-links a shardDAG
// is responsible for tracking. In general a single node of links may exceed
// the capacity of an ipfs block. In this case an indirect node in the
// shardDAG is constructed that references "leaf shardNodes" that themselves
// carry links to the data nodes being tracked. The head of the output slice
// is always the root of the shardDAG, i.e. the ipld node that should be
// recursively pinned to track the shard
func makeDAG(ctx context.Context, dagObj map[string]cid.Cid) ([]ipld.Node, error) {
// FIXME: We have a 4MB limit on the block size enforced by bitswap:
// https://github.com/libp2p/go-libp2p/blob/master/core/network/network.go
// No indirect node
if len(dagObj) <= MaxLinks {
n, err := makeDAGSimple(ctx, dagObj)
return []ipld.Node{n}, err
}
// Indirect node required
leafNodes := make([]ipld.Node, 0) // shardNodes with links to data
indirectObj := make(map[string]cid.Cid) // shardNode with links to shardNodes
numFullLeaves := len(dagObj) / MaxLinks
for i := 0; i <= numFullLeaves; i++ {
leafObj := make(map[string]cid.Cid)
for j := 0; j < MaxLinks; j++ {
c, ok := dagObj[fmt.Sprintf("%d", i*MaxLinks+j)]
if !ok { // finished with this leaf before filling all the way
if i != numFullLeaves {
panic("bad state, should never be here")
}
break
}
leafObj[fmt.Sprintf("%d", j)] = c
}
leafNode, err := makeDAGSimple(ctx, leafObj)
if err != nil {
return nil, err
}
indirectObj[fmt.Sprintf("%d", i)] = leafNode.Cid()
leafNodes = append(leafNodes, leafNode)
}
indirectNode, err := makeDAGSimple(ctx, indirectObj)
if err != nil {
return nil, err
}
nodes := append([]ipld.Node{indirectNode}, leafNodes...)
return nodes, nil
}
// TODO: decide whether this is worth including. Is precision important for
// most usecases? Is being a little over the shard size a serious problem?
// Is precision worth the cost to maintain complex accounting for metadata
// size (cid sizes will vary in general, cluster dag cbor format may
// grow to vary unpredictably in size)
// byteCount returns the number of bytes the dagObj will occupy when
// serialized into an ipld DAG
/*func byteCount(obj dagObj) uint64 {
// 1 byte map overhead
// for each entry:
// 1 byte indicating text
// 1 byte*(number digits) for key
// 2 bytes for link tag
// 35 bytes for each cid
count := 1
for key := range obj {
count += fixedPerLink
count += len(key)
}
return uint64(count) + indirectCount(len(obj))
}
// indirectCount returns the number of bytes needed to serialize the indirect
// node structure of the shardDAG based on the number of links being tracked.
func indirectCount(linkNum int) uint64 {
q := linkNum / MaxLinks
if q == 0 { // no indirect node needed
return 0
}
dummyIndirect := make(map[string]cid.Cid)
for key := 0; key <= q; key++ {
dummyIndirect[fmt.Sprintf("%d", key)] = nil
}
// Count bytes of entries of single indirect node and add the map
// overhead for all leaf nodes other than the original
return byteCount(dummyIndirect) + uint64(q)
}
// Return the number of bytes added to the total shard node metadata DAG when
// adding a new link to the given dagObj.
func deltaByteCount(obj dagObj) uint64 {
linkNum := len(obj)
q1 := linkNum / MaxLinks
q2 := (linkNum + 1) / MaxLinks
count := uint64(fixedPerLink)
count += uint64(len(fmt.Sprintf("%d", len(obj))))
// new shard nodes created by adding link
if q1 != q2 {
// first new leaf node created, i.e. indirect created too
if q2 == 1 {
count++ // map overhead of indirect node
count += 1 + fixedPerLink // fixedPerLink + len("0")
}
// added to indirect node
count += fixedPerLink
count += uint64(len(fmt.Sprintf("%d", q2)))
// overhead of new leaf node
count++
}
return count
}
*/

View file

@ -1,315 +0,0 @@
// Package sharding implements a sharding ClusterDAGService that places
// content in different shards while it's being added, creating
// a final Cluster DAG and pinning it.
package sharding
import (
"context"
"errors"
"fmt"
"time"
"github.com/ipfs-cluster/ipfs-cluster/adder"
"github.com/ipfs-cluster/ipfs-cluster/api"
humanize "github.com/dustin/go-humanize"
cid "github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
logging "github.com/ipfs/go-log/v2"
peer "github.com/libp2p/go-libp2p/core/peer"
rpc "github.com/libp2p/go-libp2p-gorpc"
)
var logger = logging.Logger("shardingdags")
// DAGService is an implementation of a ClusterDAGService which
// shards content while adding among several IPFS Cluster peers,
// creating a Cluster DAG to track and pin that content selectively
// in the IPFS daemons allocated to it.
type DAGService struct {
adder.BaseDAGService
ctx context.Context
rpcClient *rpc.Client
addParams api.AddParams
output chan<- api.AddedOutput
addedSet *cid.Set
// Current shard being built
currentShard *shard
// Last flushed shard CID
previousShard cid.Cid
// shard tracking
shards map[string]cid.Cid
startTime time.Time
totalSize uint64
}
// New returns a new ClusterDAGService, which uses the given rpc client to perform
// Allocate, IPFSStream and Pin requests to other cluster components.
func New(ctx context.Context, rpc *rpc.Client, opts api.AddParams, out chan<- api.AddedOutput) *DAGService {
// use a default value for this regardless of what is provided.
opts.Mode = api.PinModeRecursive
return &DAGService{
ctx: ctx,
rpcClient: rpc,
addParams: opts,
output: out,
addedSet: cid.NewSet(),
shards: make(map[string]cid.Cid),
startTime: time.Now(),
}
}
// Add puts the given node in its corresponding shard and sends it to the
// destination peers.
func (dgs *DAGService) Add(ctx context.Context, node ipld.Node) error {
// FIXME: This will grow in memory
if !dgs.addedSet.Visit(node.Cid()) {
return nil
}
return dgs.ingestBlock(ctx, node)
}
// Finalize finishes sharding, creates the cluster DAG and pins it along
// with the meta pin for the root node of the content.
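// Concretely, as implemented below: the cluster DAG root is pinned
// directly (MaxDepth 0) with a Reference to the data root, while the
// meta pin tracks the data root itself and carries a Reference back to
// the cluster DAG.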
func (dgs *DAGService) Finalize(ctx context.Context, dataRoot api.Cid) (api.Cid, error) {
lastCid, err := dgs.flushCurrentShard(ctx)
if err != nil {
return api.NewCid(lastCid), err
}
if !lastCid.Equals(dataRoot.Cid) {
logger.Warnf("the last added CID (%s) is not the IPFS data root (%s). This is only normal when adding a single file without wrapping in directory.", lastCid, dataRoot)
}
clusterDAGNodes, err := makeDAG(ctx, dgs.shards)
if err != nil {
return dataRoot, err
}
// PutDAG to ourselves
blocks := make(chan api.NodeWithMeta, 256)
go func() {
defer close(blocks)
for _, n := range clusterDAGNodes {
select {
case <-ctx.Done():
logger.Error(ctx.Err())
return // abort
case blocks <- adder.IpldNodeToNodeWithMeta(n):
}
}
}()
// Stream these blocks and wait until we are done.
bs := adder.NewBlockStreamer(ctx, dgs.rpcClient, []peer.ID{""}, blocks)
select {
case <-ctx.Done():
return dataRoot, ctx.Err()
case <-bs.Done():
}
if err := bs.Err(); err != nil {
return dataRoot, err
}
clusterDAG := clusterDAGNodes[0].Cid()
dgs.sendOutput(api.AddedOutput{
Name: fmt.Sprintf("%s-clusterDAG", dgs.addParams.Name),
Cid: api.NewCid(clusterDAG),
Size: dgs.totalSize,
Allocations: nil,
})
// Pin the ClusterDAG
clusterDAGPin := api.PinWithOpts(api.NewCid(clusterDAG), dgs.addParams.PinOptions)
clusterDAGPin.ReplicationFactorMin = -1
clusterDAGPin.ReplicationFactorMax = -1
clusterDAGPin.MaxDepth = 0 // pin direct
clusterDAGPin.Name = fmt.Sprintf("%s-clusterDAG", dgs.addParams.Name)
clusterDAGPin.Type = api.ClusterDAGType
clusterDAGPin.Reference = &dataRoot
// Update object with response.
err = adder.Pin(ctx, dgs.rpcClient, clusterDAGPin)
if err != nil {
return dataRoot, err
}
// Pin the META pin
metaPin := api.PinWithOpts(dataRoot, dgs.addParams.PinOptions)
metaPin.Type = api.MetaType
ref := api.NewCid(clusterDAG)
metaPin.Reference = &ref
metaPin.MaxDepth = 0 // irrelevant. Meta-pins are not pinned
err = adder.Pin(ctx, dgs.rpcClient, metaPin)
if err != nil {
return dataRoot, err
}
// Log some stats
dgs.logStats(metaPin.Cid, clusterDAGPin.Cid)
// Consider doing this? Seems like overkill
//
// // Amend ShardPins to reference clusterDAG root hash as a Parent
// shardParents := cid.NewSet()
// shardParents.Add(clusterDAG)
// for shardN, shard := range dgs.shardNodes {
// pin := api.PinWithOpts(shard, dgs.addParams)
// pin.Name := fmt.Sprintf("%s-shard-%s", pin.Name, shardN)
// pin.Type = api.ShardType
// pin.Parents = shardParents
// // FIXME: We don't know anymore the shard pin maxDepth
// // so we'd need to get the pin first.
// err := dgs.pin(pin)
// if err != nil {
// return err
// }
// }
return dataRoot, nil
}
// Allocations returns the current allocations for the current shard.
func (dgs *DAGService) Allocations() []peer.ID {
// FIXME: this is probably not safe in concurrency? However, there is
// no concurrent execution of any code in the DAGService I think.
if dgs.currentShard != nil {
return dgs.currentShard.Allocations()
}
return nil
}
// ingests a block into the current shard. If it gets full, it
// flushes the shard and retries with a new one.
func (dgs *DAGService) ingestBlock(ctx context.Context, n ipld.Node) error {
shard := dgs.currentShard
// if we have no currentShard, create one
if shard == nil {
logger.Infof("new shard for '%s': #%d", dgs.addParams.Name, len(dgs.shards))
var err error
// important: shards use the DAGService context.
shard, err = newShard(dgs.ctx, ctx, dgs.rpcClient, dgs.addParams.PinOptions)
if err != nil {
return err
}
dgs.currentShard = shard
}
logger.Debugf("ingesting block %s in shard %d (%s)", n.Cid(), len(dgs.shards), dgs.addParams.Name)
// this is not the same as n.Size()
size := uint64(len(n.RawData()))
// add the block to it if it fits and return
if shard.Size()+size < shard.Limit() {
shard.AddLink(ctx, n.Cid(), size)
return dgs.currentShard.sendBlock(ctx, n)
}
logger.Debugf("shard %d full: block: %d. shard: %d. limit: %d",
len(dgs.shards),
size,
shard.Size(),
shard.Limit(),
)
// -------
// Below: block DOES NOT fit in shard
// Flush and retry
// if shard is empty, error
if shard.Size() == 0 {
return errors.New("block doesn't fit in empty shard: shard size too small?")
}
_, err := dgs.flushCurrentShard(ctx)
if err != nil {
return err
}
return dgs.ingestBlock(ctx, n) // <-- retry ingest
}
func (dgs *DAGService) logStats(metaPin, clusterDAGPin api.Cid) {
duration := time.Since(dgs.startTime)
seconds := uint64(duration) / uint64(time.Second)
var rate string
if seconds == 0 {
rate = "∞ B"
} else {
rate = humanize.Bytes(dgs.totalSize / seconds)
}
statsFmt := `sharding session successful:
CID: %s
ClusterDAG: %s
Total shards: %d
Total size: %s
Total time: %s
Ingest Rate: %s/s
`
logger.Infof(
statsFmt,
metaPin,
clusterDAGPin,
len(dgs.shards),
humanize.Bytes(dgs.totalSize),
duration,
rate,
)
}
func (dgs *DAGService) sendOutput(ao api.AddedOutput) {
if dgs.output != nil {
dgs.output <- ao
}
}
// flushCurrentShard flushes dgs.currentShard and returns its LastLink().
func (dgs *DAGService) flushCurrentShard(ctx context.Context) (cid.Cid, error) {
shard := dgs.currentShard
if shard == nil {
return cid.Undef, errors.New("cannot flush a nil shard")
}
lens := len(dgs.shards)
shardCid, err := shard.Flush(ctx, lens, dgs.previousShard)
if err != nil {
return shardCid, err
}
dgs.totalSize += shard.Size()
dgs.shards[fmt.Sprintf("%d", lens)] = shardCid
dgs.previousShard = shardCid
dgs.currentShard = nil
dgs.sendOutput(api.AddedOutput{
Name: fmt.Sprintf("shard-%d", lens),
Cid: api.NewCid(shardCid),
Size: shard.Size(),
Allocations: shard.Allocations(),
})
return shard.LastLink(), nil
}
// AddMany calls Add for every given node.
func (dgs *DAGService) AddMany(ctx context.Context, nodes []ipld.Node) error {
for _, node := range nodes {
err := dgs.Add(ctx, node)
if err != nil {
return err
}
}
return nil
}

View file

@ -1,271 +0,0 @@
package sharding
import (
"context"
"errors"
"mime/multipart"
"sync"
"testing"
adder "github.com/ipfs-cluster/ipfs-cluster/adder"
"github.com/ipfs-cluster/ipfs-cluster/api"
"github.com/ipfs-cluster/ipfs-cluster/test"
logging "github.com/ipfs/go-log/v2"
peer "github.com/libp2p/go-libp2p/core/peer"
rpc "github.com/libp2p/go-libp2p-gorpc"
)
func init() {
logging.SetLogLevel("shardingdags", "INFO")
logging.SetLogLevel("adder", "INFO")
}
type testRPC struct {
blocks sync.Map
pins sync.Map
}
func (rpcs *testRPC) BlockStream(ctx context.Context, in <-chan api.NodeWithMeta, out chan<- struct{}) error {
defer close(out)
for n := range in {
rpcs.blocks.Store(n.Cid.String(), n.Data)
}
return nil
}
func (rpcs *testRPC) Pin(ctx context.Context, in api.Pin, out *api.Pin) error {
rpcs.pins.Store(in.Cid.String(), in)
*out = in
return nil
}
func (rpcs *testRPC) BlockAllocate(ctx context.Context, in api.Pin, out *[]peer.ID) error {
if in.ReplicationFactorMin > 1 {
return errors.New("we can only replicate to 1 peer")
}
// it does not matter since we use host == nil for RPC, so it uses the
// local one in all cases
*out = []peer.ID{test.PeerID1}
return nil
}
func (rpcs *testRPC) PinGet(ctx context.Context, c api.Cid) (api.Pin, error) {
pI, ok := rpcs.pins.Load(c.String())
if !ok {
return api.Pin{}, errors.New("not found")
}
return pI.(api.Pin), nil
}
func (rpcs *testRPC) BlockGet(ctx context.Context, c api.Cid) ([]byte, error) {
bI, ok := rpcs.blocks.Load(c.String())
if !ok {
return nil, errors.New("not found")
}
return bI.([]byte), nil
}
func makeAdder(t *testing.T, params api.AddParams) (*adder.Adder, *testRPC) {
rpcObj := &testRPC{}
server := rpc.NewServer(nil, "mock")
err := server.RegisterName("Cluster", rpcObj)
if err != nil {
t.Fatal(err)
}
err = server.RegisterName("IPFSConnector", rpcObj)
if err != nil {
t.Fatal(err)
}
client := rpc.NewClientWithServer(nil, "mock", server)
out := make(chan api.AddedOutput, 1)
dags := New(context.Background(), client, params, out)
add := adder.New(dags, params, out)
go func() {
for v := range out {
t.Logf("Output: Name: %s. Cid: %s. Size: %d", v.Name, v.Cid, v.Size)
}
}()
return add, rpcObj
}
func TestFromMultipart(t *testing.T) {
sth := test.NewShardingTestHelper()
defer sth.Clean(t)
t.Run("Test tree", func(t *testing.T) {
p := api.DefaultAddParams()
// Total data is about
p.ShardSize = 1024 * 300 // 300kB
p.Name = "testingFile"
p.Shard = true
p.ReplicationFactorMin = 1
p.ReplicationFactorMax = 2
add, rpcObj := makeAdder(t, p)
_ = rpcObj
mr, closer := sth.GetTreeMultiReader(t)
defer closer.Close()
r := multipart.NewReader(mr, mr.Boundary())
rootCid, err := add.FromMultipart(context.Background(), r)
if err != nil {
t.Fatal(err)
}
// Print all pins
// rpcObj.pins.Range(func(k, v interface{}) bool {
// p := v.(*api.Pin)
// j, _ := config.DefaultJSONMarshal(p)
// fmt.Printf("%s", j)
// return true
// })
if rootCid.String() != test.ShardingDirBalancedRootCID {
t.Fatal("bad root CID")
}
// 14 has been obtained by carefully observing the logs
// making sure that splitting happens in the right place.
shardBlocks, err := VerifyShards(t, rootCid, rpcObj, rpcObj, 14)
if err != nil {
t.Fatal(err)
}
for _, ci := range test.ShardingDirCids {
_, ok := shardBlocks[ci]
if !ok {
t.Fatal("shards are missing a block:", ci)
}
}
if len(test.ShardingDirCids) != len(shardBlocks) {
t.Fatal("shards have some extra blocks")
}
})
t.Run("Test file", func(t *testing.T) {
p := api.DefaultAddParams()
// Total data is about
p.ShardSize = 1024 * 1024 * 2 // 2MB
p.Name = "testingFile"
p.Shard = true
p.ReplicationFactorMin = 1
p.ReplicationFactorMax = 2
add, rpcObj := makeAdder(t, p)
_ = rpcObj
mr, closer := sth.GetRandFileMultiReader(t, 1024*50) // 50 MB
defer closer.Close()
r := multipart.NewReader(mr, mr.Boundary())
rootCid, err := add.FromMultipart(context.Background(), r)
if err != nil {
t.Fatal(err)
}
shardBlocks, err := VerifyShards(t, rootCid, rpcObj, rpcObj, 29)
if err != nil {
t.Fatal(err)
}
_ = shardBlocks
})
}
func TestFromMultipart_Errors(t *testing.T) {
type testcase struct {
name string
params api.AddParams
}
tcs := []*testcase{
{
name: "bad chunker",
params: api.AddParams{
Format: "",
IPFSAddParams: api.IPFSAddParams{
Chunker: "aweee",
RawLeaves: false,
},
Hidden: false,
Shard: true,
PinOptions: api.PinOptions{
ReplicationFactorMin: -1,
ReplicationFactorMax: -1,
Name: "test",
ShardSize: 1024 * 1024,
},
},
},
{
name: "shard size too small",
params: api.AddParams{
Format: "",
IPFSAddParams: api.IPFSAddParams{
Chunker: "",
RawLeaves: false,
},
Hidden: false,
Shard: true,
PinOptions: api.PinOptions{
ReplicationFactorMin: -1,
ReplicationFactorMax: -1,
Name: "test",
ShardSize: 200,
},
},
},
{
name: "replication too high",
params: api.AddParams{
Format: "",
IPFSAddParams: api.IPFSAddParams{
Chunker: "",
RawLeaves: false,
},
Hidden: false,
Shard: true,
PinOptions: api.PinOptions{
ReplicationFactorMin: 2,
ReplicationFactorMax: 3,
Name: "test",
ShardSize: 1024 * 1024,
},
},
},
}
sth := test.NewShardingTestHelper()
defer sth.Clean(t)
for _, tc := range tcs {
add, rpcObj := makeAdder(t, tc.params)
_ = rpcObj
f := sth.GetTreeSerialFile(t)
_, err := add.FromFiles(context.Background(), f)
if err == nil {
t.Error(tc.name, ": expected an error")
} else {
t.Log(tc.name, ":", err)
}
f.Close()
}
}

View file

@ -1,166 +0,0 @@
package sharding
import (
"context"
"fmt"
ipld "github.com/ipfs/go-ipld-format"
"github.com/ipfs-cluster/ipfs-cluster/adder"
"github.com/ipfs-cluster/ipfs-cluster/api"
cid "github.com/ipfs/go-cid"
peer "github.com/libp2p/go-libp2p/core/peer"
rpc "github.com/libp2p/go-libp2p-gorpc"
humanize "github.com/dustin/go-humanize"
)
// a shard represents a set (or bucket) of blocks which have been assigned
// to a peer for block-putting and which will form part of the same shard
// in the cluster DAG.
type shard struct {
ctx context.Context
rpc *rpc.Client
allocations []peer.ID
pinOptions api.PinOptions
bs *adder.BlockStreamer
blocks chan api.NodeWithMeta
// dagNode represents a node with links and will be converted
// to Cbor.
dagNode map[string]cid.Cid
currentSize uint64
sizeLimit uint64
}
func newShard(globalCtx context.Context, ctx context.Context, rpc *rpc.Client, opts api.PinOptions) (*shard, error) {
allocs, err := adder.BlockAllocate(ctx, rpc, opts)
if err != nil {
return nil, err
}
if opts.ReplicationFactorMin > 0 && len(allocs) == 0 {
// This would mean that the empty cid is part of the shared state somehow.
panic("allocations for new shard cannot be empty without error")
}
if opts.ReplicationFactorMin < 0 {
logger.Warn("Shard is set to replicate everywhere ,which doesn't make sense for sharding")
}
// TODO (hector): get latest metrics for allocations, adjust sizeLimit
// to minimum. This can be done later.
blocks := make(chan api.NodeWithMeta, 256)
return &shard{
ctx: globalCtx,
rpc: rpc,
allocations: allocs,
pinOptions: opts,
bs: adder.NewBlockStreamer(globalCtx, rpc, allocs, blocks),
blocks: blocks,
dagNode: make(map[string]cid.Cid),
currentSize: 0,
sizeLimit: opts.ShardSize,
}, nil
}
// AddLink adds a new block CID to this shard's DAG node and adds its size
// to the shard's current size. Checking that the block fits is the
// caller's responsibility.
func (sh *shard) AddLink(ctx context.Context, c cid.Cid, s uint64) {
linkN := len(sh.dagNode)
linkName := fmt.Sprintf("%d", linkN)
logger.Debugf("shard: add link: %s", linkName)
sh.dagNode[linkName] = c
sh.currentSize += s
}
// Allocations returns the peer IDs on which blocks are put for this shard.
func (sh *shard) Allocations() []peer.ID {
if len(sh.allocations) == 1 && sh.allocations[0] == "" {
return nil
}
return sh.allocations
}
func (sh *shard) sendBlock(ctx context.Context, n ipld.Node) error {
select {
case <-ctx.Done():
return ctx.Err()
case sh.blocks <- adder.IpldNodeToNodeWithMeta(n):
return nil
}
}
// Flush completes the allocation of this shard by building a CBOR node
// and adding it to IPFS, then pinning it in cluster. It returns the Cid of the
// shard.
func (sh *shard) Flush(ctx context.Context, shardN int, prev cid.Cid) (cid.Cid, error) {
logger.Debugf("shard %d: flush", shardN)
nodes, err := makeDAG(ctx, sh.dagNode)
if err != nil {
return cid.Undef, err
}
for _, n := range nodes {
err = sh.sendBlock(ctx, n)
if err != nil {
close(sh.blocks)
return cid.Undef, err
}
}
close(sh.blocks)
select {
case <-ctx.Done():
return cid.Undef, ctx.Err()
case <-sh.bs.Done():
}
if err := sh.bs.Err(); err != nil {
return cid.Undef, err
}
rootCid := nodes[0].Cid()
pin := api.PinWithOpts(api.NewCid(rootCid), sh.pinOptions)
pin.Name = fmt.Sprintf("%s-shard-%d", sh.pinOptions.Name, shardN)
// this sets allocations as priority allocation
pin.Allocations = sh.allocations
pin.Type = api.ShardType
ref := api.NewCid(prev)
pin.Reference = &ref
pin.MaxDepth = 1
pin.ShardSize = sh.Size() // use current size, not the limit
if len(nodes) > len(sh.dagNode)+1 { // using an indirect graph
pin.MaxDepth = 2
}
logger.Infof("shard #%d (%s) completed. Total size: %s. Links: %d",
shardN,
rootCid,
humanize.Bytes(sh.Size()),
len(sh.dagNode),
)
return rootCid, adder.Pin(ctx, sh.rpc, pin)
}
// Size returns this shard's current size.
func (sh *shard) Size() uint64 {
return sh.currentSize
}
// Limit returns this shard's size limit.
func (sh *shard) Limit() uint64 {
return sh.sizeLimit
}
// LastLink returns the last added link. When finishing sharding,
// the last link of the last shard is the data root for the
// full sharded DAG (the CID that would have resulted from
// adding the content to a single IPFS daemon).
func (sh *shard) LastLink() cid.Cid {
l := len(sh.dagNode)
lastLink := fmt.Sprintf("%d", l-1)
return sh.dagNode[lastLink]
}

View file

@ -1,106 +0,0 @@
package sharding
import (
"context"
"errors"
"fmt"
"testing"
"github.com/ipfs-cluster/ipfs-cluster/api"
)
// MockPinStore is used in VerifyShards
type MockPinStore interface {
// Gets a pin
PinGet(context.Context, api.Cid) (api.Pin, error)
}
// MockBlockStore is used in VerifyShards
type MockBlockStore interface {
// Gets a block
BlockGet(context.Context, api.Cid) ([]byte, error)
}
// VerifyShards checks that a sharded CID has been correctly formed and stored.
// This is a helper function for testing. It returns a map with all the blocks
// from all shards.
func VerifyShards(t *testing.T, rootCid api.Cid, pins MockPinStore, ipfs MockBlockStore, expectedShards int) (map[string]struct{}, error) {
ctx := context.Background()
metaPin, err := pins.PinGet(ctx, rootCid)
if err != nil {
return nil, fmt.Errorf("meta pin was not pinned: %s", err)
}
if api.PinType(metaPin.Type) != api.MetaType {
return nil, fmt.Errorf("bad MetaPin type")
}
if metaPin.Reference == nil {
return nil, errors.New("metaPin.Reference is unset")
}
clusterPin, err := pins.PinGet(ctx, *metaPin.Reference)
if err != nil {
return nil, fmt.Errorf("cluster pin was not pinned: %s", err)
}
if api.PinType(clusterPin.Type) != api.ClusterDAGType {
return nil, fmt.Errorf("bad ClusterDAGPin type")
}
if !clusterPin.Reference.Equals(metaPin.Cid) {
return nil, fmt.Errorf("clusterDAG should reference the MetaPin")
}
clusterDAGBlock, err := ipfs.BlockGet(ctx, clusterPin.Cid)
if err != nil {
return nil, fmt.Errorf("cluster pin was not stored: %s", err)
}
clusterDAGNode, err := CborDataToNode(clusterDAGBlock, "cbor")
if err != nil {
return nil, err
}
shards := clusterDAGNode.Links()
if len(shards) != expectedShards {
return nil, fmt.Errorf("bad number of shards")
}
shardBlocks := make(map[string]struct{})
var ref api.Cid
// traverse shards in order
for i := 0; i < len(shards); i++ {
sh, _, err := clusterDAGNode.ResolveLink([]string{fmt.Sprintf("%d", i)})
if err != nil {
return nil, err
}
shardPin, err := pins.PinGet(ctx, api.NewCid(sh.Cid))
if err != nil {
return nil, fmt.Errorf("shard was not pinned: %s %s", sh.Cid, err)
}
if ref != api.CidUndef && !shardPin.Reference.Equals(ref) {
t.Errorf("Ref (%s) should point to previous shard (%s)", ref, shardPin.Reference)
}
ref = shardPin.Cid
shardBlock, err := ipfs.BlockGet(ctx, shardPin.Cid)
if err != nil {
return nil, fmt.Errorf("shard block was not stored: %s", err)
}
shardNode, err := CborDataToNode(shardBlock, "cbor")
if err != nil {
return nil, err
}
for _, l := range shardNode.Links() {
ci := l.Cid.String()
_, ok := shardBlocks[ci]
if ok {
return nil, fmt.Errorf("block belongs to two shards: %s", ci)
}
shardBlocks[ci] = struct{}{}
}
}
return shardBlocks, nil
}

View file

@ -1,178 +0,0 @@
// Package single implements a ClusterDAGService that chunks and adds content
// to cluster without sharding, before pinning it.
package single
import (
"context"
adder "github.com/ipfs-cluster/ipfs-cluster/adder"
"github.com/ipfs-cluster/ipfs-cluster/api"
cid "github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
logging "github.com/ipfs/go-log/v2"
peer "github.com/libp2p/go-libp2p/core/peer"
rpc "github.com/libp2p/go-libp2p-gorpc"
)
var logger = logging.Logger("singledags")
var _ = logger // otherwise unused
// DAGService is an implementation of an adder.ClusterDAGService which
// puts the added blocks directly in the peers allocated to them (without
// sharding).
type DAGService struct {
adder.BaseDAGService
ctx context.Context
rpcClient *rpc.Client
dests []peer.ID
addParams api.AddParams
local bool
bs *adder.BlockStreamer
blocks chan api.NodeWithMeta
recentBlocks *recentBlocks
}
// New returns a new DAGService using the given rpc Client. The client is
// used to perform calls to IPFS.BlockStream and to Pin content on Cluster.
func New(ctx context.Context, rpc *rpc.Client, opts api.AddParams, local bool) *DAGService {
// ensure we don't Add something and pin it in direct mode.
opts.Mode = api.PinModeRecursive
return &DAGService{
ctx: ctx,
rpcClient: rpc,
dests: nil,
addParams: opts,
local: local,
blocks: make(chan api.NodeWithMeta, 256),
recentBlocks: &recentBlocks{},
}
}
// Add puts the given node in the destination peers.
func (dgs *DAGService) Add(ctx context.Context, node ipld.Node) error {
// Avoid adding the same node multiple times in a row.
// The ipfs-adder does that because some nodes are added
// via dagbuilder, then via MFS, and root nodes once more.
if dgs.recentBlocks.Has(node) {
return nil
}
// FIXME: can't this happen on initialization? Perhaps the point here
// is the adder only allocates and starts streaming when the first
// block arrives and not on creation.
if dgs.dests == nil {
dests, err := adder.BlockAllocate(ctx, dgs.rpcClient, dgs.addParams.PinOptions)
if err != nil {
return err
}
hasLocal := false
localPid := dgs.rpcClient.ID()
for i, d := range dests {
if d == localPid || d == "" {
hasLocal = true
// ensure our allocs do not carry an empty peer
// mostly an issue with testing mocks
dests[i] = localPid
}
}
dgs.dests = dests
if dgs.local {
// If this is a local pin, make sure that the local
// peer is among the allocations,
// UNLESS user-allocations are defined!
if !hasLocal && localPid != "" && len(dgs.addParams.UserAllocations) == 0 {
// replace last allocation with local peer
dgs.dests[len(dgs.dests)-1] = localPid
}
dgs.bs = adder.NewBlockStreamer(dgs.ctx, dgs.rpcClient, []peer.ID{localPid}, dgs.blocks)
} else {
dgs.bs = adder.NewBlockStreamer(dgs.ctx, dgs.rpcClient, dgs.dests, dgs.blocks)
}
}
select {
case <-ctx.Done():
return ctx.Err()
case <-dgs.ctx.Done():
return dgs.ctx.Err()
case dgs.blocks <- adder.IpldNodeToNodeWithMeta(node):
dgs.recentBlocks.Add(node)
return nil
}
}
// Finalize pins the last Cid added to this DAGService.
func (dgs *DAGService) Finalize(ctx context.Context, root api.Cid) (api.Cid, error) {
close(dgs.blocks)
select {
case <-dgs.ctx.Done():
return root, dgs.ctx.Err()
case <-ctx.Done():
return root, ctx.Err()
case <-dgs.bs.Done():
}
// If the streamer failed to put blocks.
if err := dgs.bs.Err(); err != nil {
return root, err
}
// Do not pin, just block put.
// Why? Because some people are uploading CAR files with partial DAGs
// and ideally they should be pinning only when the last partial CAR
// is uploaded. This gives them that option.
if dgs.addParams.NoPin {
return root, nil
}
// Cluster pin the result
rootPin := api.PinWithOpts(root, dgs.addParams.PinOptions)
rootPin.Allocations = dgs.dests
return root, adder.Pin(ctx, dgs.rpcClient, rootPin)
}
// Allocations returns the add destinations decided by the DAGService.
func (dgs *DAGService) Allocations() []peer.ID {
// using rpc clients without a host results in an empty peer
// which cannot be parsed to peer.ID on deserialization.
if len(dgs.dests) == 1 && dgs.dests[0] == "" {
return nil
}
return dgs.dests
}
// AddMany calls Add for every given node.
func (dgs *DAGService) AddMany(ctx context.Context, nodes []ipld.Node) error {
for _, node := range nodes {
err := dgs.Add(ctx, node)
if err != nil {
return err
}
}
return nil
}
type recentBlocks struct {
blocks [2]cid.Cid
cur int
}
func (rc *recentBlocks) Add(n ipld.Node) {
rc.blocks[rc.cur] = n.Cid()
rc.cur = (rc.cur + 1) % 2
}
func (rc *recentBlocks) Has(n ipld.Node) bool {
c := n.Cid()
return rc.blocks[0].Equals(c) || rc.blocks[1].Equals(c)
}

View file

@ -1,138 +0,0 @@
package single
import (
"context"
"errors"
"mime/multipart"
"sync"
"testing"
adder "github.com/ipfs-cluster/ipfs-cluster/adder"
"github.com/ipfs-cluster/ipfs-cluster/api"
"github.com/ipfs-cluster/ipfs-cluster/test"
peer "github.com/libp2p/go-libp2p/core/peer"
rpc "github.com/libp2p/go-libp2p-gorpc"
)
type testIPFSRPC struct {
blocks sync.Map
}
type testClusterRPC struct {
pins sync.Map
}
func (rpcs *testIPFSRPC) BlockStream(ctx context.Context, in <-chan api.NodeWithMeta, out chan<- struct{}) error {
defer close(out)
for n := range in {
rpcs.blocks.Store(n.Cid.String(), n)
}
return nil
}
func (rpcs *testClusterRPC) Pin(ctx context.Context, in api.Pin, out *api.Pin) error {
rpcs.pins.Store(in.Cid.String(), in)
*out = in
return nil
}
func (rpcs *testClusterRPC) BlockAllocate(ctx context.Context, in api.Pin, out *[]peer.ID) error {
if in.ReplicationFactorMin > 1 {
return errors.New("we can only replicate to 1 peer")
}
// it does not matter since we use host == nil for RPC, so it uses the
// local one in all cases.
*out = []peer.ID{test.PeerID1}
return nil
}
func TestAdd(t *testing.T) {
t.Run("balanced", func(t *testing.T) {
clusterRPC := &testClusterRPC{}
ipfsRPC := &testIPFSRPC{}
server := rpc.NewServer(nil, "mock")
err := server.RegisterName("Cluster", clusterRPC)
if err != nil {
t.Fatal(err)
}
err = server.RegisterName("IPFSConnector", ipfsRPC)
if err != nil {
t.Fatal(err)
}
client := rpc.NewClientWithServer(nil, "mock", server)
params := api.DefaultAddParams()
params.Wrap = true
dags := New(context.Background(), client, params, false)
add := adder.New(dags, params, nil)
sth := test.NewShardingTestHelper()
defer sth.Clean(t)
mr, closer := sth.GetTreeMultiReader(t)
defer closer.Close()
r := multipart.NewReader(mr, mr.Boundary())
rootCid, err := add.FromMultipart(context.Background(), r)
if err != nil {
t.Fatal(err)
}
if rootCid.String() != test.ShardingDirBalancedRootCIDWrapped {
t.Fatal("bad root cid: ", rootCid)
}
expected := test.ShardingDirCids[:]
for _, c := range expected {
_, ok := ipfsRPC.blocks.Load(c)
if !ok {
t.Error("block was not added to IPFS", c)
}
}
_, ok := clusterRPC.pins.Load(test.ShardingDirBalancedRootCIDWrapped)
if !ok {
t.Error("the tree wasn't pinned")
}
})
t.Run("trickle", func(t *testing.T) {
clusterRPC := &testClusterRPC{}
ipfsRPC := &testIPFSRPC{}
server := rpc.NewServer(nil, "mock")
err := server.RegisterName("Cluster", clusterRPC)
if err != nil {
t.Fatal(err)
}
err = server.RegisterName("IPFSConnector", ipfsRPC)
if err != nil {
t.Fatal(err)
}
client := rpc.NewClientWithServer(nil, "mock", server)
params := api.DefaultAddParams()
params.Layout = "trickle"
dags := New(context.Background(), client, params, false)
add := adder.New(dags, params, nil)
sth := test.NewShardingTestHelper()
defer sth.Clean(t)
mr, closer := sth.GetTreeMultiReader(t)
defer closer.Close()
r := multipart.NewReader(mr, mr.Boundary())
rootCid, err := add.FromMultipart(context.Background(), r)
if err != nil {
t.Fatal(err)
}
if rootCid.String() != test.ShardingDirTrickleRootCID {
t.Fatal("bad root cid")
}
_, ok := clusterRPC.pins.Load(test.ShardingDirTrickleRootCID)
if !ok {
t.Error("the tree wasn't pinned")
}
})
}

View file

@ -1,180 +0,0 @@
package adder
import (
"context"
"errors"
"sync"
"github.com/ipfs-cluster/ipfs-cluster/api"
"go.uber.org/multierr"
cid "github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
peer "github.com/libp2p/go-libp2p/core/peer"
rpc "github.com/libp2p/go-libp2p-gorpc"
)
// ErrBlockAdder is returned when adding a block to multiple destinations
// fails on all of them.
var ErrBlockAdder = errors.New("failed to put block on all destinations")
// BlockStreamer helps streaming nodes to multiple destinations, as long as
// one of them is still working.
type BlockStreamer struct {
dests []peer.ID
rpcClient *rpc.Client
blocks <-chan api.NodeWithMeta
ctx context.Context
cancel context.CancelFunc
errMu sync.Mutex
err error
}
// NewBlockStreamer creates a BlockStreamer given an rpc client, allocated
// peers and a channel on which the blocks to stream are received.
func NewBlockStreamer(ctx context.Context, rpcClient *rpc.Client, dests []peer.ID, blocks <-chan api.NodeWithMeta) *BlockStreamer {
bsCtx, cancel := context.WithCancel(ctx)
bs := BlockStreamer{
ctx: bsCtx,
cancel: cancel,
dests: dests,
rpcClient: rpcClient,
blocks: blocks,
err: nil,
}
go bs.streamBlocks()
return &bs
}
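// An illustrative usage sketch (not part of the original API docs; names
// like rpcClient, dests and nodes are placeholders for values the caller
// already has). It mirrors how the DAGServices in this repository drive a
// BlockStreamer:
//
//	blocks := make(chan api.NodeWithMeta, 256)
//	bs := NewBlockStreamer(ctx, rpcClient, dests, blocks)
//	for _, n := range nodes {
//		blocks <- IpldNodeToNodeWithMeta(n)
//	}
//	close(blocks)
//	<-bs.Done()
//	if err := bs.Err(); err != nil {
//		// err is only set when no destination succeeded.
//	}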
// Done returns a channel which gets closed when the BlockStreamer has
// finished.
func (bs *BlockStreamer) Done() <-chan struct{} {
return bs.ctx.Done()
}
func (bs *BlockStreamer) setErr(err error) {
bs.errMu.Lock()
bs.err = err
bs.errMu.Unlock()
}
// Err returns any errors that happened during the operation of the
// BlockStreamer, for example when blocks could not be put to any of the
// destinations.
func (bs *BlockStreamer) Err() error {
bs.errMu.Lock()
defer bs.errMu.Unlock()
return bs.err
}
func (bs *BlockStreamer) streamBlocks() {
defer bs.cancel()
// Nothing should be sent on out, but we
// drain it anyway.
out := make(chan struct{})
go func() {
for range out {
}
}()
errs := bs.rpcClient.MultiStream(
bs.ctx,
bs.dests,
"IPFSConnector",
"BlockStream",
bs.blocks,
out,
)
combinedErrors := multierr.Combine(errs...)
// FIXME: replicate everywhere.
if len(multierr.Errors(combinedErrors)) == len(bs.dests) {
logger.Error(combinedErrors)
bs.setErr(ErrBlockAdder)
} else if combinedErrors != nil {
logger.Warning("there were errors streaming blocks, but at least one destination succeeded")
logger.Warning(combinedErrors)
}
}
// IpldNodeToNodeWithMeta converts an ipld.Node to api.NodeWithMeta.
func IpldNodeToNodeWithMeta(n ipld.Node) api.NodeWithMeta {
size, err := n.Size()
if err != nil {
logger.Warn(err)
}
return api.NodeWithMeta{
Cid: api.NewCid(n.Cid()),
Data: n.RawData(),
CumSize: size,
}
}
// BlockAllocate helps allocating blocks to peers.
func BlockAllocate(ctx context.Context, rpc *rpc.Client, pinOpts api.PinOptions) ([]peer.ID, error) {
// Find where to allocate this file
var allocsStr []peer.ID
err := rpc.CallContext(
ctx,
"",
"Cluster",
"BlockAllocate",
api.PinWithOpts(api.CidUndef, pinOpts),
&allocsStr,
)
return allocsStr, err
}
// Pin helps sending local RPC pin requests.
func Pin(ctx context.Context, rpc *rpc.Client, pin api.Pin) error {
if pin.ReplicationFactorMin < 0 {
pin.Allocations = []peer.ID{}
}
logger.Debugf("adder pinning %+v", pin)
var pinResp api.Pin
return rpc.CallContext(
ctx,
"", // use ourself to pin
"Cluster",
"Pin",
pin,
&pinResp,
)
}
// ErrDAGNotFound is returned whenever we try to get a block from the DAGService.
var ErrDAGNotFound = errors.New("dagservice: a Get operation was attempted while cluster-adding (this is likely a bug)")
// BaseDAGService partially implements an ipld.DAGService.
// It provides the methods which are not needed by ClusterDAGServices
// (Get*, Remove*) so that implementations are spared from adding this code.
type BaseDAGService struct {
}
// Get always returns ErrDAGNotFound
func (dag BaseDAGService) Get(ctx context.Context, key cid.Cid) (ipld.Node, error) {
return nil, ErrDAGNotFound
}
// GetMany returns an output channel that always emits an error
func (dag BaseDAGService) GetMany(ctx context.Context, keys []cid.Cid) <-chan *ipld.NodeOption {
out := make(chan *ipld.NodeOption, 1)
out <- &ipld.NodeOption{Err: ErrDAGNotFound}
close(out)
return out
}
// Remove is a nop
func (dag BaseDAGService) Remove(ctx context.Context, key cid.Cid) error {
return nil
}
// RemoveMany is a nop
func (dag BaseDAGService) RemoveMany(ctx context.Context, keys []cid.Cid) error {
return nil
}

View file

@ -1,270 +0,0 @@
package ipfscluster
import (
"context"
"errors"
"fmt"
peer "github.com/libp2p/go-libp2p/core/peer"
"go.opencensus.io/trace"
"github.com/ipfs-cluster/ipfs-cluster/api"
)
// This file gathers allocation logic used when pinning or re-pinning
// to find which peers should be allocated to a Cid. Allocation is constrained
// by ReplicationFactorMin and ReplicationFactorMax parameters obtained
// from the Pin object.
// The allocation process has several steps:
//
// * Find which peers are pinning a CID
// * Obtain the last values for the configured informer metrics from the
// monitor component
// * Divide the metrics between "current" (peers already pinning the CID)
// and "candidates" (peers that could pin the CID), as long as their metrics
// are valid.
// * Given the candidates:
// * Check if we are overpinning an item
// * Check if there are not enough candidates for the "needed" replication
// factor.
// * If there are enough candidates:
// * Call the configured allocator, which sorts the candidates (and
// may veto some depending on the allocation strategy).
// * The allocator returns a list of final candidate peers sorted by
// order of preference.
// * Take as many final candidates from the list as we can, until
// ReplicationFactorMax is reached. Error if there are fewer than
// ReplicationFactorMin.
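// As a small worked example (hypothetical numbers): with rplMin=2 and
// rplMax=3, if one current allocation is still valid, then needed=1 and
// wanted=2, so at most two new peers are taken from the sorted candidate
// list and appended to the current allocations.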
// A wrapper to carry peer metrics that have been classified.
type classifiedMetrics struct {
current api.MetricsSet
currentPeers []peer.ID
candidate api.MetricsSet
candidatePeers []peer.ID
priority api.MetricsSet
priorityPeers []peer.ID
}
// allocate finds peers to allocate a hash using the informer and the monitor.
// It should only be used with valid replicationFactors (if rplMin and rplMax
// are > 0, then rplMin <= rplMax).
// It always returns allocations, but if no new allocations are needed,
// it will return the current ones. Note that allocate() does not take
// into account if the given CID was previously in a "pin everywhere" mode,
// and will consider such Pins as currently unallocated ones, providing
// new allocations as available.
func (c *Cluster) allocate(ctx context.Context, hash api.Cid, currentPin api.Pin, rplMin, rplMax int, blacklist []peer.ID, priorityList []peer.ID) ([]peer.ID, error) {
ctx, span := trace.StartSpan(ctx, "cluster/allocate")
defer span.End()
if (rplMin + rplMax) == 0 {
return nil, fmt.Errorf("bad replication factors: %d/%d", rplMin, rplMax)
}
if rplMin < 0 && rplMax < 0 { // allocate everywhere
return []peer.ID{}, nil
}
// Figure out who is holding the CID
var currentAllocs []peer.ID
if currentPin.Defined() {
currentAllocs = currentPin.Allocations
}
// Get the metrics that the allocator is interested in
mSet := make(api.MetricsSet)
metrics := c.allocator.Metrics()
for _, metricName := range metrics {
mSet[metricName] = c.monitor.LatestMetrics(ctx, metricName)
}
// Filter and divide metrics. The resulting sets only have peers that
// have all the metrics needed and are not blacklisted.
classified := c.filterMetrics(
ctx,
mSet,
len(metrics),
currentAllocs,
priorityList,
blacklist,
)
newAllocs, err := c.obtainAllocations(
ctx,
hash,
rplMin,
rplMax,
classified,
)
if err != nil {
return newAllocs, err
}
// if current allocations are above the minimal threshold,
// obtainAllocations returns nil and we just leave things as they are.
// This is what makes repinning do nothing if items are still above
// rmin.
if newAllocs == nil {
newAllocs = currentAllocs
}
return newAllocs, nil
}
// Given metrics from all informers, split them into 3 MetricsSet:
// - Those corresponding to currently allocated peers
// - Those corresponding to priority allocations
// - Those corresponding to "candidate" allocations
// It also returns a slice of the peers in those groups.
//
// Peers from untrusted peers are left out if configured.
//
// For a metric/peer to be included in a group, it is necessary that it has
// metrics for all informers.
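// (For instance, with two configured informers, a peer that reported a
// "freespace" metric but no "tag:group" metric would be left out of every
// group; the metric names here are hypothetical.)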
func (c *Cluster) filterMetrics(ctx context.Context, mSet api.MetricsSet, numMetrics int, currentAllocs, priorityList, blacklist []peer.ID) classifiedMetrics {
curPeersMap := make(map[peer.ID][]api.Metric)
candPeersMap := make(map[peer.ID][]api.Metric)
prioPeersMap := make(map[peer.ID][]api.Metric)
// Divide the metrics by current/candidate/priority and by peer
for _, metrics := range mSet {
for _, m := range metrics {
switch {
case containsPeer(blacklist, m.Peer):
// discard blacklisted peers
continue
case c.config.PinOnlyOnTrustedPeers && !c.consensus.IsTrustedPeer(ctx, m.Peer):
// discard peers that are not trusted when
// configured.
continue
case containsPeer(currentAllocs, m.Peer):
curPeersMap[m.Peer] = append(curPeersMap[m.Peer], m)
case containsPeer(priorityList, m.Peer):
prioPeersMap[m.Peer] = append(prioPeersMap[m.Peer], m)
default:
candPeersMap[m.Peer] = append(candPeersMap[m.Peer], m)
}
}
}
fillMetricsSet := func(peersMap map[peer.ID][]api.Metric) (api.MetricsSet, []peer.ID) {
mSet := make(api.MetricsSet)
peers := make([]peer.ID, 0, len(peersMap))
// Put the metrics in their sets if peers have metrics for all
// informers, and record those peers. This relies on LatestMetrics
// returning exactly one metric per peer. Thus, a peer with
// all the needed metrics should have exactly numMetrics of them.
// Otherwise, they are ignored.
for p, metrics := range peersMap {
if len(metrics) == numMetrics {
for _, m := range metrics {
mSet[m.Name] = append(mSet[m.Name], m)
}
peers = append(peers, p)
} // otherwise this peer will be ignored.
}
return mSet, peers
}
curSet, curPeers := fillMetricsSet(curPeersMap)
candSet, candPeers := fillMetricsSet(candPeersMap)
prioSet, prioPeers := fillMetricsSet(prioPeersMap)
return classifiedMetrics{
current: curSet,
currentPeers: curPeers,
candidate: candSet,
candidatePeers: candPeers,
priority: prioSet,
priorityPeers: prioPeers,
}
}
// allocationError logs an allocation error
func allocationError(hash api.Cid, needed, wanted int, candidatesValid []peer.ID) error {
logger.Errorf("Not enough candidates to allocate %s:", hash)
logger.Errorf(" Needed: %d", needed)
logger.Errorf(" Wanted: %d", wanted)
logger.Errorf(" Available candidates: %d:", len(candidatesValid))
for _, c := range candidatesValid {
logger.Errorf(" - %s", c.Pretty())
}
errorMsg := "not enough peers to allocate CID. "
errorMsg += fmt.Sprintf("Needed at least: %d. ", needed)
errorMsg += fmt.Sprintf("Wanted at most: %d. ", wanted)
errorMsg += fmt.Sprintf("Available candidates: %d. ", len(candidatesValid))
errorMsg += "See logs for more info."
return errors.New(errorMsg)
}
func (c *Cluster) obtainAllocations(
ctx context.Context,
hash api.Cid,
rplMin, rplMax int,
metrics classifiedMetrics,
) ([]peer.ID, error) {
ctx, span := trace.StartSpan(ctx, "cluster/obtainAllocations")
defer span.End()
nCurrentValid := len(metrics.currentPeers)
nAvailableValid := len(metrics.candidatePeers) + len(metrics.priorityPeers)
needed := rplMin - nCurrentValid // The minimum we need
wanted := rplMax - nCurrentValid // The maximum we want
logger.Debugf("obtainAllocations: current: %d", nCurrentValid)
logger.Debugf("obtainAllocations: available: %d", nAvailableValid)
logger.Debugf("obtainAllocations: candidates: %d", len(metrics.candidatePeers))
logger.Debugf("obtainAllocations: priority: %d", len(metrics.priorityPeers))
logger.Debugf("obtainAllocations: Needed: %d", needed)
logger.Debugf("obtainAllocations: Wanted: %d", wanted)
// Reminder: rplMin <= rplMax AND >0
if wanted < 0 { // allocations above maximum threshold: drop some
// This could be done more intelligently by dropping them
// according to the allocator order (i.e. freeing peers
// with most used space first).
return metrics.currentPeers[0 : len(metrics.currentPeers)+wanted], nil
}
if needed <= 0 { // allocations are above minimal threshold
// We don't provide any new allocations
return nil, nil
}
if nAvailableValid < needed { // not enough candidates
return nil, allocationError(hash, needed, wanted, append(metrics.priorityPeers, metrics.candidatePeers...))
}
// We can allocate from this point. Use the allocator to decide
// on the priority of candidates and grab as many as "wanted".
// The allocator returns a list of peers ordered by priority.
finalAllocs, err := c.allocator.Allocate(
ctx,
hash,
metrics.current,
metrics.candidate,
metrics.priority,
)
if err != nil {
return nil, logError(err.Error())
}
logger.Debugf("obtainAllocations: allocate(): %s", finalAllocs)
// check that we have enough as the allocator may have returned
// fewer candidates than provided.
if got := len(finalAllocs); got < needed {
return nil, allocationError(hash, needed, wanted, finalAllocs)
}
allocationsToUse := minInt(wanted, len(finalAllocs))
// the final result is the currently valid allocations
// along with the ones provided by the allocator
return append(metrics.currentPeers, finalAllocs[0:allocationsToUse]...), nil
}

View file

@ -1,327 +0,0 @@
// Package balanced implements an allocator that can sort allocations
// based on multiple metrics, where metrics may be an arbitrary way to
// partition a set of peers.
//
// For example, allocating by ["tag:region", "disk"] the resulting peer
// candidate order will balanced between regions and ordered by the value of
// the weight of the disk metric.
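// A minimal sketch of this allocator's JSON configuration (only the
// "allocate_by" field is defined by this package; its placement within the
// wider cluster configuration file is not shown here):
//
//	{
//	    "allocate_by": ["tag:region", "freespace"]
//	}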
package balanced
import (
"context"
"fmt"
"sort"
api "github.com/ipfs-cluster/ipfs-cluster/api"
logging "github.com/ipfs/go-log/v2"
peer "github.com/libp2p/go-libp2p/core/peer"
rpc "github.com/libp2p/go-libp2p-gorpc"
)
var logger = logging.Logger("allocator")
// Allocator is an allocator that partitions metrics and orders
// the final list of allocations by selecting from each partition.
type Allocator struct {
config *Config
rpcClient *rpc.Client
}
// New returns an initialized Allocator.
func New(cfg *Config) (*Allocator, error) {
err := cfg.Validate()
if err != nil {
return nil, err
}
return &Allocator{
config: cfg,
}, nil
}
// SetClient provides us with an rpc.Client which allows
// contacting other components in the cluster.
func (a *Allocator) SetClient(c *rpc.Client) {
a.rpcClient = c
}
// Shutdown is called on cluster shutdown. We just invalidate
// any metrics from this point.
func (a *Allocator) Shutdown(ctx context.Context) error {
a.rpcClient = nil
return nil
}
type partitionedMetric struct {
metricName string
curChoosingIndex int
noMore bool
partitions []*partition // they are in order of their values
}
type partition struct {
value string
weight int64
aggregatedWeight int64
peers map[peer.ID]bool // the bool tracks whether the peer has been picked already out of the partition when doing the final sort.
sub *partitionedMetric // all peers in sub-partitions will have the same value for this metric
}
// Returns a partitionedMetric which has partitions and subpartitions based
// on the metrics and values given by the "by" slice. The partitions
// are ordered based on the cumulative weight.
func partitionMetrics(set api.MetricsSet, by []string) *partitionedMetric {
rootMetric := by[0]
pnedMetric := &partitionedMetric{
metricName: rootMetric,
partitions: partitionValues(set[rootMetric]),
}
// For sorting based on weight (more to less)
lessF := func(i, j int) bool {
wi := pnedMetric.partitions[i].weight
wj := pnedMetric.partitions[j].weight
// if weight is equal, sort by aggregated weight of
// all sub-partitions.
if wi == wj {
awi := pnedMetric.partitions[i].aggregatedWeight
awj := pnedMetric.partitions[j].aggregatedWeight
// If subpartitions weigh the same, order strictly
// by value string
if awi == awj {
return pnedMetric.partitions[i].value < pnedMetric.partitions[j].value
}
return awj < awi
}
// Descending!
return wj < wi
}
if len(by) == 1 { // we are done
sort.Slice(pnedMetric.partitions, lessF)
return pnedMetric
}
// process sub-partitions
for _, partition := range pnedMetric.partitions {
filteredSet := make(api.MetricsSet)
for k, v := range set {
if k == rootMetric { // not needed anymore
continue
}
for _, m := range v {
// only leave metrics for peers in current partition
if _, ok := partition.peers[m.Peer]; ok {
filteredSet[k] = append(filteredSet[k], m)
}
}
}
partition.sub = partitionMetrics(filteredSet, by[1:])
// Add the aggregated weight of the subpartitions
for _, subp := range partition.sub.partitions {
partition.aggregatedWeight += subp.aggregatedWeight
}
}
sort.Slice(pnedMetric.partitions, lessF)
return pnedMetric
}
func partitionValues(metrics []api.Metric) []*partition {
partitions := []*partition{}
if len(metrics) <= 0 {
return partitions
}
// We group peers with the same value in the same partition.
partitionsByValue := make(map[string]*partition)
for _, m := range metrics {
// Sometimes two metrics have the same value / weight, but we
// still want to put them in different partitions. Otherwise
// their weights get added, they form a single bucket, and
// they are therefore not selected in order: 3 peers with
// freespace=100 and one peer with freespace=200 would result
// in one of the peers with freespace 100 being chosen first
// because the partition's weight is 300.
//
// We are going to call these metrics (like free-space),
// non-partitionable metrics. This is going to be the default
// (for backwards compat reasons).
//
// The informers must set the Partitionable field accordingly
// when two metrics with the same value must be grouped in the
// same partition.
//
// Note: aggregatedWeight is the same as weight here (sum of
// weight of all metrics in partitions), and gets updated
// later in partitionMetrics with the aggregated weight of
// sub-partitions.
if !m.Partitionable {
partitions = append(partitions, &partition{
value: m.Value,
weight: m.GetWeight(),
aggregatedWeight: m.GetWeight(),
peers: map[peer.ID]bool{
m.Peer: false,
},
})
continue
}
// Any other case, we partition by value.
if p, ok := partitionsByValue[m.Value]; ok {
p.peers[m.Peer] = false
p.weight += m.GetWeight()
p.aggregatedWeight += m.GetWeight()
} else {
partitionsByValue[m.Value] = &partition{
value: m.Value,
weight: m.GetWeight(),
aggregatedWeight: m.GetWeight(),
peers: map[peer.ID]bool{
m.Peer: false,
},
}
}
}
for _, p := range partitionsByValue {
partitions = append(partitions, p)
}
return partitions
}
// Returns a list of peers sorted by never choosing twice from the same
// partition if there is some other partition to choose from.
func (pnedm *partitionedMetric) sortedPeers() []peer.ID {
peers := []peer.ID{}
for {
peer := pnedm.chooseNext()
if peer == "" { // This means we are done.
break
}
peers = append(peers, peer)
}
return peers
}
func (pnedm *partitionedMetric) chooseNext() peer.ID {
lenp := len(pnedm.partitions)
if lenp == 0 {
return ""
}
if pnedm.noMore {
return ""
}
var peer peer.ID
curPartition := pnedm.partitions[pnedm.curChoosingIndex]
done := 0
for {
if curPartition.sub != nil {
// Choose something from the sub-partitionedMetric
peer = curPartition.sub.chooseNext()
} else {
// We are a bottom-partition. Choose one of our peers
for pid, used := range curPartition.peers {
if !used {
peer = pid
curPartition.peers[pid] = true // mark as used
break
}
}
}
// look in next partition next time
pnedm.curChoosingIndex = (pnedm.curChoosingIndex + 1) % lenp
curPartition = pnedm.partitions[pnedm.curChoosingIndex]
done++
if peer != "" {
break
}
// no peer and we have looked in as many partitions as we have
if done == lenp {
pnedm.noMore = true
break
}
}
return peer
}
// Allocate produces a sorted list of cluster peer IDs based on different
// metrics provided for those peer IDs.
// It works as follows:
//
// - First, it buckets each peer's metrics based on the AllocateBy list. The
// metric name must match the bucket name, otherwise the metric is put at
// the end.
// - Second, based on the AllocateBy order, it orders the first bucket and
// groups peers by ordered value.
// - Third, it selects metrics on the second bucket for the highest-priority
// peers of the first bucket and orders their metrics. Then it does the
// same for the peers in second position, and so on.
// - It repeats the process until there are no more buckets to sort.
// - Finally, it returns the resulting list of peers, choosing from each
// top-level partition in turn so that consecutive picks come from
// different partitions whenever possible.
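// A tiny illustration (hypothetical values): with AllocateBy = ["region",
// "freespace"], peers are first partitioned by region and, within each
// region, ordered by free space; the returned list then alternates
// regions, starting with the peer with the most free space in the
// heaviest region.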
func (a *Allocator) Allocate(
ctx context.Context,
c api.Cid,
current, candidates, priority api.MetricsSet,
) ([]peer.ID, error) {
// For the allocation to work well, there have to be metrics of all
// the types for all the peers. There cannot be a metric of one type
// for a peer that does not appear in the other types.
//
// Removing such occurrences is done in allocate.go, before the
// allocator is called.
//
// Otherwise, the sorting might be funny.
candidatePartition := partitionMetrics(candidates, a.config.AllocateBy)
priorityPartition := partitionMetrics(priority, a.config.AllocateBy)
logger.Debugf("Balanced allocator partitions:\n%s\n", printPartition(candidatePartition, 0))
//fmt.Println(printPartition(candidatePartition, 0))
first := priorityPartition.sortedPeers()
last := candidatePartition.sortedPeers()
return append(first, last...), nil
}
// Metrics returns the names of the metrics that have been registered
// with this allocator.
func (a *Allocator) Metrics() []string {
return a.config.AllocateBy
}
func printPartition(m *partitionedMetric, ind int) string {
str := ""
indent := func() {
for i := 0; i < ind+2; i++ {
str += " "
}
}
for _, p := range m.partitions {
indent()
str += fmt.Sprintf(" | %s:%s - %d - [", m.metricName, p.value, p.weight)
for p, u := range p.peers {
str += fmt.Sprintf("%s|%t, ", p, u)
}
str += "]\n"
if p.sub != nil {
str += printPartition(p.sub, ind+2)
}
}
return str
}

View file

@ -1,155 +0,0 @@
package balanced
import (
"context"
"testing"
"time"
api "github.com/ipfs-cluster/ipfs-cluster/api"
"github.com/ipfs-cluster/ipfs-cluster/test"
peer "github.com/libp2p/go-libp2p/core/peer"
)
func makeMetric(name, value string, weight int64, peer peer.ID, partitionable bool) api.Metric {
return api.Metric{
Name: name,
Value: value,
Weight: weight,
Peer: peer,
Valid: true,
Partitionable: partitionable,
Expire: time.Now().Add(time.Minute).UnixNano(),
}
}
func TestAllocate(t *testing.T) {
alloc, err := New(&Config{
AllocateBy: []string{
"region",
"az",
"pinqueue",
"freespace",
},
})
if err != nil {
t.Fatal(err)
}
candidates := api.MetricsSet{
"abc": []api.Metric{ // don't want anything in results
makeMetric("abc", "a", 0, test.PeerID1, true),
makeMetric("abc", "b", 0, test.PeerID2, true),
},
"region": []api.Metric{
makeMetric("region", "a-us", 0, test.PeerID1, true),
makeMetric("region", "a-us", 0, test.PeerID2, true),
makeMetric("region", "b-eu", 0, test.PeerID3, true),
makeMetric("region", "b-eu", 0, test.PeerID4, true),
makeMetric("region", "b-eu", 0, test.PeerID5, true),
makeMetric("region", "c-au", 0, test.PeerID6, true),
makeMetric("region", "c-au", 0, test.PeerID7, true),
makeMetric("region", "c-au", 0, test.PeerID8, true), // I don't want to see this in results
},
"az": []api.Metric{
makeMetric("az", "us1", 0, test.PeerID1, true),
makeMetric("az", "us2", 0, test.PeerID2, true),
makeMetric("az", "eu1", 0, test.PeerID3, true),
makeMetric("az", "eu1", 0, test.PeerID4, true),
makeMetric("az", "eu2", 0, test.PeerID5, true),
makeMetric("az", "au1", 0, test.PeerID6, true),
makeMetric("az", "au1", 0, test.PeerID7, true),
},
"pinqueue": []api.Metric{
makeMetric("pinqueue", "100", 0, test.PeerID1, true),
makeMetric("pinqueue", "200", 0, test.PeerID2, true),
makeMetric("pinqueue", "100", 0, test.PeerID3, true),
makeMetric("pinqueue", "200", 0, test.PeerID4, true),
makeMetric("pinqueue", "300", 0, test.PeerID5, true),
makeMetric("pinqueue", "100", 0, test.PeerID6, true),
makeMetric("pinqueue", "1000", -1, test.PeerID7, true),
},
"freespace": []api.Metric{
makeMetric("freespace", "100", 100, test.PeerID1, false),
makeMetric("freespace", "500", 500, test.PeerID2, false),
makeMetric("freespace", "200", 200, test.PeerID3, false),
makeMetric("freespace", "400", 400, test.PeerID4, false),
makeMetric("freespace", "10", 10, test.PeerID5, false),
makeMetric("freespace", "50", 50, test.PeerID6, false),
makeMetric("freespace", "600", 600, test.PeerID7, false),
makeMetric("freespace", "10000", 10000, test.PeerID8, false),
},
}
// Region weights: a-us (pids 1,2): 600. b-eu (pids 3,4,5): 610. c-au (pids 6,7): 649
// Az weights: us1: 100. us2: 500. eu1: 600. eu2: 10. au1: 649
// Based on the algorithm it should choose:
//
// - c-au (most-weight)->au1->pinqueue(0)->pid6
// - b-eu->eu1->pid4
// - a-us->us2->pid2
// - <repeat regions>
// - c-au->au1 (nowhere else to choose)->pid7 (region exausted)
// - b-eu->eu2 (already had in eu1)->pid5
// - a-us->us1 (already had in us2)->pid1
// - <repeat regions>
// - b-eu->eu1->pid3 (only peer left)
peers, err := alloc.Allocate(context.Background(),
test.Cid1,
nil,
candidates,
nil,
)
if err != nil {
t.Fatal(err)
}
if len(peers) < 7 {
t.Fatalf("not enough peers: %s", peers)
}
for i, p := range peers {
t.Logf("%d - %s", i, p)
switch i {
case 0:
if p != test.PeerID6 {
t.Errorf("wrong id in pos %d: %s", i, p)
}
case 1:
if p != test.PeerID4 {
t.Errorf("wrong id in pos %d: %s", i, p)
}
case 2:
if p != test.PeerID2 {
t.Errorf("wrong id in pos %d: %s", i, p)
}
case 3:
if p != test.PeerID7 {
t.Errorf("wrong id in pos %d: %s", i, p)
}
case 4:
if p != test.PeerID5 {
t.Errorf("wrong id in pos %d: %s", i, p)
}
case 5:
if p != test.PeerID1 {
t.Errorf("wrong id in pos %d: %s", i, p)
}
case 6:
if p != test.PeerID3 {
t.Errorf("wrong id in pos %d: %s", i, p)
}
default:
t.Error("too many peers")
}
}
}

View file

@ -1,103 +0,0 @@
package balanced
import (
"encoding/json"
"errors"
"github.com/ipfs-cluster/ipfs-cluster/config"
"github.com/kelseyhightower/envconfig"
)
const configKey = "balanced"
const envConfigKey = "cluster_balanced"
// These are the default values for a Config.
var (
DefaultAllocateBy = []string{"tag:group", "freespace"}
)
// Config allows initializing the Allocator.
type Config struct {
config.Saver
AllocateBy []string
}
type jsonConfig struct {
AllocateBy []string `json:"allocate_by"`
}
// ConfigKey returns a human-friendly identifier for this
// Config's type.
func (cfg *Config) ConfigKey() string {
return configKey
}
// Default initializes this Config with sensible values.
func (cfg *Config) Default() error {
cfg.AllocateBy = DefaultAllocateBy
return nil
}
// ApplyEnvVars fills in any Config fields found
// as environment variables.
func (cfg *Config) ApplyEnvVars() error {
jcfg := cfg.toJSONConfig()
err := envconfig.Process(envConfigKey, jcfg)
if err != nil {
return err
}
return cfg.applyJSONConfig(jcfg)
}
// Validate checks that the fields of this configuration have
// sensible values.
func (cfg *Config) Validate() error {
if len(cfg.AllocateBy) <= 0 {
return errors.New("metricalloc.allocate_by is invalid")
}
return nil
}
// LoadJSON parses a raw JSON byte-slice as generated by ToJSON().
func (cfg *Config) LoadJSON(raw []byte) error {
jcfg := &jsonConfig{}
err := json.Unmarshal(raw, jcfg)
if err != nil {
return err
}
cfg.Default()
return cfg.applyJSONConfig(jcfg)
}
func (cfg *Config) applyJSONConfig(jcfg *jsonConfig) error {
// When unset, leave default
if len(jcfg.AllocateBy) > 0 {
cfg.AllocateBy = jcfg.AllocateBy
}
return cfg.Validate()
}
// ToJSON generates a human-friendly JSON representation of this Config.
func (cfg *Config) ToJSON() ([]byte, error) {
jcfg := cfg.toJSONConfig()
return config.DefaultJSONMarshal(jcfg)
}
func (cfg *Config) toJSONConfig() *jsonConfig {
return &jsonConfig{
AllocateBy: cfg.AllocateBy,
}
}
// ToDisplayJSON returns JSON config as a string.
func (cfg *Config) ToDisplayJSON() ([]byte, error) {
return config.DisplayJSON(cfg.toJSONConfig())
}

View file

@ -1,61 +0,0 @@
package balanced
import (
"os"
"testing"
)
var cfgJSON = []byte(`
{
"allocate_by": ["tag", "disk"]
}
`)
func TestLoadJSON(t *testing.T) {
cfg := &Config{}
err := cfg.LoadJSON(cfgJSON)
if err != nil {
t.Fatal(err)
}
}
func TestToJSON(t *testing.T) {
cfg := &Config{}
cfg.LoadJSON(cfgJSON)
newjson, err := cfg.ToJSON()
if err != nil {
t.Fatal(err)
}
cfg = &Config{}
err = cfg.LoadJSON(newjson)
if err != nil {
t.Fatal(err)
}
if len(cfg.AllocateBy) != 2 {
t.Error("configuration was lost in serialization/deserialization")
}
}
func TestDefault(t *testing.T) {
cfg := &Config{}
cfg.Default()
if cfg.Validate() != nil {
t.Fatal("error validating")
}
cfg.AllocateBy = nil
if cfg.Validate() == nil {
t.Fatal("expected error validating")
}
}
func TestApplyEnvVars(t *testing.T) {
os.Setenv("CLUSTER_BALANCED_ALLOCATEBY", "a,b,c")
cfg := &Config{}
cfg.ApplyEnvVars()
if len(cfg.AllocateBy) != 3 {
t.Fatal("failed to override allocate_by with env var")
}
}

View file

@ -1,261 +0,0 @@
package api
import (
"errors"
"fmt"
"net/url"
"strconv"
cid "github.com/ipfs/go-cid"
peer "github.com/libp2p/go-libp2p/core/peer"
)
// DefaultShardSize is the shard size for params objects created with DefaultParams().
var DefaultShardSize = uint64(100 * 1024 * 1024) // 100 MB
// AddedOutput carries information for displaying the standard ipfs output
// indicating that a node of a file has been added.
type AddedOutput struct {
Name string `json:"name" codec:"n,omitempty"`
Cid Cid `json:"cid" codec:"c"`
Bytes uint64 `json:"bytes,omitempty" codec:"b,omitempty"`
Size uint64 `json:"size,omitempty" codec:"s,omitempty"`
Allocations []peer.ID `json:"allocations,omitempty" codec:"a,omitempty"`
}
// IPFSAddParams groups options specific to the ipfs-adder, which builds
// UnixFS dags with the input files. This struct is embedded in AddParams.
type IPFSAddParams struct {
Layout string
Chunker string
RawLeaves bool
Progress bool
CidVersion int
HashFun string
NoCopy bool
}
// AddParams contains all of the configurable parameters needed to specify
// the importing process of a file being added to ipfs-cluster.
type AddParams struct {
PinOptions
Local bool
Recursive bool
Hidden bool
Wrap bool
Shard bool
StreamChannels bool
Format string // selects which adder to use
NoPin bool
IPFSAddParams
}
// DefaultAddParams returns an AddParams object with standard defaults
func DefaultAddParams() AddParams {
return AddParams{
Local: false,
Recursive: false,
Hidden: false,
Wrap: false,
Shard: false,
StreamChannels: true,
Format: "unixfs",
NoPin: false,
PinOptions: PinOptions{
ReplicationFactorMin: 0,
ReplicationFactorMax: 0,
Name: "",
Mode: PinModeRecursive,
ShardSize: DefaultShardSize,
Metadata: make(map[string]string),
Origins: nil,
},
IPFSAddParams: IPFSAddParams{
Layout: "", // corresponds to balanced layout
Chunker: "size-262144",
RawLeaves: false,
Progress: false,
CidVersion: 0,
HashFun: "sha2-256",
NoCopy: false,
},
}
}
func parseBoolParam(q url.Values, name string, dest *bool) error {
if v := q.Get(name); v != "" {
b, err := strconv.ParseBool(v)
if err != nil {
return fmt.Errorf("parameter %s invalid", name)
}
*dest = b
}
return nil
}
func parseIntParam(q url.Values, name string, dest *int) error {
if v := q.Get(name); v != "" {
i, err := strconv.Atoi(v)
if err != nil {
return fmt.Errorf("parameter %s invalid", name)
}
*dest = i
}
return nil
}
// AddParamsFromQuery parses the AddParams object from
// a URL.Query().
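// For example (a hypothetical query; q and params are placeholder names,
// and only parameters parsed below are used):
//
//	q, _ := url.ParseQuery("shard=true&layout=trickle&chunker=size-1048576")
//	params, err := AddParamsFromQuery(q)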
func AddParamsFromQuery(query url.Values) (AddParams, error) {
params := DefaultAddParams()
opts := &PinOptions{}
err := opts.FromQuery(query)
if err != nil {
return params, err
}
params.PinOptions = *opts
params.PinUpdate.Cid = cid.Undef // hardcoded, as it does not make sense when adding
layout := query.Get("layout")
switch layout {
case "trickle", "balanced", "":
// nothing
default:
return params, errors.New("layout parameter is invalid")
}
params.Layout = layout
chunker := query.Get("chunker")
if chunker != "" {
params.Chunker = chunker
}
hashF := query.Get("hash")
if hashF != "" {
params.HashFun = hashF
}
format := query.Get("format")
switch format {
case "car", "unixfs", "":
default:
return params, errors.New("format parameter is invalid")
}
params.Format = format
err = parseBoolParam(query, "local", &params.Local)
if err != nil {
return params, err
}
err = parseBoolParam(query, "recursive", &params.Recursive)
if err != nil {
return params, err
}
err = parseBoolParam(query, "hidden", &params.Hidden)
if err != nil {
return params, err
}
err = parseBoolParam(query, "wrap-with-directory", &params.Wrap)
if err != nil {
return params, err
}
err = parseBoolParam(query, "shard", &params.Shard)
if err != nil {
return params, err
}
err = parseBoolParam(query, "progress", &params.Progress)
if err != nil {
return params, err
}
err = parseIntParam(query, "cid-version", &params.CidVersion)
if err != nil {
return params, err
}
// This mimics go-ipfs behavior.
if params.CidVersion > 0 {
params.RawLeaves = true
}
// If the raw-leaves param is empty, the default RawLeaves value will
// be used (which may be true or false depending on
// CidVersion). Otherwise, it will be explicitly set.
err = parseBoolParam(query, "raw-leaves", &params.RawLeaves)
if err != nil {
return params, err
}
err = parseBoolParam(query, "stream-channels", &params.StreamChannels)
if err != nil {
return params, err
}
err = parseBoolParam(query, "nocopy", &params.NoCopy)
if err != nil {
return params, err
}
err = parseBoolParam(query, "no-pin", &params.NoPin)
if err != nil {
return params, err
}
return params, nil
}
// ToQueryString returns a url query string (key=value&key2=value2&...)
func (p AddParams) ToQueryString() (string, error) {
pinOptsQuery, err := p.PinOptions.ToQuery()
if err != nil {
return "", err
}
query, err := url.ParseQuery(pinOptsQuery)
if err != nil {
return "", err
}
query.Set("shard", fmt.Sprintf("%t", p.Shard))
query.Set("local", fmt.Sprintf("%t", p.Local))
query.Set("recursive", fmt.Sprintf("%t", p.Recursive))
query.Set("layout", p.Layout)
query.Set("chunker", p.Chunker)
query.Set("raw-leaves", fmt.Sprintf("%t", p.RawLeaves))
query.Set("hidden", fmt.Sprintf("%t", p.Hidden))
query.Set("wrap-with-directory", fmt.Sprintf("%t", p.Wrap))
query.Set("progress", fmt.Sprintf("%t", p.Progress))
query.Set("cid-version", fmt.Sprintf("%d", p.CidVersion))
query.Set("hash", p.HashFun)
query.Set("stream-channels", fmt.Sprintf("%t", p.StreamChannels))
query.Set("nocopy", fmt.Sprintf("%t", p.NoCopy))
query.Set("format", p.Format)
query.Set("no-pin", fmt.Sprintf("%t", p.NoPin))
return query.Encode(), nil
}
// Equals checks if p equals p2.
func (p AddParams) Equals(p2 AddParams) bool {
return p.PinOptions.Equals(p2.PinOptions) &&
p.Local == p2.Local &&
p.Recursive == p2.Recursive &&
p.Shard == p2.Shard &&
p.Layout == p2.Layout &&
p.Chunker == p2.Chunker &&
p.RawLeaves == p2.RawLeaves &&
p.Hidden == p2.Hidden &&
p.Wrap == p2.Wrap &&
p.CidVersion == p2.CidVersion &&
p.HashFun == p2.HashFun &&
p.StreamChannels == p2.StreamChannels &&
p.NoCopy == p2.NoCopy &&
p.Format == p2.Format &&
p.NoPin == p2.NoPin
}

View file

@ -1,102 +0,0 @@
package api
import (
"net/url"
"testing"
)
func TestAddParams_FromQuery(t *testing.T) {
qStr := "layout=balanced&chunker=size-262144&name=test&raw-leaves=true&hidden=true&shard=true&replication-min=2&replication-max=4&shard-size=1"
q, err := url.ParseQuery(qStr)
if err != nil {
t.Fatal(err)
}
p, err := AddParamsFromQuery(q)
if err != nil {
t.Fatal(err)
}
if p.Layout != "balanced" ||
p.Chunker != "size-262144" ||
p.Name != "test" ||
!p.RawLeaves || !p.Hidden || !p.Shard ||
p.ReplicationFactorMin != 2 ||
p.ReplicationFactorMax != 4 ||
p.ShardSize != 1 {
t.Fatal("did not parse the query correctly")
}
}
func TestAddParams_FromQueryRawLeaves(t *testing.T) {
qStr := "cid-version=1"
q, err := url.ParseQuery(qStr)
if err != nil {
t.Fatal(err)
}
p, err := AddParamsFromQuery(q)
if err != nil {
t.Fatal(err)
}
if !p.RawLeaves {
t.Error("RawLeaves should be true with cid-version=1")
}
qStr = "cid-version=1&raw-leaves=false"
q, err = url.ParseQuery(qStr)
if err != nil {
t.Fatal(err)
}
p, err = AddParamsFromQuery(q)
if err != nil {
t.Fatal(err)
}
if p.RawLeaves {
t.Error("RawLeaves should be false when explicitally set")
}
qStr = "cid-version=0&raw-leaves=true"
q, err = url.ParseQuery(qStr)
if err != nil {
t.Fatal(err)
}
p, err = AddParamsFromQuery(q)
if err != nil {
t.Fatal(err)
}
if !p.RawLeaves {
t.Error("RawLeaves should be true when explicitly set")
}
}
func TestAddParams_ToQueryString(t *testing.T) {
p := DefaultAddParams()
p.ReplicationFactorMin = 3
p.ReplicationFactorMax = 6
p.Name = "something"
p.RawLeaves = true
p.ShardSize = 1020
qstr, err := p.ToQueryString()
if err != nil {
t.Fatal(err)
}
q, err := url.ParseQuery(qstr)
if err != nil {
t.Fatal(err)
}
p2, err := AddParamsFromQuery(q)
if err != nil {
t.Fatal(err)
}
if !p.Equals(p2) {
t.Error("generated and parsed params should be equal")
}
}

View file

@ -1,835 +0,0 @@
// Package common implements all the things that an IPFS Cluster API component
// must do, except the actual routes that it handles.
//
// This is meant for re-use when implementing actual REST APIs, saving most
// of the effort and getting much of the setup, including things like
// authentication, handled automatically.
//
// The API exposes the routes in two ways: the first is through a regular
// HTTP(s) listener. The second is by tunneling HTTP through a libp2p stream
// (thus getting an encrypted channel without the need to set up TLS). Both
// ways can be used at the same time, or disabled.
//
// This is used by rest and pinsvc packages.
package common
import (
"context"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"math/rand"
"net"
"net/http"
"net/url"
"strings"
"sync"
"time"
jwt "github.com/golang-jwt/jwt/v4"
types "github.com/ipfs-cluster/ipfs-cluster/api"
state "github.com/ipfs-cluster/ipfs-cluster/state"
logging "github.com/ipfs/go-log/v2"
gopath "github.com/ipfs/go-path"
libp2p "github.com/libp2p/go-libp2p"
host "github.com/libp2p/go-libp2p/core/host"
peer "github.com/libp2p/go-libp2p/core/peer"
rpc "github.com/libp2p/go-libp2p-gorpc"
gostream "github.com/libp2p/go-libp2p-gostream"
p2phttp "github.com/libp2p/go-libp2p-http"
noise "github.com/libp2p/go-libp2p/p2p/security/noise"
libp2ptls "github.com/libp2p/go-libp2p/p2p/security/tls"
manet "github.com/multiformats/go-multiaddr/net"
handlers "github.com/gorilla/handlers"
mux "github.com/gorilla/mux"
"github.com/rs/cors"
"go.opencensus.io/plugin/ochttp"
"go.opencensus.io/plugin/ochttp/propagation/tracecontext"
"go.opencensus.io/trace"
)
func init() {
rand.Seed(time.Now().UnixNano())
}
// StreamChannelSize is used to define buffer sizes for channels.
const StreamChannelSize = 1024
// Common errors
var (
// ErrNoEndpointsEnabled is returned when the API is created but
// no HTTPListenAddr, nor libp2p configuration fields, nor a libp2p
// Host are provided.
ErrNoEndpointsEnabled = errors.New("neither the libp2p nor the HTTP endpoints are enabled")
// ErrHTTPEndpointNotEnabled is returned when trying to perform
// operations that rely on the HTTPEndpoint but it is disabled.
ErrHTTPEndpointNotEnabled = errors.New("the HTTP endpoint is not enabled")
)
// SetStatusAutomatically can be passed to SendResponse(), so that it will
// figure out which http status to set by itself.
const SetStatusAutomatically = -1
// API implements a common API component that aims to provide
// a RESTful HTTP API for Cluster.
type API struct {
ctx context.Context
cancel func()
config *Config
rpcClient *rpc.Client
rpcReady chan struct{}
router *mux.Router
routes func(*rpc.Client) []Route
server *http.Server
host host.Host
httpListeners []net.Listener
libp2pListener net.Listener
shutdownLock sync.Mutex
shutdown bool
wg sync.WaitGroup
}
// Route defines a REST endpoint supported by this API.
type Route struct {
Name string
Method string
Pattern string
HandlerFunc http.HandlerFunc
}
type jwtToken struct {
Token string `json:"token"`
}
type logWriter struct {
logger *logging.ZapEventLogger
}
func (lw logWriter) Write(b []byte) (int, error) {
lw.logger.Info(string(b))
return len(b), nil
}
// NewAPI creates a new common API component with the given configuration.
func NewAPI(ctx context.Context, cfg *Config, routes func(*rpc.Client) []Route) (*API, error) {
return NewAPIWithHost(ctx, cfg, nil, routes)
}
// NewAPIWithHost creates a new common API component and enables
// the libp2p-http endpoint using the given Host, if not nil.
func NewAPIWithHost(ctx context.Context, cfg *Config, h host.Host, routes func(*rpc.Client) []Route) (*API, error) {
err := cfg.Validate()
if err != nil {
return nil, err
}
ctx, cancel := context.WithCancel(ctx)
api := &API{
ctx: ctx,
cancel: cancel,
config: cfg,
host: h,
routes: routes,
rpcReady: make(chan struct{}, 2),
}
// Our handler is a gorilla router wrapped with:
// - a custom strictSlashHandler that uses 307 redirects (#1415)
// - the cors handler,
// - the basic auth handler.
//
// Requests will need to have valid credentials first, except
// cors-preflight requests (OPTIONS). Then requests are handled by
// CORS and potentially need to comply with it. Then they may be
// redirected if the path ends with a "/". Finally they hit one of our
// routes and handlers.
router := mux.NewRouter()
handler := api.authHandler(
cors.New(*cfg.CorsOptions()).
Handler(
strictSlashHandler(router),
),
cfg.Logger,
)
if cfg.Tracing {
handler = &ochttp.Handler{
IsPublicEndpoint: true,
Propagation: &tracecontext.HTTPFormat{},
Handler: handler,
StartOptions: trace.StartOptions{SpanKind: trace.SpanKindServer},
FormatSpanName: func(req *http.Request) string { return req.Host + ":" + req.URL.Path + ":" + req.Method },
}
}
writer, err := cfg.LogWriter()
if err != nil {
cancel()
return nil, err
}
s := &http.Server{
ReadTimeout: cfg.ReadTimeout,
ReadHeaderTimeout: cfg.ReadHeaderTimeout,
WriteTimeout: cfg.WriteTimeout,
IdleTimeout: cfg.IdleTimeout,
Handler: handlers.LoggingHandler(writer, handler),
MaxHeaderBytes: cfg.MaxHeaderBytes,
}
// See: https://github.com/ipfs/go-ipfs/issues/5168
// See: https://github.com/ipfs-cluster/ipfs-cluster/issues/548
// on why this is re-enabled.
s.SetKeepAlivesEnabled(true)
s.MaxHeaderBytes = cfg.MaxHeaderBytes
api.server = s
api.router = router
// Set up api.httpListeners if enabled
err = api.setupHTTP()
if err != nil {
return nil, err
}
// Set up api.libp2pListeners if enabled
err = api.setupLibp2p()
if err != nil {
return nil, err
}
if len(api.httpListeners) == 0 && api.libp2pListener == nil {
return nil, ErrNoEndpointsEnabled
}
api.run(ctx)
return api, nil
}
func (api *API) setupHTTP() error {
if len(api.config.HTTPListenAddr) == 0 {
return nil
}
for _, listenMAddr := range api.config.HTTPListenAddr {
n, addr, err := manet.DialArgs(listenMAddr)
if err != nil {
return err
}
var l net.Listener
if api.config.TLS != nil {
l, err = tls.Listen(n, addr, api.config.TLS)
} else {
l, err = net.Listen(n, addr)
}
if err != nil {
return err
}
api.httpListeners = append(api.httpListeners, l)
}
return nil
}
func (api *API) setupLibp2p() error {
// Make new host. Override any provided existing one
// if we have config for a custom one.
if len(api.config.Libp2pListenAddr) > 0 {
// We use a new host context. We will call
// Close() on shutdown(). Avoids things like:
// https://github.com/ipfs-cluster/ipfs-cluster/issues/853
h, err := libp2p.New(
libp2p.Identity(api.config.PrivateKey),
libp2p.ListenAddrs(api.config.Libp2pListenAddr...),
libp2p.Security(noise.ID, noise.New),
libp2p.Security(libp2ptls.ID, libp2ptls.New),
)
if err != nil {
return err
}
api.host = h
}
if api.host == nil {
return nil
}
l, err := gostream.Listen(api.host, p2phttp.DefaultP2PProtocol)
if err != nil {
return err
}
api.libp2pListener = l
return nil
}
func (api *API) addRoutes() {
for _, route := range api.routes(api.rpcClient) {
api.router.
Methods(route.Method).
Path(route.Pattern).
Name(route.Name).
Handler(
ochttp.WithRouteTag(
http.HandlerFunc(route.HandlerFunc),
"/"+route.Name,
),
)
}
api.router.NotFoundHandler = ochttp.WithRouteTag(
http.HandlerFunc(api.notFoundHandler),
"/notfound",
)
}
// authHandler takes care of authentication using either basic auth or JWT bearer tokens.
func (api *API) authHandler(h http.Handler, lggr *logging.ZapEventLogger) http.Handler {
credentials := api.config.BasicAuthCredentials
// If no credentials are set, we do nothing.
if credentials == nil {
return h
}
wrap := func(w http.ResponseWriter, r *http.Request) {
// We let CORS preflight requests pass through the next
// handler.
if r.Method == http.MethodOptions {
h.ServeHTTP(w, r)
return
}
username, password, okBasic := r.BasicAuth()
tokenString, okToken := parseBearerToken(r.Header.Get("Authorization"))
switch {
case okBasic:
ok := verifyBasicAuth(credentials, username, password)
if !ok {
w.Header().Set("WWW-Authenticate", wwwAuthenticate("Basic", "Restricted IPFS Cluster API", "", ""))
api.SendResponse(w, http.StatusUnauthorized, errors.New("unauthorized: access denied"), nil)
return
}
case okToken:
_, err := verifyToken(credentials, tokenString)
if err != nil {
lggr.Debug(err)
w.Header().Set("WWW-Authenticate", wwwAuthenticate("Bearer", "Restricted IPFS Cluster API", "invalid_token", ""))
api.SendResponse(w, http.StatusUnauthorized, errors.New("unauthorized: invalid token"), nil)
return
}
default:
// No authentication provided, but needed
w.Header().Add("WWW-Authenticate", wwwAuthenticate("Bearer", "Restricted IPFS Cluster API", "", ""))
w.Header().Add("WWW-Authenticate", wwwAuthenticate("Basic", "Restricted IPFS Cluster API", "", ""))
api.SendResponse(w, http.StatusUnauthorized, errors.New("unauthorized: no auth provided"), nil)
return
}
// If we are here, authentication worked.
h.ServeHTTP(w, r)
}
return http.HandlerFunc(wrap)
}
func parseBearerToken(authHeader string) (string, bool) {
const prefix = "Bearer "
if len(authHeader) < len(prefix) || !strings.EqualFold(authHeader[:len(prefix)], prefix) {
return "", false
}
return authHeader[len(prefix):], true
}
func wwwAuthenticate(auth, realm, error, description string) string {
str := auth + ` realm="` + realm + `"`
if len(error) > 0 {
str += `, error="` + error + `"`
}
if len(description) > 0 {
str += `, error_description="` + description + `"`
}
return str
}
func verifyBasicAuth(credentials map[string]string, username, password string) bool {
if username == "" || password == "" {
return false
}
for u, p := range credentials {
if u == username && p == password {
return true
}
}
return false
}
// verifyToken verifies that a Bearer JWT token is valid.
func verifyToken(credentials map[string]string, tokenString string) (*jwt.Token, error) {
// The token should be signed with the basic auth credential password
// of the issuer, and should have valid standard claims otherwise.
token, err := jwt.ParseWithClaims(tokenString, &jwt.RegisteredClaims{}, func(token *jwt.Token) (interface{}, error) {
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
return nil, errors.New("unexpected token signing method (not HMAC)")
}
if claims, ok := token.Claims.(*jwt.RegisteredClaims); ok {
key, ok := credentials[claims.Issuer]
if !ok {
return nil, errors.New("issuer not found")
}
return []byte(key), nil
}
return nil, errors.New("no issuer set")
})
if err != nil {
return nil, err
}
if !token.Valid {
return nil, errors.New("invalid token")
}
return token, nil
}
// The Gorilla muxer StrictSlash option uses a 301 permanent redirect, which
// results in POST requests becoming GET requests in most clients. Thus we
// use our own middleware that performs a 307 redirect. See issue #1415 for
// more details.
func strictSlashHandler(h http.Handler) http.Handler {
wrap := func(w http.ResponseWriter, r *http.Request) {
path := r.URL.Path
if strings.HasSuffix(path, "/") {
u, _ := url.Parse(r.URL.String())
u.Path = u.Path[:len(u.Path)-1]
http.Redirect(w, r, u.String(), http.StatusTemporaryRedirect)
return
}
h.ServeHTTP(w, r)
}
return http.HandlerFunc(wrap)
}
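// Illustrative sketch (not part of the original file): a 307 redirect makes
// the client re-issue the POST to the path without the trailing slash,
// whereas a 301 would have turned it into a GET. Assumes an additional
// net/http/httptest import.
func exampleStrictSlash() {
	echo := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, r.Method) // prints "POST" once the redirect is followed
	})
	srv := httptest.NewServer(strictSlashHandler(echo))
	defer srv.Close()
	// The default client follows a 307 re-issuing the same method and body.
	resp, err := http.Post(srv.URL+"/foo/", "text/plain", nil)
	if err == nil {
		resp.Body.Close()
	}
}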
func (api *API) run(ctx context.Context) {
api.wg.Add(len(api.httpListeners))
for _, l := range api.httpListeners {
go func(l net.Listener) {
defer api.wg.Done()
api.runHTTPServer(ctx, l)
}(l)
}
if api.libp2pListener != nil {
api.wg.Add(1)
go func() {
defer api.wg.Done()
api.runLibp2pServer(ctx)
}()
}
}
// runs in goroutine from run()
func (api *API) runHTTPServer(ctx context.Context, l net.Listener) {
select {
case <-api.rpcReady:
case <-api.ctx.Done():
return
}
maddr, err := manet.FromNetAddr(l.Addr())
if err != nil {
api.config.Logger.Error(err)
}
var authInfo string
if api.config.BasicAuthCredentials != nil {
authInfo = " - authenticated"
}
api.config.Logger.Infof(strings.ToUpper(api.config.ConfigKey)+" (HTTP"+authInfo+"): %s", maddr)
err = api.server.Serve(l)
if err != nil && !strings.Contains(err.Error(), "closed network connection") {
api.config.Logger.Error(err)
}
}
// runs in goroutine from run()
func (api *API) runLibp2pServer(ctx context.Context) {
select {
case <-api.rpcReady:
case <-api.ctx.Done():
return
}
listenMsg := ""
for _, a := range api.host.Addrs() {
listenMsg += fmt.Sprintf(" %s/p2p/%s\n", a, api.host.ID().Pretty())
}
api.config.Logger.Infof(strings.ToUpper(api.config.ConfigKey)+" (libp2p-http): ENABLED. Listening on:\n%s\n", listenMsg)
err := api.server.Serve(api.libp2pListener)
if err != nil && !strings.Contains(err.Error(), "context canceled") {
api.config.Logger.Error(err)
}
}
// Shutdown stops any API listeners.
func (api *API) Shutdown(ctx context.Context) error {
_, span := trace.StartSpan(ctx, "api/Shutdown")
defer span.End()
api.shutdownLock.Lock()
defer api.shutdownLock.Unlock()
if api.shutdown {
api.config.Logger.Debug("already shutdown")
return nil
}
api.config.Logger.Info("stopping Cluster API")
api.cancel()
close(api.rpcReady)
// Cancel any outstanding ops
api.server.SetKeepAlivesEnabled(false)
for _, l := range api.httpListeners {
l.Close()
}
if api.libp2pListener != nil {
api.libp2pListener.Close()
}
api.wg.Wait()
// This means we created the host
if api.config.Libp2pListenAddr != nil {
api.host.Close()
}
api.shutdown = true
return nil
}
// SetClient makes the component ready to perform RPC
// requests.
func (api *API) SetClient(c *rpc.Client) {
api.rpcClient = c
api.addRoutes()
// One notification for http server and one for libp2p server.
api.rpcReady <- struct{}{}
api.rpcReady <- struct{}{}
}
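// Illustrative sketch (not part of the original file): the typical lifecycle
// of this component from an embedding API's point of view. "rpcClient" and
// "myRoutes" are hypothetical placeholders.
func exampleLifecycle(ctx context.Context, cfg *Config, rpcClient *rpc.Client, myRoutes func(*rpc.Client) []Route) error {
	capi, err := NewAPI(ctx, cfg, myRoutes)
	if err != nil {
		return err
	}
	// Routes are registered and listeners start serving only after the RPC
	// client is set.
	capi.SetClient(rpcClient)
	// ... serve for as long as needed ...
	return capi.Shutdown(ctx)
}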
func (api *API) notFoundHandler(w http.ResponseWriter, r *http.Request) {
api.SendResponse(w, http.StatusNotFound, errors.New("not found"), nil)
}
// Context returns the API context
func (api *API) Context() context.Context {
return api.ctx
}
// ParsePinPathOrFail parses a pin path and returns it or makes the request
// fail.
func (api *API) ParsePinPathOrFail(w http.ResponseWriter, r *http.Request) types.PinPath {
vars := mux.Vars(r)
urlpath := "/" + vars["keyType"] + "/" + strings.TrimSuffix(vars["path"], "/")
path, err := gopath.ParsePath(urlpath)
if err != nil {
api.SendResponse(w, http.StatusBadRequest, errors.New("error parsing path: "+err.Error()), nil)
return types.PinPath{}
}
pinPath := types.PinPath{Path: path.String()}
err = pinPath.PinOptions.FromQuery(r.URL.Query())
if err != nil {
api.SendResponse(w, http.StatusBadRequest, err, nil)
}
return pinPath
}
// ParseCidOrFail parses a Cid and returns it or makes the request fail.
func (api *API) ParseCidOrFail(w http.ResponseWriter, r *http.Request) types.Pin {
vars := mux.Vars(r)
hash := vars["hash"]
c, err := types.DecodeCid(hash)
if err != nil {
api.SendResponse(w, http.StatusBadRequest, errors.New("error decoding Cid: "+err.Error()), nil)
return types.Pin{}
}
opts := types.PinOptions{}
err = opts.FromQuery(r.URL.Query())
if err != nil {
api.SendResponse(w, http.StatusBadRequest, err, nil)
}
pin := types.PinWithOpts(c, opts)
pin.MaxDepth = -1 // For now, all pins are recursive
return pin
}
// ParsePidOrFail parses a PID and returns it or makes the request fail.
func (api *API) ParsePidOrFail(w http.ResponseWriter, r *http.Request) peer.ID {
vars := mux.Vars(r)
idStr := vars["peer"]
pid, err := peer.Decode(idStr)
if err != nil {
api.SendResponse(w, http.StatusBadRequest, errors.New("error decoding Peer ID: "+err.Error()), nil)
return ""
}
return pid
}
// GenerateTokenHandler is a handler to obtain a new JWT token
func (api *API) GenerateTokenHandler(w http.ResponseWriter, r *http.Request) {
if api.config.BasicAuthCredentials == nil {
api.SendResponse(w, http.StatusUnauthorized, errors.New("unauthorized"), nil)
return
}
var issuer string
// We do not verify as we assume it is already done!
user, _, okBasic := r.BasicAuth()
tokenString, okToken := parseBearerToken(r.Header.Get("Authorization"))
if okBasic {
issuer = user
} else if okToken {
token, err := verifyToken(api.config.BasicAuthCredentials, tokenString)
if err != nil { // I really hope not because it should be verified
api.config.Logger.Error("verify token failed in GetTokenHandler!")
api.SendResponse(w, http.StatusUnauthorized, errors.New("unauthorized"), nil)
return
}
if claims, ok := token.Claims.(*jwt.RegisteredClaims); ok {
issuer = claims.Issuer
} else {
api.SendResponse(w, http.StatusUnauthorized, errors.New("unauthorized"), nil)
return
}
} else { // no issuer
api.SendResponse(w, http.StatusUnauthorized, errors.New("unauthorized"), nil)
return
}
pass, okPass := api.config.BasicAuthCredentials[issuer]
if !okPass { // another place that should never be reached
api.SendResponse(w, http.StatusUnauthorized, errors.New("unauthorized"), nil)
return
}
ss, err := generateSignedTokenString(issuer, pass)
if err != nil {
api.SendResponse(w, SetStatusAutomatically, err, nil)
return
}
tokenObj := jwtToken{Token: ss}
api.SendResponse(w, SetStatusAutomatically, nil, tokenObj)
}
func generateSignedTokenString(issuer, pass string) (string, error) {
key := []byte(pass)
claims := jwt.RegisteredClaims{
Issuer: issuer,
}
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
return token.SignedString(key)
}
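// Illustrative sketch (not part of the original file): a token signed with
// an issuer's basic-auth password verifies against the same credentials map
// that authHandler and GenerateTokenHandler use. The credentials are made up.
func exampleTokenRoundTrip() error {
	creds := map[string]string{"alice": "s3cret"}
	ss, err := generateSignedTokenString("alice", "s3cret")
	if err != nil {
		return err
	}
	_, err = verifyToken(creds, ss)
	return err // nil: the token is valid and the issuer is known
}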
// SendResponse wraps all the logic for writing the response to a request:
// * Write configured headers
// * Write application/json content type
// * Write status: determined automatically if given "SetStatusAutomatically"
// * Write an error if there is one, or write the response otherwise
func (api *API) SendResponse(
w http.ResponseWriter,
status int,
err error,
resp interface{},
) {
api.SetHeaders(w)
enc := json.NewEncoder(w)
// Send an error
if err != nil {
if status == SetStatusAutomatically || status < 400 {
if err.Error() == state.ErrNotFound.Error() {
status = http.StatusNotFound
} else {
status = http.StatusInternalServerError
}
}
w.WriteHeader(status)
errorResp := api.config.APIErrorFunc(err, status)
api.config.Logger.Errorf("sending error response: %d: %s", status, err.Error())
if err := enc.Encode(errorResp); err != nil {
api.config.Logger.Error(err)
}
return
}
// Send a body
if resp != nil {
if status == SetStatusAutomatically {
status = http.StatusOK
}
w.WriteHeader(status)
if err = enc.Encode(resp); err != nil {
api.config.Logger.Error(err)
}
return
}
// Empty response
if status == SetStatusAutomatically {
status = http.StatusNoContent
}
w.WriteHeader(status)
}
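// Illustrative sketch (not part of the original file): a handler that defers
// status selection to SendResponse. The route and payload are hypothetical.
func (api *API) exampleVersionHandler(w http.ResponseWriter, r *http.Request) {
	// nil error + non-nil body -> 200; nil error + nil body -> 204;
	// a state.ErrNotFound error -> 404; any other error -> 500.
	api.SendResponse(w, SetStatusAutomatically, nil, map[string]string{"version": "dev"})
}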
// StreamIterator is a function that returns the next item. It is used in
// StreamResponse.
type StreamIterator func() (interface{}, bool, error)
// StreamResponse reads from an iterator and sends the response.
func (api *API) StreamResponse(w http.ResponseWriter, next StreamIterator, errCh chan error) {
api.SetHeaders(w)
enc := json.NewEncoder(w)
flusher, flush := w.(http.Flusher)
w.Header().Set("Trailer", "X-Stream-Error")
total := 0
var err error
var ok bool
var item interface{}
for {
item, ok, err = next()
if total == 0 {
if err != nil {
st := http.StatusInternalServerError
w.WriteHeader(st)
errorResp := api.config.APIErrorFunc(err, st)
api.config.Logger.Errorf("sending error response: %d: %s", st, err.Error())
if err := enc.Encode(errorResp); err != nil {
api.config.Logger.Error(err)
}
return
}
if !ok { // but no error.
w.WriteHeader(http.StatusNoContent)
return
}
w.WriteHeader(http.StatusOK)
}
if err != nil {
break
}
// finished cleanly
if !ok {
break
}
// we have an item
total++
err = enc.Encode(item)
if err != nil {
api.config.Logger.Error(err)
break
}
if flush {
flusher.Flush()
}
}
if err != nil {
w.Header().Set("X-Stream-Error", err.Error())
} else {
// Due to some Javascript-browser-land stuff, we set the header
// even when there is no error.
w.Header().Set("X-Stream-Error", "")
}
// check for function errors
for funcErr := range errCh {
if funcErr != nil {
w.Header().Add("X-Stream-Error", funcErr.Error())
}
}
}
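// Illustrative sketch (not part of the original file): streaming a fixed
// slice through StreamResponse. Note that errCh must eventually be closed,
// since StreamResponse ranges over it before returning.
func (api *API) exampleStreamHandler(w http.ResponseWriter, r *http.Request) {
	items := []string{"a", "b", "c"}
	i := 0
	next := func() (interface{}, bool, error) {
		if i >= len(items) {
			return nil, false, nil // done, no error
		}
		item := items[i]
		i++
		return item, true, nil
	}
	errCh := make(chan error)
	close(errCh) // no asynchronous errors in this sketch
	api.StreamResponse(w, next, errCh)
}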
// SetHeaders sets all the headers that are common to all responses
// from this API. Called automatically from SendResponse().
func (api *API) SetHeaders(w http.ResponseWriter) {
for header, values := range api.config.Headers {
for _, val := range values {
w.Header().Add(header, val)
}
}
w.Header().Add("Content-Type", "application/json")
}
// The functions below are mostly used in tests.
// HTTPAddresses returns the HTTP(s) listening addresses
// in host:port format. Useful when configured to start
// on a random port (0). Returns an error when the HTTP endpoint
// is not enabled.
func (api *API) HTTPAddresses() ([]string, error) {
if len(api.httpListeners) == 0 {
return nil, ErrHTTPEndpointNotEnabled
}
var addrs []string
for _, l := range api.httpListeners {
addrs = append(addrs, l.Addr().String())
}
return addrs, nil
}
// Host returns the libp2p Host used by the API, if any.
// The result is either the host provided during initialization,
// a default Host created with options from the configuration object,
// or nil.
func (api *API) Host() host.Host {
return api.host
}
// Headers returns the configured Headers.
// Useful for testing.
func (api *API) Headers() map[string][]string {
return api.config.Headers
}
// SetKeepAlivesEnabled controls the HTTP server Keep Alive settings. Useful
// for testing.
func (api *API) SetKeepAlivesEnabled(b bool) {
api.server.SetKeepAlivesEnabled(b)
}

View file

@ -1,644 +0,0 @@
package common
import (
"context"
"fmt"
"io"
"math/rand"
"net/http"
"net/http/httputil"
"os"
"path/filepath"
"testing"
"time"
"github.com/ipfs-cluster/ipfs-cluster/api"
"github.com/ipfs-cluster/ipfs-cluster/api/common/test"
rpctest "github.com/ipfs-cluster/ipfs-cluster/test"
libp2p "github.com/libp2p/go-libp2p"
rpc "github.com/libp2p/go-libp2p-gorpc"
ma "github.com/multiformats/go-multiaddr"
)
const (
SSLCertFile = "test/server.crt"
SSLKeyFile = "test/server.key"
validUserName = "validUserName"
validUserPassword = "validUserPassword"
adminUserName = "adminUserName"
adminUserPassword = "adminUserPassword"
invalidUserName = "invalidUserName"
invalidUserPassword = "invalidUserPassword"
)
var (
validToken, _ = generateSignedTokenString(validUserName, validUserPassword)
invalidToken, _ = generateSignedTokenString(invalidUserName, invalidUserPassword)
)
func routes(c *rpc.Client) []Route {
return []Route{
{
"Test",
"GET",
"/test",
func(w http.ResponseWriter, r *http.Request) {
w.Header().Add("Content-Type", "application/json")
w.Write([]byte(`{ "thisis": "atest" }`))
},
},
}
}
func testAPIwithConfig(t *testing.T, cfg *Config, name string) *API {
ctx := context.Background()
apiMAddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0")
h, err := libp2p.New(libp2p.ListenAddrs(apiMAddr))
if err != nil {
t.Fatal(err)
}
cfg.HTTPListenAddr = []ma.Multiaddr{apiMAddr}
rest, err := NewAPIWithHost(ctx, cfg, h, routes)
if err != nil {
t.Fatalf("should be able to create a new %s API: %s", name, err)
}
// No keep alive for tests
rest.server.SetKeepAlivesEnabled(false)
rest.SetClient(rpctest.NewMockRPCClient(t))
return rest
}
func testAPI(t *testing.T) *API {
cfg := newDefaultTestConfig(t)
cfg.CORSAllowedOrigins = []string{test.ClientOrigin}
cfg.CORSAllowedMethods = []string{"GET", "POST", "DELETE"}
//cfg.CORSAllowedHeaders = []string{"Content-Type"}
cfg.CORSMaxAge = 10 * time.Minute
return testAPIwithConfig(t, cfg, "basic")
}
func testHTTPSAPI(t *testing.T) *API {
cfg := newDefaultTestConfig(t)
cfg.PathSSLCertFile = SSLCertFile
cfg.PathSSLKeyFile = SSLKeyFile
var err error
cfg.TLS, err = newTLSConfig(cfg.PathSSLCertFile, cfg.PathSSLKeyFile)
if err != nil {
t.Fatal(err)
}
return testAPIwithConfig(t, cfg, "https")
}
func testAPIwithBasicAuth(t *testing.T) *API {
cfg := newDefaultTestConfig(t)
cfg.BasicAuthCredentials = map[string]string{
validUserName: validUserPassword,
adminUserName: adminUserPassword,
}
return testAPIwithConfig(t, cfg, "Basic Authentication")
}
func TestAPIShutdown(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
err := rest.Shutdown(ctx)
if err != nil {
t.Error("should shutdown cleanly: ", err)
}
// test shutting down twice
rest.Shutdown(ctx)
}
func TestHTTPSTestEndpoint(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
httpsrest := testHTTPSAPI(t)
defer rest.Shutdown(ctx)
defer httpsrest.Shutdown(ctx)
tf := func(t *testing.T, url test.URLFunc) {
r := make(map[string]string)
test.MakeGet(t, rest, url(rest)+"/test", &r)
if r["thisis"] != "atest" {
t.Error("expected correct body")
}
}
httpstf := func(t *testing.T, url test.URLFunc) {
r := make(map[string]string)
test.MakeGet(t, httpsrest, url(httpsrest)+"/test", &r)
if r["thisis"] != "atest" {
t.Error("expected correct body")
}
}
test.BothEndpoints(t, tf)
test.HTTPSEndPoint(t, httpstf)
}
func TestAPILogging(t *testing.T) {
ctx := context.Background()
cfg := newDefaultTestConfig(t)
logFile, err := filepath.Abs("http.log")
if err != nil {
t.Fatal(err)
}
cfg.HTTPLogFile = logFile
rest := testAPIwithConfig(t, cfg, "log_enabled")
defer os.Remove(cfg.HTTPLogFile)
info, err := os.Stat(cfg.HTTPLogFile)
if err != nil {
t.Fatal(err)
}
if info.Size() > 0 {
t.Errorf("expected empty log file")
}
id := api.ID{}
test.MakeGet(t, rest, test.HTTPURL(rest)+"/test", &id)
info, err = os.Stat(cfg.HTTPLogFile)
if err != nil {
t.Fatal(err)
}
size1 := info.Size()
if size1 == 0 {
t.Error("did not expect an empty log file")
}
// Restart API and make sure that logs are being appended
rest.Shutdown(ctx)
rest = testAPIwithConfig(t, cfg, "log_enabled")
defer rest.Shutdown(ctx)
test.MakeGet(t, rest, test.HTTPURL(rest)+"/id", &id)
info, err = os.Stat(cfg.HTTPLogFile)
if err != nil {
t.Fatal(err)
}
size2 := info.Size()
if size2 == 0 {
t.Error("did not expect an empty log file")
}
if !(size2 > size1) {
t.Error("logs were not appended")
}
}
func TestNotFoundHandler(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url test.URLFunc) {
bytes := make([]byte, 10)
for i := 0; i < 10; i++ {
bytes[i] = byte(65 + rand.Intn(25)) // A = 65, Z = 65+25
}
var errResp api.Error
test.MakePost(t, rest, url(rest)+"/"+string(bytes), []byte{}, &errResp)
if errResp.Code != 404 {
t.Errorf("expected error not found: %+v", errResp)
}
var errResp1 api.Error
test.MakeGet(t, rest, url(rest)+"/"+string(bytes), &errResp1)
if errResp1.Code != 404 {
t.Errorf("expected error not found: %+v", errResp)
}
}
test.BothEndpoints(t, tf)
}
func TestCORS(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
defer rest.Shutdown(ctx)
type testcase struct {
method string
path string
}
tf := func(t *testing.T, url test.URLFunc) {
reqHeaders := make(http.Header)
reqHeaders.Set("Origin", "myorigin")
reqHeaders.Set("Access-Control-Request-Headers", "Content-Type")
for _, tc := range []testcase{
{"GET", "/test"},
// testcase{},
} {
reqHeaders.Set("Access-Control-Request-Method", tc.method)
headers := test.MakeOptions(t, rest, url(rest)+tc.path, reqHeaders)
aorigin := headers.Get("Access-Control-Allow-Origin")
amethods := headers.Get("Access-Control-Allow-Methods")
aheaders := headers.Get("Access-Control-Allow-Headers")
acreds := headers.Get("Access-Control-Allow-Credentials")
maxage := headers.Get("Access-Control-Max-Age")
if aorigin != "myorigin" {
t.Error("Bad ACA-Origin:", aorigin)
}
if amethods != tc.method {
t.Error("Bad ACA-Methods:", amethods)
}
if aheaders != "Content-Type" {
t.Error("Bad ACA-Headers:", aheaders)
}
if acreds != "true" {
t.Error("Bad ACA-Credentials:", acreds)
}
if maxage != "600" {
t.Error("Bad AC-Max-Age:", maxage)
}
}
}
test.BothEndpoints(t, tf)
}
type responseChecker func(*http.Response) error
type requestShaper func(*http.Request) error
type httpTestcase struct {
method string
path string
header http.Header
body io.ReadCloser
shaper requestShaper
checker responseChecker
}
func httpStatusCodeChecker(resp *http.Response, expectedStatus int) error {
if resp.StatusCode == expectedStatus {
return nil
}
return fmt.Errorf("unexpected HTTP status code: %d", resp.StatusCode)
}
func assertHTTPStatusIsUnauthoriazed(resp *http.Response) error {
return httpStatusCodeChecker(resp, http.StatusUnauthorized)
}
func assertHTTPStatusIsTooLarge(resp *http.Response) error {
return httpStatusCodeChecker(resp, http.StatusRequestHeaderFieldsTooLarge)
}
func makeHTTPStatusNegatedAssert(checker responseChecker) responseChecker {
return func(resp *http.Response) error {
if checker(resp) == nil {
return fmt.Errorf("unexpected HTTP status code: %d", resp.StatusCode)
}
return nil
}
}
func (tc *httpTestcase) getTestFunction(api *API) test.Func {
return func(t *testing.T, prefixMaker test.URLFunc) {
h := test.MakeHost(t, api)
defer h.Close()
url := prefixMaker(api) + tc.path
c := test.HTTPClient(t, h, test.IsHTTPS(url))
req, err := http.NewRequest(tc.method, url, tc.body)
if err != nil {
t.Fatal("Failed to assemble a HTTP request: ", err)
}
if tc.header != nil {
req.Header = tc.header
}
if tc.shaper != nil {
err := tc.shaper(req)
if err != nil {
t.Fatal("Failed to shape a HTTP request: ", err)
}
}
resp, err := c.Do(req)
if err != nil {
t.Fatal("Failed to make a HTTP request: ", err)
}
if tc.checker != nil {
if err := tc.checker(resp); err != nil {
r, e := httputil.DumpRequest(req, true)
if e != nil {
t.Errorf("Assertion failed with: %q", err)
} else {
t.Errorf("Assertion failed with: %q on request: \n%.100s", err, r)
}
}
}
}
}
func makeBasicAuthRequestShaper(username, password string) requestShaper {
return func(req *http.Request) error {
req.SetBasicAuth(username, password)
return nil
}
}
func makeTokenAuthRequestShaper(token string) requestShaper {
return func(req *http.Request) error {
req.Header.Set("Authorization", "Bearer "+token)
return nil
}
}
func makeLongHeaderShaper(size int) requestShaper {
return func(req *http.Request) error {
for sz := size; sz > 0; sz -= 8 {
req.Header.Add("Foo", "bar")
}
return nil
}
}
func TestBasicAuth(t *testing.T) {
ctx := context.Background()
rest := testAPIwithBasicAuth(t)
defer rest.Shutdown(ctx)
for _, tc := range []httpTestcase{
{},
{
method: "",
path: "",
checker: assertHTTPStatusIsUnauthoriazed,
},
{
method: "GET",
path: "",
checker: assertHTTPStatusIsUnauthoriazed,
},
{
method: "GET",
path: "/",
checker: assertHTTPStatusIsUnauthoriazed,
},
{
method: "GET",
path: "/foo",
checker: assertHTTPStatusIsUnauthoriazed,
},
{
method: "POST",
path: "/foo",
checker: assertHTTPStatusIsUnauthoriazed,
},
{
method: "DELETE",
path: "/foo",
checker: assertHTTPStatusIsUnauthoriazed,
},
{
method: "HEAD",
path: "/foo",
checker: assertHTTPStatusIsUnauthoriazed,
},
{
method: "OPTIONS", // Always allowed for CORS
path: "/foo",
checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsUnauthoriazed),
},
{
method: "PUT",
path: "/foo",
checker: assertHTTPStatusIsUnauthoriazed,
},
{
method: "TRACE",
path: "/foo",
checker: assertHTTPStatusIsUnauthoriazed,
},
{
method: "CONNECT",
path: "/foo",
checker: assertHTTPStatusIsUnauthoriazed,
},
{
method: "BAR",
path: "/foo",
checker: assertHTTPStatusIsUnauthoriazed,
},
{
method: "GET",
path: "/foo",
shaper: makeBasicAuthRequestShaper(invalidUserName, invalidUserPassword),
checker: assertHTTPStatusIsUnauthoriazed,
},
{
method: "GET",
path: "/foo",
shaper: makeBasicAuthRequestShaper(validUserName, invalidUserPassword),
checker: assertHTTPStatusIsUnauthoriazed,
},
{
method: "GET",
path: "/foo",
shaper: makeBasicAuthRequestShaper(invalidUserName, validUserPassword),
checker: assertHTTPStatusIsUnauthoriazed,
},
{
method: "GET",
path: "/foo",
shaper: makeBasicAuthRequestShaper(adminUserName, validUserPassword),
checker: assertHTTPStatusIsUnauthoriazed,
},
{
method: "GET",
path: "/foo",
shaper: makeBasicAuthRequestShaper(validUserName, validUserPassword),
checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsUnauthoriazed),
},
{
method: "POST",
path: "/foo",
shaper: makeBasicAuthRequestShaper(validUserName, validUserPassword),
checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsUnauthoriazed),
},
{
method: "DELETE",
path: "/foo",
shaper: makeBasicAuthRequestShaper(validUserName, validUserPassword),
checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsUnauthoriazed),
},
{
method: "BAR",
path: "/foo",
shaper: makeBasicAuthRequestShaper(validUserName, validUserPassword),
checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsUnauthoriazed),
},
{
method: "GET",
path: "/test",
shaper: makeBasicAuthRequestShaper(validUserName, validUserPassword),
checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsUnauthoriazed),
},
} {
test.BothEndpoints(t, tc.getTestFunction(rest))
}
}
func TestTokenAuth(t *testing.T) {
ctx := context.Background()
rest := testAPIwithBasicAuth(t)
defer rest.Shutdown(ctx)
for _, tc := range []httpTestcase{
{},
{
method: "",
path: "",
checker: assertHTTPStatusIsUnauthoriazed,
},
{
method: "GET",
path: "",
checker: assertHTTPStatusIsUnauthoriazed,
},
{
method: "GET",
path: "/",
checker: assertHTTPStatusIsUnauthoriazed,
},
{
method: "GET",
path: "/foo",
checker: assertHTTPStatusIsUnauthoriazed,
},
{
method: "POST",
path: "/foo",
checker: assertHTTPStatusIsUnauthoriazed,
},
{
method: "DELETE",
path: "/foo",
checker: assertHTTPStatusIsUnauthoriazed,
},
{
method: "HEAD",
path: "/foo",
checker: assertHTTPStatusIsUnauthoriazed,
},
{
method: "OPTIONS", // Always allowed for CORS
path: "/foo",
checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsUnauthoriazed),
},
{
method: "PUT",
path: "/foo",
checker: assertHTTPStatusIsUnauthoriazed,
},
{
method: "TRACE",
path: "/foo",
checker: assertHTTPStatusIsUnauthoriazed,
},
{
method: "CONNECT",
path: "/foo",
checker: assertHTTPStatusIsUnauthoriazed,
},
{
method: "BAR",
path: "/foo",
checker: assertHTTPStatusIsUnauthoriazed,
},
{
method: "GET",
path: "/foo",
shaper: makeTokenAuthRequestShaper(invalidToken),
checker: assertHTTPStatusIsUnauthoriazed,
},
{
method: "GET",
path: "/foo",
shaper: makeTokenAuthRequestShaper(invalidToken),
checker: assertHTTPStatusIsUnauthoriazed,
},
{
method: "GET",
path: "/foo",
shaper: makeTokenAuthRequestShaper(validToken),
checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsUnauthoriazed),
},
{
method: "POST",
path: "/foo",
shaper: makeTokenAuthRequestShaper(validToken),
checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsUnauthoriazed),
},
{
method: "DELETE",
path: "/foo",
shaper: makeTokenAuthRequestShaper(validToken),
checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsUnauthoriazed),
},
{
method: "BAR",
path: "/foo",
shaper: makeTokenAuthRequestShaper(validToken),
checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsUnauthoriazed),
},
{
method: "GET",
path: "/test",
shaper: makeTokenAuthRequestShaper(validToken),
checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsUnauthoriazed),
},
} {
test.BothEndpoints(t, tc.getTestFunction(rest))
}
}
func TestLimitMaxHeaderSize(t *testing.T) {
maxHeaderBytes := 4 * DefaultMaxHeaderBytes
cfg := newTestConfig()
cfg.MaxHeaderBytes = maxHeaderBytes
ctx := context.Background()
rest := testAPIwithConfig(t, cfg, "http with maxHeaderBytes")
defer rest.Shutdown(ctx)
for _, tc := range []httpTestcase{
{
method: "GET",
path: "/foo",
shaper: makeLongHeaderShaper(maxHeaderBytes * 2),
checker: assertHTTPStatusIsTooLarge,
},
{
method: "GET",
path: "/foo",
shaper: makeLongHeaderShaper(maxHeaderBytes / 2),
checker: makeHTTPStatusNegatedAssert(assertHTTPStatusIsTooLarge),
},
} {
test.BothEndpoints(t, tc.getTestFunction(rest))
}
}

View file

@ -1,480 +0,0 @@
package common
import (
"crypto/tls"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"time"
logging "github.com/ipfs/go-log/v2"
crypto "github.com/libp2p/go-libp2p/core/crypto"
peer "github.com/libp2p/go-libp2p/core/peer"
ma "github.com/multiformats/go-multiaddr"
"github.com/kelseyhightower/envconfig"
"github.com/rs/cors"
"github.com/ipfs-cluster/ipfs-cluster/config"
)
const minMaxHeaderBytes = 4096
const defaultMaxHeaderBytes = minMaxHeaderBytes
// Config provides common API configuration values and allows customizing its
// behavior. It implements most of the config.ComponentConfig interface
// (except the Default() and ConfigKey() methods). Config should be embedded
// in a Config object that implements the missing methods and sets the
// meta options.
type Config struct {
config.Saver
// These are meta-options and should be set by actual Config
// implementations as early as possible.
DefaultFunc func(*Config) error
ConfigKey string
EnvConfigKey string
Logger *logging.ZapEventLogger
RequestLogger *logging.ZapEventLogger
APIErrorFunc func(err error, status int) error
// Listen address for the HTTP REST API endpoint.
HTTPListenAddr []ma.Multiaddr
// TLS configuration for the HTTP listener
TLS *tls.Config
// PathSSLCertFile is a path to a certificate file used to secure the
// HTTP API endpoint. We track it so we can write it in the JSON.
PathSSLCertFile string
// PathSSLKeyFile is a path to the private key corresponding to the
// certificate in PathSSLCertFile. We track it so we can write it in the JSON.
PathSSLKeyFile string
// Maximum duration before timing out reading a full request
ReadTimeout time.Duration
// Maximum duration before timing out reading the headers of a request
ReadHeaderTimeout time.Duration
// Maximum duration before timing out write of the response
WriteTimeout time.Duration
// Server-side amount of time a Keep-Alive connection will be
// kept idle before being reused
IdleTimeout time.Duration
// Maximum cumulative size of HTTP request headers in bytes
// accepted by the server
MaxHeaderBytes int
// Listen address for the Libp2p REST API endpoint.
Libp2pListenAddr []ma.Multiaddr
// ID and PrivateKey are used to create a libp2p host if we
// want the API component to do it (not by default).
ID peer.ID
PrivateKey crypto.PrivKey
// BasicAuthCredentials is a map of username-password pairs
// which are authorized to use Basic Authentication
BasicAuthCredentials map[string]string
// HTTPLogFile is the path of the file where HTTP API logs are saved. If
// this path is empty, HTTP logs are sent to standard output. This path
// should either be absolute or relative to cluster base directory. Its
// default value is empty.
HTTPLogFile string
// Headers provides customization for the headers returned
// by the API on existing routes.
Headers map[string][]string
// CORS header management
CORSAllowedOrigins []string
CORSAllowedMethods []string
CORSAllowedHeaders []string
CORSExposedHeaders []string
CORSAllowCredentials bool
CORSMaxAge time.Duration
// Tracing flag used to skip tracing specific paths when not enabled.
Tracing bool
}
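// Illustrative sketch (not part of the original file): a concrete component
// config embeds Config and sets the meta-options early, as the type comment
// above describes. All names here are hypothetical.
type exampleRestConfig struct {
	Config
}

func newExampleRestConfig() *exampleRestConfig {
	cfg := &exampleRestConfig{}
	cfg.ConfigKey = "restapi"
	cfg.EnvConfigKey = "cluster_restapi"
	cfg.Logger = logging.Logger("restapi")
	cfg.RequestLogger = logging.Logger("restapilog")
	cfg.DefaultFunc = func(c *Config) error { return nil } // no-op defaults for the sketch
	return cfg
}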
type jsonConfig struct {
HTTPListenMultiaddress config.Strings `json:"http_listen_multiaddress"`
SSLCertFile string `json:"ssl_cert_file,omitempty"`
SSLKeyFile string `json:"ssl_key_file,omitempty"`
ReadTimeout string `json:"read_timeout"`
ReadHeaderTimeout string `json:"read_header_timeout"`
WriteTimeout string `json:"write_timeout"`
IdleTimeout string `json:"idle_timeout"`
MaxHeaderBytes int `json:"max_header_bytes"`
Libp2pListenMultiaddress config.Strings `json:"libp2p_listen_multiaddress,omitempty"`
ID string `json:"id,omitempty"`
PrivateKey string `json:"private_key,omitempty" hidden:"true"`
BasicAuthCredentials map[string]string `json:"basic_auth_credentials" hidden:"true"`
HTTPLogFile string `json:"http_log_file"`
Headers map[string][]string `json:"headers"`
CORSAllowedOrigins []string `json:"cors_allowed_origins"`
CORSAllowedMethods []string `json:"cors_allowed_methods"`
CORSAllowedHeaders []string `json:"cors_allowed_headers"`
CORSExposedHeaders []string `json:"cors_exposed_headers"`
CORSAllowCredentials bool `json:"cors_allow_credentials"`
CORSMaxAge string `json:"cors_max_age"`
}
// GetHTTPLogPath gets the full path of the file where HTTP logs should
// be saved.
func (cfg *Config) GetHTTPLogPath() string {
if filepath.IsAbs(cfg.HTTPLogFile) {
return cfg.HTTPLogFile
}
if cfg.BaseDir == "" {
return ""
}
return filepath.Join(cfg.BaseDir, cfg.HTTPLogFile)
}
// ApplyEnvVars fills in any Config fields found as environment variables.
func (cfg *Config) ApplyEnvVars() error {
jcfg, err := cfg.toJSONConfig()
if err != nil {
return err
}
err = envconfig.Process(cfg.EnvConfigKey, jcfg)
if err != nil {
return err
}
return cfg.applyJSONConfig(jcfg)
}
// Validate makes sure that all fields in this Config have
// working values, at least in appearance.
func (cfg *Config) Validate() error {
if cfg.Logger == nil || cfg.RequestLogger == nil {
return errors.New("config loggers not set")
}
switch {
case cfg.ReadTimeout < 0:
return errors.New(cfg.ConfigKey + ".read_timeout is invalid")
case cfg.ReadHeaderTimeout < 0:
return errors.New(cfg.ConfigKey + ".read_header_timeout is invalid")
case cfg.WriteTimeout < 0:
return errors.New(cfg.ConfigKey + ".write_timeout is invalid")
case cfg.IdleTimeout < 0:
return errors.New(cfg.ConfigKey + ".idle_timeout invalid")
case cfg.MaxHeaderBytes < minMaxHeaderBytes:
return fmt.Errorf(cfg.ConfigKey+".max_header_bytes must not be less than %d", minMaxHeaderBytes)
case cfg.BasicAuthCredentials != nil && len(cfg.BasicAuthCredentials) == 0:
return errors.New(cfg.ConfigKey + ".basic_auth_creds should be null or have at least one entry")
case (cfg.PathSSLCertFile != "" || cfg.PathSSLKeyFile != "") && cfg.TLS == nil:
return errors.New(cfg.ConfigKey + ": missing TLS configuration")
case (cfg.CORSMaxAge < 0):
return errors.New(cfg.ConfigKey + ".cors_max_age is invalid")
}
return cfg.validateLibp2p()
}
func (cfg *Config) validateLibp2p() error {
if cfg.ID != "" || cfg.PrivateKey != nil || len(cfg.Libp2pListenAddr) > 0 {
// if one is set, all should be
if cfg.ID == "" || cfg.PrivateKey == nil || len(cfg.Libp2pListenAddr) == 0 {
return errors.New("all ID, private_key and libp2p_listen_multiaddress should be set")
}
if !cfg.ID.MatchesPrivateKey(cfg.PrivateKey) {
return errors.New(cfg.ConfigKey + ".ID does not match private_key")
}
}
return nil
}
// LoadJSON parses a raw JSON byte slice created by ToJSON() and sets the
// configuration fields accordingly.
func (cfg *Config) LoadJSON(raw []byte) error {
jcfg := &jsonConfig{}
err := json.Unmarshal(raw, jcfg)
if err != nil {
cfg.Logger.Error(cfg.ConfigKey + ": error unmarshaling config")
return err
}
if cfg.DefaultFunc == nil {
return errors.New("default config generation not set. This is a bug")
}
cfg.DefaultFunc(cfg)
return cfg.applyJSONConfig(jcfg)
}
func (cfg *Config) applyJSONConfig(jcfg *jsonConfig) error {
err := cfg.loadHTTPOptions(jcfg)
if err != nil {
return err
}
err = cfg.loadLibp2pOptions(jcfg)
if err != nil {
return err
}
// Other options
cfg.BasicAuthCredentials = jcfg.BasicAuthCredentials
cfg.HTTPLogFile = jcfg.HTTPLogFile
cfg.Headers = jcfg.Headers
return cfg.Validate()
}
func (cfg *Config) loadHTTPOptions(jcfg *jsonConfig) error {
if addresses := jcfg.HTTPListenMultiaddress; len(addresses) > 0 {
cfg.HTTPListenAddr = make([]ma.Multiaddr, 0, len(addresses))
for _, addr := range addresses {
httpAddr, err := ma.NewMultiaddr(addr)
if err != nil {
err = fmt.Errorf("error parsing %s.http_listen_multiaddress: %s", cfg.ConfigKey, err)
return err
}
cfg.HTTPListenAddr = append(cfg.HTTPListenAddr, httpAddr)
}
}
err := cfg.tlsOptions(jcfg)
if err != nil {
return err
}
if jcfg.MaxHeaderBytes == 0 {
cfg.MaxHeaderBytes = defaultMaxHeaderBytes
} else {
cfg.MaxHeaderBytes = jcfg.MaxHeaderBytes
}
// CORS
cfg.CORSAllowedOrigins = jcfg.CORSAllowedOrigins
cfg.CORSAllowedMethods = jcfg.CORSAllowedMethods
cfg.CORSAllowedHeaders = jcfg.CORSAllowedHeaders
cfg.CORSExposedHeaders = jcfg.CORSExposedHeaders
cfg.CORSAllowCredentials = jcfg.CORSAllowCredentials
if jcfg.CORSMaxAge == "" { // compatibility
jcfg.CORSMaxAge = "0s"
}
return config.ParseDurations(
cfg.ConfigKey,
&config.DurationOpt{Duration: jcfg.ReadTimeout, Dst: &cfg.ReadTimeout, Name: "read_timeout"},
&config.DurationOpt{Duration: jcfg.ReadHeaderTimeout, Dst: &cfg.ReadHeaderTimeout, Name: "read_header_timeout"},
&config.DurationOpt{Duration: jcfg.WriteTimeout, Dst: &cfg.WriteTimeout, Name: "write_timeout"},
&config.DurationOpt{Duration: jcfg.IdleTimeout, Dst: &cfg.IdleTimeout, Name: "idle_timeout"},
&config.DurationOpt{Duration: jcfg.CORSMaxAge, Dst: &cfg.CORSMaxAge, Name: "cors_max_age"},
)
}
func (cfg *Config) tlsOptions(jcfg *jsonConfig) error {
cert := jcfg.SSLCertFile
key := jcfg.SSLKeyFile
if cert+key == "" {
return nil
}
cfg.PathSSLCertFile = cert
cfg.PathSSLKeyFile = key
if !filepath.IsAbs(cert) {
cert = filepath.Join(cfg.BaseDir, cert)
}
if !filepath.IsAbs(key) {
key = filepath.Join(cfg.BaseDir, key)
}
cfg.Logger.Debug("baseDir: ", cfg.BaseDir)
cfg.Logger.Debug("cert path: ", cert)
cfg.Logger.Debug("key path: ", key)
tlsCfg, err := newTLSConfig(cert, key)
if err != nil {
return err
}
cfg.TLS = tlsCfg
return nil
}
func (cfg *Config) loadLibp2pOptions(jcfg *jsonConfig) error {
if addresses := jcfg.Libp2pListenMultiaddress; len(addresses) > 0 {
cfg.Libp2pListenAddr = make([]ma.Multiaddr, 0, len(addresses))
for _, addr := range addresses {
libp2pAddr, err := ma.NewMultiaddr(addr)
if err != nil {
err = fmt.Errorf("error parsing %s.libp2p_listen_multiaddress: %s", cfg.ConfigKey, err)
return err
}
cfg.Libp2pListenAddr = append(cfg.Libp2pListenAddr, libp2pAddr)
}
}
if jcfg.PrivateKey != "" {
pkb, err := base64.StdEncoding.DecodeString(jcfg.PrivateKey)
if err != nil {
return fmt.Errorf("error decoding %s.private_key: %s", cfg.ConfigKey, err)
}
pKey, err := crypto.UnmarshalPrivateKey(pkb)
if err != nil {
return fmt.Errorf("error parsing %s.private_key ID: %s", cfg.ConfigKey, err)
}
cfg.PrivateKey = pKey
}
if jcfg.ID != "" {
id, err := peer.Decode(jcfg.ID)
if err != nil {
return fmt.Errorf("error parsing %s.ID: %s", cfg.ConfigKey, err)
}
cfg.ID = id
}
return nil
}
// ToJSON produces a human-friendly JSON representation of the Config
// object.
func (cfg *Config) ToJSON() (raw []byte, err error) {
jcfg, err := cfg.toJSONConfig()
if err != nil {
return
}
raw, err = config.DefaultJSONMarshal(jcfg)
return
}
func (cfg *Config) toJSONConfig() (jcfg *jsonConfig, err error) {
// Multiaddress String() may panic
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("%s", r)
}
}()
httpAddresses := make([]string, 0, len(cfg.HTTPListenAddr))
for _, addr := range cfg.HTTPListenAddr {
httpAddresses = append(httpAddresses, addr.String())
}
libp2pAddresses := make([]string, 0, len(cfg.Libp2pListenAddr))
for _, addr := range cfg.Libp2pListenAddr {
libp2pAddresses = append(libp2pAddresses, addr.String())
}
jcfg = &jsonConfig{
HTTPListenMultiaddress: httpAddresses,
SSLCertFile: cfg.PathSSLCertFile,
SSLKeyFile: cfg.PathSSLKeyFile,
ReadTimeout: cfg.ReadTimeout.String(),
ReadHeaderTimeout: cfg.ReadHeaderTimeout.String(),
WriteTimeout: cfg.WriteTimeout.String(),
IdleTimeout: cfg.IdleTimeout.String(),
MaxHeaderBytes: cfg.MaxHeaderBytes,
BasicAuthCredentials: cfg.BasicAuthCredentials,
HTTPLogFile: cfg.HTTPLogFile,
Headers: cfg.Headers,
CORSAllowedOrigins: cfg.CORSAllowedOrigins,
CORSAllowedMethods: cfg.CORSAllowedMethods,
CORSAllowedHeaders: cfg.CORSAllowedHeaders,
CORSExposedHeaders: cfg.CORSExposedHeaders,
CORSAllowCredentials: cfg.CORSAllowCredentials,
CORSMaxAge: cfg.CORSMaxAge.String(),
}
if cfg.ID != "" {
jcfg.ID = cfg.ID.String()
}
if cfg.PrivateKey != nil {
pkeyBytes, err := crypto.MarshalPrivateKey(cfg.PrivateKey)
if err == nil {
pKey := base64.StdEncoding.EncodeToString(pkeyBytes)
jcfg.PrivateKey = pKey
}
}
if len(libp2pAddresses) > 0 {
jcfg.Libp2pListenMultiaddress = libp2pAddresses
}
return
}
// CorsOptions returns cors.Options set up from the configured values.
func (cfg *Config) CorsOptions() *cors.Options {
maxAgeSeconds := int(cfg.CORSMaxAge / time.Second)
return &cors.Options{
AllowedOrigins: cfg.CORSAllowedOrigins,
AllowedMethods: cfg.CORSAllowedMethods,
AllowedHeaders: cfg.CORSAllowedHeaders,
ExposedHeaders: cfg.CORSExposedHeaders,
AllowCredentials: cfg.CORSAllowCredentials,
MaxAge: maxAgeSeconds,
Debug: false,
}
}
// ToDisplayJSON returns JSON config as a string.
func (cfg *Config) ToDisplayJSON() ([]byte, error) {
jcfg, err := cfg.toJSONConfig()
if err != nil {
return nil, err
}
return config.DisplayJSON(jcfg)
}
// LogWriter returns a writer to write logs to. If a log path is configured,
// it creates a file. Otherwise, it uses the configured request logger.
func (cfg *Config) LogWriter() (io.Writer, error) {
if cfg.HTTPLogFile != "" {
f, err := os.OpenFile(cfg.GetHTTPLogPath(), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return nil, err
}
return f, nil
}
return logWriter{
logger: cfg.RequestLogger,
}, nil
}
func newTLSConfig(certFile, keyFile string) (*tls.Config, error) {
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
return nil, errors.New("Error loading TLS certficate/key: " + err.Error())
}
// based on https://github.com/denji/golang-tls
return &tls.Config{
MinVersion: tls.VersionTLS12,
CurvePreferences: []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256},
PreferServerCipherSuites: true,
CipherSuites: []uint16{
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
},
Certificates: []tls.Certificate{cert},
}, nil
}

View file

@ -1,335 +0,0 @@
package common
import (
"context"
"encoding/json"
"os"
"testing"
"time"
logging "github.com/ipfs/go-log/v2"
types "github.com/ipfs-cluster/ipfs-cluster/api"
crypto "github.com/libp2p/go-libp2p/core/crypto"
peer "github.com/libp2p/go-libp2p/core/peer"
rpc "github.com/libp2p/go-libp2p-gorpc"
ma "github.com/multiformats/go-multiaddr"
)
// Default testing values
var (
DefaultReadTimeout = 0 * time.Second
DefaultReadHeaderTimeout = 5 * time.Second
DefaultWriteTimeout = 0 * time.Second
DefaultIdleTimeout = 120 * time.Second
DefaultMaxHeaderBytes = minMaxHeaderBytes
DefaultHTTPListenAddrs = []string{"/ip4/127.0.0.1/tcp/9094"}
DefaultHeaders = map[string][]string{}
DefaultCORSAllowedOrigins = []string{"*"}
DefaultCORSAllowedMethods = []string{}
DefaultCORSAllowedHeaders = []string{}
DefaultCORSExposedHeaders = []string{
"Content-Type",
"X-Stream-Output",
"X-Chunked-Output",
"X-Content-Length",
}
DefaultCORSAllowCredentials = true
DefaultCORSMaxAge time.Duration // 0. Means always.
)
func defaultFunc(cfg *Config) error {
// http
addrs := make([]ma.Multiaddr, 0, len(DefaultHTTPListenAddrs))
for _, def := range DefaultHTTPListenAddrs {
httpListen, err := ma.NewMultiaddr(def)
if err != nil {
return err
}
addrs = append(addrs, httpListen)
}
cfg.HTTPListenAddr = addrs
cfg.PathSSLCertFile = ""
cfg.PathSSLKeyFile = ""
cfg.ReadTimeout = DefaultReadTimeout
cfg.ReadHeaderTimeout = DefaultReadHeaderTimeout
cfg.WriteTimeout = DefaultWriteTimeout
cfg.IdleTimeout = DefaultIdleTimeout
cfg.MaxHeaderBytes = DefaultMaxHeaderBytes
// libp2p
cfg.ID = ""
cfg.PrivateKey = nil
cfg.Libp2pListenAddr = nil
// Auth
cfg.BasicAuthCredentials = nil
// Logs
cfg.HTTPLogFile = ""
// Headers
cfg.Headers = DefaultHeaders
cfg.CORSAllowedOrigins = DefaultCORSAllowedOrigins
cfg.CORSAllowedMethods = DefaultCORSAllowedMethods
cfg.CORSAllowedHeaders = DefaultCORSAllowedHeaders
cfg.CORSExposedHeaders = DefaultCORSExposedHeaders
cfg.CORSAllowCredentials = DefaultCORSAllowCredentials
cfg.CORSMaxAge = DefaultCORSMaxAge
return nil
}
var cfgJSON = []byte(`
{
"listen_multiaddress": "/ip4/127.0.0.1/tcp/12122",
"ssl_cert_file": "test/server.crt",
"ssl_key_file": "test/server.key",
"read_timeout": "30s",
"read_header_timeout": "5s",
"write_timeout": "1m0s",
"idle_timeout": "2m0s",
"max_header_bytes": 16384,
"basic_auth_credentials": null,
"http_log_file": "",
"cors_allowed_origins": ["myorigin"],
"cors_allowed_methods": ["GET"],
"cors_allowed_headers": ["X-Custom"],
"cors_exposed_headers": ["X-Chunked-Output"],
"cors_allow_credentials": false,
"cors_max_age": "1s"
}
`)
func newTestConfig() *Config {
cfg := &Config{}
cfg.ConfigKey = "testapi"
cfg.EnvConfigKey = "cluster_testapi"
cfg.Logger = logging.Logger("testapi")
cfg.RequestLogger = logging.Logger("testapilog")
cfg.DefaultFunc = defaultFunc
cfg.APIErrorFunc = func(err error, status int) error {
return types.Error{Code: status, Message: err.Error()}
}
return cfg
}
func newDefaultTestConfig(t *testing.T) *Config {
t.Helper()
cfg := newTestConfig()
if err := defaultFunc(cfg); err != nil {
t.Fatal(err)
}
return cfg
}
func TestLoadEmptyJSON(t *testing.T) {
cfg := newTestConfig()
err := cfg.LoadJSON([]byte(`{}`))
if err != nil {
t.Fatal(err)
}
}
func TestLoadJSON(t *testing.T) {
cfg := newTestConfig()
err := cfg.LoadJSON(cfgJSON)
if err != nil {
t.Fatal(err)
}
if cfg.ReadTimeout != 30*time.Second ||
cfg.WriteTimeout != time.Minute ||
cfg.ReadHeaderTimeout != 5*time.Second ||
cfg.IdleTimeout != 2*time.Minute {
t.Error("error parsing timeouts")
}
j := &jsonConfig{}
json.Unmarshal(cfgJSON, j)
j.HTTPListenMultiaddress = []string{"abc"}
tst, _ := json.Marshal(j)
err = cfg.LoadJSON(tst)
if err == nil {
t.Error("expected error decoding listen multiaddress")
}
j = &jsonConfig{}
json.Unmarshal(cfgJSON, j)
j.ReadTimeout = "-1"
tst, _ = json.Marshal(j)
err = cfg.LoadJSON(tst)
if err == nil {
t.Error("expected error in read_timeout")
}
j = &jsonConfig{}
json.Unmarshal(cfgJSON, j)
j.BasicAuthCredentials = make(map[string]string)
tst, _ = json.Marshal(j)
err = cfg.LoadJSON(tst)
if err == nil {
t.Error("expected error with empty basic auth map")
}
j = &jsonConfig{}
json.Unmarshal(cfgJSON, j)
j.SSLCertFile = "abc"
tst, _ = json.Marshal(j)
err = cfg.LoadJSON(tst)
if err == nil {
t.Error("expected error with TLS configuration")
}
j = &jsonConfig{}
json.Unmarshal(cfgJSON, j)
j.ID = "abc"
tst, _ = json.Marshal(j)
err = cfg.LoadJSON(tst)
if err == nil {
t.Error("expected error with ID")
}
j = &jsonConfig{}
json.Unmarshal(cfgJSON, j)
j.Libp2pListenMultiaddress = []string{"abc"}
tst, _ = json.Marshal(j)
err = cfg.LoadJSON(tst)
if err == nil {
t.Error("expected error with libp2p address")
}
j = &jsonConfig{}
json.Unmarshal(cfgJSON, j)
j.PrivateKey = "abc"
tst, _ = json.Marshal(j)
err = cfg.LoadJSON(tst)
if err == nil {
t.Error("expected error with private key")
}
j = &jsonConfig{}
json.Unmarshal(cfgJSON, j)
j.MaxHeaderBytes = minMaxHeaderBytes - 1
tst, _ = json.Marshal(j)
err = cfg.LoadJSON(tst)
if err == nil {
t.Error("expected error with MaxHeaderBytes")
}
}
func TestApplyEnvVars(t *testing.T) {
username := "admin"
password := "thisaintmypassword"
user1 := "user1"
user1pass := "user1passwd"
os.Setenv("CLUSTER_TESTAPI_BASICAUTHCREDENTIALS", username+":"+password+","+user1+":"+user1pass)
cfg := newDefaultTestConfig(t)
err := cfg.ApplyEnvVars()
if err != nil {
t.Fatal(err)
}
if _, ok := cfg.BasicAuthCredentials[username]; !ok {
t.Fatalf("username '%s' not set in BasicAuthCreds map: %v", username, cfg.BasicAuthCredentials)
}
if _, ok := cfg.BasicAuthCredentials[user1]; !ok {
t.Fatalf("username '%s' not set in BasicAuthCreds map: %v", user1, cfg.BasicAuthCredentials)
}
if gotpasswd := cfg.BasicAuthCredentials[username]; gotpasswd != password {
t.Errorf("password not what was set in env var, got: %s, want: %s", gotpasswd, password)
}
if gotpasswd := cfg.BasicAuthCredentials[user1]; gotpasswd != user1pass {
t.Errorf("password not what was set in env var, got: %s, want: %s", gotpasswd, user1pass)
}
}
func TestLibp2pConfig(t *testing.T) {
ctx := context.Background()
cfg := newDefaultTestConfig(t)
priv, pub, err := crypto.GenerateKeyPair(crypto.RSA, 2048)
if err != nil {
t.Fatal(err)
}
pid, err := peer.IDFromPublicKey(pub)
if err != nil {
t.Fatal(err)
}
cfg.ID = pid
cfg.PrivateKey = priv
addr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0")
cfg.HTTPListenAddr = []ma.Multiaddr{addr}
cfg.Libp2pListenAddr = []ma.Multiaddr{addr}
err = cfg.Validate()
if err != nil {
t.Error(err)
}
cfgJSON, err := cfg.ToJSON()
if err != nil {
t.Fatal(err)
}
err = cfg.LoadJSON(cfgJSON)
if err != nil {
t.Fatal(err)
}
// Test creating a new API with a libp2p config
rest, err := NewAPI(ctx, cfg,
func(c *rpc.Client) []Route { return nil })
if err != nil {
t.Fatal(err)
}
defer rest.Shutdown(ctx)
badPid, _ := peer.Decode("QmTQ6oKHDwFjzr4ihirVCLJe8CxanxD3ZjGRYzubFuNDjE")
cfg.ID = badPid
err = cfg.Validate()
if err == nil {
t.Error("expected id-privkey mismatch")
}
cfg.ID = pid
cfg.PrivateKey = nil
err = cfg.Validate()
if err == nil {
t.Error("expected missing private key error")
}
}
func TestToJSON(t *testing.T) {
cfg := newTestConfig()
cfg.LoadJSON(cfgJSON)
newjson, err := cfg.ToJSON()
if err != nil {
t.Fatal(err)
}
cfg = newTestConfig()
err = cfg.LoadJSON(newjson)
if err != nil {
t.Fatal(err)
}
}
func TestDefault(t *testing.T) {
cfg := newDefaultTestConfig(t)
if cfg.Validate() != nil {
t.Fatal("error validating")
}
err := defaultFunc(cfg)
if err != nil {
t.Fatal(err)
}
cfg.IdleTimeout = -1
if cfg.Validate() == nil {
t.Fatal("expected error validating")
}
}

View file

@ -1,296 +0,0 @@
// Package test provides utility methods to test APIs based on the common
// API.
package test
import (
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"reflect"
"strings"
"testing"
"github.com/libp2p/go-libp2p"
p2phttp "github.com/libp2p/go-libp2p-http"
"github.com/libp2p/go-libp2p/core/host"
peerstore "github.com/libp2p/go-libp2p/core/peerstore"
)
var (
// SSLCertFile is the location of the certificate file.
// Used in HTTPClient to set the right certificate when
// creating an HTTPS client. Might need adjusting depending
// on where the tests are running.
SSLCertFile = "test/server.crt"
// ClientOrigin is the value set as the Origin header in requests.
ClientOrigin = "myorigin"
)
// ProcessResp puts a response into a given type or fails the test.
func ProcessResp(t *testing.T, httpResp *http.Response, err error, resp interface{}) {
if err != nil {
t.Fatal("error making request: ", err)
}
body, err := io.ReadAll(httpResp.Body)
defer httpResp.Body.Close()
if err != nil {
t.Fatal("error reading body: ", err)
}
if len(body) != 0 {
err = json.Unmarshal(body, resp)
if err != nil {
t.Error(string(body))
t.Fatal("error parsing json: ", err)
}
}
}
// ProcessStreamingResp decodes a streaming response into the given type
// and fails the test on error.
func ProcessStreamingResp(t *testing.T, httpResp *http.Response, err error, resp interface{}, trailerError bool) {
if err != nil {
t.Fatal("error making streaming request: ", err)
}
if httpResp.StatusCode > 399 {
// normal response with error
ProcessResp(t, httpResp, err, resp)
return
}
defer httpResp.Body.Close()
dec := json.NewDecoder(httpResp.Body)
// If we passed a slice we fill it in, otherwise we just decode
// on top of the passed value.
tResp := reflect.TypeOf(resp)
if tResp.Elem().Kind() == reflect.Slice {
vSlice := reflect.MakeSlice(reflect.TypeOf(resp).Elem(), 0, 1000)
vType := tResp.Elem().Elem()
for {
v := reflect.New(vType)
err := dec.Decode(v.Interface())
if err == io.EOF {
break
}
if err != nil {
t.Fatal(err)
}
vSlice = reflect.Append(vSlice, v.Elem())
}
reflect.ValueOf(resp).Elem().Set(vSlice)
} else {
for {
err := dec.Decode(resp)
if err == io.EOF {
break
}
if err != nil {
t.Fatal(err)
}
}
}
trailerValues := httpResp.Trailer.Values("X-Stream-Error")
if trailerError && (len(trailerValues) == 0 || trailerValues[0] == "") {
t.Error("expected trailer error")
}
if !trailerError && len(trailerValues) >= 2 {
t.Error("got trailer error: ", trailerValues)
}
}
// CheckHeaders checks that all the headers are set to what is expected.
func CheckHeaders(t *testing.T, expected map[string][]string, url string, headers http.Header) {
for k, v := range expected {
if strings.Join(v, ",") != strings.Join(headers[k], ",") {
t.Errorf("%s does not show configured headers: %s", url, k)
}
}
if headers.Get("Content-Type") != "application/json" {
t.Errorf("%s is not application/json", url)
}
if eh := headers.Get("Access-Control-Expose-Headers"); eh == "" {
t.Error("AC-Expose-Headers not set")
}
}
// API defines the interface that the APIs under test must satisfy.
type API interface {
HTTPAddresses() ([]string, error)
Host() host.Host
Headers() map[string][]string
}
// URLFunc is a function that, given an API, returns a URL string.
type URLFunc func(a API) string
// HTTPURL returns the http endpoint of the API.
func HTTPURL(a API) string {
u, _ := a.HTTPAddresses()
return fmt.Sprintf("http://%s", u[0])
}
// P2pURL returns the libp2p endpoint of the API.
func P2pURL(a API) string {
return fmt.Sprintf("libp2p://%s", a.Host().ID().String())
}
// httpsURL returns the HTTPS endpoint of the API.
func httpsURL(a API) string {
u, _ := a.HTTPAddresses()
return fmt.Sprintf("https://%s", u[0])
}
// IsHTTPS returns true if a url string uses HTTPS.
func IsHTTPS(url string) bool {
return strings.HasPrefix(url, "https")
}
// HTTPClient returns a client that supports both HTTP/HTTPS and
// libp2p-tunneled HTTP.
func HTTPClient(t *testing.T, h host.Host, isHTTPS bool) *http.Client {
tr := &http.Transport{}
if isHTTPS {
certpool := x509.NewCertPool()
cert, err := os.ReadFile(SSLCertFile)
if err != nil {
t.Fatal("error reading cert for https client: ", err)
}
certpool.AppendCertsFromPEM(cert)
tr = &http.Transport{
TLSClientConfig: &tls.Config{
RootCAs: certpool,
}}
}
if h != nil {
tr.RegisterProtocol("libp2p", p2phttp.NewTransport(h))
}
return &http.Client{Transport: tr}
}
// MakeHost makes a libp2p host that knows how to talk to the given API.
func MakeHost(t *testing.T, api API) host.Host {
h, err := libp2p.New()
if err != nil {
t.Fatal(err)
}
h.Peerstore().AddAddrs(
api.Host().ID(),
api.Host().Addrs(),
peerstore.PermanentAddrTTL,
)
return h
}
// MakeGet performs a GET request against the API.
func MakeGet(t *testing.T, api API, url string, resp interface{}) {
h := MakeHost(t, api)
defer h.Close()
c := HTTPClient(t, h, IsHTTPS(url))
req, _ := http.NewRequest(http.MethodGet, url, nil)
req.Header.Set("Origin", ClientOrigin)
httpResp, err := c.Do(req)
ProcessResp(t, httpResp, err, resp)
CheckHeaders(t, api.Headers(), url, httpResp.Header)
}
// MakePost performs a POST request against the API with the given body.
func MakePost(t *testing.T, api API, url string, body []byte, resp interface{}) {
MakePostWithContentType(t, api, url, body, "application/json", resp)
}
// MakePostWithContentType performs a POST with the given body and content-type.
func MakePostWithContentType(t *testing.T, api API, url string, body []byte, contentType string, resp interface{}) {
h := MakeHost(t, api)
defer h.Close()
c := HTTPClient(t, h, IsHTTPS(url))
req, _ := http.NewRequest(http.MethodPost, url, bytes.NewReader(body))
req.Header.Set("Content-Type", contentType)
req.Header.Set("Origin", ClientOrigin)
httpResp, err := c.Do(req)
ProcessResp(t, httpResp, err, resp)
CheckHeaders(t, api.Headers(), url, httpResp.Header)
}
// MakeDelete performs a DELETE request against the given API.
func MakeDelete(t *testing.T, api API, url string, resp interface{}) {
h := MakeHost(t, api)
defer h.Close()
c := HTTPClient(t, h, IsHTTPS(url))
req, _ := http.NewRequest(http.MethodDelete, url, bytes.NewReader([]byte{}))
req.Header.Set("Origin", ClientOrigin)
httpResp, err := c.Do(req)
ProcessResp(t, httpResp, err, resp)
CheckHeaders(t, api.Headers(), url, httpResp.Header)
}
// MakeOptions performs an OPTIONS request against the given API.
func MakeOptions(t *testing.T, api API, url string, reqHeaders http.Header) http.Header {
h := MakeHost(t, api)
defer h.Close()
c := HTTPClient(t, h, IsHTTPS(url))
req, _ := http.NewRequest(http.MethodOptions, url, nil)
req.Header = reqHeaders
httpResp, err := c.Do(req)
ProcessResp(t, httpResp, err, nil)
return httpResp.Header
}
// MakeStreamingPost performs a POST request and uses ProcessStreamingResp
func MakeStreamingPost(t *testing.T, api API, url string, body io.Reader, contentType string, resp interface{}) {
h := MakeHost(t, api)
defer h.Close()
c := HTTPClient(t, h, IsHTTPS(url))
req, _ := http.NewRequest(http.MethodPost, url, body)
req.Header.Set("Content-Type", contentType)
req.Header.Set("Origin", ClientOrigin)
httpResp, err := c.Do(req)
ProcessStreamingResp(t, httpResp, err, resp, false)
CheckHeaders(t, api.Headers(), url, httpResp.Header)
}
// MakeStreamingGet performs a GET request and uses ProcessStreamingResp
func MakeStreamingGet(t *testing.T, api API, url string, resp interface{}, trailerError bool) {
h := MakeHost(t, api)
defer h.Close()
c := HTTPClient(t, h, IsHTTPS(url))
req, _ := http.NewRequest(http.MethodGet, url, nil)
req.Header.Set("Origin", ClientOrigin)
httpResp, err := c.Do(req)
ProcessStreamingResp(t, httpResp, err, resp, trailerError)
CheckHeaders(t, api.Headers(), url, httpResp.Header)
}
// Func is a function that runs a test with a given URL.
type Func func(t *testing.T, url URLFunc)
// BothEndpoints runs a test.Func against the http and p2p endpoints.
func BothEndpoints(t *testing.T, test Func) {
t.Run("in-parallel", func(t *testing.T) {
t.Run("http", func(t *testing.T) {
t.Parallel()
test(t, HTTPURL)
})
t.Run("libp2p", func(t *testing.T) {
t.Parallel()
test(t, P2pURL)
})
})
}
// HTTPSEndPoint runs the given test.Func against an HTTPS endpoint.
func HTTPSEndPoint(t *testing.T, test Func) {
t.Run("in-parallel", func(t *testing.T) {
t.Run("https", func(t *testing.T) {
t.Parallel()
test(t, httpsURL)
})
})
}
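// Illustrative sketch, not part of the original file: how these helpers are
// typically combined. The "/id" endpoint and the response shape are
// assumptions made for the example; apiUnderTest stands for whatever API
// implementation the calling test sets up.
func sketchBothEndpoints(t *testing.T, apiUnderTest API) {
	testF := func(t *testing.T, url URLFunc) {
		var id struct{ ID string }
		// url(apiUnderTest) resolves to either the HTTP or the libp2p
		// endpoint, depending on which subtest is running.
		MakeGet(t, apiUnderTest, url(apiUnderTest)+"/id", &id)
		if id.ID == "" {
			t.Error("expected a non-empty ID")
		}
	}
	BothEndpoints(t, testF)
}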

View file

@ -1,24 +0,0 @@
-----BEGIN CERTIFICATE-----
MIID7TCCAtWgAwIBAgIJAMqpHdKRMzMLMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD
VQQGEwJVUzERMA8GA1UECAwIQ29sb3JhZG8xDzANBgNVBAcMBmdvbGRlbjEMMAoG
A1UECgwDQ1NNMREwDwYDVQQLDAhTZWN0b3IgNzEMMAoGA1UEAwwDQm9iMSAwHgYJ
KoZIhvcNAQkBFhFtaW5pc3RlckBtb3N3Lm9yZzAeFw0xNzA3MjExNjA5NTlaFw0y
NzA3MTkxNjA5NTlaMIGCMQswCQYDVQQGEwJVUzERMA8GA1UECAwIQ29sb3JhZG8x
DzANBgNVBAcMBmdvbGRlbjEMMAoGA1UECgwDQ1NNMREwDwYDVQQLDAhTZWN0b3Ig
NzEMMAoGA1UEAwwDQm9iMSAwHgYJKoZIhvcNAQkBFhFtaW5pc3RlckBtb3N3Lm9y
ZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALuoP8PehGItmKPi3+8S
IV1qz8C3FiK85X/INxYLjyuzvpmDROtlkOvdmPCJrveKDZF7ECQpwIGApFbnKCCW
3zdOPQmAVzm4N8bvnzFtM9mTm8qKb9SwRi6ZLZ/qXo98t8C7CV6FaNKUkIw0lUes
ZiXEcmknrlPy3svaDQVoSOH8L38d0g4geqiNrMmZDaGe8FAYdpCoeYDIm/u0Ag9y
G3+XAbETxWhkfTyH3XcQ/Izg0wG9zFY8y/fyYwC+C7+xF75x4gbIzHAY2iFS2ua7
GTKa2GZhOXtMuzJ6cf+TZW460Z+O+PkA1aH01WrGL7iCW/6Cn9gPRKL+IP6iyDnh
9HMCAwEAAaNkMGIwDwYDVR0RBAgwBocEfwAAATAdBgNVHQ4EFgQU9mXv8mv/LlAa
jwr8X9hzk52cBagwHwYDVR0jBBgwFoAU9mXv8mv/LlAajwr8X9hzk52cBagwDwYD
VR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAIxqpKYzF6A9RlLso0lkF
nYfcyeVAvi03IBdiTNnpOe6ROa4gNwKH/JUJMCRDPzm/x78+srCmrcCCAJJTcqgi
b84vq3DegGPg2NXbn9qVUA1SdiXFelqMFwLitDn2KKizihEN4L5PEArHuDaNvLI+
kMr+yZSALWTdtfydj211c7hTBvFqO8l5MYDXCmfoS9sqniorlNHIaBim/SNfDsi6
8hAhvfRvk3e6dPjAPrIZYdQR5ROGewtD4F/anXgKY2BmBtWwd6gbGeMnnVi1SGRP
0UHc4O9aq9HrAOFL/72WVk/kyyPyJ/GtSaPYL1OFS12R/l0hNi+pER7xDtLOVHO2
iw==
-----END CERTIFICATE-----

View file

@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEAu6g/w96EYi2Yo+Lf7xIhXWrPwLcWIrzlf8g3FguPK7O+mYNE
62WQ692Y8Imu94oNkXsQJCnAgYCkVucoIJbfN049CYBXObg3xu+fMW0z2ZObyopv
1LBGLpktn+pej3y3wLsJXoVo0pSQjDSVR6xmJcRyaSeuU/Ley9oNBWhI4fwvfx3S
DiB6qI2syZkNoZ7wUBh2kKh5gMib+7QCD3Ibf5cBsRPFaGR9PIfddxD8jODTAb3M
VjzL9/JjAL4Lv7EXvnHiBsjMcBjaIVLa5rsZMprYZmE5e0y7Mnpx/5NlbjrRn474
+QDVofTVasYvuIJb/oKf2A9Eov4g/qLIOeH0cwIDAQABAoIBAAOYreArG45mIU7C
wlfqmQkZSvH+kEYKKLvSMnwRrKTBxR1cDq4UPDrI/G1ftiK4Wpo3KZAH3NCejoe7
1mEJgy2kKjdMZl+M0ETXws1Hsn6w/YNcM9h3qGCsPtuZukY1ta/T5dIR7HhcsIh/
WX0OKMcAhNDPGeAx/2MYwrcf0IXELx0+eP1fuBllkajH14J8+ZkVrBMDhqppn8Iq
f9poVNQliJtN7VkL6lJ60HwoVNGEhFaOYphn3CR/sCc6xl+/CzV4h6c5X/RIUfDs
kjgl9mlPFuWq9S19Z+XVfLSE+sYd6LDrh0IZEx9s0OfOjucH2bUAuKNDnCq0wW70
FzH6KoECgYEA4ZOcAMgujk8goL8nleNjuEq7d8pThAsuAy5vq9oyol8oe+p1pXHR
SHP6wHyhXeTS5g1Ej+QV6f0v9gVFS2pFqTXymc9Gxald3trcnheodZXx63YbxHm2
H7mYWyZvq05A0qRLmmqCoSRJHUOkH2wVqgj9KsVYP1anIhdykbycansCgYEA1Pdp
uAfWt/GLZ7B0q3JPlVvusf97wBIUcoaxLHGKopvfsaFp0EY3NRxLSTaZ0NPOxTHh
W6xaIlBmKllyt6q8W609A8hrXayV1yYnVE44b5UEMhVlfRFeEdf9Sp4YdQJ8r1J0
QA89jHCjf8VocP5pSJz5tXvWHhmaotXBthFgWGkCgYEAiy7dwenCOBKAqk5n6Wb9
X3fVBguzzjRrtpDPXHTsax1VyGeZIXUB0bemD2CW3G1U55dmJ3ZvQwnyrtT/tZGj
280qnFa1bz6aaegW2gD082CKfWNJrMgAZMDKTeuAWW2WN6Ih9+wiH7VY25Kh0LWL
BHg5ZUuQsLwRscpP6bY7uMMCgYEAwY23hK2DJZyfEXcbIjL7R4jNMPM82nzUHp5x
6i2rTUyTitJj5Anc5SU4+2pnc5b9RtWltva22Jbvs6+mBm1jUYLqgESn5/QSHv8r
IYER47+wl4BAw+GD+H2wVB/JpJbFEWbEBvCTBM/emSKmYIOo1njsrlfFa4fjtfjG
XJ4ATXkCgYEAzeSrCCVrfPMLCmOijIYD1F7TMFthosW2JJie3bcHZMu2QEM8EIif
YzkUvMaDAXJ4VniTHkDf3ubRoUi3DwLbvJIPnoOlx3jmzz6KYiEd+uXx40Yrebb0
V9GB2S2q1RY7wsFoCqT/mq8usQkjr3ulYMJqeIWnCTWgajXWqAHH/Mw=
-----END RSA PRIVATE KEY-----

View file

@ -1,344 +0,0 @@
package ipfsproxy
import (
"encoding/json"
"errors"
"fmt"
"path/filepath"
"time"
"github.com/kelseyhightower/envconfig"
ma "github.com/multiformats/go-multiaddr"
"github.com/ipfs-cluster/ipfs-cluster/config"
)
const (
configKey = "ipfsproxy"
envConfigKey = "cluster_ipfsproxy"
minMaxHeaderBytes = 4096
)
// DefaultListenAddrs contains the default listeners for the proxy.
var DefaultListenAddrs = []string{
"/ip4/127.0.0.1/tcp/9095",
}
// Default values for Config.
const (
DefaultNodeAddr = "/ip4/127.0.0.1/tcp/5001"
DefaultNodeHTTPS = false
DefaultReadTimeout = 0
DefaultReadHeaderTimeout = 5 * time.Second
DefaultWriteTimeout = 0
DefaultIdleTimeout = 60 * time.Second
DefaultExtractHeadersPath = "/api/v0/version"
DefaultExtractHeadersTTL = 5 * time.Minute
DefaultMaxHeaderBytes = minMaxHeaderBytes
)
// Config allows customizing the behavior of IPFSProxy.
// It implements the config.ComponentConfig interface.
type Config struct {
config.Saver
// Listen parameters for the IPFS Proxy.
ListenAddr []ma.Multiaddr
// Host/Port for the IPFS daemon.
NodeAddr ma.Multiaddr
// Should we talk to the IPFS API over HTTPS? (experimental, untested)
NodeHTTPS bool
// LogFile is the path of the file where Proxy API logs are saved. If
// this path is empty, logs are sent to standard output. The path should
// either be absolute or relative to the cluster base directory. Its
// default value is empty.
LogFile string
// Maximum duration before timing out reading a full request
ReadTimeout time.Duration
// Maximum duration before timing out reading the headers of a request
ReadHeaderTimeout time.Duration
// Maximum duration before timing out write of the response
WriteTimeout time.Duration
// Maximum cumulative size of HTTP request headers in bytes
// accepted by the server
MaxHeaderBytes int
// Server-side amount of time a Keep-Alive connection will be
// kept idle before being reused
IdleTimeout time.Duration
// A list of custom headers that should be extracted from
// IPFS daemon responses and re-used in responses from hijacked paths.
// This is only useful if the user has configured custom headers
// in the IPFS daemon. CORS-related headers are already
// taken care of by the proxy.
ExtractHeadersExtra []string
// If the user wants to extract some extra custom headers configured
// on the IPFS daemon so that they are used in hijacked responses,
// this request path will be used. Defaults to /version. This will
// trigger a single request to extract those headers and remember them
// for future requests (until TTL expires).
ExtractHeadersPath string
// ExtractHeadersTTL establishes how long extracted headers are remembered
// before they are refreshed with a new request. 0 means they are
// remembered forever and never refreshed.
ExtractHeadersTTL time.Duration
// Tracing flag used to skip tracing specific paths when not enabled.
Tracing bool
}
type jsonConfig struct {
ListenMultiaddress config.Strings `json:"listen_multiaddress"`
NodeMultiaddress string `json:"node_multiaddress"`
NodeHTTPS bool `json:"node_https,omitempty"`
LogFile string `json:"log_file"`
ReadTimeout string `json:"read_timeout"`
ReadHeaderTimeout string `json:"read_header_timeout"`
WriteTimeout string `json:"write_timeout"`
IdleTimeout string `json:"idle_timeout"`
MaxHeaderBytes int `json:"max_header_bytes"`
ExtractHeadersExtra []string `json:"extract_headers_extra,omitempty"`
ExtractHeadersPath string `json:"extract_headers_path,omitempty"`
ExtractHeadersTTL string `json:"extract_headers_ttl,omitempty"`
}
// getLogPath returns the full path of the file where proxy logs should be
// saved.
func (cfg *Config) getLogPath() string {
if filepath.IsAbs(cfg.LogFile) {
return cfg.LogFile
}
if cfg.BaseDir == "" {
return ""
}
return filepath.Join(cfg.BaseDir, cfg.LogFile)
}
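// For illustration (paths are hypothetical): with BaseDir
// "/home/user/.ipfs-cluster" and LogFile "proxy.log", getLogPath returns
// "/home/user/.ipfs-cluster/proxy.log"; an absolute LogFile such as
// "/var/log/proxy.log" is returned unchanged.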
// ConfigKey provides a human-friendly identifier for this type of Config.
func (cfg *Config) ConfigKey() string {
return configKey
}
// Default sets the fields of this Config to sensible default values.
func (cfg *Config) Default() error {
proxy := make([]ma.Multiaddr, 0, len(DefaultListenAddrs))
for _, def := range DefaultListenAddrs {
a, err := ma.NewMultiaddr(def)
if err != nil {
return err
}
proxy = append(proxy, a)
}
node, err := ma.NewMultiaddr(DefaultNodeAddr)
if err != nil {
return err
}
cfg.ListenAddr = proxy
cfg.NodeAddr = node
cfg.LogFile = ""
cfg.ReadTimeout = DefaultReadTimeout
cfg.ReadHeaderTimeout = DefaultReadHeaderTimeout
cfg.WriteTimeout = DefaultWriteTimeout
cfg.IdleTimeout = DefaultIdleTimeout
cfg.ExtractHeadersExtra = nil
cfg.ExtractHeadersPath = DefaultExtractHeadersPath
cfg.ExtractHeadersTTL = DefaultExtractHeadersTTL
cfg.MaxHeaderBytes = DefaultMaxHeaderBytes
return nil
}
// ApplyEnvVars fills in any Config fields found
// as environment variables.
func (cfg *Config) ApplyEnvVars() error {
jcfg, err := cfg.toJSONConfig()
if err != nil {
return err
}
err = envconfig.Process(envConfigKey, jcfg)
if err != nil {
return err
}
return cfg.applyJSONConfig(jcfg)
}
// Validate checks that the fields of this Config have sensible values,
// at least in appearance.
func (cfg *Config) Validate() error {
var err error
if len(cfg.ListenAddr) == 0 {
err = errors.New("ipfsproxy.listen_multiaddress not set")
}
if cfg.NodeAddr == nil {
err = errors.New("ipfsproxy.node_multiaddress not set")
}
if cfg.ReadTimeout < 0 {
err = errors.New("ipfsproxy.read_timeout is invalid")
}
if cfg.ReadHeaderTimeout < 0 {
err = errors.New("ipfsproxy.read_header_timeout is invalid")
}
if cfg.WriteTimeout < 0 {
err = errors.New("ipfsproxy.write_timeout is invalid")
}
if cfg.IdleTimeout < 0 {
err = errors.New("ipfsproxy.idle_timeout invalid")
}
if cfg.ExtractHeadersPath == "" {
err = errors.New("ipfsproxy.extract_headers_path should not be empty")
}
if cfg.ExtractHeadersTTL < 0 {
err = errors.New("ipfsproxy.extract_headers_ttl is invalid")
}
if cfg.MaxHeaderBytes < minMaxHeaderBytes {
err = fmt.Errorf("ipfsproxy.max_header_size must be greater or equal to %d", minMaxHeaderBytes)
}
return err
}
// LoadJSON parses a JSON representation of this Config as generated by ToJSON.
func (cfg *Config) LoadJSON(raw []byte) error {
jcfg := &jsonConfig{}
err := json.Unmarshal(raw, jcfg)
if err != nil {
logger.Error("Error unmarshaling ipfsproxy config")
return err
}
err = cfg.Default()
if err != nil {
return fmt.Errorf("error setting config to default values: %s", err)
}
return cfg.applyJSONConfig(jcfg)
}
func (cfg *Config) applyJSONConfig(jcfg *jsonConfig) error {
if addresses := jcfg.ListenMultiaddress; len(addresses) > 0 {
cfg.ListenAddr = make([]ma.Multiaddr, 0, len(addresses))
for _, a := range addresses {
proxyAddr, err := ma.NewMultiaddr(a)
if err != nil {
return fmt.Errorf("error parsing proxy listen_multiaddress: %s", err)
}
cfg.ListenAddr = append(cfg.ListenAddr, proxyAddr)
}
}
if jcfg.NodeMultiaddress != "" {
nodeAddr, err := ma.NewMultiaddr(jcfg.NodeMultiaddress)
if err != nil {
return fmt.Errorf("error parsing ipfs node_multiaddress: %s", err)
}
cfg.NodeAddr = nodeAddr
}
config.SetIfNotDefault(jcfg.NodeHTTPS, &cfg.NodeHTTPS)
config.SetIfNotDefault(jcfg.LogFile, &cfg.LogFile)
err := config.ParseDurations(
"ipfsproxy",
&config.DurationOpt{Duration: jcfg.ReadTimeout, Dst: &cfg.ReadTimeout, Name: "read_timeout"},
&config.DurationOpt{Duration: jcfg.ReadHeaderTimeout, Dst: &cfg.ReadHeaderTimeout, Name: "read_header_timeout"},
&config.DurationOpt{Duration: jcfg.WriteTimeout, Dst: &cfg.WriteTimeout, Name: "write_timeout"},
&config.DurationOpt{Duration: jcfg.IdleTimeout, Dst: &cfg.IdleTimeout, Name: "idle_timeout"},
&config.DurationOpt{Duration: jcfg.ExtractHeadersTTL, Dst: &cfg.ExtractHeadersTTL, Name: "extract_header_ttl"},
)
if err != nil {
return err
}
if jcfg.MaxHeaderBytes == 0 {
cfg.MaxHeaderBytes = DefaultMaxHeaderBytes
} else {
cfg.MaxHeaderBytes = jcfg.MaxHeaderBytes
}
if extra := jcfg.ExtractHeadersExtra; len(extra) > 0 {
cfg.ExtractHeadersExtra = extra
}
config.SetIfNotDefault(jcfg.ExtractHeadersPath, &cfg.ExtractHeadersPath)
return cfg.Validate()
}
// ToJSON generates a human-friendly JSON representation of this Config.
func (cfg *Config) ToJSON() (raw []byte, err error) {
jcfg, err := cfg.toJSONConfig()
if err != nil {
return
}
raw, err = config.DefaultJSONMarshal(jcfg)
return
}
func (cfg *Config) toJSONConfig() (jcfg *jsonConfig, err error) {
// Multiaddress String() may panic
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("%s", r)
}
}()
jcfg = &jsonConfig{}
addresses := make([]string, 0, len(cfg.ListenAddr))
for _, a := range cfg.ListenAddr {
addresses = append(addresses, a.String())
}
// Set all configuration fields
jcfg.ListenMultiaddress = addresses
jcfg.NodeMultiaddress = cfg.NodeAddr.String()
jcfg.ReadTimeout = cfg.ReadTimeout.String()
jcfg.ReadHeaderTimeout = cfg.ReadHeaderTimeout.String()
jcfg.WriteTimeout = cfg.WriteTimeout.String()
jcfg.IdleTimeout = cfg.IdleTimeout.String()
jcfg.MaxHeaderBytes = cfg.MaxHeaderBytes
jcfg.NodeHTTPS = cfg.NodeHTTPS
jcfg.LogFile = cfg.LogFile
jcfg.ExtractHeadersExtra = cfg.ExtractHeadersExtra
if cfg.ExtractHeadersPath != DefaultExtractHeadersPath {
jcfg.ExtractHeadersPath = cfg.ExtractHeadersPath
}
if ttl := cfg.ExtractHeadersTTL; ttl != DefaultExtractHeadersTTL {
jcfg.ExtractHeadersTTL = ttl.String()
}
return
}
// ToDisplayJSON returns JSON config as a string.
func (cfg *Config) ToDisplayJSON() ([]byte, error) {
jcfg, err := cfg.toJSONConfig()
if err != nil {
return nil, err
}
return config.DisplayJSON(jcfg)
}
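// Illustrative sketch, not part of the original file: the typical lifecycle
// of this Config. The JSON snippet and the helper name are assumptions made
// for the example; the methods themselves are the ones defined above.
func sketchConfigLifecycle() (*Config, error) {
	cfg := &Config{}
	// LoadJSON resets the config to defaults and overlays the JSON values.
	raw := []byte(`{"node_multiaddress": "/ip4/127.0.0.1/tcp/5001"}`)
	if err := cfg.LoadJSON(raw); err != nil {
		return nil, err
	}
	// CLUSTER_IPFSPROXY_* environment variables override the loaded values.
	if err := cfg.ApplyEnvVars(); err != nil {
		return nil, err
	}
	return cfg, cfg.Validate()
}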

View file

@ -1,158 +0,0 @@
package ipfsproxy
import (
"encoding/json"
"os"
"testing"
"time"
)
var cfgJSON = []byte(`
{
"listen_multiaddress": "/ip4/127.0.0.1/tcp/9095",
"node_multiaddress": "/ip4/127.0.0.1/tcp/5001",
"log_file": "",
"read_timeout": "10m0s",
"read_header_timeout": "5s",
"write_timeout": "10m0s",
"idle_timeout": "1m0s",
"max_header_bytes": 16384,
"extract_headers_extra": [],
"extract_headers_path": "/api/v0/version",
"extract_headers_ttl": "5m"
}
`)
func TestLoadEmptyJSON(t *testing.T) {
cfg := &Config{}
err := cfg.LoadJSON([]byte(`{}`))
if err != nil {
t.Fatal(err)
}
}
func TestLoadJSON(t *testing.T) {
cfg := &Config{}
err := cfg.LoadJSON(cfgJSON)
if err != nil {
t.Fatal(err)
}
j := &jsonConfig{}
json.Unmarshal(cfgJSON, j)
j.ListenMultiaddress = []string{"abc"}
tst, _ := json.Marshal(j)
err = cfg.LoadJSON(tst)
if err == nil {
t.Error("expected error decoding listen_multiaddress")
}
j = &jsonConfig{}
json.Unmarshal(cfgJSON, j)
j.NodeMultiaddress = "abc"
tst, _ = json.Marshal(j)
err = cfg.LoadJSON(tst)
if err == nil {
t.Error("expected error in node_multiaddress")
}
j = &jsonConfig{}
json.Unmarshal(cfgJSON, j)
j.ReadTimeout = "-aber"
tst, _ = json.Marshal(j)
err = cfg.LoadJSON(tst)
if err == nil {
t.Error("expected error in read_timeout")
}
j = &jsonConfig{}
json.Unmarshal(cfgJSON, j)
j.ExtractHeadersTTL = "-10"
tst, _ = json.Marshal(j)
err = cfg.LoadJSON(tst)
if err == nil {
t.Error("expected error in extract_headers_ttl")
}
j = &jsonConfig{}
json.Unmarshal(cfgJSON, j)
j.MaxHeaderBytes = minMaxHeaderBytes - 1
tst, _ = json.Marshal(j)
err = cfg.LoadJSON(tst)
if err == nil {
t.Error("expected error in extract_headers_ttl")
}
}
func TestToJSON(t *testing.T) {
cfg := &Config{}
cfg.LoadJSON(cfgJSON)
newjson, err := cfg.ToJSON()
if err != nil {
t.Fatal(err)
}
cfg = &Config{}
err = cfg.LoadJSON(newjson)
if err != nil {
t.Fatal(err)
}
}
func TestDefault(t *testing.T) {
cfg := &Config{}
cfg.Default()
if cfg.Validate() != nil {
t.Fatal("error validating")
}
cfg.NodeAddr = nil
if cfg.Validate() == nil {
t.Fatal("expected error validating")
}
cfg.Default()
cfg.ListenAddr = nil
if cfg.Validate() == nil {
t.Fatal("expected error validating")
}
cfg.Default()
cfg.ReadTimeout = -1
if cfg.Validate() == nil {
t.Fatal("expected error validating")
}
cfg.Default()
cfg.ReadHeaderTimeout = -2
if cfg.Validate() == nil {
t.Fatal("expected error validating")
}
cfg.Default()
cfg.IdleTimeout = -1
if cfg.Validate() == nil {
t.Fatal("expected error validating")
}
cfg.Default()
cfg.WriteTimeout = -3
if cfg.Validate() == nil {
t.Fatal("expected error validating")
}
cfg.Default()
cfg.ExtractHeadersPath = ""
if cfg.Validate() == nil {
t.Fatal("expected error validating")
}
}
func TestApplyEnvVars(t *testing.T) {
os.Setenv("CLUSTER_IPFSPROXY_IDLETIMEOUT", "22s")
cfg := &Config{}
cfg.Default()
cfg.ApplyEnvVars()
if cfg.IdleTimeout != 22*time.Second {
t.Error("failed to override idle_timeout with env var")
}
}

View file

@ -1,193 +0,0 @@
package ipfsproxy
import (
"fmt"
"net/http"
"time"
"github.com/ipfs-cluster/ipfs-cluster/version"
)
// This file has the collection of header-related functions.
// We will extract all these from a pre-flight OPTIONS request to IPFS to
// use in the response of a hijacked request (usually POST).
var corsHeaders = []string{
// These two must be returned as IPFS would return them
// for a request with the same origin.
"Access-Control-Allow-Origin",
"Vary", // seems more correctly set in OPTIONS than other requests.
// This is returned by OPTIONS so we can take it, even if ipfs sets
// it for nothing by default.
"Access-Control-Allow-Credentials",
// Unfortunately this one should not come with OPTIONS by default,
// but only with the real request itself.
// We use extractHeadersDefault for it, even though I think
// IPFS puts it in OPTIONS responses too. In any case, ipfs
// puts it on all requests as of 0.4.18, so it should be OK.
// "Access-Control-Expose-Headers",
// Only for preflight responses, we do not need
// these since we will simply proxy OPTIONS requests and not
// handle them.
//
// They are here for reference about other CORS related headers.
// "Access-Control-Max-Age",
// "Access-Control-Allow-Methods",
// "Access-Control-Allow-Headers",
}
// This can be used to hardcode header extraction from the proxy if we ever
// need to. It is appended to config.ExtractHeadersExtra.
// Maybe "X-Ipfs-Gateway" is a good candidate.
var extractHeadersDefault = []string{
"Access-Control-Expose-Headers",
}
const ipfsHeadersTimestampKey = "proxyHeadersTS"
// ipfsHeaders returns all the headers we want to extract-once from IPFS: a
// concatenation of extractHeadersDefault and config.ExtractHeadersExtra.
func (proxy *Server) ipfsHeaders() []string {
return append(extractHeadersDefault, proxy.config.ExtractHeadersExtra...)
}
// rememberIPFSHeaders extracts headers and stores them for re-use with
// setIPFSHeaders.
func (proxy *Server) rememberIPFSHeaders(hdrs http.Header) {
for _, h := range proxy.ipfsHeaders() {
proxy.ipfsHeadersStore.Store(h, hdrs[h])
}
// use the sync map to store the ts
proxy.ipfsHeadersStore.Store(ipfsHeadersTimestampKey, time.Now())
}
// headersWithinTTL returns whether the headers we are storing can still
// be considered within their TTL.
func (proxy *Server) headersWithinTTL() bool {
ttl := proxy.config.ExtractHeadersTTL
if ttl == 0 {
return true
}
tsRaw, ok := proxy.ipfsHeadersStore.Load(ipfsHeadersTimestampKey)
if !ok {
return false
}
ts, ok := tsRaw.(time.Time)
if !ok {
return false
}
lifespan := time.Since(ts)
return lifespan < ttl
}
// setIPFSHeaders adds the known IPFS Headers to the destination
// and returns true if we could set all the headers in the list and
// the TTL has not expired.
// False is used to determine if we need to make a request to try
// to extract these headers.
func (proxy *Server) setIPFSHeaders(dest http.Header) bool {
r := true
if !proxy.headersWithinTTL() {
r = false
// still set those headers we can set in the destination.
// We do our best there, since maybe the ipfs daemon
// is down and what we have now is all we can use.
}
for _, h := range proxy.ipfsHeaders() {
v, ok := proxy.ipfsHeadersStore.Load(h)
if !ok {
r = false
continue
}
dest[h] = v.([]string)
}
return r
}
// copyHeadersFromIPFSWithRequest makes a request to IPFS as used by the proxy
// and copies the given list of hdrs from the response to the dest http.Header
// object.
func (proxy *Server) copyHeadersFromIPFSWithRequest(
hdrs []string,
dest http.Header, req *http.Request,
) error {
res, err := proxy.ipfsRoundTripper.RoundTrip(req)
if err != nil {
logger.Error("error making request for header extraction to ipfs: ", err)
return err
}
for _, h := range hdrs {
dest[h] = res.Header[h]
}
return nil
}
// setHeaders sets some headers for all hijacked endpoints:
// - First, we fix CORS headers by making an OPTIONS request to IPFS with the
// same Origin. Our objective is to get headers for non-preflight requests
// only (the ones we hijack).
// - Second, we add any of the one-time-extracted headers that we deem necessary
// or the user needs from IPFS (in case of custom headers).
// This may trigger a single POST request to ExtractHeaderPath if they
// were not extracted before or TTL has expired.
// - Third, we set our own headers.
func (proxy *Server) setHeaders(dest http.Header, srcRequest *http.Request) {
proxy.setCORSHeaders(dest, srcRequest)
proxy.setAdditionalIpfsHeaders(dest, srcRequest)
proxy.setClusterProxyHeaders(dest, srcRequest)
}
// see setHeaders
func (proxy *Server) setCORSHeaders(dest http.Header, srcRequest *http.Request) {
// Fix CORS headers by making an OPTIONS request
// The request URL only has a valid Path(). See http.Request docs.
srcURL := fmt.Sprintf("%s%s", proxy.nodeAddr, srcRequest.URL.Path)
req, err := http.NewRequest(http.MethodOptions, srcURL, nil)
if err != nil { // this should really not happen.
logger.Error(err)
return
}
req.Header["Origin"] = srcRequest.Header["Origin"]
req.Header.Set("Access-Control-Request-Method", srcRequest.Method)
// The error is logged; we proceed even if the request failed.
proxy.copyHeadersFromIPFSWithRequest(corsHeaders, dest, req)
}
// see setHeaders
func (proxy *Server) setAdditionalIpfsHeaders(dest http.Header, srcRequest *http.Request) {
// Avoid re-requesting these if we have them
if ok := proxy.setIPFSHeaders(dest); ok {
return
}
srcURL := fmt.Sprintf("%s%s", proxy.nodeAddr, proxy.config.ExtractHeadersPath)
req, err := http.NewRequest(http.MethodPost, srcURL, nil)
if err != nil {
logger.Error("error extracting additional headers from ipfs", err)
return
}
// The error is logged; we proceed even if the request failed.
proxy.copyHeadersFromIPFSWithRequest(
proxy.ipfsHeaders(),
dest,
req,
)
proxy.rememberIPFSHeaders(dest)
}
// see setHeaders
func (proxy *Server) setClusterProxyHeaders(dest http.Header, srcRequest *http.Request) {
dest.Set("Content-Type", "application/json")
dest.Set("Server", fmt.Sprintf("ipfs-cluster/ipfsproxy/%s", version.Version))
}
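// For illustration (values are hypothetical): after setHeaders runs for a
// hijacked request, the response carries the CORS headers copied from IPFS
// (e.g. Access-Control-Allow-Origin), any extracted extra headers, and the
// proxy's own headers, e.g.:
//
//	Content-Type: application/json
//	Server: ipfs-cluster/ipfsproxy/1.0.0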

View file

@ -1,819 +0,0 @@
// Package ipfsproxy implements the Cluster API interface by providing an
// IPFS HTTP interface as exposed by the go-ipfs daemon.
//
// In this API, select endpoints like pin*, add*, and repo* endpoints are used
// to instead perform cluster operations. Requests for any other endpoints are
// passed to the underlying IPFS daemon.
package ipfsproxy
import (
"context"
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"net/http/httputil"
"net/url"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/ipfs-cluster/ipfs-cluster/adder/adderutils"
"github.com/ipfs-cluster/ipfs-cluster/api"
"github.com/ipfs-cluster/ipfs-cluster/rpcutil"
"github.com/tv42/httpunix"
handlers "github.com/gorilla/handlers"
mux "github.com/gorilla/mux"
cid "github.com/ipfs/go-cid"
cmd "github.com/ipfs/go-ipfs-cmds"
logging "github.com/ipfs/go-log/v2"
path "github.com/ipfs/go-path"
peer "github.com/libp2p/go-libp2p/core/peer"
rpc "github.com/libp2p/go-libp2p-gorpc"
"github.com/multiformats/go-multiaddr"
madns "github.com/multiformats/go-multiaddr-dns"
manet "github.com/multiformats/go-multiaddr/net"
"go.opencensus.io/plugin/ochttp"
"go.opencensus.io/plugin/ochttp/propagation/tracecontext"
"go.opencensus.io/trace"
)
// DNSTimeout is used when resolving DNS multiaddresses in this module
var DNSTimeout = 5 * time.Second
var (
logger = logging.Logger("ipfsproxy")
proxyLogger = logging.Logger("ipfsproxylog")
)
// Server offers an IPFS API, hijacking some interesting requests
// and proxying the rest to the configured IPFS daemon. It is able to
// intercept these requests and perform extra operations on them.
type Server struct {
ctx context.Context
cancel func()
config *Config
nodeScheme string
nodeAddr string
rpcClient *rpc.Client
rpcReady chan struct{}
listeners []net.Listener // proxy listener
server *http.Server // proxy server
ipfsRoundTripper http.RoundTripper // allows to talk to IPFS
ipfsHeadersStore sync.Map
shutdownLock sync.Mutex
shutdown bool
wg sync.WaitGroup
}
type ipfsPinType struct {
Type string
}
type ipfsPinLsResp struct {
Keys map[string]ipfsPinType
}
type ipfsPinOpResp struct {
Pins []string
}
// From https://github.com/ipfs/go-ipfs/blob/master/core/coreunix/add.go#L49
type ipfsAddResp struct {
Name string
Hash string `json:",omitempty"`
Bytes int64 `json:",omitempty"`
Size string `json:",omitempty"`
}
type logWriter struct {
}
func (lw logWriter) Write(b []byte) (int, error) {
proxyLogger.Info(string(b))
return len(b), nil
}
// New returns an IPFS Proxy component.
func New(cfg *Config) (*Server, error) {
err := cfg.Validate()
if err != nil {
return nil, err
}
nodeMAddr := cfg.NodeAddr
// dns multiaddresses need to be resolved first
if madns.Matches(nodeMAddr) {
ctx, cancel := context.WithTimeout(context.Background(), DNSTimeout)
defer cancel()
resolvedAddrs, err := madns.Resolve(ctx, cfg.NodeAddr)
if err != nil {
logger.Error(err)
return nil, err
}
nodeMAddr = resolvedAddrs[0]
}
_, nodeAddr, err := manet.DialArgs(nodeMAddr)
if err != nil {
return nil, err
}
nodeScheme := "http"
if cfg.NodeHTTPS {
nodeScheme = "https"
}
isUnixSocket := false
var unixTransport *httpunix.Transport
if unixSocketPath, err := nodeMAddr.ValueForProtocol(multiaddr.P_UNIX); err == nil {
unixTransport = &httpunix.Transport{}
unixTransport.RegisterLocation("ipfsproxyunix", unixSocketPath)
nodeAddr = "ipfsproxyunix"
nodeScheme = nodeScheme + "+unix"
isUnixSocket = true
}
var listeners []net.Listener
for _, addr := range cfg.ListenAddr {
proxyNet, proxyAddr, err := manet.DialArgs(addr)
if err != nil {
return nil, err
}
l, err := net.Listen(proxyNet, proxyAddr)
if err != nil {
return nil, err
}
listeners = append(listeners, l)
}
nodeHTTPAddr := fmt.Sprintf("%s://%s", nodeScheme, nodeAddr)
proxyURL, err := url.Parse(nodeHTTPAddr)
if err != nil {
return nil, err
}
var handler http.Handler
router := mux.NewRouter()
handler = router
if cfg.Tracing {
handler = &ochttp.Handler{
IsPublicEndpoint: true,
Propagation: &tracecontext.HTTPFormat{},
Handler: router,
StartOptions: trace.StartOptions{SpanKind: trace.SpanKindServer},
FormatSpanName: func(req *http.Request) string {
return "proxy:" + req.Host + ":" + req.URL.Path + ":" + req.Method
},
}
}
var writer io.Writer
if cfg.LogFile != "" {
f, err := os.OpenFile(cfg.getLogPath(), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return nil, err
}
writer = f
} else {
writer = logWriter{}
}
s := &http.Server{
ReadTimeout: cfg.ReadTimeout,
WriteTimeout: cfg.WriteTimeout,
ReadHeaderTimeout: cfg.ReadHeaderTimeout,
IdleTimeout: cfg.IdleTimeout,
Handler: handlers.LoggingHandler(writer, handler),
MaxHeaderBytes: cfg.MaxHeaderBytes,
}
// See: https://github.com/ipfs/go-ipfs/issues/5168
// See: https://github.com/ipfs-cluster/ipfs-cluster/issues/548
// on why this is re-enabled.
s.SetKeepAlivesEnabled(true) // A reminder that this can be changed
reverseProxy := httputil.NewSingleHostReverseProxy(proxyURL)
if isUnixSocket {
t := &http.Transport{}
t.RegisterProtocol(httpunix.Scheme, unixTransport)
reverseProxy.Transport = t
} else {
reverseProxy.Transport = http.DefaultTransport
}
ctx, cancel := context.WithCancel(context.Background())
proxy := &Server{
ctx: ctx,
config: cfg,
cancel: cancel,
nodeAddr: nodeHTTPAddr,
nodeScheme: nodeScheme,
rpcReady: make(chan struct{}, 1),
listeners: listeners,
server: s,
ipfsRoundTripper: reverseProxy.Transport,
}
// Ideally, we should only intercept POST requests, but
// people may be calling the API with GET or worse, PUT
// because IPFS has been allowing this traditionally.
// The main idea here is that we do not intercept
// OPTIONS requests (or HEAD).
hijackSubrouter := router.
Methods(http.MethodPost, http.MethodGet, http.MethodPut).
PathPrefix("/api/v0").
Subrouter()
// Add hijacked routes
hijackSubrouter.
Path("/pin/add/{arg}").
HandlerFunc(slashHandler(proxy.pinHandler)).
Name("PinAddSlash") // supports people using the API wrong.
hijackSubrouter.
Path("/pin/add").
HandlerFunc(proxy.pinHandler).
Name("PinAdd")
hijackSubrouter.
Path("/pin/rm/{arg}").
HandlerFunc(slashHandler(proxy.unpinHandler)).
Name("PinRmSlash") // supports people using the API wrong.
hijackSubrouter.
Path("/pin/rm").
HandlerFunc(proxy.unpinHandler).
Name("PinRm")
hijackSubrouter.
Path("/pin/ls/{arg}").
HandlerFunc(slashHandler(proxy.pinLsHandler)).
Name("PinLsSlash") // supports people using the API wrong.
hijackSubrouter.
Path("/pin/ls").
HandlerFunc(proxy.pinLsHandler).
Name("PinLs")
hijackSubrouter.
Path("/pin/update").
HandlerFunc(proxy.pinUpdateHandler).
Name("PinUpdate")
hijackSubrouter.
Path("/add").
HandlerFunc(proxy.addHandler).
Name("Add")
hijackSubrouter.
Path("/repo/stat").
HandlerFunc(proxy.repoStatHandler).
Name("RepoStat")
hijackSubrouter.
Path("/repo/gc").
HandlerFunc(proxy.repoGCHandler).
Name("RepoGC")
// Everything else goes to the IPFS daemon.
router.PathPrefix("/").Handler(reverseProxy)
go proxy.run()
return proxy, nil
}
// SetClient makes the component ready to perform RPC
// requests.
func (proxy *Server) SetClient(c *rpc.Client) {
proxy.rpcClient = c
proxy.rpcReady <- struct{}{}
}
// Shutdown stops any listeners and stops the component from taking
// any requests.
func (proxy *Server) Shutdown(ctx context.Context) error {
proxy.shutdownLock.Lock()
defer proxy.shutdownLock.Unlock()
if proxy.shutdown {
logger.Debug("already shutdown")
return nil
}
logger.Info("stopping IPFS Proxy")
proxy.cancel()
close(proxy.rpcReady)
proxy.server.SetKeepAlivesEnabled(false)
for _, l := range proxy.listeners {
l.Close()
}
proxy.wg.Wait()
proxy.shutdown = true
return nil
}
// run launches the proxy when we receive the rpcReady signal.
func (proxy *Server) run() {
<-proxy.rpcReady
// Do not shutdown while launching threads
// -- prevents race conditions with proxy.wg.
proxy.shutdownLock.Lock()
defer proxy.shutdownLock.Unlock()
// This launches the proxy
proxy.wg.Add(len(proxy.listeners))
for _, l := range proxy.listeners {
go func(l net.Listener) {
defer proxy.wg.Done()
maddr, err := manet.FromNetAddr(l.Addr())
if err != nil {
logger.Error(err)
}
logger.Infof(
"IPFS Proxy: %s -> %s",
maddr,
proxy.config.NodeAddr,
)
err = proxy.server.Serve(l) // hangs here
if err != nil && !strings.Contains(err.Error(), "closed network connection") {
logger.Error(err)
}
}(l)
}
}
// ipfsErrorResponder writes an http error response just like IPFS would.
func ipfsErrorResponder(w http.ResponseWriter, errMsg string, code int) {
res := cmd.Errorf(cmd.ErrNormal, errMsg)
resBytes, _ := json.Marshal(res)
if code > 0 {
w.WriteHeader(code)
} else {
w.WriteHeader(http.StatusInternalServerError)
}
w.Write(resBytes)
}
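// For illustration: the body written above is the cmd.Error marshaled as
// JSON, in the same shape IPFS uses for API errors, roughly:
//
//	{"Message":"the error message","Code":0,"Type":"error"}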
func (proxy *Server) pinOpHandler(op string, w http.ResponseWriter, r *http.Request) {
proxy.setHeaders(w.Header(), r)
q := r.URL.Query()
arg := q.Get("arg")
p, err := path.ParsePath(arg)
if err != nil {
ipfsErrorResponder(w, "Error parsing IPFS Path: "+err.Error(), -1)
return
}
pinPath := api.PinPath{Path: p.String()}
pinPath.Mode = api.PinModeFromString(q.Get("type"))
var pin api.Pin
err = proxy.rpcClient.Call(
"",
"Cluster",
op,
pinPath,
&pin,
)
if err != nil {
ipfsErrorResponder(w, err.Error(), -1)
return
}
res := ipfsPinOpResp{
Pins: []string{pin.Cid.String()},
}
resBytes, _ := json.Marshal(res)
w.WriteHeader(http.StatusOK)
w.Write(resBytes)
}
func (proxy *Server) pinHandler(w http.ResponseWriter, r *http.Request) {
proxy.pinOpHandler("PinPath", w, r)
}
func (proxy *Server) unpinHandler(w http.ResponseWriter, r *http.Request) {
proxy.pinOpHandler("UnpinPath", w, r)
}
func (proxy *Server) pinLsHandler(w http.ResponseWriter, r *http.Request) {
proxy.setHeaders(w.Header(), r)
arg := r.URL.Query().Get("arg")
stream := false
streamArg := r.URL.Query().Get("stream")
streamArg2 := r.URL.Query().Get("s")
if streamArg == "true" || streamArg2 == "true" {
stream = true
}
if arg != "" {
c, err := api.DecodeCid(arg)
if err != nil {
ipfsErrorResponder(w, err.Error(), -1)
return
}
var pin api.Pin
err = proxy.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"PinGet",
c,
&pin,
)
if err != nil {
ipfsErrorResponder(w, fmt.Sprintf("Error: path '%s' is not pinned", arg), -1)
return
}
if stream {
ipinfo := api.IPFSPinInfo{
Cid: api.Cid(pin.Cid),
Type: pin.Mode.ToIPFSPinStatus(),
}
resBytes, _ := json.Marshal(ipinfo)
w.WriteHeader(http.StatusOK)
w.Write(resBytes)
} else {
pinLs := ipfsPinLsResp{}
pinLs.Keys = make(map[string]ipfsPinType)
pinLs.Keys[pin.Cid.String()] = ipfsPinType{
Type: "recursive",
}
resBytes, _ := json.Marshal(pinLs)
w.WriteHeader(http.StatusOK)
w.Write(resBytes)
}
} else {
in := make(chan struct{})
close(in)
pins := make(chan api.Pin)
var err error
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
err = proxy.rpcClient.Stream(
r.Context(),
"",
"Cluster",
"Pins",
in,
pins,
)
}()
if stream {
w.Header().Set("Trailer", "X-Stream-Error")
w.WriteHeader(http.StatusOK)
for pin := range pins {
ipinfo := api.IPFSPinInfo{
Cid: api.Cid(pin.Cid),
Type: pin.Mode.ToIPFSPinStatus(),
}
resBytes, _ := json.Marshal(ipinfo)
w.Write(resBytes)
}
wg.Wait()
if err != nil {
w.Header().Add("X-Stream-Error", err.Error())
return
}
} else {
pinLs := ipfsPinLsResp{}
pinLs.Keys = make(map[string]ipfsPinType)
for pin := range pins {
pinLs.Keys[pin.Cid.String()] = ipfsPinType{
Type: "recursive",
}
}
wg.Wait()
if err != nil {
ipfsErrorResponder(w, err.Error(), -1)
return
}
resBytes, _ := json.Marshal(pinLs)
w.WriteHeader(http.StatusOK)
w.Write(resBytes)
}
}
}
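// For illustration (the CID is hypothetical): with ?stream=true each pin is
// written as its own JSON object, roughly {"Cid":"QmFoo","Type":"recursive"},
// while the default response aggregates everything into one object matching
// ipfsPinLsResp: {"Keys":{"QmFoo":{"Type":"recursive"}}}.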
func (proxy *Server) pinUpdateHandler(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "ipfsproxy/pinUpdateHandler")
defer span.End()
proxy.setHeaders(w.Header(), r)
// Check that we have enough arguments and mimic ipfs response when not
q := r.URL.Query()
args := q["arg"]
if len(args) == 0 {
ipfsErrorResponder(w, "argument \"from-path\" is required", http.StatusBadRequest)
return
}
if len(args) == 1 {
ipfsErrorResponder(w, "argument \"to-path\" is required", http.StatusBadRequest)
return
}
unpin := q.Get("unpin") != "false"
from := args[0]
to := args[1]
// Parse paths (we will need to resolve them)
pFrom, err := path.ParsePath(from)
if err != nil {
ipfsErrorResponder(w, "error parsing \"from-path\" argument: "+err.Error(), -1)
return
}
pTo, err := path.ParsePath(to)
if err != nil {
ipfsErrorResponder(w, "error parsing \"to-path\" argument: "+err.Error(), -1)
return
}
// Resolve the FROM argument
var fromCid api.Cid
err = proxy.rpcClient.CallContext(
ctx,
"",
"IPFSConnector",
"Resolve",
pFrom.String(),
&fromCid,
)
if err != nil {
ipfsErrorResponder(w, err.Error(), -1)
return
}
// Do a PinPath setting PinUpdate
pinPath := api.PinPath{Path: pTo.String()}
pinPath.PinUpdate = fromCid
var pin api.Pin
err = proxy.rpcClient.Call(
"",
"Cluster",
"PinPath",
pinPath,
&pin,
)
if err != nil {
ipfsErrorResponder(w, err.Error(), -1)
return
}
// If unpin != "false", unpin the FROM argument
// (it was already resolved).
var pinObj api.Pin
if unpin {
err = proxy.rpcClient.CallContext(
ctx,
"",
"Cluster",
"Unpin",
api.PinCid(fromCid),
&pinObj,
)
if err != nil {
ipfsErrorResponder(w, err.Error(), -1)
return
}
}
res := ipfsPinOpResp{
Pins: []string{fromCid.String(), pin.Cid.String()},
}
resBytes, _ := json.Marshal(res)
w.WriteHeader(http.StatusOK)
w.Write(resBytes)
}
func (proxy *Server) addHandler(w http.ResponseWriter, r *http.Request) {
proxy.setHeaders(w.Header(), r)
reader, err := r.MultipartReader()
if err != nil {
ipfsErrorResponder(w, "error reading request: "+err.Error(), -1)
return
}
q := r.URL.Query()
if q.Get("only-hash") == "true" {
ipfsErrorResponder(w, "only-hash is not supported when adding to cluster", -1)
}
// Luckily, most IPFS add query params are compatible with cluster's
// /add params. We can parse most of them directly from the query.
params, err := api.AddParamsFromQuery(q)
if err != nil {
ipfsErrorResponder(w, "error parsing options:"+err.Error(), -1)
return
}
trickle := q.Get("trickle")
if trickle == "true" {
params.Layout = "trickle"
}
nopin := q.Get("pin") == "false"
if nopin {
params.NoPin = true
}
logger.Warnf("Proxy/add does not support all IPFS params. Current options: %+v", params)
outputTransform := func(in api.AddedOutput) interface{} {
cidStr := ""
if in.Cid.Defined() {
cidStr = in.Cid.String()
}
r := &ipfsAddResp{
Name: in.Name,
Hash: cidStr,
Bytes: int64(in.Bytes),
}
if in.Size != 0 {
r.Size = strconv.FormatUint(in.Size, 10)
}
return r
}
_, err = adderutils.AddMultipartHTTPHandler(
proxy.ctx,
proxy.rpcClient,
params,
reader,
w,
outputTransform,
)
if err != nil {
logger.Error(err)
}
}
func (proxy *Server) repoStatHandler(w http.ResponseWriter, r *http.Request) {
proxy.setHeaders(w.Header(), r)
peers := make([]peer.ID, 0)
err := proxy.rpcClient.Call(
"",
"Consensus",
"Peers",
struct{}{},
&peers,
)
if err != nil {
ipfsErrorResponder(w, err.Error(), -1)
return
}
ctxs, cancels := rpcutil.CtxsWithCancel(proxy.ctx, len(peers))
defer rpcutil.MultiCancel(cancels)
repoStats := make([]*api.IPFSRepoStat, len(peers))
repoStatsIfaces := make([]interface{}, len(repoStats))
for i := range repoStats {
repoStats[i] = &api.IPFSRepoStat{}
repoStatsIfaces[i] = repoStats[i]
}
errs := proxy.rpcClient.MultiCall(
ctxs,
peers,
"IPFSConnector",
"RepoStat",
struct{}{},
repoStatsIfaces,
)
totalStats := api.IPFSRepoStat{}
for i, err := range errs {
if err != nil {
if rpc.IsAuthorizationError(err) {
logger.Debug(err)
continue
}
logger.Errorf("%s repo/stat errored: %s", peers[i], err)
continue
}
totalStats.RepoSize += repoStats[i].RepoSize
totalStats.StorageMax += repoStats[i].StorageMax
}
resBytes, _ := json.Marshal(totalStats)
w.WriteHeader(http.StatusOK)
w.Write(resBytes)
}
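// For illustration (numbers are hypothetical): with three peers each
// reporting RepoSize 2000 and StorageMax 100000, the handler responds with
// the sums, RepoSize 6000 and StorageMax 300000. Peers that error are
// skipped and simply do not contribute to the totals.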
type ipfsRepoGCResp struct {
Key cid.Cid `json:",omitempty"`
Error string `json:",omitempty"`
}
func (proxy *Server) repoGCHandler(w http.ResponseWriter, r *http.Request) {
queryValues := r.URL.Query()
streamErrors := queryValues.Get("stream-errors") == "true"
// ignoring `quiet` since it only affects text output
proxy.setHeaders(w.Header(), r)
w.Header().Set("Trailer", "X-Stream-Error")
var repoGC api.GlobalRepoGC
err := proxy.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"RepoGC",
struct{}{},
&repoGC,
)
if err != nil {
ipfsErrorResponder(w, err.Error(), -1)
return
}
w.WriteHeader(http.StatusOK)
enc := json.NewEncoder(w)
var ipfsRepoGC ipfsRepoGCResp
mError := multiError{}
for _, gc := range repoGC.PeerMap {
for _, key := range gc.Keys {
if streamErrors {
ipfsRepoGC = ipfsRepoGCResp{Key: key.Key.Cid, Error: key.Error}
} else {
ipfsRepoGC = ipfsRepoGCResp{Key: key.Key.Cid}
if key.Error != "" {
mError.add(key.Error)
}
}
// Cluster tags start with a lowercase letter, but IPFS tags start with an uppercase one.
if err := enc.Encode(ipfsRepoGC); err != nil {
logger.Error(err)
}
}
}
mErrStr := mError.Error()
if !streamErrors && mErrStr != "" {
w.Header().Set("X-Stream-Error", mErrStr)
}
}
// slashHandler returns a handler which converts a /a/b/c/<argument> request
// into an /a/b/c/<argument>?arg=<argument> one, and uses the given
// origHandler for it. Our handlers expect that arguments are passed in the
// ?arg query value.
func slashHandler(origHandler http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
warnMsg := "You are using an undocumented form of the IPFS API. "
warnMsg += "Consider passing your command arguments"
warnMsg += "with the '?arg=' query parameter"
logger.Error(warnMsg)
vars := mux.Vars(r)
arg := vars["arg"]
// IF we needed to modify the request path, we could do
// something along these lines. This is not the case
// at the moment. We just need to set the query argument.
//
// route := mux.CurrentRoute(r)
// path, err := route.GetPathTemplate()
// if err != nil {
// // I'd like to panic, but I don't want to kill a full
// // peer just because of a buggy use.
// logger.Critical("BUG: wrong use of slashHandler")
// origHandler(w, r) // proceed as nothing
// return
// }
// fixedPath := strings.TrimSuffix(path, "/{arg}")
// r.URL.Path = url.PathEscape(fixedPath)
// r.URL.RawPath = fixedPath
q := r.URL.Query()
q.Set("arg", arg)
r.URL.RawQuery = q.Encode()
origHandler(w, r)
}
}
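// For illustration (the CID is hypothetical): a request to
// /api/v0/pin/add/QmFoo is handled as if it were /api/v0/pin/add?arg=QmFoo,
// since slashHandler copies the trailing path segment into the "arg" query
// parameter before calling the wrapped handler.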

View file

@ -1,898 +0,0 @@
package ipfsproxy
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/ipfs-cluster/ipfs-cluster/api"
"github.com/ipfs-cluster/ipfs-cluster/test"
cmd "github.com/ipfs/go-ipfs-cmds"
logging "github.com/ipfs/go-log/v2"
ma "github.com/multiformats/go-multiaddr"
)
func init() {
_ = logging.Logger
}
func testIPFSProxyWithConfig(t *testing.T, cfg *Config) (*Server, *test.IpfsMock) {
mock := test.NewIpfsMock(t)
nodeMAddr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d",
mock.Addr, mock.Port))
proxyMAddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0")
cfg.NodeAddr = nodeMAddr
cfg.ListenAddr = []ma.Multiaddr{proxyMAddr}
cfg.ExtractHeadersExtra = []string{
test.IpfsCustomHeaderName,
test.IpfsTimeHeaderName,
}
proxy, err := New(cfg)
if err != nil {
t.Fatal("creating an IPFSProxy should work: ", err)
}
proxy.server.SetKeepAlivesEnabled(false)
proxy.SetClient(test.NewMockRPCClient(t))
return proxy, mock
}
func testIPFSProxy(t *testing.T) (*Server, *test.IpfsMock) {
cfg := &Config{}
cfg.Default()
return testIPFSProxyWithConfig(t, cfg)
}
func TestIPFSProxyVersion(t *testing.T) {
ctx := context.Background()
proxy, mock := testIPFSProxy(t)
defer mock.Close()
defer proxy.Shutdown(ctx)
res, err := http.Post(fmt.Sprintf("%s/version", proxyURL(proxy)), "", nil)
if err != nil {
t.Fatal("should forward requests to ipfs host: ", err)
}
defer res.Body.Close()
resBytes, _ := io.ReadAll(res.Body)
if res.StatusCode != http.StatusOK {
t.Error("the request should have succeeded")
t.Fatal(string(resBytes))
}
var resp struct {
Version string
}
err = json.Unmarshal(resBytes, &resp)
if err != nil {
t.Fatal(err)
}
if resp.Version != "m.o.c.k" {
t.Error("wrong version")
}
}
func TestIPFSProxyPin(t *testing.T) {
ctx := context.Background()
proxy, mock := testIPFSProxy(t)
defer mock.Close()
defer proxy.Shutdown(ctx)
type args struct {
urlPath string
testCid string
statusCode int
}
tests := []struct {
name string
args args
want api.Cid
wantErr bool
}{
{
"pin good cid query arg",
args{
"/pin/add?arg=",
test.Cid1.String(),
http.StatusOK,
},
test.Cid1,
false,
},
{
"pin good path query arg",
args{
"/pin/add?arg=",
test.PathIPFS2,
http.StatusOK,
},
test.CidResolved,
false,
},
{
"pin good cid url arg",
args{
"/pin/add/",
test.Cid1.String(),
http.StatusOK,
},
test.Cid1,
false,
},
{
"pin bad cid query arg",
args{
"/pin/add?arg=",
test.ErrorCid.String(),
http.StatusInternalServerError,
},
api.CidUndef,
true,
},
{
"pin bad cid url arg",
args{
"/pin/add/",
test.ErrorCid.String(),
http.StatusInternalServerError,
},
api.CidUndef,
true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
u := fmt.Sprintf(
"%s%s%s",
proxyURL(proxy),
tt.args.urlPath,
tt.args.testCid,
)
res, err := http.Post(u, "", nil)
if err != nil {
t.Fatal("should have succeeded: ", err)
}
defer res.Body.Close()
if res.StatusCode != tt.args.statusCode {
t.Errorf("statusCode: got = %v, want %v", res.StatusCode, tt.args.statusCode)
}
resBytes, _ := io.ReadAll(res.Body)
switch tt.wantErr {
case false:
var resp ipfsPinOpResp
err = json.Unmarshal(resBytes, &resp)
if err != nil {
t.Fatal(err)
}
if len(resp.Pins) != 1 {
t.Fatalf("wrong number of pins: got = %d, want %d", len(resp.Pins), 1)
}
if resp.Pins[0] != tt.want.String() {
t.Errorf("wrong pin cid: got = %s, want = %s", resp.Pins[0], tt.want)
}
case true:
var respErr cmd.Error
err = json.Unmarshal(resBytes, &respErr)
if err != nil {
t.Fatal(err)
}
if respErr.Message != test.ErrBadCid.Error() {
t.Errorf("wrong response: got = %s, want = %s", respErr.Message, test.ErrBadCid.Error())
}
}
})
}
}
func TestIPFSProxyUnpin(t *testing.T) {
ctx := context.Background()
proxy, mock := testIPFSProxy(t)
defer mock.Close()
defer proxy.Shutdown(ctx)
type args struct {
urlPath string
testCid string
statusCode int
}
tests := []struct {
name string
args args
want api.Cid
wantErr bool
}{
{
"unpin good cid query arg",
args{
"/pin/rm?arg=",
test.Cid1.String(),
http.StatusOK,
},
test.Cid1,
false,
},
{
"unpin good path query arg",
args{
"/pin/rm?arg=",
test.PathIPFS2,
http.StatusOK,
},
test.CidResolved,
false,
},
{
"unpin good cid url arg",
args{
"/pin/rm/",
test.Cid1.String(),
http.StatusOK,
},
test.Cid1,
false,
},
{
"unpin bad cid query arg",
args{
"/pin/rm?arg=",
test.ErrorCid.String(),
http.StatusInternalServerError,
},
api.CidUndef,
true,
},
{
"unpin bad cid url arg",
args{
"/pin/rm/",
test.ErrorCid.String(),
http.StatusInternalServerError,
},
api.CidUndef,
true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
u := fmt.Sprintf("%s%s%s", proxyURL(proxy), tt.args.urlPath, tt.args.testCid)
res, err := http.Post(u, "", nil)
if err != nil {
t.Fatal("should have succeeded: ", err)
}
defer res.Body.Close()
if res.StatusCode != tt.args.statusCode {
t.Errorf("statusCode: got = %v, want %v", res.StatusCode, tt.args.statusCode)
}
resBytes, _ := io.ReadAll(res.Body)
switch tt.wantErr {
case false:
var resp ipfsPinOpResp
err = json.Unmarshal(resBytes, &resp)
if err != nil {
t.Fatal(err)
}
if len(resp.Pins) != 1 {
t.Fatalf("wrong number of pins: got = %d, want %d", len(resp.Pins), 1)
}
if resp.Pins[0] != tt.want.String() {
t.Errorf("wrong pin cid: got = %s, want = %s", resp.Pins[0], tt.want)
}
case true:
var respErr cmd.Error
err = json.Unmarshal(resBytes, &respErr)
if err != nil {
t.Fatal(err)
}
if respErr.Message != test.ErrBadCid.Error() {
t.Errorf("wrong response: got = %s, want = %s", respErr.Message, test.ErrBadCid.Error())
}
}
})
}
}
func TestIPFSProxyPinUpdate(t *testing.T) {
ctx := context.Background()
proxy, mock := testIPFSProxy(t)
defer mock.Close()
defer proxy.Shutdown(ctx)
t.Run("pin/update bad args", func(t *testing.T) {
res, err := http.Post(fmt.Sprintf("%s/pin/update", proxyURL(proxy)), "", nil)
if err != nil {
t.Fatal("request should complete: ", err)
}
defer res.Body.Close()
if res.StatusCode != http.StatusBadRequest {
t.Error("request should not be successful with a no arguments")
}
res2, err := http.Post(fmt.Sprintf("%s/pin/update?arg=%s", proxyURL(proxy), test.PathIPFS1), "", nil)
if err != nil {
t.Fatal("request should complete: ", err)
}
defer res2.Body.Close()
if res2.StatusCode != http.StatusBadRequest {
t.Error("request should not be successful with a single argument")
}
})
t.Run("pin/update", func(t *testing.T) {
res, err := http.Post(fmt.Sprintf("%s/pin/update?arg=%s&arg=%s", proxyURL(proxy), test.PathIPFS1, test.PathIPFS2), "", nil)
if err != nil {
t.Fatal("request should complete: ", err)
}
defer res.Body.Close()
var resp ipfsPinOpResp
resBytes, _ := io.ReadAll(res.Body)
err = json.Unmarshal(resBytes, &resp)
if err != nil {
t.Fatal(err)
}
if len(resp.Pins) != 2 ||
resp.Pins[0] != test.Cid2.String() ||
resp.Pins[1] != test.CidResolved.String() { // always resolve to the same
t.Errorf("bad response: %s", string(resBytes))
}
})
t.Run("pin/update check unpin happens", func(t *testing.T) {
// passing an errorCid to unpin should return an error
// when unpinning.
res, err := http.Post(fmt.Sprintf("%s/pin/update?arg=%s&arg=%s", proxyURL(proxy), test.ErrorCid, test.PathIPFS2), "", nil)
if err != nil {
t.Fatal("request should complete: ", err)
}
defer res.Body.Close()
if res.StatusCode != http.StatusInternalServerError {
t.Fatal("request should error")
}
resBytes, _ := io.ReadAll(res.Body)
var respErr cmd.Error
err = json.Unmarshal(resBytes, &respErr)
if err != nil {
t.Fatal(err)
}
if respErr.Message != test.ErrBadCid.Error() {
t.Error("expected a bad cid error:", respErr.Message)
}
})
t.Run("pin/update check pin happens", func(t *testing.T) {
// passing an errorCid to pin, with unpin=false should return
// an error when pinning
res, err := http.Post(fmt.Sprintf("%s/pin/update?arg=%s&arg=%s&unpin=false", proxyURL(proxy), test.Cid1, test.ErrorCid), "", nil)
if err != nil {
t.Fatal("request should complete: ", err)
}
defer res.Body.Close()
if res.StatusCode != http.StatusInternalServerError {
t.Fatal("request should error")
}
resBytes, _ := io.ReadAll(res.Body)
var respErr cmd.Error
err = json.Unmarshal(resBytes, &respErr)
if err != nil {
t.Fatal(err)
}
if respErr.Message != test.ErrBadCid.Error() {
t.Error("expected a bad cid error:", respErr.Message)
}
})
}
func TestIPFSProxyPinLs(t *testing.T) {
ctx := context.Background()
proxy, mock := testIPFSProxy(t)
defer mock.Close()
defer proxy.Shutdown(ctx)
t.Run("pin/ls query arg", func(t *testing.T) {
res, err := http.Post(fmt.Sprintf("%s/pin/ls?arg=%s", proxyURL(proxy), test.Cid1), "", nil)
if err != nil {
t.Fatal("should have succeeded: ", err)
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
t.Error("the request should have succeeded")
}
resBytes, _ := io.ReadAll(res.Body)
var resp ipfsPinLsResp
err = json.Unmarshal(resBytes, &resp)
if err != nil {
t.Fatal(err)
}
_, ok := resp.Keys[test.Cid1.String()]
if len(resp.Keys) != 1 || !ok {
t.Error("wrong response")
}
})
t.Run("pin/ls url arg", func(t *testing.T) {
res, err := http.Post(fmt.Sprintf("%s/pin/ls/%s", proxyURL(proxy), test.Cid1), "", nil)
if err != nil {
t.Fatal("should have succeeded: ", err)
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
t.Error("the request should have succeeded")
}
resBytes, _ := io.ReadAll(res.Body)
var resp ipfsPinLsResp
err = json.Unmarshal(resBytes, &resp)
if err != nil {
t.Fatal(err)
}
_, ok := resp.Keys[test.Cid1.String()]
if len(resp.Keys) != 1 || !ok {
t.Error("wrong response")
}
})
t.Run("pin/ls all no arg", func(t *testing.T) {
res2, err := http.Post(fmt.Sprintf("%s/pin/ls", proxyURL(proxy)), "", nil)
if err != nil {
t.Fatal("should have succeeded: ", err)
}
defer res2.Body.Close()
if res2.StatusCode != http.StatusOK {
t.Error("the request should have succeeded")
}
resBytes, _ := io.ReadAll(res2.Body)
var resp ipfsPinLsResp
err = json.Unmarshal(resBytes, &resp)
if err != nil {
t.Fatal(err)
}
if len(resp.Keys) != 3 {
t.Error("wrong response")
}
})
t.Run("pin/ls bad cid query arg", func(t *testing.T) {
res3, err := http.Post(fmt.Sprintf("%s/pin/ls?arg=%s", proxyURL(proxy), test.ErrorCid), "", nil)
if err != nil {
t.Fatal("should have succeeded: ", err)
}
defer res3.Body.Close()
if res3.StatusCode != http.StatusInternalServerError {
t.Error("the request should have failed")
}
})
}
func TestProxyRepoStat(t *testing.T) {
ctx := context.Background()
proxy, mock := testIPFSProxy(t)
defer mock.Close()
defer proxy.Shutdown(ctx)
res, err := http.Post(fmt.Sprintf("%s/repo/stat", proxyURL(proxy)), "", nil)
if err != nil {
t.Fatal(err)
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
t.Error("request should have succeeded")
}
resBytes, _ := io.ReadAll(res.Body)
var stat api.IPFSRepoStat
err = json.Unmarshal(resBytes, &stat)
if err != nil {
t.Fatal(err)
}
// The mockRPC returns 3 peers. Since no host is set,
// all calls are local.
if stat.RepoSize != 6000 || stat.StorageMax != 300000 {
t.Errorf("expected different stats: %+v", stat)
}
}
func TestProxyRepoGC(t *testing.T) {
ctx := context.Background()
proxy, mock := testIPFSProxy(t)
defer mock.Close()
defer proxy.Shutdown(ctx)
type testcase struct {
name string
streamErrors bool
}
testcases := []testcase{
{
name: "With streaming errors",
streamErrors: true,
},
{
name: "Without streaming errors",
streamErrors: false,
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
res1, err := http.Post(fmt.Sprintf("%s/repo/gc?stream-errors=%t", proxyURL(proxy), tc.streamErrors), "", nil)
if err != nil {
t.Fatal(err)
}
defer res1.Body.Close()
if res1.StatusCode != http.StatusOK {
t.Error("request should have succeeded")
}
var repoGC []ipfsRepoGCResp
dec := json.NewDecoder(res1.Body)
for {
resp := ipfsRepoGCResp{}
if err := dec.Decode(&resp); err != nil {
if err == io.EOF {
break
}
t.Error(err)
}
repoGC = append(repoGC, resp)
}
if !repoGC[0].Key.Equals(test.Cid1.Cid) {
t.Errorf("expected a different cid, expected: %s, found: %s", test.Cid1, repoGC[0].Key)
}
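// The X-Stream-Error trailer key is declared in both modes, but it
// only carries error text when stream-errors=false; with
// stream-errors=true the errors travel inside the streamed objects.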
xStreamError, ok := res1.Trailer["X-Stream-Error"]
if !ok {
t.Error("trailer header X-Stream-Error not set")
}
if tc.streamErrors {
if repoGC[4].Error != test.ErrLinkNotFound.Error() {
t.Error("expected a different error")
}
if len(xStreamError) != 0 {
t.Error("expected X-Stream-Error header to be empty")
}
} else {
if repoGC[4].Error != "" {
t.Error("did not expect to stream error")
}
if len(xStreamError) == 0 || xStreamError[0] != (test.ErrLinkNotFound.Error()+";") {
t.Error("expected X-Stream-Error header with link not found error")
}
}
})
}
}
func TestProxyAdd(t *testing.T) {
ctx := context.Background()
proxy, mock := testIPFSProxy(t)
defer mock.Close()
defer proxy.Shutdown(ctx)
type testcase struct {
query string
expectedCid string
}
testcases := []testcase{
{
query: "",
expectedCid: test.ShardingDirBalancedRootCID,
},
{
query: "progress=true",
expectedCid: test.ShardingDirBalancedRootCID,
},
{
query: "wrap-with-directory=true",
expectedCid: test.ShardingDirBalancedRootCIDWrapped,
},
{
query: "trickle=true",
expectedCid: test.ShardingDirTrickleRootCID,
},
}
reqs := make([]*http.Request, len(testcases))
sth := test.NewShardingTestHelper()
defer sth.Clean(t)
for i, tc := range testcases {
mr, closer := sth.GetTreeMultiReader(t)
defer closer.Close()
cType := "multipart/form-data; boundary=" + mr.Boundary()
url := fmt.Sprintf("%s/add?"+tc.query, proxyURL(proxy))
req, _ := http.NewRequest("POST", url, mr)
req.Header.Set("Content-Type", cType)
reqs[i] = req
}
for i, tc := range testcases {
t.Run(tc.query, func(t *testing.T) {
res, err := http.DefaultClient.Do(reqs[i])
if err != nil {
t.Fatal("should have succeeded: ", err)
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
t.Fatalf("Bad response status: got = %d, want = %d", res.StatusCode, http.StatusOK)
}
var resp ipfsAddResp
dec := json.NewDecoder(res.Body)
for dec.More() {
err := dec.Decode(&resp)
if err != nil {
t.Fatal(err)
}
}
if resp.Hash != tc.expectedCid {
t.Logf("%+v", resp.Hash)
t.Error("expected CID does not match")
}
})
}
}
func TestProxyAddError(t *testing.T) {
ctx := context.Background()
proxy, mock := testIPFSProxy(t)
defer mock.Close()
defer proxy.Shutdown(ctx)
res, err := http.Post(fmt.Sprintf("%s/add?recursive=true", proxyURL(proxy)), "", nil)
if err != nil {
t.Fatal(err)
}
res.Body.Close()
if res.StatusCode != http.StatusInternalServerError {
t.Errorf("wrong status code: got = %d, want = %d", res.StatusCode, http.StatusInternalServerError)
}
}
func TestProxyError(t *testing.T) {
ctx := context.Background()
proxy, mock := testIPFSProxy(t)
defer mock.Close()
defer proxy.Shutdown(ctx)
res, err := http.Post(fmt.Sprintf("%s/bad/command", proxyURL(proxy)), "", nil)
if err != nil {
t.Fatal("should have succeeded: ", err)
}
defer res.Body.Close()
if res.StatusCode != 404 {
t.Error("should have respected the status code")
}
}
func proxyURL(c *Server) string {
addr := c.listeners[0].Addr()
return fmt.Sprintf("http://%s/api/v0", addr.String())
}
func TestIPFSProxy(t *testing.T) {
ctx := context.Background()
proxy, mock := testIPFSProxy(t)
defer mock.Close()
if err := proxy.Shutdown(ctx); err != nil {
t.Error("expected a clean shutdown")
}
if err := proxy.Shutdown(ctx); err != nil {
t.Error("expected a second clean shutdown")
}
}
func TestHeaderExtraction(t *testing.T) {
ctx := context.Background()
proxy, mock := testIPFSProxy(t)
proxy.config.ExtractHeadersTTL = time.Second
defer mock.Close()
defer proxy.Shutdown(ctx)
req, err := http.NewRequest("POST", fmt.Sprintf("%s/pin/ls", proxyURL(proxy)), nil)
if err != nil {
t.Fatal(err)
}
req.Header.Set("Origin", test.IpfsACAOrigin)
res, err := http.DefaultClient.Do(req)
if err != nil {
t.Fatal("should forward requests to ipfs host: ", err)
}
res.Body.Close()
for k, v := range res.Header {
t.Logf("%s: %s", k, v)
}
if h := res.Header.Get("Access-Control-Allow-Origin"); h != test.IpfsACAOrigin {
t.Error("We did not find out the AC-Allow-Origin header: ", h)
}
for _, h := range corsHeaders {
if v := res.Header.Get(h); v == "" {
t.Error("We did not set CORS header: ", h)
}
}
if res.Header.Get(test.IpfsCustomHeaderName) != test.IpfsCustomHeaderValue {
t.Error("the proxy should have extracted custom headers from ipfs")
}
if !strings.HasPrefix(res.Header.Get("Server"), "ipfs-cluster") {
t.Error("wrong value for Server header")
}
// Test ExtractHeadersTTL
t1 := res.Header.Get(test.IpfsTimeHeaderName)
res, err = http.DefaultClient.Do(req)
if err != nil {
t.Fatal("should forward requests to ipfs host: ", err)
}
res.Body.Close()
t2 := res.Header.Get(test.IpfsTimeHeaderName)
if t1 != t2 {
t.Error("should have cached the headers during TTL")
}
time.Sleep(1200 * time.Millisecond)
res, err = http.DefaultClient.Do(req)
if err != nil {
t.Fatal("should forward requests to ipfs host: ", err)
}
res.Body.Close()
t3 := res.Header.Get(test.IpfsTimeHeaderName)
if t3 == t2 {
t.Error("should have refreshed the headers after TTL")
}
}
func TestAttackHeaderSize(t *testing.T) {
const testHeaderSize = minMaxHeaderBytes * 4
ctx := context.Background()
cfg := &Config{}
cfg.Default()
cfg.MaxHeaderBytes = testHeaderSize
proxy, mock := testIPFSProxyWithConfig(t, cfg)
defer mock.Close()
defer proxy.Shutdown(ctx)
type testcase struct {
headerSize int
expectedStatus int
}
testcases := []testcase{
{testHeaderSize / 2, http.StatusNotFound},
{testHeaderSize * 2, http.StatusRequestHeaderFieldsTooLarge},
}
req, err := http.NewRequest("POST", fmt.Sprintf("%s/foo", proxyURL(proxy)), nil)
if err != nil {
t.Fatal(err)
}
for _, tc := range testcases {
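// Each added "Foo: bar" header is roughly 8 bytes on the wire, so
// this loop grows the request headers to about tc.headerSize bytes.
// Note that req is reused, so headers accumulate across test cases.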
for size := 0; size < tc.headerSize; size += 8 {
req.Header.Add("Foo", "bar")
}
res, err := http.DefaultClient.Do(req)
if err != nil {
t.Fatal("should forward requests to ipfs host: ", err)
}
res.Body.Close()
if res.StatusCode != tc.expectedStatus {
t.Errorf("proxy returned unexpected status %d, expected status code was %d",
res.StatusCode, tc.expectedStatus)
}
}
}
func TestProxyLogging(t *testing.T) {
ctx := context.Background()
cfg := &Config{}
cfg.Default()
logFile, err := filepath.Abs("proxy.log")
if err != nil {
t.Fatal(err)
}
cfg.LogFile = logFile
proxy, mock := testIPFSProxyWithConfig(t, cfg)
defer os.Remove(cfg.LogFile)
info, err := os.Stat(cfg.LogFile)
if err != nil {
t.Fatal(err)
}
if info.Size() > 0 {
t.Errorf("expected empty log file")
}
res, err := http.Post(fmt.Sprintf("%s/version", proxyURL(proxy)), "", nil)
if err != nil {
t.Fatal("should forward requests to ipfs host: ", err)
}
res.Body.Close()
info, err = os.Stat(cfg.LogFile)
if err != nil {
t.Fatal(err)
}
size1 := info.Size()
if size1 == 0 {
t.Error("did not expect an empty log file")
}
// Restart proxy and make sure that logs are being appended
mock.Close()
proxy.Shutdown(ctx)
proxy, mock = testIPFSProxyWithConfig(t, cfg)
defer mock.Close()
defer proxy.Shutdown(ctx)
res1, err := http.Post(fmt.Sprintf("%s/version", proxyURL(proxy)), "", nil)
if err != nil {
t.Fatal("should forward requests to ipfs host: ", err)
}
res1.Body.Close()
info, err = os.Stat(cfg.LogFile)
if err != nil {
t.Fatal(err)
}
size2 := info.Size()
if size2 == 0 {
t.Error("did not expect an empty log file")
}
if size2 <= size1 {
t.Error("logs were not appended")
}
}

View file

@ -1,19 +0,0 @@
package ipfsproxy
import (
"strings"
)
// multiError accumulates the messages of multiple errors and implements the error interface.
type multiError struct {
err strings.Builder
}
func (e *multiError) add(err string) {
e.err.WriteString(err)
e.err.WriteString("; ")
}
func (e *multiError) Error() string {
return e.err.String()
}

View file

@ -1,3 +0,0 @@
// Package pb provides protobuf definitions for serialized types in Cluster.
//go:generate protoc -I=. --go_out=. types.proto
package pb

View file

@ -1,495 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.27.1
// protoc v3.19.2
// source: types.proto
package pb
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type Pin_PinType int32
const (
Pin_BadType Pin_PinType = 0 // 1 << iota
Pin_DataType Pin_PinType = 1 // 2 << iota
Pin_MetaType Pin_PinType = 2
Pin_ClusterDAGType Pin_PinType = 3
Pin_ShardType Pin_PinType = 4
)
// Enum value maps for Pin_PinType.
var (
Pin_PinType_name = map[int32]string{
0: "BadType",
1: "DataType",
2: "MetaType",
3: "ClusterDAGType",
4: "ShardType",
}
Pin_PinType_value = map[string]int32{
"BadType": 0,
"DataType": 1,
"MetaType": 2,
"ClusterDAGType": 3,
"ShardType": 4,
}
)
func (x Pin_PinType) Enum() *Pin_PinType {
p := new(Pin_PinType)
*p = x
return p
}
func (x Pin_PinType) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (Pin_PinType) Descriptor() protoreflect.EnumDescriptor {
return file_types_proto_enumTypes[0].Descriptor()
}
func (Pin_PinType) Type() protoreflect.EnumType {
return &file_types_proto_enumTypes[0]
}
func (x Pin_PinType) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use Pin_PinType.Descriptor instead.
func (Pin_PinType) EnumDescriptor() ([]byte, []int) {
return file_types_proto_rawDescGZIP(), []int{0, 0}
}
type Pin struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Cid []byte `protobuf:"bytes,1,opt,name=Cid,proto3" json:"Cid,omitempty"`
Type Pin_PinType `protobuf:"varint,2,opt,name=Type,proto3,enum=api.pb.Pin_PinType" json:"Type,omitempty"`
Allocations [][]byte `protobuf:"bytes,3,rep,name=Allocations,proto3" json:"Allocations,omitempty"`
MaxDepth int32 `protobuf:"zigzag32,4,opt,name=MaxDepth,proto3" json:"MaxDepth,omitempty"`
Reference []byte `protobuf:"bytes,5,opt,name=Reference,proto3" json:"Reference,omitempty"`
Options *PinOptions `protobuf:"bytes,6,opt,name=Options,proto3" json:"Options,omitempty"`
Timestamp uint64 `protobuf:"varint,7,opt,name=Timestamp,proto3" json:"Timestamp,omitempty"`
}
func (x *Pin) Reset() {
*x = Pin{}
if protoimpl.UnsafeEnabled {
mi := &file_types_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Pin) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Pin) ProtoMessage() {}
func (x *Pin) ProtoReflect() protoreflect.Message {
mi := &file_types_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Pin.ProtoReflect.Descriptor instead.
func (*Pin) Descriptor() ([]byte, []int) {
return file_types_proto_rawDescGZIP(), []int{0}
}
func (x *Pin) GetCid() []byte {
if x != nil {
return x.Cid
}
return nil
}
func (x *Pin) GetType() Pin_PinType {
if x != nil {
return x.Type
}
return Pin_BadType
}
func (x *Pin) GetAllocations() [][]byte {
if x != nil {
return x.Allocations
}
return nil
}
func (x *Pin) GetMaxDepth() int32 {
if x != nil {
return x.MaxDepth
}
return 0
}
func (x *Pin) GetReference() []byte {
if x != nil {
return x.Reference
}
return nil
}
func (x *Pin) GetOptions() *PinOptions {
if x != nil {
return x.Options
}
return nil
}
func (x *Pin) GetTimestamp() uint64 {
if x != nil {
return x.Timestamp
}
return 0
}
type PinOptions struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
ReplicationFactorMin int32 `protobuf:"zigzag32,1,opt,name=ReplicationFactorMin,proto3" json:"ReplicationFactorMin,omitempty"`
ReplicationFactorMax int32 `protobuf:"zigzag32,2,opt,name=ReplicationFactorMax,proto3" json:"ReplicationFactorMax,omitempty"`
Name string `protobuf:"bytes,3,opt,name=Name,proto3" json:"Name,omitempty"`
ShardSize uint64 `protobuf:"varint,4,opt,name=ShardSize,proto3" json:"ShardSize,omitempty"`
// Deprecated: Do not use.
Metadata map[string]string `protobuf:"bytes,6,rep,name=Metadata,proto3" json:"Metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
PinUpdate []byte `protobuf:"bytes,7,opt,name=PinUpdate,proto3" json:"PinUpdate,omitempty"`
ExpireAt uint64 `protobuf:"varint,8,opt,name=ExpireAt,proto3" json:"ExpireAt,omitempty"`
Origins [][]byte `protobuf:"bytes,9,rep,name=Origins,proto3" json:"Origins,omitempty"`
SortedMetadata []*Metadata `protobuf:"bytes,10,rep,name=SortedMetadata,proto3" json:"SortedMetadata,omitempty"`
}
func (x *PinOptions) Reset() {
*x = PinOptions{}
if protoimpl.UnsafeEnabled {
mi := &file_types_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *PinOptions) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PinOptions) ProtoMessage() {}
func (x *PinOptions) ProtoReflect() protoreflect.Message {
mi := &file_types_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PinOptions.ProtoReflect.Descriptor instead.
func (*PinOptions) Descriptor() ([]byte, []int) {
return file_types_proto_rawDescGZIP(), []int{1}
}
func (x *PinOptions) GetReplicationFactorMin() int32 {
if x != nil {
return x.ReplicationFactorMin
}
return 0
}
func (x *PinOptions) GetReplicationFactorMax() int32 {
if x != nil {
return x.ReplicationFactorMax
}
return 0
}
func (x *PinOptions) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *PinOptions) GetShardSize() uint64 {
if x != nil {
return x.ShardSize
}
return 0
}
// Deprecated: Do not use.
func (x *PinOptions) GetMetadata() map[string]string {
if x != nil {
return x.Metadata
}
return nil
}
func (x *PinOptions) GetPinUpdate() []byte {
if x != nil {
return x.PinUpdate
}
return nil
}
func (x *PinOptions) GetExpireAt() uint64 {
if x != nil {
return x.ExpireAt
}
return 0
}
func (x *PinOptions) GetOrigins() [][]byte {
if x != nil {
return x.Origins
}
return nil
}
func (x *PinOptions) GetSortedMetadata() []*Metadata {
if x != nil {
return x.SortedMetadata
}
return nil
}
type Metadata struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Key string `protobuf:"bytes,1,opt,name=Key,proto3" json:"Key,omitempty"`
Value string `protobuf:"bytes,2,opt,name=Value,proto3" json:"Value,omitempty"`
}
func (x *Metadata) Reset() {
*x = Metadata{}
if protoimpl.UnsafeEnabled {
mi := &file_types_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Metadata) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Metadata) ProtoMessage() {}
func (x *Metadata) ProtoReflect() protoreflect.Message {
mi := &file_types_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Metadata.ProtoReflect.Descriptor instead.
func (*Metadata) Descriptor() ([]byte, []int) {
return file_types_proto_rawDescGZIP(), []int{2}
}
func (x *Metadata) GetKey() string {
if x != nil {
return x.Key
}
return ""
}
func (x *Metadata) GetValue() string {
if x != nil {
return x.Value
}
return ""
}
var File_types_proto protoreflect.FileDescriptor
var file_types_proto_rawDesc = []byte{
0x0a, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x61,
0x70, 0x69, 0x2e, 0x70, 0x62, 0x22, 0xbf, 0x02, 0x0a, 0x03, 0x50, 0x69, 0x6e, 0x12, 0x10, 0x0a,
0x03, 0x43, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x43, 0x69, 0x64, 0x12,
0x27, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e,
0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x69, 0x6e, 0x2e, 0x50, 0x69, 0x6e, 0x54, 0x79,
0x70, 0x65, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x41, 0x6c, 0x6c, 0x6f,
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0b, 0x41,
0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x4d, 0x61,
0x78, 0x44, 0x65, 0x70, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x11, 0x52, 0x08, 0x4d, 0x61,
0x78, 0x44, 0x65, 0x70, 0x74, 0x68, 0x12, 0x1c, 0x0a, 0x09, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65,
0x6e, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x52, 0x65, 0x66, 0x65, 0x72,
0x65, 0x6e, 0x63, 0x65, 0x12, 0x2c, 0x0a, 0x07, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x50,
0x69, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x4f, 0x70, 0x74, 0x69, 0x6f,
0x6e, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18,
0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
0x22, 0x55, 0x0a, 0x07, 0x50, 0x69, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x42,
0x61, 0x64, 0x54, 0x79, 0x70, 0x65, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61,
0x54, 0x79, 0x70, 0x65, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x54, 0x79,
0x70, 0x65, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x44,
0x41, 0x47, 0x54, 0x79, 0x70, 0x65, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x68, 0x61, 0x72,
0x64, 0x54, 0x79, 0x70, 0x65, 0x10, 0x04, 0x22, 0xb9, 0x03, 0x0a, 0x0a, 0x50, 0x69, 0x6e, 0x4f,
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x69, 0x6e, 0x18, 0x01,
0x20, 0x01, 0x28, 0x11, 0x52, 0x14, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x69, 0x6e, 0x12, 0x32, 0x0a, 0x14, 0x52, 0x65,
0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x4d,
0x61, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x11, 0x52, 0x14, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x61, 0x78, 0x12, 0x12,
0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61,
0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x18,
0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x69, 0x7a, 0x65,
0x12, 0x40, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x03,
0x28, 0x0b, 0x32, 0x20, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x69, 0x6e, 0x4f,
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45,
0x6e, 0x74, 0x72, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
0x74, 0x61, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x69, 0x6e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18,
0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x50, 0x69, 0x6e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
0x12, 0x1a, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x74, 0x18, 0x08, 0x20, 0x01,
0x28, 0x04, 0x52, 0x08, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x74, 0x12, 0x18, 0x0a, 0x07,
0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x4f,
0x72, 0x69, 0x67, 0x69, 0x6e, 0x73, 0x12, 0x38, 0x0a, 0x0e, 0x53, 0x6f, 0x72, 0x74, 0x65, 0x64,
0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10,
0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
0x52, 0x0e, 0x53, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72,
0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x4a, 0x04, 0x08,
0x05, 0x10, 0x06, 0x22, 0x32, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12,
0x10, 0x0a, 0x03, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x4b, 0x65,
0x79, 0x12, 0x14, 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
0x52, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x06, 0x5a, 0x04, 0x2e, 0x3b, 0x70, 0x62, 0x62,
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_types_proto_rawDescOnce sync.Once
file_types_proto_rawDescData = file_types_proto_rawDesc
)
func file_types_proto_rawDescGZIP() []byte {
file_types_proto_rawDescOnce.Do(func() {
file_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_types_proto_rawDescData)
})
return file_types_proto_rawDescData
}
var file_types_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_types_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_types_proto_goTypes = []interface{}{
(Pin_PinType)(0), // 0: api.pb.Pin.PinType
(*Pin)(nil), // 1: api.pb.Pin
(*PinOptions)(nil), // 2: api.pb.PinOptions
(*Metadata)(nil), // 3: api.pb.Metadata
nil, // 4: api.pb.PinOptions.MetadataEntry
}
var file_types_proto_depIdxs = []int32{
0, // 0: api.pb.Pin.Type:type_name -> api.pb.Pin.PinType
2, // 1: api.pb.Pin.Options:type_name -> api.pb.PinOptions
4, // 2: api.pb.PinOptions.Metadata:type_name -> api.pb.PinOptions.MetadataEntry
3, // 3: api.pb.PinOptions.SortedMetadata:type_name -> api.pb.Metadata
4, // [4:4] is the sub-list for method output_type
4, // [4:4] is the sub-list for method input_type
4, // [4:4] is the sub-list for extension type_name
4, // [4:4] is the sub-list for extension extendee
0, // [0:4] is the sub-list for field type_name
}
func init() { file_types_proto_init() }
func file_types_proto_init() {
if File_types_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Pin); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PinOptions); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Metadata); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_types_proto_rawDesc,
NumEnums: 1,
NumMessages: 4,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_types_proto_goTypes,
DependencyIndexes: file_types_proto_depIdxs,
EnumInfos: file_types_proto_enumTypes,
MessageInfos: file_types_proto_msgTypes,
}.Build()
File_types_proto = out.File
file_types_proto_rawDesc = nil
file_types_proto_goTypes = nil
file_types_proto_depIdxs = nil
}

View file

@ -1,40 +0,0 @@
syntax = "proto3";
package api.pb;
option go_package=".;pb";
message Pin {
enum PinType {
BadType = 0; // 1 << iota
DataType = 1; // 2 << iota
MetaType = 2;
ClusterDAGType = 3;
ShardType = 4;
}
bytes Cid = 1;
PinType Type = 2;
repeated bytes Allocations = 3;
sint32 MaxDepth = 4;
bytes Reference = 5;
PinOptions Options = 6;
uint64 Timestamp = 7;
}
message PinOptions {
sint32 ReplicationFactorMin = 1;
sint32 ReplicationFactorMax = 2;
string Name = 3;
uint64 ShardSize = 4;
reserved 5; // reserved for UserAllocations
map<string, string> Metadata = 6 [deprecated = true];
bytes PinUpdate = 7;
uint64 ExpireAt = 8;
repeated bytes Origins = 9;
repeated Metadata SortedMetadata = 10;
}
message Metadata {
string Key = 1;
string Value = 2;
}

View file

@ -1,131 +0,0 @@
package pinsvcapi
import (
"net/http"
"time"
ma "github.com/multiformats/go-multiaddr"
"github.com/ipfs-cluster/ipfs-cluster/api/common"
"github.com/ipfs-cluster/ipfs-cluster/api/pinsvcapi/pinsvc"
)
const configKey = "pinsvcapi"
const envConfigKey = "cluster_pinsvcapi"
const minMaxHeaderBytes = 4096
// Default values for Config.
const (
DefaultReadTimeout = 0
DefaultReadHeaderTimeout = 5 * time.Second
DefaultWriteTimeout = 0
DefaultIdleTimeout = 120 * time.Second
DefaultMaxHeaderBytes = minMaxHeaderBytes
)
// Default values for Config.
var (
// DefaultHTTPListenAddrs contains default listen addresses for the HTTP API.
DefaultHTTPListenAddrs = []string{"/ip4/127.0.0.1/tcp/9097"}
DefaultHeaders = map[string][]string{}
)
// CORS defaults.
var (
DefaultCORSAllowedOrigins = []string{"*"}
DefaultCORSAllowedMethods = []string{
http.MethodGet,
}
// rs/cors will set sensible defaults when empty:
// {"Origin", "Accept", "Content-Type", "X-Requested-With"}
DefaultCORSAllowedHeaders = []string{}
DefaultCORSExposedHeaders = []string{
"Content-Type",
"X-Stream-Output",
"X-Chunked-Output",
"X-Content-Length",
}
DefaultCORSAllowCredentials = true
DefaultCORSMaxAge time.Duration // 0. Means always.
)
// Config fully implements the config.ComponentConfig interface. Use
// NewConfig() to instantiate. Config embeds a common.Config object.
type Config struct {
common.Config
}
// NewConfig creates a Config object setting the necessary meta-fields in the
// common.Config embedded object.
func NewConfig() *Config {
cfg := Config{}
cfg.Config.ConfigKey = configKey
cfg.EnvConfigKey = envConfigKey
cfg.Logger = logger
cfg.RequestLogger = apiLogger
cfg.DefaultFunc = defaultFunc
cfg.APIErrorFunc = func(err error, status int) error {
return pinsvc.APIError{
Details: pinsvc.APIErrorDetails{
Reason: err.Error(),
},
}
}
return &cfg
}
// ConfigKey returns a human-friendly identifier for this type of
// Config.
func (cfg *Config) ConfigKey() string {
return configKey
}
// Default initializes this Config with working values.
func (cfg *Config) Default() error {
return defaultFunc(&cfg.Config)
}
// defaultFunc sets all defaults for this config.
func defaultFunc(cfg *common.Config) error {
// http
addrs := make([]ma.Multiaddr, 0, len(DefaultHTTPListenAddrs))
for _, def := range DefaultHTTPListenAddrs {
httpListen, err := ma.NewMultiaddr(def)
if err != nil {
return err
}
addrs = append(addrs, httpListen)
}
cfg.HTTPListenAddr = addrs
cfg.PathSSLCertFile = ""
cfg.PathSSLKeyFile = ""
cfg.ReadTimeout = DefaultReadTimeout
cfg.ReadHeaderTimeout = DefaultReadHeaderTimeout
cfg.WriteTimeout = DefaultWriteTimeout
cfg.IdleTimeout = DefaultIdleTimeout
cfg.MaxHeaderBytes = DefaultMaxHeaderBytes
// libp2p
cfg.ID = ""
cfg.PrivateKey = nil
cfg.Libp2pListenAddr = nil
// Auth
cfg.BasicAuthCredentials = nil
// Logs
cfg.HTTPLogFile = ""
// Headers
cfg.Headers = DefaultHeaders
cfg.CORSAllowedOrigins = DefaultCORSAllowedOrigins
cfg.CORSAllowedMethods = DefaultCORSAllowedMethods
cfg.CORSAllowedHeaders = DefaultCORSAllowedHeaders
cfg.CORSExposedHeaders = DefaultCORSExposedHeaders
cfg.CORSAllowCredentials = DefaultCORSAllowCredentials
cfg.CORSMaxAge = DefaultCORSMaxAge
return nil
}

View file

@ -1,313 +0,0 @@
// Package pinsvc contains type definitions for the Pinning Services API
package pinsvc
import (
"encoding/json"
"errors"
"fmt"
"net/url"
"strconv"
"strings"
"time"
types "github.com/ipfs-cluster/ipfs-cluster/api"
)
func init() {
// initialize stringStatus, the reverse mapping of statusString
stringStatus = make(map[string]Status)
for k, v := range statusString {
stringStatus[v] = k
}
}
// APIError is returned by the API as a body when an error
// occurs. It implements the error interface.
type APIError struct {
Details APIErrorDetails `json:"error"`
}
// APIErrorDetails contains details about the APIError.
type APIErrorDetails struct {
Reason string `json:"reason"`
Details string `json:"details,omitempty"`
}
func (apiErr APIError) Error() string {
return apiErr.Details.Reason
}
// PinName is a string limited to 255 chars when deserialized from JSON.
type PinName string
// MarshalJSON converts the string to JSON.
func (pname PinName) MarshalJSON() ([]byte, error) {
return json.Marshal(string(pname))
}
// UnmarshalJSON reads the JSON string and errors if it is over 255 chars.
func (pname *PinName) UnmarshalJSON(data []byte) error {
if len(data) > 257 { // "a_string" 255 + 2 for quotes
return errors.New("pin name is over 255 chars")
}
var v string
err := json.Unmarshal(data, &v)
*pname = PinName(v)
return err
}
// Pin contains basic information about a Pin and pinning options.
type Pin struct {
Cid types.Cid `json:"cid"`
Name PinName `json:"name,omitempty"`
Origins []types.Multiaddr `json:"origins,omitempty"`
Meta map[string]string `json:"meta,omitempty"`
}
// Defined returns true if the pin's Cid is set.
func (p Pin) Defined() bool {
return p.Cid.Defined()
}
// MatchesName returns true if a pin's name matches the given name option
// under the given matching strategy.
func (p Pin) MatchesName(nameOpt string, strategy MatchingStrategy) bool {
if nameOpt == "" {
return true
}
name := string(p.Name)
switch strategy {
case MatchingStrategyUndefined:
return true
case MatchingStrategyExact:
return nameOpt == name
case MatchingStrategyIexact:
return strings.EqualFold(name, nameOpt)
case MatchingStrategyPartial:
return strings.Contains(name, nameOpt)
case MatchingStrategyIpartial:
return strings.Contains(strings.ToLower(name), strings.ToLower(nameOpt))
default:
return true
}
}
// MatchesMeta returns true if the pin metadata matches the given metaOpts:
// the metadata must contain every key in metaOpts with the same value
// (i.e. the metadata map includes metaOpts).
func (p Pin) MatchesMeta(metaOpts map[string]string) bool {
for k, v := range metaOpts {
if p.Meta[k] != v {
return false
}
}
return true
}
// Status represents a pin status, which defines the current state of the pin
// in the system.
type Status int
// Values for the Status type.
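// Statuses other than StatusUndefined are single bit flags (iota is 1
// at StatusQueued, so the values are 2, 4, 8 and 16), which lets them
// be OR-ed together into composite filters.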
const (
StatusUndefined Status = 0
StatusQueued = 1 << iota
StatusPinned
StatusPinning
StatusFailed
)
var statusString = map[Status]string{
StatusUndefined: "undefined",
StatusQueued: "queued",
StatusPinned: "pinned",
StatusPinning: "pinning",
StatusFailed: "failed",
}
// values autofilled in init()
var stringStatus map[string]Status
// String converts a Status into a readable string.
// If the given Status is a filter (with several
// bits set), it will return a comma-separated list.
func (st Status) String() string {
var values []string
// simple and known composite values
if v, ok := statusString[st]; ok {
return v
}
// other filters
for k, v := range statusString {
if st&k > 0 {
values = append(values, v)
}
}
return strings.Join(values, ",")
}
// Match returns true if the tracker status matches the given filter.
func (st Status) Match(filter Status) bool {
return filter == StatusUndefined ||
st == StatusUndefined ||
st&filter > 0
}
// MarshalJSON uses the string representation of Status for JSON
// encoding.
func (st Status) MarshalJSON() ([]byte, error) {
return json.Marshal(st.String())
}
// UnmarshalJSON sets a tracker status from its JSON representation.
func (st *Status) UnmarshalJSON(data []byte) error {
var v string
err := json.Unmarshal(data, &v)
if err != nil {
return err
}
*st = StatusFromString(v)
return nil
}
// StatusFromString parses a string and returns the matching
// Status value. The string can be a comma-separated list
// representing a Status filter. Unknown status names are
// ignored.
func StatusFromString(str string) Status {
values := strings.Split(strings.Replace(str, " ", "", -1), ",")
status := StatusUndefined
for _, v := range values {
st, ok := stringStatus[v]
if ok {
status |= st
}
}
return status
}
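// For example, StatusFromString("pinned,failed") produces a filter
// equal to StatusPinned|StatusFailed, which Match() reports as true
// for either of those statuses.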
// MatchingStrategy defines a type of match for filtering pin lists.
type MatchingStrategy int
// Values for MatchingStrategy.
const (
MatchingStrategyUndefined MatchingStrategy = iota
MatchingStrategyExact
MatchingStrategyIexact
MatchingStrategyPartial
MatchingStrategyIpartial
)
// MatchingStrategyFromString converts a string to its MatchingStrategy value.
func MatchingStrategyFromString(str string) MatchingStrategy {
switch str {
case "exact":
return MatchingStrategyExact
case "iexact":
return MatchingStrategyIexact
case "partial":
return MatchingStrategyPartial
case "ipartial":
return MatchingStrategyIpartial
default:
return MatchingStrategyUndefined
}
}
// PinStatus provides information about a Pin stored by the Pinning API.
type PinStatus struct {
RequestID string `json:"requestid"`
Status Status `json:"status"`
Created time.Time `json:"created"`
Pin Pin `json:"pin"`
Delegates []types.Multiaddr `json:"delegates"`
Info map[string]string `json:"info,omitempty"`
}
// PinList is the result of a call to List pins
type PinList struct {
Count uint64 `json:"count"`
Results []PinStatus `json:"results"`
}
// ListOptions represents possible options given to the List endpoint.
type ListOptions struct {
Cids []types.Cid
Name string
MatchingStrategy MatchingStrategy
Status Status
Before time.Time
After time.Time
Limit uint64
Meta map[string]string
}
// FromQuery parses ListOptions from url.Values.
func (lo *ListOptions) FromQuery(q url.Values) error {
cidq := q.Get("cid")
if len(cidq) > 0 {
for _, cstr := range strings.Split(cidq, ",") {
c, err := types.DecodeCid(cstr)
if err != nil {
return fmt.Errorf("error decoding cid %s: %w", cstr, err)
}
lo.Cids = append(lo.Cids, c)
}
}
n := q.Get("name")
if len(n) > 255 {
return fmt.Errorf("error in 'name' query param: longer than 255 chars")
}
lo.Name = n
lo.MatchingStrategy = MatchingStrategyFromString(q.Get("match"))
if lo.MatchingStrategy == MatchingStrategyUndefined {
lo.MatchingStrategy = MatchingStrategyExact // default
}
statusStr := q.Get("status")
lo.Status = StatusFromString(statusStr)
// FIXME: This is a bit lazy, as "invalidxx,pinned" would result in a
// valid "pinned" filter.
if statusStr != "" && lo.Status == StatusUndefined {
return fmt.Errorf("error decoding 'status' query param: no valid filter")
}
if bef := q.Get("before"); bef != "" {
err := lo.Before.UnmarshalText([]byte(bef))
if err != nil {
return fmt.Errorf("error decoding 'before' query param: %s: %w", bef, err)
}
}
if after := q.Get("after"); after != "" {
err := lo.After.UnmarshalText([]byte(after))
if err != nil {
return fmt.Errorf("error decoding 'after' query param: %s: %w", after, err)
}
}
if v := q.Get("limit"); v != "" {
lim, err := strconv.ParseUint(v, 10, 64)
if err != nil {
return fmt.Errorf("error parsing 'limit' query param: %s: %w", v, err)
}
lo.Limit = lim
} else {
lo.Limit = 10 // implicit default
}
if meta := q.Get("meta"); meta != "" {
err := json.Unmarshal([]byte(meta), &lo.Meta)
if err != nil {
return fmt.Errorf("error unmarshalling 'meta' query param: %s: %w", meta, err)
}
}
return nil
}

View file

@ -1,477 +0,0 @@
// Package pinsvcapi implements an IPFS Cluster API component which provides
// an IPFS Pinning Services API to the cluster.
//
// The implemented API is based on the common.API component (refer to the
// module description there). The only thing this module does is provide
// route handling for the otherwise common API component.
package pinsvcapi
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"sync"
"github.com/gorilla/mux"
types "github.com/ipfs-cluster/ipfs-cluster/api"
"github.com/ipfs-cluster/ipfs-cluster/api/common"
"github.com/ipfs-cluster/ipfs-cluster/api/pinsvcapi/pinsvc"
"github.com/ipfs-cluster/ipfs-cluster/state"
"go.uber.org/multierr"
logging "github.com/ipfs/go-log/v2"
"github.com/libp2p/go-libp2p/core/host"
peer "github.com/libp2p/go-libp2p/core/peer"
rpc "github.com/libp2p/go-libp2p-gorpc"
)
var (
logger = logging.Logger("pinsvcapi")
apiLogger = logging.Logger("pinsvcapilog")
)
var apiInfo map[string]string = map[string]string{
"source": "IPFS cluster API",
"warning1": "CID used for requestID. Conflicts possible",
"warning2": "experimental",
}
func trackerStatusToSvcStatus(st types.TrackerStatus) pinsvc.Status {
switch {
case st.Match(types.TrackerStatusError):
return pinsvc.StatusFailed
case st.Match(types.TrackerStatusPinQueued):
return pinsvc.StatusQueued
case st.Match(types.TrackerStatusPinning):
return pinsvc.StatusPinning
case st.Match(types.TrackerStatusPinned):
return pinsvc.StatusPinned
default:
return pinsvc.StatusUndefined
}
}
func svcStatusToTrackerStatus(st pinsvc.Status) types.TrackerStatus {
var tst types.TrackerStatus
if st.Match(pinsvc.StatusFailed) {
tst |= types.TrackerStatusError
}
if st.Match(pinsvc.StatusQueued) {
tst |= types.TrackerStatusPinQueued
}
if st.Match(pinsvc.StatusPinned) {
tst |= types.TrackerStatusPinned
}
if st.Match(pinsvc.StatusPinning) {
tst |= types.TrackerStatusPinning
}
return tst
}
func svcPinToClusterPin(p pinsvc.Pin) (types.Pin, error) {
opts := types.PinOptions{
Name: string(p.Name),
Origins: p.Origins,
Metadata: p.Meta,
Mode: types.PinModeRecursive,
}
return types.PinWithOpts(p.Cid, opts), nil
}
func globalPinInfoToSvcPinStatus(
rID string,
gpi types.GlobalPinInfo,
) pinsvc.PinStatus {
status := pinsvc.PinStatus{
RequestID: rID,
}
var statusMask types.TrackerStatus
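// OR together the tracker statuses reported by every peer and map the
// combined mask to a single pinning-service status.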
for _, pinfo := range gpi.PeerMap {
statusMask |= pinfo.Status
}
status.Status = trackerStatusToSvcStatus(statusMask)
status.Created = gpi.Created
status.Pin = pinsvc.Pin{
Cid: gpi.Cid,
Name: pinsvc.PinName(gpi.Name),
Origins: gpi.Origins,
Meta: gpi.Metadata,
}
status.Info = apiInfo
status.Delegates = []types.Multiaddr{}
for _, pi := range gpi.PeerMap {
status.Delegates = append(status.Delegates, pi.IPFSAddresses...)
}
return status
}
// API implements the REST API Component.
// It embeds a common.API.
type API struct {
*common.API
rpcClient *rpc.Client
config *Config
}
// NewAPI creates a new REST API component.
func NewAPI(ctx context.Context, cfg *Config) (*API, error) {
return NewAPIWithHost(ctx, cfg, nil)
}
// NewAPIWithHost creates a new REST API component using the given libp2p Host.
func NewAPIWithHost(ctx context.Context, cfg *Config, h host.Host) (*API, error) {
api := API{
config: cfg,
}
capi, err := common.NewAPIWithHost(ctx, &cfg.Config, h, api.routes)
api.API = capi
return &api, err
}
// Routes returns endpoints supported by this API.
func (api *API) routes(c *rpc.Client) []common.Route {
api.rpcClient = c
return []common.Route{
{
Name: "ListPins",
Method: "GET",
Pattern: "/pins",
HandlerFunc: api.listPins,
},
{
Name: "AddPin",
Method: "POST",
Pattern: "/pins",
HandlerFunc: api.addPin,
},
{
Name: "GetPin",
Method: "GET",
Pattern: "/pins/{requestID}",
HandlerFunc: api.getPin,
},
{
Name: "ReplacePin",
Method: "POST",
Pattern: "/pins/{requestID}",
HandlerFunc: api.addPin,
},
{
Name: "RemovePin",
Method: "DELETE",
Pattern: "/pins/{requestID}",
HandlerFunc: api.removePin,
},
{
Name: "GetToken",
Method: "POST",
Pattern: "/token",
HandlerFunc: api.GenerateTokenHandler,
},
}
}
func (api *API) parseBodyOrFail(w http.ResponseWriter, r *http.Request) pinsvc.Pin {
dec := json.NewDecoder(r.Body)
defer r.Body.Close()
var pin pinsvc.Pin
err := dec.Decode(&pin)
if err != nil {
api.SendResponse(w, http.StatusBadRequest, fmt.Errorf("error decoding request body: %w", err), nil)
return pinsvc.Pin{}
}
return pin
}
func (api *API) parseRequestIDOrFail(w http.ResponseWriter, r *http.Request) (types.Cid, bool) {
vars := mux.Vars(r)
cStr, ok := vars["requestID"]
if !ok {
return types.CidUndef, true
}
c, err := types.DecodeCid(cStr)
if err != nil {
api.SendResponse(w, http.StatusBadRequest, errors.New("error decoding requestID: "+err.Error()), nil)
return c, false
}
return c, true
}
func (api *API) addPin(w http.ResponseWriter, r *http.Request) {
if pin := api.parseBodyOrFail(w, r); pin.Defined() {
api.config.Logger.Debugf("addPin: %s", pin.Cid)
clusterPin, err := svcPinToClusterPin(pin)
if err != nil {
api.SendResponse(w, common.SetStatusAutomatically, err, nil)
return
}
if updateCid, ok := api.parseRequestIDOrFail(w, r); updateCid.Defined() && ok {
clusterPin.PinUpdate = updateCid
}
// Pin item
var pinObj types.Pin
err = api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"Pin",
clusterPin,
&pinObj,
)
if err != nil {
api.SendResponse(w, common.SetStatusAutomatically, err, nil)
return
}
// Unpin old item
if clusterPin.PinUpdate.Defined() {
var oldPin types.Pin
err = api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"Unpin",
types.PinCid(clusterPin.PinUpdate),
&oldPin,
)
if err != nil {
api.SendResponse(w, common.SetStatusAutomatically, err, nil)
return
}
}
status := api.pinToSvcPinStatus(r.Context(), pin.Cid.String(), pinObj)
api.SendResponse(w, common.SetStatusAutomatically, nil, status)
}
}
func (api *API) getPinSvcStatus(ctx context.Context, c types.Cid) (pinsvc.PinStatus, error) {
var pinInfo types.GlobalPinInfo
err := api.rpcClient.CallContext(
ctx,
"",
"Cluster",
"Status",
c,
&pinInfo,
)
if err != nil {
return pinsvc.PinStatus{}, err
}
return globalPinInfoToSvcPinStatus(c.String(), pinInfo), nil
}
func (api *API) getPin(w http.ResponseWriter, r *http.Request) {
c, ok := api.parseRequestIDOrFail(w, r)
if !ok {
return
}
api.config.Logger.Debugf("getPin: %s", c)
status, err := api.getPinSvcStatus(r.Context(), c)
if status.Status == pinsvc.StatusUndefined {
api.SendResponse(w, http.StatusNotFound, errors.New("pin not found"), nil)
return
}
api.SendResponse(w, common.SetStatusAutomatically, err, status)
}
func (api *API) removePin(w http.ResponseWriter, r *http.Request) {
c, ok := api.parseRequestIDOrFail(w, r)
if !ok {
return
}
api.config.Logger.Debugf("removePin: %s", c)
var pinObj types.Pin
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"Unpin",
types.PinCid(c),
&pinObj,
)
if err != nil && err.Error() == state.ErrNotFound.Error() {
api.SendResponse(w, http.StatusNotFound, err, nil)
return
}
api.SendResponse(w, http.StatusAccepted, err, nil)
}
func (api *API) listPins(w http.ResponseWriter, r *http.Request) {
opts := &pinsvc.ListOptions{}
err := opts.FromQuery(r.URL.Query())
if err != nil {
api.SendResponse(w, common.SetStatusAutomatically, err, nil)
return
}
tst := svcStatusToTrackerStatus(opts.Status)
var pinList pinsvc.PinList
pinList.Results = []pinsvc.PinStatus{}
count := uint64(0)
if len(opts.Cids) > 0 {
// copy approach from restapi
type statusResult struct {
st pinsvc.PinStatus
err error
}
stCh := make(chan statusResult, len(opts.Cids))
var wg sync.WaitGroup
wg.Add(len(opts.Cids))
go func() {
wg.Wait()
close(stCh)
}()
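// Fan out one Status request per CID; the goroutine above closes
// stCh once every request has reported back.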
for _, ci := range opts.Cids {
go func(c types.Cid) {
defer wg.Done()
st, err := api.getPinSvcStatus(r.Context(), c)
stCh <- statusResult{st: st, err: err}
}(ci)
}
var err error
for stResult := range stCh {
if stResult.st.Status == pinsvc.StatusUndefined && stResult.err == nil {
// ignore things unpinning
continue
}
if count < opts.Limit {
pinList.Results = append(pinList.Results, stResult.st)
err = multierr.Append(err, stResult.err)
}
count++
}
if err != nil {
api.SendResponse(w, common.SetStatusAutomatically, err, nil)
return
}
} else {
in := make(chan types.TrackerStatus, 1)
in <- tst
close(in)
out := make(chan types.GlobalPinInfo, common.StreamChannelSize)
errCh := make(chan error, 1)
go func() {
defer close(errCh)
errCh <- api.rpcClient.Stream(
r.Context(),
"",
"Cluster",
"StatusAll",
in,
out,
)
}()
for gpi := range out {
st := globalPinInfoToSvcPinStatus(gpi.Cid.String(), gpi)
if st.Status == pinsvc.StatusUndefined {
// i.e. things unpinning
continue
}
if !opts.After.IsZero() && st.Created.Before(opts.After) {
continue
}
if !opts.Before.IsZero() && st.Created.After(opts.Before) {
continue
}
if !st.Pin.MatchesName(opts.Name, opts.MatchingStrategy) {
continue
}
if !st.Pin.MatchesMeta(opts.Meta) {
continue
}
if count < opts.Limit {
pinList.Results = append(pinList.Results, st)
}
count++
}
err := <-errCh
if err != nil {
api.SendResponse(w, common.SetStatusAutomatically, err, nil)
return
}
}
pinList.Count = count
api.SendResponse(w, common.SetStatusAutomatically, err, pinList)
}
func (api *API) pinToSvcPinStatus(ctx context.Context, rID string, pin types.Pin) pinsvc.PinStatus {
status := pinsvc.PinStatus{
RequestID: rID,
Status: pinsvc.StatusQueued,
Created: pin.Timestamp,
Pin: pinsvc.Pin{
Cid: pin.Cid,
Name: pinsvc.PinName(pin.Name),
Origins: pin.Origins,
Meta: pin.Metadata,
},
Info: apiInfo,
}
var peers []peer.ID
if pin.IsPinEverywhere() { // all cluster peers
err := api.rpcClient.CallContext(
ctx,
"",
"Consensus",
"Peers",
struct{}{},
&peers,
)
if err != nil {
logger.Error(err)
}
} else { // Delegates should come from allocations
peers = pin.Allocations
}
status.Delegates = []types.Multiaddr{}
for _, peer := range peers {
var ipfsid types.IPFSID
err := api.rpcClient.CallContext(
ctx,
"", // call the local peer
"Cluster",
"IPFSID",
peer, // retrieve ipfs info for this peer
&ipfsid,
)
if err != nil {
logger.Error(err)
}
status.Delegates = append(status.Delegates, ipfsid.Addresses...)
}
return status
}

View file

@ -1,253 +0,0 @@
package pinsvcapi
import (
"context"
"encoding/json"
"strings"
"testing"
"time"
"github.com/ipfs-cluster/ipfs-cluster/api"
"github.com/ipfs-cluster/ipfs-cluster/api/common/test"
"github.com/ipfs-cluster/ipfs-cluster/api/pinsvcapi/pinsvc"
clustertest "github.com/ipfs-cluster/ipfs-cluster/test"
libp2p "github.com/libp2p/go-libp2p"
ma "github.com/multiformats/go-multiaddr"
)
func testAPIwithConfig(t *testing.T, cfg *Config, name string) *API {
ctx := context.Background()
apiMAddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0")
h, err := libp2p.New(libp2p.ListenAddrs(apiMAddr))
if err != nil {
t.Fatal(err)
}
cfg.HTTPListenAddr = []ma.Multiaddr{apiMAddr}
svcapi, err := NewAPIWithHost(ctx, cfg, h)
if err != nil {
t.Fatalf("should be able to create a new %s API: %s", name, err)
}
// No keep alive for tests
svcapi.SetKeepAlivesEnabled(false)
svcapi.SetClient(clustertest.NewMockRPCClient(t))
return svcapi
}
func testAPI(t *testing.T) *API {
cfg := NewConfig()
cfg.Default()
cfg.CORSAllowedOrigins = []string{"myorigin"}
cfg.CORSAllowedMethods = []string{"GET", "POST", "DELETE"}
//cfg.CORSAllowedHeaders = []string{"Content-Type"}
cfg.CORSMaxAge = 10 * time.Minute
return testAPIwithConfig(t, cfg, "basic")
}
func TestAPIListEndpoint(t *testing.T) {
ctx := context.Background()
svcapi := testAPI(t)
defer svcapi.Shutdown(ctx)
tf := func(t *testing.T, url test.URLFunc) {
var resp pinsvc.PinList
test.MakeGet(t, svcapi, url(svcapi)+"/pins", &resp)
// mockPinTracker returns 3 items for Cluster.StatusAll
if resp.Count != 3 {
t.Fatal("Count should be 3")
}
if len(resp.Results) != 3 {
t.Fatal("There should be 3 results")
}
results := resp.Results
if !results[0].Pin.Cid.Equals(clustertest.Cid1) ||
results[1].Status != pinsvc.StatusPinning {
t.Errorf("unexpected statusAll resp: %+v", results)
}
// Test status filters
var resp2 pinsvc.PinList
test.MakeGet(t, svcapi, url(svcapi)+"/pins?status=pinning", &resp2)
// mockPinTracker calls pintracker.StatusAll which returns 2
// items.
if resp2.Count != 1 {
t.Errorf("unexpected statusAll+status=pinning resp:\n %+v", resp2)
}
var resp3 pinsvc.PinList
test.MakeGet(t, svcapi, url(svcapi)+"/pins?status=queued", &resp3)
if resp3.Count != 0 {
t.Errorf("unexpected statusAll+status=queued resp:\n %+v", resp3)
}
var resp4 pinsvc.PinList
test.MakeGet(t, svcapi, url(svcapi)+"/pins?status=pinned", &resp4)
if resp4.Count != 1 {
t.Errorf("unexpected statusAll+status=queued resp:\n %+v", resp4)
}
var resp5 pinsvc.PinList
test.MakeGet(t, svcapi, url(svcapi)+"/pins?status=failed", &resp5)
if resp5.Count != 1 {
t.Errorf("unexpected statusAll+status=queued resp:\n %+v", resp5)
}
var resp6 pinsvc.PinList
test.MakeGet(t, svcapi, url(svcapi)+"/pins?status=failed,pinned", &resp6)
if resp6.Count != 2 {
t.Errorf("unexpected statusAll+status=failed,pinned resp:\n %+v", resp6)
}
// Test with cids
var resp7 pinsvc.PinList
test.MakeGet(t, svcapi, url(svcapi)+"/pins?cid=QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmq,QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmb", &resp7)
if resp7.Count != 2 {
t.Errorf("unexpected statusAll+cids resp:\n %+v", resp7)
}
// Test with cids+limit
var resp8 pinsvc.PinList
test.MakeGet(t, svcapi, url(svcapi)+"/pins?cid=QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmq,QmP63DkAFEnDYNjDYBpyNDfttu1fvUw99x1brscPzpqmmb&limit=1", &resp8)
if resp8.Count != 2 || len(resp8.Results) != 1 {
t.Errorf("unexpected statusAll+cids+limit resp:\n %+v", resp8)
}
// Test with limit
var resp9 pinsvc.PinList
test.MakeGet(t, svcapi, url(svcapi)+"/pins?limit=1", &resp9)
if resp9.Count != 3 || len(resp9.Results) != 1 {
t.Errorf("unexpected statusAll+limit=1 resp:\n %+v", resp9)
}
// Test with name-match
var resp10 pinsvc.PinList
test.MakeGet(t, svcapi, url(svcapi)+"/pins?name=C&match=ipartial", &resp10)
if resp10.Count != 1 {
t.Errorf("unexpected statusAll+name resp:\n %+v", resp10)
}
// Test with meta-match
var resp11 pinsvc.PinList
test.MakeGet(t, svcapi, url(svcapi)+`/pins?meta={"ccc":"3c"}`, &resp11)
if resp11.Count != 1 {
t.Errorf("unexpected statusAll+meta resp:\n %+v", resp11)
}
var errorResp pinsvc.APIError
test.MakeGet(t, svcapi, url(svcapi)+"/pins?status=invalid", &errorResp)
if errorResp.Details.Reason == "" {
t.Errorf("expected an error: %s", errorResp.Details.Reason)
}
}
test.BothEndpoints(t, tf)
}
func TestAPIPinEndpoint(t *testing.T) {
ctx := context.Background()
svcapi := testAPI(t)
defer svcapi.Shutdown(ctx)
ma, _ := api.NewMultiaddr("/ip4/1.2.3.4/ipfs/" + clustertest.PeerID1.String())
tf := func(t *testing.T, url test.URLFunc) {
// test normal pin
pin := pinsvc.Pin{
Cid: clustertest.Cid3,
Name: "testname",
Origins: []api.Multiaddr{
ma,
},
Meta: map[string]string{
"meta": "data",
},
}
var status pinsvc.PinStatus
pinJSON, err := json.Marshal(pin)
if err != nil {
t.Fatal(err)
}
test.MakePost(t, svcapi, url(svcapi)+"/pins", pinJSON, &status)
if status.Pin.Cid != pin.Cid {
t.Error("cids should match")
}
if status.Pin.Meta["meta"] != "data" {
t.Errorf("metadata should match: %+v", status.Pin)
}
if len(status.Pin.Origins) != 1 {
t.Errorf("expected origins: %+v", status.Pin)
}
if len(status.Delegates) != 3 {
t.Errorf("expected 3 delegates: %+v", status)
}
var errName pinsvc.APIError
pin2 := pinsvc.Pin{
Cid: clustertest.Cid1,
Name: pinsvc.PinName(make([]byte, 256)),
}
pinJSON, err = json.Marshal(pin2)
if err != nil {
t.Fatal(err)
}
test.MakePost(t, svcapi, url(svcapi)+"/pins", pinJSON, &errName)
if !strings.Contains(errName.Details.Reason, "255") {
t.Error("expected name error")
}
}
test.BothEndpoints(t, tf)
}
func TestAPIGetPinEndpoint(t *testing.T) {
ctx := context.Background()
svcapi := testAPI(t)
defer svcapi.Shutdown(ctx)
tf := func(t *testing.T, url test.URLFunc) {
// test existing pin
var status pinsvc.PinStatus
test.MakeGet(t, svcapi, url(svcapi)+"/pins/"+clustertest.Cid1.String(), &status)
if !status.Pin.Cid.Equals(clustertest.Cid1) {
t.Error("Cid should be set")
}
if status.Pin.Meta["meta"] != "data" {
t.Errorf("metadata should match: %+v", status.Pin)
}
if len(status.Delegates) != 1 {
t.Errorf("expected 1 delegates: %+v", status)
}
var err pinsvc.APIError
test.MakeGet(t, svcapi, url(svcapi)+"/pins/"+clustertest.ErrorCid.String(), &err)
if err.Details.Reason == "" {
t.Error("expected an error")
}
}
test.BothEndpoints(t, tf)
}
func TestAPIRemovePinEndpoint(t *testing.T) {
ctx := context.Background()
svcapi := testAPI(t)
defer svcapi.Shutdown(ctx)
tf := func(t *testing.T, url test.URLFunc) {
// test existing pin
test.MakeDelete(t, svcapi, url(svcapi)+"/pins/"+clustertest.Cid1.String(), nil)
}
test.BothEndpoints(t, tf)
}

View file

@ -1,14 +0,0 @@
language: go
go:
- '1.9'
- tip
install:
- go get golang.org/x/tools/cmd/cover
- go get github.com/mattn/goveralls
- make deps
script:
- make test
- "$GOPATH/bin/goveralls -coverprofile=coverage.out -service=travis-ci -repotoken $COVERALLS_TOKEN"
env:
global:
secure: Skjty77A/J/34pKFmHtxnpNejY2QAJw5PAacBnflo1yZfq4D2mEqVjyd0V2o/pSqm54b+eUouYp+9hNsBbVRHXlgi3PocVClBTV7McFMAoOn+OOEBrdt5wF57L0IPbt8yde+RpXcnCQ5rRvuSfCkEcTNhlxUdUjx4r9qhFsGWKvZVodcSO6xZTRwPYu7/MJWnJK/JV5CAWl7dWlWeAZhrASwXwS7662tu3SN9eor5+ZVF0t5BMhLP6juu6WPz9TFijQ/W4cRiXJ1REbg+M2RscAj9gOy7lIdKR5MEF1xj8naX2jtiZXcxIdV5cduLwSeBA8v5hahwV0H/1cN4Ypymix9vXfkZKyMbU7/TpO0pEzZOcoFne9edHRh6oUrCRBrf4veOiPbkObjmAs0HsdE1ZoeakgCQVHGqaMUlYW1ybeu04JJrXNAMC7s+RD9lxacwknrx333fSBmw+kQwJGmkYkdKcELo2toivrX+yXezISLf2+puqVPAZznY/OxHAuWDi047QLEBxW72ZuTCpT9QiOj3nl5chvmNV+edqgdLN3SlUNOB0jTOpyac/J1GicFkI7IgE2+PjeqpzVnrhZvpcAy4j8YLadGfISWVzbg4NaoUrBUIqA82rqwiZ1L+CcQKNW1h+vEXWp6cLnn2kcPSihM8RrsLuSiJMMgdIhMN3o=

View file

@ -1,43 +0,0 @@
# ipfs-cluster client
[![Made by](https://img.shields.io/badge/By-Protocol%20Labs-000000.svg?style=flat-square)](https://protocol.ai)
[![Main project](https://img.shields.io/badge/project-ipfs--cluster-ef5c43.svg?style=flat-square)](http://github.com/ipfs-cluster)
[![Discord](https://img.shields.io/badge/forum-discuss.ipfs.io-f9a035.svg?style=flat-square)](https://discuss.ipfs.io/c/help/help-ipfs-cluster/24)
[![Matrix channel](https://img.shields.io/badge/matrix-%23ipfs--cluster-3c8da0.svg?style=flat-square)](https://app.element.io/#/room/#ipfs-cluster:ipfs.io)
[![pkg.go.dev](https://pkg.go.dev/badge/github.com/ipfs-cluster/ipfs-cluster)](https://pkg.go.dev/github.com/ipfs-cluster/ipfs-cluster/api/rest/client)
> Go client for the ipfs-cluster HTTP API.
This is a Go client library for the ipfs-cluster REST HTTP API.
## Table of Contents
- [Install](#install)
- [Usage](#usage)
- [Contribute](#contribute)
- [License](#license)
## Install
You can import `github.com/ipfs-cluster/ipfs-cluster/api/rest/client` in your code.
The code can be downloaded and tested with:
```
$ git clone https://github.com/ipfs-cluster/ipfs-cluster.git
$ cd ipfs-cluster/api/rest/client
$ go test -v
```
## Usage
Documentation can be read at [pkg.go.dev](https://pkg.go.dev/github.com/ipfs-cluster/ipfs-cluster/api/rest/client).
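A minimal sketch of typical usage (assuming a cluster peer listening on the default API address, `/ip4/127.0.0.1/tcp/9094`):

```
package main

import (
	"context"
	"fmt"

	client "github.com/ipfs-cluster/ipfs-cluster/api/rest/client"
)

func main() {
	// An empty Config connects to the default API address.
	c, err := client.NewDefaultClient(&client.Config{})
	if err != nil {
		panic(err)
	}
	// Ask the cluster peer for its ID information.
	id, err := c.ID(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(id.ID)
}
```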
## Contribute
PRs accepted.
## License
MIT © Protocol Labs


@ -1,402 +0,0 @@
// Package client provides a Go Client for the IPFS Cluster API provided
// by the "api/rest" component. It supports both the HTTP(s) endpoint and
// the libp2p-http endpoint.
package client
import (
"context"
"fmt"
"net"
"net/http"
"time"
"github.com/ipfs-cluster/ipfs-cluster/api"
shell "github.com/ipfs/go-ipfs-api"
files "github.com/ipfs/go-ipfs-files"
logging "github.com/ipfs/go-log/v2"
host "github.com/libp2p/go-libp2p/core/host"
peer "github.com/libp2p/go-libp2p/core/peer"
pnet "github.com/libp2p/go-libp2p/core/pnet"
ma "github.com/multiformats/go-multiaddr"
madns "github.com/multiformats/go-multiaddr-dns"
manet "github.com/multiformats/go-multiaddr/net"
"go.opencensus.io/plugin/ochttp"
"go.opencensus.io/plugin/ochttp/propagation/tracecontext"
"go.opencensus.io/trace"
)
// Configuration defaults
var (
DefaultTimeout = 0
DefaultAPIAddr = "/ip4/127.0.0.1/tcp/9094"
DefaultLogLevel = "info"
DefaultProxyPort = 9095
ResolveTimeout = 30 * time.Second
DefaultPort = 9094
)
var loggingFacility = "apiclient"
var logger = logging.Logger(loggingFacility)
// Client interface defines the interface to be used by API clients to
// interact with the ipfs-cluster-service. All methods take a
// context.Context as their first parameter; this allows for
// timing out and canceling requests, as well as recording
// metrics and tracing requests through the API.
type Client interface {
// ID returns information about the cluster Peer.
ID(context.Context) (api.ID, error)
// Peers requests ID information for all cluster peers.
Peers(context.Context, chan<- api.ID) error
// PeerAdd adds a new peer to the cluster.
PeerAdd(ctx context.Context, pid peer.ID) (api.ID, error)
// PeerRm removes a current peer from the cluster
PeerRm(ctx context.Context, pid peer.ID) error
// Add imports files to the cluster from the given paths.
Add(ctx context.Context, paths []string, params api.AddParams, out chan<- api.AddedOutput) error
// AddMultiFile imports new files from a MultiFileReader.
AddMultiFile(ctx context.Context, multiFileR *files.MultiFileReader, params api.AddParams, out chan<- api.AddedOutput) error
// Pin tracks a Cid with the given replication factor and a name for
// human-friendliness.
Pin(ctx context.Context, ci api.Cid, opts api.PinOptions) (api.Pin, error)
// Unpin untracks a Cid from cluster.
Unpin(ctx context.Context, ci api.Cid) (api.Pin, error)
// PinPath resolves given path into a cid and performs the pin operation.
PinPath(ctx context.Context, path string, opts api.PinOptions) (api.Pin, error)
// UnpinPath resolves given path into a cid and performs the unpin operation.
// It returns api.Pin of the given cid before it is unpinned.
UnpinPath(ctx context.Context, path string) (api.Pin, error)
// Allocations returns the consensus state listing all tracked items
// and the peers that should be pinning them.
Allocations(ctx context.Context, filter api.PinType, out chan<- api.Pin) error
// Allocation returns the current allocations for a given Cid.
Allocation(ctx context.Context, ci api.Cid) (api.Pin, error)
// Status returns the current ipfs state for a given Cid. If local is true,
// the information affects only the current peer, otherwise the information
// is fetched from all cluster peers.
Status(ctx context.Context, ci api.Cid, local bool) (api.GlobalPinInfo, error)
// StatusCids returns status information for the requested CIDs.
StatusCids(ctx context.Context, cids []api.Cid, local bool, out chan<- api.GlobalPinInfo) error
// StatusAll gathers Status() for all tracked items.
StatusAll(ctx context.Context, filter api.TrackerStatus, local bool, out chan<- api.GlobalPinInfo) error
// Recover retriggers pin or unpin ipfs operations for a Cid in error
// state. If local is true, the operation is limited to the current
// peer, otherwise it happens on every cluster peer.
Recover(ctx context.Context, ci api.Cid, local bool) (api.GlobalPinInfo, error)
// RecoverAll triggers Recover() operations on all tracked items. If
// local is true, the operation is limited to the current peer.
// Otherwise, it happens everywhere.
RecoverAll(ctx context.Context, local bool, out chan<- api.GlobalPinInfo) error
// Alerts returns information about health events in the cluster
// (expired metrics, etc.).
Alerts(ctx context.Context) ([]api.Alert, error)
// Version returns the ipfs-cluster peer's version.
Version(context.Context) (api.Version, error)
// IPFS returns an instance of go-ipfs-api's Shell, pointing to a
// Cluster's IPFS proxy endpoint.
IPFS(context.Context) *shell.Shell
// GetConnectGraph returns an ipfs-cluster connection graph.
GetConnectGraph(context.Context) (api.ConnectGraph, error)
// Metrics returns a map with the latest metrics of matching name
// for the current cluster peers.
Metrics(ctx context.Context, name string) ([]api.Metric, error)
// MetricNames returns the list of metric types.
MetricNames(ctx context.Context) ([]string, error)
// RepoGC runs garbage collection on IPFS daemons of cluster peers and
// returns collected CIDs. If local is true, it would garbage collect
// only on contacted peer, otherwise on all peers' IPFS daemons.
RepoGC(ctx context.Context, local bool) (api.GlobalRepoGC, error)
}
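// A minimal usage sketch (ctx, ci and the options are placeholders;
// assumes a reachable cluster peer):
//
//	c, err := NewDefaultClient(&Config{Host: "127.0.0.1", Port: "9094"})
//	if err != nil {
//		// handle error
//	}
//	pin, err := c.Pin(ctx, ci, api.PinOptions{Name: "my-pin"})
//
// Streaming methods (Peers, Allocations, StatusAll, ...) write to a
// caller-provided channel and close it when they finish.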
// Config allows configuring the parameters used to connect
// to the ipfs-cluster REST API.
type Config struct {
// Enable SSL support. Only valid without APIAddr.
SSL bool
// Skip certificate verification (insecure)
NoVerifyCert bool
// Username and password for basic authentication
Username string
Password string
// The ipfs-cluster REST API endpoint in multiaddress form
// (takes precedence over host:port). If this address contains
// an /ipfs/, /p2p/ or /dnsaddr component, the API will be
// contacted through a libp2p tunnel, thus getting encryption
// for free. When the libp2p tunnel is used, other connection
// configurations are ignored.
APIAddr ma.Multiaddr
// REST API endpoint host and port. Only valid without
// APIAddr.
Host string
Port string
// If APIAddr is provided, and the peer uses private networks (pnet),
// then we need to provide the key. If the peer is the cluster peer,
// this corresponds to the cluster secret.
ProtectorKey pnet.PSK
// ProxyAddr is used to obtain a go-ipfs-api Shell instance pointing
// to the ipfs proxy endpoint of ipfs-cluster. If empty, the location
// will be guessed from one of APIAddr/Host,
// and the port used will be ipfs-cluster's proxy default port (9095)
ProxyAddr ma.Multiaddr
// Timeout defines the timeout for network operations.
Timeout time.Duration
// DisableKeepAlives disables re-using connections to the same
// hosts.
DisableKeepAlives bool
// LogLevel defines the verbosity of the logging facility
LogLevel string
}
// AsTemplateFor creates client configs from resolved multiaddresses
func (c *Config) AsTemplateFor(addrs []ma.Multiaddr) []*Config {
var cfgs []*Config
for _, addr := range addrs {
cfg := *c
cfg.APIAddr = addr
cfgs = append(cfgs, &cfg)
}
return cfgs
}
// AsTemplateForResolvedAddress creates client configs from a multiaddress
func (c *Config) AsTemplateForResolvedAddress(ctx context.Context, addr ma.Multiaddr) ([]*Config, error) {
resolvedAddrs, err := resolveAddr(ctx, addr)
if err != nil {
return nil, err
}
return c.AsTemplateFor(resolvedAddrs), nil
}
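// A minimal sketch (assuming addr is a /dnsaddr multiaddress that resolves
// to several cluster peers): derive one Config per resolved address and
// feed them to NewLBClient:
//
//	cfgs, err := cfg.AsTemplateForResolvedAddress(ctx, addr)
//	if err != nil {
//		// handle error
//	}
//	lb, err := NewLBClient(&RoundRobin{}, cfgs, 3)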
// DefaultClient provides methods to interact with the ipfs-cluster API. Use
// NewDefaultClient() to create one.
type defaultClient struct {
ctx context.Context
cancel context.CancelFunc
config *Config
transport *http.Transport
net string
hostname string
client *http.Client
p2p host.Host
}
// NewDefaultClient initializes a client given a Config.
func NewDefaultClient(cfg *Config) (Client, error) {
ctx, cancel := context.WithCancel(context.Background())
client := &defaultClient{
ctx: ctx,
cancel: cancel,
config: cfg,
}
if client.config.Port == "" {
client.config.Port = fmt.Sprintf("%d", DefaultPort)
}
err := client.setupAPIAddr()
if err != nil {
return nil, err
}
err = client.resolveAPIAddr()
if err != nil {
return nil, err
}
err = client.setupHTTPClient()
if err != nil {
return nil, err
}
err = client.setupHostname()
if err != nil {
return nil, err
}
err = client.setupProxy()
if err != nil {
return nil, err
}
if lvl := cfg.LogLevel; lvl != "" {
logging.SetLogLevel(loggingFacility, lvl)
} else {
logging.SetLogLevel(loggingFacility, DefaultLogLevel)
}
return client, nil
}
func (c *defaultClient) setupAPIAddr() error {
if c.config.APIAddr != nil {
return nil // already setup by user
}
var addr ma.Multiaddr
var err error
if c.config.Host == "" { //default
addr, err := ma.NewMultiaddr(DefaultAPIAddr)
c.config.APIAddr = addr
return err
}
var addrStr string
ip := net.ParseIP(c.config.Host)
switch {
case ip == nil:
addrStr = fmt.Sprintf("/dns4/%s/tcp/%s", c.config.Host, c.config.Port)
case ip.To4() != nil:
addrStr = fmt.Sprintf("/ip4/%s/tcp/%s", c.config.Host, c.config.Port)
default:
addrStr = fmt.Sprintf("/ip6/%s/tcp/%s", c.config.Host, c.config.Port)
}
addr, err = ma.NewMultiaddr(addrStr)
c.config.APIAddr = addr
return err
}
func (c *defaultClient) resolveAPIAddr() error {
// Only resolve libp2p addresses. For HTTP addresses, we let
// the default client handle any resolving. We extract the hostname
// in setupHostname()
if !IsPeerAddress(c.config.APIAddr) {
return nil
}
resolved, err := resolveAddr(c.ctx, c.config.APIAddr)
if err != nil {
return err
}
c.config.APIAddr = resolved[0]
return nil
}
func (c *defaultClient) setupHTTPClient() error {
var err error
switch {
case IsPeerAddress(c.config.APIAddr):
err = c.enableLibp2p()
case isUnixSocketAddress(c.config.APIAddr):
err = c.enableUnix()
case c.config.SSL:
err = c.enableTLS()
default:
c.defaultTransport()
}
if err != nil {
return err
}
c.client = &http.Client{
Transport: &ochttp.Transport{
Base: c.transport,
Propagation: &tracecontext.HTTPFormat{},
StartOptions: trace.StartOptions{SpanKind: trace.SpanKindClient},
FormatSpanName: func(req *http.Request) string { return req.Host + ":" + req.URL.Path + ":" + req.Method },
NewClientTrace: ochttp.NewSpanAnnotatingClientTrace,
},
Timeout: c.config.Timeout,
}
return nil
}
func (c *defaultClient) setupHostname() error {
// Extract host:port form APIAddr or use Host:Port.
// For libp2p, hostname is set in enableLibp2p()
// For unix sockets, hostname set in enableUnix()
if IsPeerAddress(c.config.APIAddr) || isUnixSocketAddress(c.config.APIAddr) {
return nil
}
_, hostname, err := manet.DialArgs(c.config.APIAddr)
if err != nil {
return err
}
c.hostname = hostname
return nil
}
func (c *defaultClient) setupProxy() error {
if c.config.ProxyAddr != nil {
return nil
}
// Guess location from APIAddr
port, err := ma.NewMultiaddr(fmt.Sprintf("/tcp/%d", DefaultProxyPort))
if err != nil {
return err
}
c.config.ProxyAddr = ma.Split(c.config.APIAddr)[0].Encapsulate(port)
return nil
}
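// For example, an APIAddr of /ip4/1.2.3.4/tcp/9094 results in a guessed
// ProxyAddr of /ip4/1.2.3.4/tcp/9095.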
// IPFS returns an instance of go-ipfs-api's Shell, pointing to the
// configured ProxyAddr (or to the default Cluster's IPFS proxy port).
// It re-uses this Client's HTTP client, and is thus constrained by
// the same configurations affecting it (timeouts, etc.).
func (c *defaultClient) IPFS(ctx context.Context) *shell.Shell {
return shell.NewShellWithClient(c.config.ProxyAddr.String(), c.client)
}
// IsPeerAddress detects if the given multiaddress identifies a libp2p peer,
// either because it has the /p2p/ protocol or because it uses /dnsaddr/
func IsPeerAddress(addr ma.Multiaddr) bool {
if addr == nil {
return false
}
pid, err := addr.ValueForProtocol(ma.P_P2P)
dnsaddr, err2 := addr.ValueForProtocol(ma.P_DNSADDR)
return (pid != "" && err == nil) || (dnsaddr != "" && err2 == nil)
}
// isUnixSocketAddress returns whether the given address corresponds to a
// unix socket.
func isUnixSocketAddress(addr ma.Multiaddr) bool {
if addr == nil {
return false
}
value, err := addr.ValueForProtocol(ma.P_UNIX)
return (value != "" && err == nil)
}
// resolveAddr resolves the given multiaddress, with a timeout.
func resolveAddr(ctx context.Context, addr ma.Multiaddr) ([]ma.Multiaddr, error) {
resolveCtx, cancel := context.WithTimeout(ctx, ResolveTimeout)
defer cancel()
resolved, err := madns.Resolve(resolveCtx, addr)
if err != nil {
return nil, err
}
if len(resolved) == 0 {
return nil, fmt.Errorf("resolving %s returned 0 results", addr)
}
return resolved, nil
}


@ -1,306 +0,0 @@
package client
import (
"context"
"fmt"
"strings"
"testing"
"github.com/ipfs-cluster/ipfs-cluster/api/rest"
"github.com/ipfs-cluster/ipfs-cluster/test"
libp2p "github.com/libp2p/go-libp2p"
pnet "github.com/libp2p/go-libp2p/core/pnet"
tcp "github.com/libp2p/go-libp2p/p2p/transport/tcp"
ma "github.com/multiformats/go-multiaddr"
)
func testAPI(t *testing.T) *rest.API {
ctx := context.Background()
//logging.SetDebugLogging()
apiMAddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0")
cfg := rest.NewConfig()
cfg.Default()
cfg.HTTPListenAddr = []ma.Multiaddr{apiMAddr}
secret := make(pnet.PSK, 32)
h, err := libp2p.New(
libp2p.ListenAddrs(apiMAddr),
libp2p.PrivateNetwork(secret),
libp2p.NoTransports,
libp2p.Transport(tcp.NewTCPTransport),
)
if err != nil {
t.Fatal(err)
}
rest, err := rest.NewAPIWithHost(ctx, cfg, h)
if err != nil {
t.Fatal("should be able to create a new Api: ", err)
}
rest.SetClient(test.NewMockRPCClient(t))
return rest
}
func shutdown(a *rest.API) {
ctx := context.Background()
a.Shutdown(ctx)
a.Host().Close()
}
func apiMAddr(a *rest.API) ma.Multiaddr {
listen, _ := a.HTTPAddresses()
hostPort := strings.Split(listen[0], ":")
addr, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%s", hostPort[1]))
return addr
}
func peerMAddr(a *rest.API) ma.Multiaddr {
ipfsAddr, _ := ma.NewMultiaddr(fmt.Sprintf("/p2p/%s", a.Host().ID().String()))
for _, a := range a.Host().Addrs() {
if _, err := a.ValueForProtocol(ma.P_IP4); err == nil {
return a.Encapsulate(ipfsAddr)
}
}
return nil
}
func testClientHTTP(t *testing.T, api *rest.API) *defaultClient {
cfg := &Config{
APIAddr: apiMAddr(api),
DisableKeepAlives: true,
}
c, err := NewDefaultClient(cfg)
if err != nil {
t.Fatal(err)
}
return c.(*defaultClient)
}
func testClientLibp2p(t *testing.T, api *rest.API) *defaultClient {
cfg := &Config{
APIAddr: peerMAddr(api),
ProtectorKey: make([]byte, 32),
DisableKeepAlives: true,
}
c, err := NewDefaultClient(cfg)
if err != nil {
t.Fatal(err)
}
return c.(*defaultClient)
}
func TestNewDefaultClient(t *testing.T) {
api := testAPI(t)
defer shutdown(api)
c := testClientHTTP(t, api)
if c.p2p != nil {
t.Error("should not use a libp2p host")
}
c = testClientLibp2p(t, api)
if c.p2p == nil {
t.Error("expected a libp2p host")
}
}
func TestDefaultAddress(t *testing.T) {
cfg := &Config{
APIAddr: nil,
DisableKeepAlives: true,
}
c, err := NewDefaultClient(cfg)
if err != nil {
t.Fatal(err)
}
dc := c.(*defaultClient)
if dc.hostname != "127.0.0.1:9094" {
t.Error("default should be used")
}
if dc.config.ProxyAddr == nil || dc.config.ProxyAddr.String() != "/ip4/127.0.0.1/tcp/9095" {
t.Error("proxy address was not guessed correctly")
}
}
func TestMultiaddressPrecedence(t *testing.T) {
addr, _ := ma.NewMultiaddr("/ip4/1.2.3.4/tcp/1234")
cfg := &Config{
APIAddr: addr,
Host: "localhost",
Port: "9094",
DisableKeepAlives: true,
}
c, err := NewDefaultClient(cfg)
if err != nil {
t.Fatal(err)
}
dc := c.(*defaultClient)
if dc.hostname != "1.2.3.4:1234" {
t.Error("APIAddr should be used")
}
if dc.config.ProxyAddr == nil || dc.config.ProxyAddr.String() != "/ip4/1.2.3.4/tcp/9095" {
t.Error("proxy address was not guessed correctly")
}
}
func TestHostPort(t *testing.T) {
type testcase struct {
host string
port string
expectedHostname string
expectedProxyAddr string
}
testcases := []testcase{
{
host: "3.3.1.1",
port: "9094",
expectedHostname: "3.3.1.1:9094",
expectedProxyAddr: "/ip4/3.3.1.1/tcp/9095",
},
{
host: "ipfs.io",
port: "9094",
expectedHostname: "ipfs.io:9094",
expectedProxyAddr: "/dns4/ipfs.io/tcp/9095",
},
{
host: "2001:db8::1",
port: "9094",
expectedHostname: "[2001:db8::1]:9094",
expectedProxyAddr: "/ip6/2001:db8::1/tcp/9095",
},
}
for _, tc := range testcases {
cfg := &Config{
APIAddr: nil,
Host: tc.host,
Port: tc.port,
DisableKeepAlives: true,
}
c, err := NewDefaultClient(cfg)
if err != nil {
t.Fatal(err)
}
dc := c.(*defaultClient)
if dc.hostname != tc.expectedHostname {
t.Error("Host Port should be used")
}
if paddr := dc.config.ProxyAddr; paddr == nil || paddr.String() != tc.expectedProxyAddr {
t.Error("proxy address was not guessed correctly: ", paddr)
}
}
}
func TestDNSMultiaddress(t *testing.T) {
addr2, _ := ma.NewMultiaddr("/dns4/localhost/tcp/1234")
cfg := &Config{
APIAddr: addr2,
Host: "localhost",
Port: "9094",
DisableKeepAlives: true,
}
c, err := NewDefaultClient(cfg)
if err != nil {
t.Fatal(err)
}
dc := c.(*defaultClient)
if dc.hostname != "localhost:1234" {
t.Error("address should not be resolved")
}
if paddr := dc.config.ProxyAddr; paddr == nil || paddr.String() != "/dns4/localhost/tcp/9095" {
t.Error("proxy address was not guessed correctly: ", paddr)
}
}
func TestPeerAddress(t *testing.T) {
peerAddr, _ := ma.NewMultiaddr("/dns4/localhost/tcp/1234/p2p/QmP7R7gWEnruNePxmCa9GBa4VmUNexLVnb1v47R8Gyo3LP")
cfg := &Config{
APIAddr: peerAddr,
Host: "localhost",
Port: "9094",
DisableKeepAlives: true,
}
c, err := NewDefaultClient(cfg)
if err != nil {
t.Fatal(err)
}
dc := c.(*defaultClient)
if dc.hostname != "QmP7R7gWEnruNePxmCa9GBa4VmUNexLVnb1v47R8Gyo3LP" || dc.net != "libp2p" {
t.Error("bad resolved address")
}
if dc.config.ProxyAddr == nil || dc.config.ProxyAddr.String() != "/ip4/127.0.0.1/tcp/9095" {
t.Error("proxy address was not guessed correctly")
}
}
func TestProxyAddress(t *testing.T) {
addr, _ := ma.NewMultiaddr("/ip4/1.3.4.5/tcp/1234")
cfg := &Config{
DisableKeepAlives: true,
ProxyAddr: addr,
}
c, err := NewDefaultClient(cfg)
if err != nil {
t.Fatal(err)
}
dc := c.(*defaultClient)
if dc.config.ProxyAddr.String() != addr.String() {
t.Error("proxy address was replaced")
}
}
func TestIPFS(t *testing.T) {
ctx := context.Background()
ipfsMock := test.NewIpfsMock(t)
defer ipfsMock.Close()
proxyAddr, err := ma.NewMultiaddr(
fmt.Sprintf("/ip4/%s/tcp/%d", ipfsMock.Addr, ipfsMock.Port),
)
if err != nil {
t.Fatal(err)
}
cfg := &Config{
DisableKeepAlives: true,
ProxyAddr: proxyAddr,
}
c, err := NewDefaultClient(cfg)
if err != nil {
t.Fatal(err)
}
dc := c.(*defaultClient)
ipfs := dc.IPFS(ctx)
err = ipfs.Pin(test.Cid1.String())
if err != nil {
t.Error(err)
}
pins, err := ipfs.Pins()
if err != nil {
t.Error(err)
}
pin, ok := pins[test.Cid1.String()]
if !ok {
t.Error("pin should be in pin list")
}
if pin.Type != "recursive" {
t.Error("pin type unexpected")
}
}


@ -1,555 +0,0 @@
package client
import (
"context"
"sync/atomic"
shell "github.com/ipfs/go-ipfs-api"
files "github.com/ipfs/go-ipfs-files"
"github.com/ipfs-cluster/ipfs-cluster/api"
peer "github.com/libp2p/go-libp2p/core/peer"
)
// loadBalancingClient is a client to interact with IPFS Cluster APIs
// that balances the load by distributing requests among peers.
type loadBalancingClient struct {
strategy LBStrategy
retries int
}
// LBStrategy is a strategy to load balance requests among clients.
type LBStrategy interface {
Next(count int) Client
SetClients(clients []Client)
}
// RoundRobin is a load balancing strategy that uses clients in sequence
// for all methods, throughout the lifetime of the lb client.
type RoundRobin struct {
clients []Client
counter uint32
length uint32
}
// Next returns the next client to be used.
func (r *RoundRobin) Next(count int) Client {
i := atomic.AddUint32(&r.counter, 1) % r.length
return r.clients[i]
}
// SetClients sets a list of clients for this strategy.
func (r *RoundRobin) SetClients(cl []Client) {
r.clients = cl
r.length = uint32(len(cl))
}
// Failover is a load balancing strategy that tries the first cluster peer
// first. If the call fails, it tries the other clients for that call in a
// round-robin fashion.
type Failover struct {
clients []Client
}
// Next returns the next client to be used.
func (f *Failover) Next(count int) Client {
return f.clients[count%len(f.clients)]
}
// SetClients sets a list of clients for this strategy.
func (f *Failover) SetClients(cl []Client) {
f.clients = cl
}
// NewLBClient returns a new client that load-balances requests among the
// given clients.
func NewLBClient(strategy LBStrategy, cfgs []*Config, retries int) (Client, error) {
var clients []Client
for _, cfg := range cfgs {
defaultClient, err := NewDefaultClient(cfg)
if err != nil {
return nil, err
}
clients = append(clients, defaultClient)
}
strategy.SetClients(clients)
return &loadBalancingClient{strategy: strategy, retries: retries}, nil
}
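// A minimal usage sketch (the peer addresses are placeholders): one Config
// per cluster peer, wrapped in a Failover strategy with up to 3 retries
// per request:
//
//	cfgs := []*Config{{Host: "10.0.0.1"}, {Host: "10.0.0.2"}}
//	lb, err := NewLBClient(&Failover{}, cfgs, 3)
//	if err != nil {
//		// handle error
//	}
//	id, err := lb.ID(ctx)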
// retry attempts the request until it succeeds or `lc.retries` attempts have been made.
func (lc *loadBalancingClient) retry(count int, call func(Client) error) error {
logger.Debugf("retrying %d times", count+1)
err := call(lc.strategy.Next(count))
count++
// successful request
if err == nil {
return nil
}
// It is a safety check. This error should never occur.
// All errors returned by client methods are of type `api.Error`.
apiErr, ok := err.(api.Error)
if !ok {
logger.Error("could not cast error into api.Error")
return err
}
if apiErr.Code != 0 {
return err
}
if count == lc.retries {
logger.Errorf("reached maximum number of retries without success, retries: %d", lc.retries)
return err
}
return lc.retry(count, call)
}
// ID returns information about the cluster Peer.
func (lc *loadBalancingClient) ID(ctx context.Context) (api.ID, error) {
var id api.ID
call := func(c Client) error {
var err error
id, err = c.ID(ctx)
return err
}
err := lc.retry(0, call)
return id, err
}
// Peers requests ID information for all cluster peers.
func (lc *loadBalancingClient) Peers(ctx context.Context, out chan<- api.ID) error {
call := func(c Client) error {
done := make(chan struct{})
cout := make(chan api.ID, cap(out))
go func() {
for o := range cout {
out <- o
}
done <- struct{}{}
}()
// this blocks until done
err := c.Peers(ctx, cout)
// wait for cout to be closed
select {
case <-ctx.Done():
case <-done:
}
return err
}
// retries call as needed.
err := lc.retry(0, call)
close(out)
return err
}
// PeerAdd adds a new peer to the cluster.
func (lc *loadBalancingClient) PeerAdd(ctx context.Context, pid peer.ID) (api.ID, error) {
var id api.ID
call := func(c Client) error {
var err error
id, err = c.PeerAdd(ctx, pid)
return err
}
err := lc.retry(0, call)
return id, err
}
// PeerRm removes a current peer from the cluster.
func (lc *loadBalancingClient) PeerRm(ctx context.Context, id peer.ID) error {
call := func(c Client) error {
return c.PeerRm(ctx, id)
}
return lc.retry(0, call)
}
// Pin tracks a Cid with the given replication factor and a name for
// human-friendliness.
func (lc *loadBalancingClient) Pin(ctx context.Context, ci api.Cid, opts api.PinOptions) (api.Pin, error) {
var pin api.Pin
call := func(c Client) error {
var err error
pin, err = c.Pin(ctx, ci, opts)
return err
}
err := lc.retry(0, call)
return pin, err
}
// Unpin untracks a Cid from cluster.
func (lc *loadBalancingClient) Unpin(ctx context.Context, ci api.Cid) (api.Pin, error) {
var pin api.Pin
call := func(c Client) error {
var err error
pin, err = c.Unpin(ctx, ci)
return err
}
err := lc.retry(0, call)
return pin, err
}
// PinPath pins an element by the given IPFS path.
func (lc *loadBalancingClient) PinPath(ctx context.Context, path string, opts api.PinOptions) (api.Pin, error) {
var pin api.Pin
call := func(c Client) error {
var err error
pin, err = c.PinPath(ctx, path, opts)
return err
}
err := lc.retry(0, call)
return pin, err
}
// UnpinPath unpins an item by providing its IPFS path.
// It returns the unpinned api.Pin information of the resolved Cid.
func (lc *loadBalancingClient) UnpinPath(ctx context.Context, p string) (api.Pin, error) {
var pin api.Pin
call := func(c Client) error {
var err error
pin, err = c.UnpinPath(ctx, p)
return err
}
err := lc.retry(0, call)
return pin, err
}
// Allocations returns the consensus state listing all tracked items and
// the peers that should be pinning them.
func (lc *loadBalancingClient) Allocations(ctx context.Context, filter api.PinType, out chan<- api.Pin) error {
call := func(c Client) error {
done := make(chan struct{})
cout := make(chan api.Pin, cap(out))
go func() {
for o := range cout {
out <- o
}
done <- struct{}{}
}()
// this blocks until done
err := c.Allocations(ctx, filter, cout)
// wait for cout to be closed
select {
case <-ctx.Done():
case <-done:
}
return err
}
err := lc.retry(0, call)
close(out)
return err
}
// Allocation returns the current allocations for a given Cid.
func (lc *loadBalancingClient) Allocation(ctx context.Context, ci api.Cid) (api.Pin, error) {
var pin api.Pin
call := func(c Client) error {
var err error
pin, err = c.Allocation(ctx, ci)
return err
}
err := lc.retry(0, call)
return pin, err
}
// Status returns the current ipfs state for a given Cid. If local is true,
// the information affects only the current peer, otherwise the information
// is fetched from all cluster peers.
func (lc *loadBalancingClient) Status(ctx context.Context, ci api.Cid, local bool) (api.GlobalPinInfo, error) {
var pinInfo api.GlobalPinInfo
call := func(c Client) error {
var err error
pinInfo, err = c.Status(ctx, ci, local)
return err
}
err := lc.retry(0, call)
return pinInfo, err
}
// StatusCids returns Status() information for the given Cids. If local is
// true, the information affects only the current peer, otherwise the
// information is fetched from all cluster peers.
func (lc *loadBalancingClient) StatusCids(ctx context.Context, cids []api.Cid, local bool, out chan<- api.GlobalPinInfo) error {
call := func(c Client) error {
done := make(chan struct{})
cout := make(chan api.GlobalPinInfo, cap(out))
go func() {
for o := range cout {
out <- o
}
done <- struct{}{}
}()
// this blocks until done
err := c.StatusCids(ctx, cids, local, cout)
// wait for cout to be closed
select {
case <-ctx.Done():
case <-done:
}
return err
}
err := lc.retry(0, call)
close(out)
return err
}
// StatusAll gathers Status() for all tracked items. If a filter is
// provided, only entries matching the given filter statuses
// will be returned. A filter can be built by merging TrackerStatuses with
// a bitwise OR operation (st1 | st2 | ...). A "0" filter value (or
// api.TrackerStatusUndefined), means all.
func (lc *loadBalancingClient) StatusAll(ctx context.Context, filter api.TrackerStatus, local bool, out chan<- api.GlobalPinInfo) error {
call := func(c Client) error {
done := make(chan struct{})
cout := make(chan api.GlobalPinInfo, cap(out))
go func() {
for o := range cout {
out <- o
}
done <- struct{}{}
}()
// this blocks until done
err := c.StatusAll(ctx, filter, local, cout)
// wait for cout to be closed
select {
case <-ctx.Done():
case <-done:
}
return err
}
err := lc.retry(0, call)
close(out)
return err
}
// Recover retriggers pin or unpin ipfs operations for a Cid in error state.
// If local is true, the operation is limited to the current peer, otherwise
// it happens on every cluster peer.
func (lc *loadBalancingClient) Recover(ctx context.Context, ci api.Cid, local bool) (api.GlobalPinInfo, error) {
var pinInfo api.GlobalPinInfo
call := func(c Client) error {
var err error
pinInfo, err = c.Recover(ctx, ci, local)
return err
}
err := lc.retry(0, call)
return pinInfo, err
}
// RecoverAll triggers Recover() operations on all tracked items. If local is
// true, the operation is limited to the current peer. Otherwise, it happens
// everywhere.
func (lc *loadBalancingClient) RecoverAll(ctx context.Context, local bool, out chan<- api.GlobalPinInfo) error {
call := func(c Client) error {
done := make(chan struct{})
cout := make(chan api.GlobalPinInfo, cap(out))
go func() {
for o := range cout {
out <- o
}
done <- struct{}{}
}()
// this blocks until done
err := c.RecoverAll(ctx, local, cout)
// wait for cout to be closed
select {
case <-ctx.Done():
case <-done:
}
return err
}
err := lc.retry(0, call)
close(out)
return err
}
// Alerts returns information about health events in the cluster.
func (lc *loadBalancingClient) Alerts(ctx context.Context) ([]api.Alert, error) {
var alerts []api.Alert
call := func(c Client) error {
var err error
alerts, err = c.Alerts(ctx)
return err
}
err := lc.retry(0, call)
return alerts, err
}
// Version returns the ipfs-cluster peer's version.
func (lc *loadBalancingClient) Version(ctx context.Context) (api.Version, error) {
var v api.Version
call := func(c Client) error {
var err error
v, err = c.Version(ctx)
return err
}
err := lc.retry(0, call)
return v, err
}
// GetConnectGraph returns an ipfs-cluster connection graph.
// The serialized version, strings instead of pids, is returned.
func (lc *loadBalancingClient) GetConnectGraph(ctx context.Context) (api.ConnectGraph, error) {
var graph api.ConnectGraph
call := func(c Client) error {
var err error
graph, err = c.GetConnectGraph(ctx)
return err
}
err := lc.retry(0, call)
return graph, err
}
// Metrics returns a map with the latest valid metrics of the given name
// for the current cluster peers.
func (lc *loadBalancingClient) Metrics(ctx context.Context, name string) ([]api.Metric, error) {
var metrics []api.Metric
call := func(c Client) error {
var err error
metrics, err = c.Metrics(ctx, name)
return err
}
err := lc.retry(0, call)
return metrics, err
}
// MetricNames returns the list of metric types.
func (lc *loadBalancingClient) MetricNames(ctx context.Context) ([]string, error) {
var metricNames []string
call := func(c Client) error {
var err error
metricNames, err = c.MetricNames(ctx)
return err
}
err := lc.retry(0, call)
return metricNames, err
}
// RepoGC runs garbage collection on IPFS daemons of cluster peers and
// returns collected CIDs. If local is true, it would garbage collect
// only on contacted peer, otherwise on all peers' IPFS daemons.
func (lc *loadBalancingClient) RepoGC(ctx context.Context, local bool) (api.GlobalRepoGC, error) {
var repoGC api.GlobalRepoGC
call := func(c Client) error {
var err error
repoGC, err = c.RepoGC(ctx, local)
return err
}
err := lc.retry(0, call)
return repoGC, err
}
// Add imports files to the cluster from the given paths. A path can
// either be a local filesystem location or a web URL (http:// or https://).
// In the latter case, the content will be downloaded with a GET request.
// The AddParams allow controlling different options, like sharding the
// resulting DAG across the IPFS daemons of multiple cluster
// peers. The output channel will receive regular updates as the adding
// process progresses.
func (lc *loadBalancingClient) Add(
ctx context.Context,
paths []string,
params api.AddParams,
out chan<- api.AddedOutput,
) error {
call := func(c Client) error {
done := make(chan struct{})
cout := make(chan api.AddedOutput, cap(out))
go func() {
for o := range cout {
out <- o
}
done <- struct{}{}
}()
// this blocks until done
err := c.Add(ctx, paths, params, cout)
// wait for cout to be closed
select {
case <-ctx.Done():
case <-done:
}
return err
}
err := lc.retry(0, call)
close(out)
return err
}
// AddMultiFile imports new files from a MultiFileReader. See Add().
func (lc *loadBalancingClient) AddMultiFile(
ctx context.Context,
multiFileR *files.MultiFileReader,
params api.AddParams,
out chan<- api.AddedOutput,
) error {
call := func(c Client) error {
done := make(chan struct{})
cout := make(chan api.AddedOutput, cap(out))
go func() {
for o := range cout {
out <- o
}
done <- struct{}{}
}()
// this blocks until done
err := c.AddMultiFile(ctx, multiFileR, params, cout)
// wait for cout to be closed
select {
case <-ctx.Done():
case <-done:
}
return err
}
err := lc.retry(0, call)
close(out)
return err
}
// IPFS returns an instance of go-ipfs-api's Shell, pointing to the
// configured ProxyAddr (or to the default Cluster's IPFS proxy port).
// It re-uses this Client's HTTP client, and is thus constrained by
// the same configurations affecting it (timeouts, etc.).
func (lc *loadBalancingClient) IPFS(ctx context.Context) *shell.Shell {
var s *shell.Shell
call := func(c Client) error {
s = c.IPFS(ctx)
return nil
}
lc.retry(0, call)
return s
}


@ -1,107 +0,0 @@
package client
import (
"context"
"fmt"
"sync"
"testing"
"github.com/ipfs-cluster/ipfs-cluster/api"
ma "github.com/multiformats/go-multiaddr"
)
func TestFailoverConcurrently(t *testing.T) {
// Create a load balancing client with 5 empty clients and 5 clients with APIs
// say we want to retry the request at most 5 times
cfgs := make([]*Config, 10)
// 5 clients with an invalid api address
for i := 0; i < 5; i++ {
maddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0")
cfgs[i] = &Config{
APIAddr: maddr,
DisableKeepAlives: true,
}
}
// 5 clients with APIs
for i := 5; i < 10; i++ {
cfgs[i] = &Config{
APIAddr: apiMAddr(testAPI(t)),
DisableKeepAlives: true,
}
}
// Run many requests at the same time
// With the Failover strategy, it goes through the first 5 failing clients
// and then the 6th working client. Thus, all requests should always succeed.
testRunManyRequestsConcurrently(t, cfgs, &Failover{}, 200, 6, true)
// The first 5 clients fail, so with only 5 retries all requests should fail.
testRunManyRequestsConcurrently(t, cfgs, &Failover{}, 200, 5, false)
}
type dummyClient struct {
defaultClient
i int
}
// ID returns dummy client's serial number.
func (d *dummyClient) ID(ctx context.Context) (api.ID, error) {
return api.ID{
Peername: fmt.Sprintf("%d", d.i),
}, nil
}
func TestRoundRobin(t *testing.T) {
var clients []Client
// number of clients
n := 5
// create n dummy clients
for i := 0; i < n; i++ {
c := &dummyClient{
i: i,
}
clients = append(clients, c)
}
roundRobin := loadBalancingClient{
strategy: &RoundRobin{
clients: clients,
length: uint32(len(clients)),
},
}
// clients should be used in the sequence 1, 2,.., 4, 0.
for i := 0; i < n; i++ {
id, _ := roundRobin.ID(context.Background())
if id.Peername != fmt.Sprintf("%d", (i+1)%n) {
t.Errorf("clients are not being tried in sequence, expected client: %d, but found: %s", i, id.Peername)
}
}
}
func testRunManyRequestsConcurrently(t *testing.T, cfgs []*Config, strategy LBStrategy, requests int, retries int, pass bool) {
c, err := NewLBClient(strategy, cfgs, retries)
if err != nil {
t.Fatal(err)
}
var wg sync.WaitGroup
for i := 0; i < requests; i++ {
wg.Add(1)
go func() {
defer wg.Done()
ctx := context.Background()
_, err := c.ID(ctx)
if err != nil && pass {
t.Error(err)
}
if err == nil && !pass {
t.Error("request should fail with connection refusal")
}
}()
}
wg.Wait()
}


@ -1,699 +0,0 @@
package client
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"net/url"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/ipfs-cluster/ipfs-cluster/api"
files "github.com/ipfs/go-ipfs-files"
gopath "github.com/ipfs/go-path"
peer "github.com/libp2p/go-libp2p/core/peer"
"go.opencensus.io/trace"
)
// ID returns information about the cluster Peer.
func (c *defaultClient) ID(ctx context.Context) (api.ID, error) {
ctx, span := trace.StartSpan(ctx, "client/ID")
defer span.End()
var id api.ID
err := c.do(ctx, "GET", "/id", nil, nil, &id)
return id, err
}
// Peers requests ID information for all cluster peers.
func (c *defaultClient) Peers(ctx context.Context, out chan<- api.ID) error {
defer close(out)
ctx, span := trace.StartSpan(ctx, "client/Peers")
defer span.End()
handler := func(dec *json.Decoder) error {
var obj api.ID
err := dec.Decode(&obj)
if err != nil {
return err
}
out <- obj
return nil
}
return c.doStream(ctx, "GET", "/peers", nil, nil, handler)
}
type peerAddBody struct {
PeerID string `json:"peer_id"`
}
// PeerAdd adds a new peer to the cluster.
func (c *defaultClient) PeerAdd(ctx context.Context, pid peer.ID) (api.ID, error) {
ctx, span := trace.StartSpan(ctx, "client/PeerAdd")
defer span.End()
body := peerAddBody{pid.String()}
var buf bytes.Buffer
enc := json.NewEncoder(&buf)
enc.Encode(body)
var id api.ID
err := c.do(ctx, "POST", "/peers", nil, &buf, &id)
return id, err
}
// PeerRm removes a current peer from the cluster
func (c *defaultClient) PeerRm(ctx context.Context, id peer.ID) error {
ctx, span := trace.StartSpan(ctx, "client/PeerRm")
defer span.End()
return c.do(ctx, "DELETE", fmt.Sprintf("/peers/%s", id.Pretty()), nil, nil, nil)
}
// Pin tracks a Cid with the given replication factor and a name for
// human-friendliness.
func (c *defaultClient) Pin(ctx context.Context, ci api.Cid, opts api.PinOptions) (api.Pin, error) {
ctx, span := trace.StartSpan(ctx, "client/Pin")
defer span.End()
query, err := opts.ToQuery()
if err != nil {
return api.Pin{}, err
}
var pin api.Pin
err = c.do(
ctx,
"POST",
fmt.Sprintf(
"/pins/%s?%s",
ci.String(),
query,
),
nil,
nil,
&pin,
)
return pin, err
}
// Unpin untracks a Cid from cluster.
func (c *defaultClient) Unpin(ctx context.Context, ci api.Cid) (api.Pin, error) {
ctx, span := trace.StartSpan(ctx, "client/Unpin")
defer span.End()
var pin api.Pin
err := c.do(ctx, "DELETE", fmt.Sprintf("/pins/%s", ci.String()), nil, nil, &pin)
return pin, err
}
// PinPath pins an element by the given IPFS path.
func (c *defaultClient) PinPath(ctx context.Context, path string, opts api.PinOptions) (api.Pin, error) {
ctx, span := trace.StartSpan(ctx, "client/PinPath")
defer span.End()
var pin api.Pin
ipfspath, err := gopath.ParsePath(path)
if err != nil {
return api.Pin{}, err
}
query, err := opts.ToQuery()
if err != nil {
return api.Pin{}, err
}
err = c.do(
ctx,
"POST",
fmt.Sprintf(
"/pins%s?%s",
ipfspath.String(),
query,
),
nil,
nil,
&pin,
)
return pin, err
}
// UnpinPath unpins an item by providing its IPFS path.
// It returns the unpinned api.Pin information of the resolved Cid.
func (c *defaultClient) UnpinPath(ctx context.Context, p string) (api.Pin, error) {
ctx, span := trace.StartSpan(ctx, "client/UnpinPath")
defer span.End()
var pin api.Pin
ipfspath, err := gopath.ParsePath(p)
if err != nil {
return api.Pin{}, err
}
err = c.do(ctx, "DELETE", fmt.Sprintf("/pins%s", ipfspath.String()), nil, nil, &pin)
return pin, err
}
// Allocations returns the consensus state listing all tracked items and
// the peers that should be pinning them.
func (c *defaultClient) Allocations(ctx context.Context, filter api.PinType, out chan<- api.Pin) error {
defer close(out)
ctx, span := trace.StartSpan(ctx, "client/Allocations")
defer span.End()
types := []api.PinType{
api.DataType,
api.MetaType,
api.ClusterDAGType,
api.ShardType,
}
var strFilter []string
if filter == api.AllType {
strFilter = []string{"all"}
} else {
for _, t := range types {
if t&filter > 0 { // the filter includes this type
strFilter = append(strFilter, t.String())
}
}
}
handler := func(dec *json.Decoder) error {
var obj api.Pin
err := dec.Decode(&obj)
if err != nil {
return err
}
out <- obj
return nil
}
f := url.QueryEscape(strings.Join(strFilter, ","))
return c.doStream(
ctx,
"GET",
fmt.Sprintf("/allocations?filter=%s", f),
nil,
nil,
handler)
}
// Allocation returns the current allocations for a given Cid.
func (c *defaultClient) Allocation(ctx context.Context, ci api.Cid) (api.Pin, error) {
ctx, span := trace.StartSpan(ctx, "client/Allocation")
defer span.End()
var pin api.Pin
err := c.do(ctx, "GET", fmt.Sprintf("/allocations/%s", ci.String()), nil, nil, &pin)
return pin, err
}
// Status returns the current ipfs state for a given Cid. If local is true,
// the information affects only the current peer, otherwise the information
// is fetched from all cluster peers.
func (c *defaultClient) Status(ctx context.Context, ci api.Cid, local bool) (api.GlobalPinInfo, error) {
ctx, span := trace.StartSpan(ctx, "client/Status")
defer span.End()
var gpi api.GlobalPinInfo
err := c.do(
ctx,
"GET",
fmt.Sprintf("/pins/%s?local=%t", ci.String(), local),
nil,
nil,
&gpi,
)
return gpi, err
}
// StatusCids returns Status() information for the given Cids. If local is
// true, the information affects only the current peer, otherwise the
// information is fetched from all cluster peers.
func (c *defaultClient) StatusCids(ctx context.Context, cids []api.Cid, local bool, out chan<- api.GlobalPinInfo) error {
return c.statusAllWithCids(ctx, api.TrackerStatusUndefined, cids, local, out)
}
// StatusAll gathers Status() for all tracked items. If a filter is
// provided, only entries matching the given filter statuses
// will be returned. A filter can be built by merging TrackerStatuses with
// a bitwise OR operation (st1 | st2 | ...). A "0" filter value (or
// api.TrackerStatusUndefined), means all.
func (c *defaultClient) StatusAll(ctx context.Context, filter api.TrackerStatus, local bool, out chan<- api.GlobalPinInfo) error {
return c.statusAllWithCids(ctx, filter, nil, local, out)
}
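// For example (sketch; c is a Client and out a caller-made channel which
// StatusAll will close), to stream all items that are pinned or being
// pinned:
//
//	filter := api.TrackerStatusPinned | api.TrackerStatusPinning
//	err := c.StatusAll(ctx, filter, false, out)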
func (c *defaultClient) statusAllWithCids(ctx context.Context, filter api.TrackerStatus, cids []api.Cid, local bool, out chan<- api.GlobalPinInfo) error {
defer close(out)
ctx, span := trace.StartSpan(ctx, "client/StatusAll")
defer span.End()
filterStr := ""
if filter != api.TrackerStatusUndefined { // undefined filter means "all"
filterStr = filter.String()
if filterStr == "" {
return errors.New("invalid filter value")
}
}
cidsStr := make([]string, len(cids))
for i, c := range cids {
cidsStr[i] = c.String()
}
handler := func(dec *json.Decoder) error {
var obj api.GlobalPinInfo
err := dec.Decode(&obj)
if err != nil {
return err
}
out <- obj
return nil
}
return c.doStream(
ctx,
"GET",
fmt.Sprintf("/pins?local=%t&filter=%s&cids=%s",
local, url.QueryEscape(filterStr), strings.Join(cidsStr, ",")),
nil,
nil,
handler,
)
}
// Recover retriggers pin or unpin ipfs operations for a Cid in error state.
// If local is true, the operation is limited to the current peer, otherwise
// it happens on every cluster peer.
func (c *defaultClient) Recover(ctx context.Context, ci api.Cid, local bool) (api.GlobalPinInfo, error) {
ctx, span := trace.StartSpan(ctx, "client/Recover")
defer span.End()
var gpi api.GlobalPinInfo
err := c.do(ctx, "POST", fmt.Sprintf("/pins/%s/recover?local=%t", ci.String(), local), nil, nil, &gpi)
return gpi, err
}
// RecoverAll triggers Recover() operations on all tracked items. If local is
// true, the operation is limited to the current peer. Otherwise, it happens
// everywhere.
func (c *defaultClient) RecoverAll(ctx context.Context, local bool, out chan<- api.GlobalPinInfo) error {
defer close(out)
ctx, span := trace.StartSpan(ctx, "client/RecoverAll")
defer span.End()
handler := func(dec *json.Decoder) error {
var obj api.GlobalPinInfo
err := dec.Decode(&obj)
if err != nil {
return err
}
out <- obj
return nil
}
return c.doStream(
ctx,
"POST",
fmt.Sprintf("/pins/recover?local=%t", local),
nil,
nil,
handler)
}
// Alerts returns information about health events in the cluster
// (expired metrics, etc.).
func (c *defaultClient) Alerts(ctx context.Context) ([]api.Alert, error) {
ctx, span := trace.StartSpan(ctx, "client/Alert")
defer span.End()
var alerts []api.Alert
err := c.do(ctx, "GET", "/health/alerts", nil, nil, &alerts)
return alerts, err
}
// Version returns the ipfs-cluster peer's version.
func (c *defaultClient) Version(ctx context.Context) (api.Version, error) {
ctx, span := trace.StartSpan(ctx, "client/Version")
defer span.End()
var ver api.Version
err := c.do(ctx, "GET", "/version", nil, nil, &ver)
return ver, err
}
// GetConnectGraph returns an ipfs-cluster connection graph.
// The serialized version, strings instead of pids, is returned
func (c *defaultClient) GetConnectGraph(ctx context.Context) (api.ConnectGraph, error) {
ctx, span := trace.StartSpan(ctx, "client/GetConnectGraph")
defer span.End()
var graph api.ConnectGraph
err := c.do(ctx, "GET", "/health/graph", nil, nil, &graph)
return graph, err
}
// Metrics returns a map with the latest valid metrics of the given name
// for the current cluster peers.
func (c *defaultClient) Metrics(ctx context.Context, name string) ([]api.Metric, error) {
ctx, span := trace.StartSpan(ctx, "client/Metrics")
defer span.End()
if name == "" {
return nil, errors.New("bad metric name")
}
var metrics []api.Metric
err := c.do(ctx, "GET", fmt.Sprintf("/monitor/metrics/%s", name), nil, nil, &metrics)
return metrics, err
}
// MetricNames lists names of all metrics.
func (c *defaultClient) MetricNames(ctx context.Context) ([]string, error) {
ctx, span := trace.StartSpan(ctx, "client/MetricNames")
defer span.End()
var metricsNames []string
err := c.do(ctx, "GET", "/monitor/metrics", nil, nil, &metricsNames)
return metricsNames, err
}
// RepoGC runs garbage collection on IPFS daemons of cluster peers and
// returns collected CIDs. If local is true, it would garbage collect
// only on contacted peer, otherwise on all peers' IPFS daemons.
func (c *defaultClient) RepoGC(ctx context.Context, local bool) (api.GlobalRepoGC, error) {
ctx, span := trace.StartSpan(ctx, "client/RepoGC")
defer span.End()
var repoGC api.GlobalRepoGC
err := c.do(
ctx,
"POST",
fmt.Sprintf("/ipfs/gc?local=%t", local),
nil,
nil,
&repoGC,
)
return repoGC, err
}
// WaitFor is a utility function that allows for a caller to wait until a CID
// status target is reached (as given in StatusFilterParams).
// It returns the final status for that CID and an error, if there was one.
//
// WaitFor works by calling Status() repeatedly and checking that returned
// peers have transitioned to the target TrackerStatus. It immediately returns
// an error when an error is among the statuses (and an empty
// GlobalPinInfo).
//
// A special case exists for TrackerStatusPinned targets: in this case,
// TrackerStatusRemote statuses are ignored, so WaitFor will return when
// all Statuses are Pinned or Remote by default.
//
// The Limit parameter allows finer-grained control: for example, waiting
// only until a given number of peers reaches the status.
func WaitFor(ctx context.Context, c Client, fp StatusFilterParams) (api.GlobalPinInfo, error) {
ctx, span := trace.StartSpan(ctx, "client/WaitFor")
defer span.End()
ctx, cancel := context.WithCancel(ctx)
defer cancel()
sf := newStatusFilter()
go sf.pollStatus(ctx, c, fp)
go sf.filter(ctx, fp)
var status api.GlobalPinInfo
for {
select {
case <-ctx.Done():
return status, ctx.Err()
case err := <-sf.Err:
return status, err
case st, ok := <-sf.Out:
if !ok { // channel closed
return status, nil
}
status = st
}
}
}
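// A minimal usage sketch (assuming ci is a CID the cluster is pinning):
//
//	fp := StatusFilterParams{
//		Cid:       ci,
//		Target:    api.TrackerStatusPinned,
//		CheckFreq: time.Second,
//	}
//	gpi, err := WaitFor(ctx, c, fp)
//
// Note that CheckFreq must be set, as it drives the polling ticker.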
// StatusFilterParams contains the parameters required
// to filter a stream of status results.
type StatusFilterParams struct {
Cid api.Cid
Local bool // query status from the local peer only
Target api.TrackerStatus
Limit int // wait for N peers reaching status. 0 == all
CheckFreq time.Duration
}
type statusFilter struct {
In, Out chan api.GlobalPinInfo
Done chan struct{}
Err chan error
}
func newStatusFilter() *statusFilter {
return &statusFilter{
In: make(chan api.GlobalPinInfo),
Out: make(chan api.GlobalPinInfo),
Done: make(chan struct{}),
Err: make(chan error),
}
}
func (sf *statusFilter) filter(ctx context.Context, fp StatusFilterParams) {
defer close(sf.Done)
defer close(sf.Out)
for {
select {
case <-ctx.Done():
sf.Err <- ctx.Err()
return
case gblPinInfo, more := <-sf.In:
if !more {
return
}
ok, err := statusReached(fp.Target, gblPinInfo, fp.Limit)
if err != nil {
sf.Err <- err
return
}
sf.Out <- gblPinInfo
if !ok {
continue
}
return
}
}
}
func (sf *statusFilter) pollStatus(ctx context.Context, c Client, fp StatusFilterParams) {
ticker := time.NewTicker(fp.CheckFreq)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
sf.Err <- ctx.Err()
return
case <-ticker.C:
gblPinInfo, err := c.Status(ctx, fp.Cid, fp.Local)
if err != nil {
sf.Err <- err
return
}
logger.Debugf("pollStatus: status: %#v", gblPinInfo)
sf.In <- gblPinInfo
case <-sf.Done:
close(sf.In)
return
}
}
}
func statusReached(target api.TrackerStatus, gblPinInfo api.GlobalPinInfo, limit int) (bool, error) {
// Specific case: return error if there are errors
for _, pinInfo := range gblPinInfo.PeerMap {
switch pinInfo.Status {
case api.TrackerStatusUndefined,
api.TrackerStatusClusterError,
api.TrackerStatusPinError,
api.TrackerStatusUnpinError:
return false, fmt.Errorf("error has occurred while attempting to reach status: %s", target.String())
}
}
// Specific case: when limit is set, just count how many targets we
// reached.
if limit > 0 {
total := 0
for _, pinInfo := range gblPinInfo.PeerMap {
if pinInfo.Status == target {
total++
}
}
return total >= limit, nil
}
// General case: all statuses should be the target.
// Specific case: when looking for Pinned, ignore status remote.
for _, pinInfo := range gblPinInfo.PeerMap {
if pinInfo.Status == api.TrackerStatusRemote && target == api.TrackerStatusPinned {
continue
}
if pinInfo.Status == target {
continue
}
return false, nil
}
// All statuses are the target, as otherwise we would have returned
// false.
return true, nil
}
// logic drawn from go-ipfs-cmds/cli/parse.go: appendFile
func makeSerialFile(fpath string, params api.AddParams) (string, files.Node, error) {
if fpath == "." {
cwd, err := os.Getwd()
if err != nil {
return "", nil, err
}
cwd, err = filepath.EvalSymlinks(cwd)
if err != nil {
return "", nil, err
}
fpath = cwd
}
fpath = filepath.ToSlash(filepath.Clean(fpath))
stat, err := os.Lstat(fpath)
if err != nil {
return "", nil, err
}
if stat.IsDir() {
if !params.Recursive {
return "", nil, fmt.Errorf("%s is a directory, but Recursive option is not set", fpath)
}
}
sf, err := files.NewSerialFile(fpath, params.Hidden, stat)
return path.Base(fpath), sf, err
}
// Add imports files to the cluster from the given paths. A path can
// either be a local filesystem location or a web URL (http:// or https://).
// In the latter case, the content will be downloaded with a GET request.
// The AddParams allow controlling different options, like sharding the
// resulting DAG across the IPFS daemons of multiple cluster
// peers. The output channel will receive regular updates as the adding
// process progresses.
func (c *defaultClient) Add(
ctx context.Context,
paths []string,
params api.AddParams,
out chan<- api.AddedOutput,
) error {
ctx, span := trace.StartSpan(ctx, "client/Add")
defer span.End()
addFiles := make([]files.DirEntry, len(paths))
for i, p := range paths {
u, err := url.Parse(p)
if err != nil {
close(out)
return fmt.Errorf("error parsing path: %s", err)
}
var name string
var addFile files.Node
if strings.HasPrefix(u.Scheme, "http") {
addFile = files.NewWebFile(u)
name = path.Base(u.Path)
} else {
if params.NoCopy {
close(out)
return fmt.Errorf("nocopy option is only valid for URLs")
}
name, addFile, err = makeSerialFile(p, params)
if err != nil {
close(out)
return err
}
}
addFiles[i] = files.FileEntry(name, addFile)
}
sliceFile := files.NewSliceDirectory(addFiles)
// If `form` is set to true, the multipart data will have
// a Content-Type of 'multipart/form-data', if `form` is false,
// the Content-Type will be 'multipart/mixed'.
return c.AddMultiFile(ctx, files.NewMultiFileReader(sliceFile, true), params, out)
}
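// A minimal usage sketch (assuming a local file "./data"; the consumer
// drains the output channel, which Add closes when it finishes):
//
//	out := make(chan api.AddedOutput, 10)
//	go func() {
//		for o := range out {
//			fmt.Println(o.Name, o.Cid)
//		}
//	}()
//	err := c.Add(ctx, []string{"./data"}, api.DefaultAddParams(), out)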
// AddMultiFile imports new files from a MultiFileReader. See Add().
func (c *defaultClient) AddMultiFile(
ctx context.Context,
multiFileR *files.MultiFileReader,
params api.AddParams,
out chan<- api.AddedOutput,
) error {
ctx, span := trace.StartSpan(ctx, "client/AddMultiFile")
defer span.End()
defer close(out)
headers := make(map[string]string)
headers["Content-Type"] = "multipart/form-data; boundary=" + multiFileR.Boundary()
// This method must run with StreamChannels set.
params.StreamChannels = true
queryStr, err := params.ToQueryString()
if err != nil {
return err
}
// our handler decodes an AddedOutput and puts it
// in the out channel.
handler := func(dec *json.Decoder) error {
if out == nil {
return nil
}
var obj api.AddedOutput
err := dec.Decode(&obj)
if err != nil {
return err
}
out <- obj
return nil
}
err = c.doStream(ctx,
"POST",
"/add?"+queryStr,
headers,
multiFileR,
handler,
)
return err
}


@ -1,905 +0,0 @@
package client
import (
"context"
"errors"
"sync"
"testing"
"time"
types "github.com/ipfs-cluster/ipfs-cluster/api"
rest "github.com/ipfs-cluster/ipfs-cluster/api/rest"
test "github.com/ipfs-cluster/ipfs-cluster/test"
rpc "github.com/libp2p/go-libp2p-gorpc"
peer "github.com/libp2p/go-libp2p/core/peer"
ma "github.com/multiformats/go-multiaddr"
)
func testClients(t *testing.T, api *rest.API, f func(*testing.T, Client)) {
t.Run("in-parallel", func(t *testing.T) {
t.Run("libp2p", func(t *testing.T) {
t.Parallel()
f(t, testClientLibp2p(t, api))
})
t.Run("http", func(t *testing.T) {
t.Parallel()
f(t, testClientHTTP(t, api))
})
})
}
func TestVersion(t *testing.T) {
ctx := context.Background()
api := testAPI(t)
defer shutdown(api)
testF := func(t *testing.T, c Client) {
v, err := c.Version(ctx)
if err != nil || v.Version == "" {
t.Logf("%+v", v)
t.Log(err)
t.Error("expected something in version")
}
}
testClients(t, api, testF)
}
func TestID(t *testing.T) {
ctx := context.Background()
api := testAPI(t)
defer shutdown(api)
testF := func(t *testing.T, c Client) {
id, err := c.ID(ctx)
if err != nil {
t.Fatal(err)
}
if id.ID == "" {
t.Error("bad id")
}
}
testClients(t, api, testF)
}
func TestPeers(t *testing.T) {
ctx := context.Background()
api := testAPI(t)
defer shutdown(api)
testF := func(t *testing.T, c Client) {
out := make(chan types.ID, 10)
err := c.Peers(ctx, out)
if err != nil {
t.Fatal(err)
}
if len(out) == 0 {
t.Error("expected some peers")
}
}
testClients(t, api, testF)
}
func TestPeersWithError(t *testing.T) {
ctx := context.Background()
api := testAPI(t)
defer shutdown(api)
testF := func(t *testing.T, c Client) {
addr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/44444")
c, _ = NewDefaultClient(&Config{APIAddr: addr, DisableKeepAlives: true})
out := make(chan types.ID, 10)
err := c.Peers(ctx, out)
if err == nil {
t.Fatal("expected error")
}
if len(out) > 0 {
t.Fatal("expected no ids")
}
}
testClients(t, api, testF)
}
func TestPeerAdd(t *testing.T) {
ctx := context.Background()
api := testAPI(t)
defer shutdown(api)
testF := func(t *testing.T, c Client) {
id, err := c.PeerAdd(ctx, test.PeerID1)
if err != nil {
t.Fatal(err)
}
if id.ID != test.PeerID1 {
t.Error("bad peer")
}
}
testClients(t, api, testF)
}
func TestPeerRm(t *testing.T) {
ctx := context.Background()
api := testAPI(t)
defer shutdown(api)
testF := func(t *testing.T, c Client) {
err := c.PeerRm(ctx, test.PeerID1)
if err != nil {
t.Fatal(err)
}
}
testClients(t, api, testF)
}
func TestPin(t *testing.T) {
ctx := context.Background()
api := testAPI(t)
defer shutdown(api)
testF := func(t *testing.T, c Client) {
opts := types.PinOptions{
ReplicationFactorMin: 6,
ReplicationFactorMax: 7,
Name: "hello there",
}
_, err := c.Pin(ctx, test.Cid1, opts)
if err != nil {
t.Fatal(err)
}
}
testClients(t, api, testF)
}
func TestUnpin(t *testing.T) {
ctx := context.Background()
api := testAPI(t)
defer shutdown(api)
testF := func(t *testing.T, c Client) {
_, err := c.Unpin(ctx, test.Cid1)
if err != nil {
t.Fatal(err)
}
}
testClients(t, api, testF)
}
type pathCase struct {
path string
wantErr bool
expectedCid string
}
var pathTestCases = []pathCase{
{
test.CidResolved.String(),
false,
test.CidResolved.String(),
},
{
test.PathIPFS1,
false,
"QmaNJ5acV31sx8jq626qTpAWW4DXKw34aGhx53dECLvXbY",
},
{
test.PathIPFS2,
false,
test.CidResolved.String(),
},
{
test.PathIPNS1,
false,
test.CidResolved.String(),
},
{
test.PathIPLD1,
false,
"QmaNJ5acV31sx8jq626qTpAWW4DXKw34aGhx53dECLvXbY",
},
{
test.InvalidPath1,
true,
"",
},
}
func TestPinPath(t *testing.T) {
ctx := context.Background()
api := testAPI(t)
defer shutdown(api)
opts := types.PinOptions{
ReplicationFactorMin: 6,
ReplicationFactorMax: 7,
Name: "hello there",
UserAllocations: []peer.ID{test.PeerID1, test.PeerID2},
}
testF := func(t *testing.T, c Client) {
for _, testCase := range pathTestCases {
ec, _ := types.DecodeCid(testCase.expectedCid)
resultantPin := types.PinWithOpts(ec, opts)
p := testCase.path
pin, err := c.PinPath(ctx, p, opts)
if err != nil {
if testCase.wantErr {
continue
}
t.Fatalf("unexpected error %s: %s", p, err)
}
if !pin.Equals(resultantPin) {
t.Errorf("expected different pin: %s", p)
t.Errorf("expected: %+v", resultantPin)
t.Errorf("actual: %+v", pin)
}
}
}
testClients(t, api, testF)
}
func TestUnpinPath(t *testing.T) {
ctx := context.Background()
api := testAPI(t)
defer shutdown(api)
testF := func(t *testing.T, c Client) {
for _, testCase := range pathTestCases {
p := testCase.path
pin, err := c.UnpinPath(ctx, p)
if err != nil {
if testCase.wantErr {
continue
}
t.Fatalf("unepected error %s: %s", p, err)
}
if pin.Cid.String() != testCase.expectedCid {
t.Errorf("bad resolved Cid: %s, %s", p, pin.Cid)
}
}
}
testClients(t, api, testF)
}
func TestAllocations(t *testing.T) {
ctx := context.Background()
api := testAPI(t)
defer shutdown(api)
testF := func(t *testing.T, c Client) {
pins := make(chan types.Pin)
n := 0
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
for range pins {
n++
}
}()
err := c.Allocations(ctx, types.DataType|types.MetaType, pins)
if err != nil {
t.Fatal(err)
}
wg.Wait()
if n == 0 {
t.Error("should be some pins")
}
}
testClients(t, api, testF)
}
func TestAllocation(t *testing.T) {
ctx := context.Background()
api := testAPI(t)
defer shutdown(api)
testF := func(t *testing.T, c Client) {
pin, err := c.Allocation(ctx, test.Cid1)
if err != nil {
t.Fatal(err)
}
if !pin.Cid.Equals(test.Cid1) {
t.Error("should be same pin")
}
}
testClients(t, api, testF)
}
func TestStatus(t *testing.T) {
ctx := context.Background()
api := testAPI(t)
defer shutdown(api)
testF := func(t *testing.T, c Client) {
pin, err := c.Status(ctx, test.Cid1, false)
if err != nil {
t.Fatal(err)
}
if !pin.Cid.Equals(test.Cid1) {
t.Error("should be same pin")
}
}
testClients(t, api, testF)
}
func TestStatusCids(t *testing.T) {
ctx := context.Background()
api := testAPI(t)
defer shutdown(api)
testF := func(t *testing.T, c Client) {
out := make(chan types.GlobalPinInfo)
go func() {
err := c.StatusCids(ctx, []types.Cid{test.Cid1}, false, out)
if err != nil {
t.Error(err)
}
}()
pins := collectGlobalPinInfos(t, out)
if len(pins) != 1 {
t.Fatal("wrong number of pins returned")
}
if !pins[0].Cid.Equals(test.Cid1) {
t.Error("should be same pin")
}
}
testClients(t, api, testF)
}
func collectGlobalPinInfos(t *testing.T, out <-chan types.GlobalPinInfo) []types.GlobalPinInfo {
t.Helper()
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
var gpis []types.GlobalPinInfo
for {
select {
case <-ctx.Done():
t.Error(ctx.Err())
return gpis
case gpi, ok := <-out:
if !ok {
return gpis
}
gpis = append(gpis, gpi)
}
}
}
func TestStatusAll(t *testing.T) {
ctx := context.Background()
api := testAPI(t)
defer shutdown(api)
testF := func(t *testing.T, c Client) {
out := make(chan types.GlobalPinInfo)
go func() {
err := c.StatusAll(ctx, 0, false, out)
if err != nil {
t.Error(err)
}
}()
pins := collectGlobalPinInfos(t, out)
if len(pins) == 0 {
t.Error("there should be some pins")
}
out2 := make(chan types.GlobalPinInfo)
go func() {
err := c.StatusAll(ctx, 0, true, out2)
if err != nil {
t.Error(err)
}
}()
pins = collectGlobalPinInfos(t, out2)
if len(pins) != 2 {
t.Error("there should be two pins")
}
out3 := make(chan types.GlobalPinInfo)
go func() {
err := c.StatusAll(ctx, types.TrackerStatusPinning, false, out3)
if err != nil {
t.Error(err)
}
}()
pins = collectGlobalPinInfos(t, out3)
if len(pins) != 1 {
t.Error("there should be one pin")
}
out4 := make(chan types.GlobalPinInfo)
go func() {
err := c.StatusAll(ctx, types.TrackerStatusPinned|types.TrackerStatusError, false, out4)
if err != nil {
t.Error(err)
}
}()
pins = collectGlobalPinInfos(t, out4)
if len(pins) != 2 {
t.Error("there should be two pins")
}
out5 := make(chan types.GlobalPinInfo, 1)
err := c.StatusAll(ctx, 1<<25, false, out5)
if err == nil {
t.Error("expected an error")
}
}
testClients(t, api, testF)
}
func TestRecover(t *testing.T) {
ctx := context.Background()
api := testAPI(t)
defer shutdown(api)
testF := func(t *testing.T, c Client) {
pin, err := c.Recover(ctx, test.Cid1, false)
if err != nil {
t.Fatal(err)
}
if !pin.Cid.Equals(test.Cid1) {
t.Error("should be same pin")
}
}
testClients(t, api, testF)
}
func TestRecoverAll(t *testing.T) {
ctx := context.Background()
api := testAPI(t)
defer shutdown(api)
testF := func(t *testing.T, c Client) {
out := make(chan types.GlobalPinInfo, 10)
err := c.RecoverAll(ctx, true, out)
if err != nil {
t.Fatal(err)
}
out2 := make(chan types.GlobalPinInfo, 10)
err = c.RecoverAll(ctx, false, out2)
if err != nil {
t.Fatal(err)
}
}
testClients(t, api, testF)
}
func TestAlerts(t *testing.T) {
ctx := context.Background()
api := testAPI(t)
defer shutdown(api)
testF := func(t *testing.T, c Client) {
alerts, err := c.Alerts(ctx)
if err != nil {
t.Fatal(err)
}
if len(alerts) != 1 {
t.Fatal("expected 1 alert")
}
pID2 := test.PeerID2.String()
if alerts[0].Peer != test.PeerID2 {
t.Errorf("expected an alert from %s", pID2)
}
}
testClients(t, api, testF)
}
func TestGetConnectGraph(t *testing.T) {
ctx := context.Background()
api := testAPI(t)
defer shutdown(api)
testF := func(t *testing.T, c Client) {
cg, err := c.GetConnectGraph(ctx)
if err != nil {
t.Fatal(err)
}
if len(cg.IPFSLinks) != 3 || len(cg.ClusterLinks) != 3 ||
len(cg.ClustertoIPFS) != 3 {
t.Fatal("Bad graph")
}
}
testClients(t, api, testF)
}
func TestMetrics(t *testing.T) {
ctx := context.Background()
api := testAPI(t)
defer shutdown(api)
testF := func(t *testing.T, c Client) {
m, err := c.Metrics(ctx, "somemetricstype")
if err != nil {
t.Fatal(err)
}
if len(m) == 0 {
t.Fatal("No metrics found")
}
}
testClients(t, api, testF)
}
func TestMetricNames(t *testing.T) {
ctx := context.Background()
api := testAPI(t)
defer shutdown(api)
testF := func(t *testing.T, c Client) {
m, err := c.MetricNames(ctx)
if err != nil {
t.Fatal(err)
}
if len(m) == 0 {
t.Fatal("No metric names found")
}
}
testClients(t, api, testF)
}
type waitService struct {
l sync.Mutex
pinStart time.Time
}
func (wait *waitService) Pin(ctx context.Context, in types.Pin, out *types.Pin) error {
wait.l.Lock()
defer wait.l.Unlock()
wait.pinStart = time.Now()
*out = in
return nil
}
func (wait *waitService) Status(ctx context.Context, in types.Cid, out *types.GlobalPinInfo) error {
wait.l.Lock()
defer wait.l.Unlock()
if time.Now().After(wait.pinStart.Add(5 * time.Second)) { // pinned
*out = types.GlobalPinInfo{
Cid: in,
PeerMap: map[string]types.PinInfoShort{
test.PeerID1.String(): {
Status: types.TrackerStatusPinned,
TS: wait.pinStart,
},
test.PeerID2.String(): {
Status: types.TrackerStatusPinned,
TS: wait.pinStart,
},
test.PeerID3.String(): {
Status: types.TrackerStatusPinning,
TS: wait.pinStart,
},
test.PeerID4.String(): { // distinct key: a repeated PeerID3 entry would overwrite the one above
Status: types.TrackerStatusRemote,
TS: wait.pinStart,
},
},
}
} else { // pinning
*out = types.GlobalPinInfo{
Cid: in,
PeerMap: map[string]types.PinInfoShort{
test.PeerID1.String(): {
Status: types.TrackerStatusPinning,
TS: wait.pinStart,
},
test.PeerID2.String(): {
Status: types.TrackerStatusPinned,
TS: wait.pinStart,
},
test.PeerID3.String(): {
Status: types.TrackerStatusPinning,
TS: wait.pinStart,
},
test.PeerID4.String(): { // distinct key: a repeated PeerID3 entry would overwrite the one above
Status: types.TrackerStatusRemote,
TS: wait.pinStart,
},
},
}
}
return nil
}
func (wait *waitService) PinGet(ctx context.Context, in types.Cid, out *types.Pin) error {
p := types.PinCid(in)
p.ReplicationFactorMin = 2
p.ReplicationFactorMax = 3
*out = p
return nil
}
type waitServiceUnpin struct {
l sync.Mutex
unpinStart time.Time
}
func (wait *waitServiceUnpin) Unpin(ctx context.Context, in types.Pin, out *types.Pin) error {
wait.l.Lock()
defer wait.l.Unlock()
wait.unpinStart = time.Now()
return nil
}
func (wait *waitServiceUnpin) Status(ctx context.Context, in types.Cid, out *types.GlobalPinInfo) error {
wait.l.Lock()
defer wait.l.Unlock()
if time.Now().After(wait.unpinStart.Add(5 * time.Second)) { // unpinned
*out = types.GlobalPinInfo{
Cid: in,
PeerMap: map[string]types.PinInfoShort{
test.PeerID1.String(): {
Status: types.TrackerStatusUnpinned,
TS: wait.unpinStart,
},
test.PeerID2.String(): {
Status: types.TrackerStatusUnpinned,
TS: wait.unpinStart,
},
},
}
} else { // unpinning
*out = types.GlobalPinInfo{
Cid: in,
PeerMap: map[string]types.PinInfoShort{
test.PeerID1.String(): {
Status: types.TrackerStatusUnpinning,
TS: wait.unpinStart,
},
test.PeerID2.String(): {
Status: types.TrackerStatusUnpinning,
TS: wait.unpinStart,
},
},
}
}
return nil
}
func (wait *waitServiceUnpin) PinGet(ctx context.Context, in types.Cid, out *types.Pin) error {
return errors.New("not found")
}
func TestWaitForPin(t *testing.T) {
ctx := context.Background()
tapi := testAPI(t)
defer shutdown(tapi)
rpcS := rpc.NewServer(nil, "wait")
rpcC := rpc.NewClientWithServer(nil, "wait", rpcS)
err := rpcS.RegisterName("Cluster", &waitService{})
if err != nil {
t.Fatal(err)
}
tapi.SetClient(rpcC)
testF := func(t *testing.T, c Client) {
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
fp := StatusFilterParams{
Cid: test.Cid1,
Local: false,
Target: types.TrackerStatusPinned,
CheckFreq: time.Second,
}
start := time.Now()
st, err := WaitFor(ctx, c, fp)
if err != nil {
t.Error(err)
return
}
if time.Since(start) <= 5*time.Second {
t.Error("slow pin should have taken at least 5 seconds")
return
}
totalPinned := 0
for _, pi := range st.PeerMap {
if pi.Status == types.TrackerStatusPinned {
totalPinned++
}
}
if totalPinned < 2 { // repl factor min
t.Error("pin info should show the item is pinnedin two places at least")
}
}()
_, err := c.Pin(ctx, test.Cid1, types.PinOptions{ReplicationFactorMin: 0, ReplicationFactorMax: 0, Name: "test", ShardSize: 0})
if err != nil {
t.Fatal(err)
}
wg.Wait()
}
testClients(t, tapi, testF)
}
func TestWaitForUnpin(t *testing.T) {
ctx := context.Background()
tapi := testAPI(t)
defer shutdown(tapi)
rpcS := rpc.NewServer(nil, "wait")
rpcC := rpc.NewClientWithServer(nil, "wait", rpcS)
err := rpcS.RegisterName("Cluster", &waitServiceUnpin{})
if err != nil {
t.Fatal(err)
}
tapi.SetClient(rpcC)
testF := func(t *testing.T, c Client) {
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
fp := StatusFilterParams{
Cid: test.Cid1,
Local: false,
Target: types.TrackerStatusUnpinned,
CheckFreq: time.Second,
}
start := time.Now()
st, err := WaitFor(ctx, c, fp)
if err != nil {
t.Error(err)
return
}
if time.Since(start) <= 5*time.Second {
t.Error("slow unpin should have taken at least 5 seconds")
return
}
for _, pi := range st.PeerMap {
if pi.Status != types.TrackerStatusUnpinned {
t.Error("the item should have been unpinned everywhere")
}
}
}()
_, err := c.Unpin(ctx, test.Cid1)
if err != nil {
t.Fatal(err)
}
wg.Wait()
}
testClients(t, tapi, testF)
}
func TestAddMultiFile(t *testing.T) {
ctx := context.Background()
api := testAPI(t)
defer api.Shutdown(ctx)
sth := test.NewShardingTestHelper()
defer sth.Clean(t)
testF := func(t *testing.T, c Client) {
mfr, closer := sth.GetTreeMultiReader(t)
defer closer.Close()
p := types.AddParams{
PinOptions: types.PinOptions{
ReplicationFactorMin: -1,
ReplicationFactorMax: -1,
Name: "test something",
ShardSize: 1024,
},
Shard: false,
Format: "",
IPFSAddParams: types.IPFSAddParams{
Chunker: "",
RawLeaves: false,
},
Hidden: false,
StreamChannels: true,
}
out := make(chan types.AddedOutput, 1)
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
for v := range out {
t.Logf("output: Name: %s. Hash: %s", v.Name, v.Cid)
}
}()
err := c.AddMultiFile(ctx, mfr, p, out)
if err != nil {
t.Fatal(err)
}
wg.Wait()
}
testClients(t, api, testF)
}
func TestRepoGC(t *testing.T) {
ctx := context.Background()
api := testAPI(t)
defer shutdown(api)
testF := func(t *testing.T, c Client) {
globalGC, err := c.RepoGC(ctx, false)
if err != nil {
t.Fatal(err)
}
if globalGC.PeerMap == nil {
t.Fatal("expected a non-nil peer map")
}
for _, gc := range globalGC.PeerMap {
if gc.Peer == "" {
t.Error("bad id")
}
if gc.Error != "" {
t.Error("did not expect any error")
}
if gc.Keys == nil {
t.Error("expected a non-nil array of IPFSRepoGC")
} else {
if !gc.Keys[0].Key.Equals(test.Cid1) {
t.Errorf("expected a different cid, expected: %s, found: %s", test.Cid1, gc.Keys[0].Key)
}
}
}
}
testClients(t, api, testF)
}

View file

@ -1,170 +0,0 @@
package client
import (
"context"
"encoding/json"
"errors"
"io"
"net/http"
"strings"
"github.com/ipfs-cluster/ipfs-cluster/api"
"go.uber.org/multierr"
"go.opencensus.io/trace"
)
type responseDecoder func(d *json.Decoder) error
func (c *defaultClient) do(
ctx context.Context,
method, path string,
headers map[string]string,
body io.Reader,
obj interface{},
) error {
resp, err := c.doRequest(ctx, method, path, headers, body)
if err != nil {
return api.Error{Code: 0, Message: err.Error()}
}
return c.handleResponse(resp, obj)
}
func (c *defaultClient) doStream(
ctx context.Context,
method, path string,
headers map[string]string,
body io.Reader,
outHandler responseDecoder,
) error {
resp, err := c.doRequest(ctx, method, path, headers, body)
if err != nil {
return api.Error{Code: 0, Message: err.Error()}
}
return c.handleStreamResponse(resp, outHandler)
}
func (c *defaultClient) doRequest(
ctx context.Context,
method, path string,
headers map[string]string,
body io.Reader,
) (*http.Response, error) {
span := trace.FromContext(ctx)
span.AddAttributes(
trace.StringAttribute("method", method),
trace.StringAttribute("path", path),
)
defer span.End()
urlpath := c.net + "://" + c.hostname + "/" + strings.TrimPrefix(path, "/")
logger.Debugf("%s: %s", method, urlpath)
r, err := http.NewRequestWithContext(ctx, method, urlpath, body)
if err != nil {
return nil, err
}
if c.config.DisableKeepAlives {
r.Close = true
}
if c.config.Username != "" {
r.SetBasicAuth(c.config.Username, c.config.Password)
}
for k, v := range headers {
r.Header.Set(k, v)
}
if body != nil {
r.ContentLength = -1 // this lets go use "chunked".
}
ctx = trace.NewContext(ctx, span)
r = r.WithContext(ctx)
return c.client.Do(r)
}
func (c *defaultClient) handleResponse(resp *http.Response, obj interface{}) error {
body, err := io.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
return api.Error{Code: resp.StatusCode, Message: err.Error()}
}
logger.Debugf("Response body: %s", body)
switch {
case resp.StatusCode == http.StatusAccepted:
logger.Debug("Request accepted")
case resp.StatusCode == http.StatusNoContent:
logger.Debug("Request succeeded. Response has no content")
default:
if resp.StatusCode > 399 && resp.StatusCode < 600 {
var apiErr api.Error
err = json.Unmarshal(body, &apiErr)
if err != nil {
// not JSON: e.g. plain-text 404 bodies.
return api.Error{
Code: resp.StatusCode,
Message: string(body),
}
}
return apiErr
}
err = json.Unmarshal(body, obj)
if err != nil {
return api.Error{
Code: resp.StatusCode,
Message: err.Error(),
}
}
}
return nil
}
func (c *defaultClient) handleStreamResponse(resp *http.Response, handler responseDecoder) error {
if resp.StatusCode > 399 && resp.StatusCode < 600 {
return c.handleResponse(resp, nil)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
return api.Error{
Code: resp.StatusCode,
Message: "expected streaming response with code 200/204",
}
}
dec := json.NewDecoder(resp.Body)
for {
err := handler(dec)
if err == io.EOF {
// we need to check trailers
break
}
if err != nil {
logger.Error(err)
return err
}
}
trailerErrs := resp.Trailer.Values("X-Stream-Error")
var err error
for _, trailerErr := range trailerErrs {
if trailerErr != "" {
err = multierr.Append(err, errors.New(trailerErr))
}
}
if err != nil {
return api.Error{
Code: 500,
Message: err.Error(),
}
}
return nil
}
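
A minimal server-side sketch of the X-Stream-Error contract that handleStreamResponse checks above, assuming standard net/http trailer semantics; the handler name and payload are hypothetical and not part of this package:

func exampleStreamHandler(w http.ResponseWriter, r *http.Request) {
	// Trailers must be declared before any body bytes are written.
	w.Header().Set("Trailer", "X-Stream-Error")
	w.WriteHeader(http.StatusOK)
	enc := json.NewEncoder(w)
	for _, item := range []int{1, 2, 3} {
		if err := enc.Encode(item); err != nil {
			// Populated after the body; the client surfaces it as an
			// api.Error with code 500.
			w.Header().Set("X-Stream-Error", err.Error())
			return
		}
	}
	// On success the trailer is left unset and the client sees no error.
}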

View file

@ -1,129 +0,0 @@
package client
import (
"context"
"crypto/tls"
"errors"
"net"
"net/http"
"time"
libp2p "github.com/libp2p/go-libp2p"
p2phttp "github.com/libp2p/go-libp2p-http"
peer "github.com/libp2p/go-libp2p/core/peer"
peerstore "github.com/libp2p/go-libp2p/core/peerstore"
noise "github.com/libp2p/go-libp2p/p2p/security/noise"
libp2ptls "github.com/libp2p/go-libp2p/p2p/security/tls"
tcp "github.com/libp2p/go-libp2p/p2p/transport/tcp"
websocket "github.com/libp2p/go-libp2p/p2p/transport/websocket"
madns "github.com/multiformats/go-multiaddr-dns"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/tv42/httpunix"
)
// This is essentially a copy of http.DefaultTransport. We should not
// mess with the original, since it is a global variable and we don't
// know who else uses it, so we create our own.
// TODO: Allow more configuration options.
func (c *defaultClient) defaultTransport() {
c.transport = &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
}
c.net = "http"
}
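
A small sketch of the hazard the comment above warns about, assuming only the standard library; the function below is illustrative and not part of this package:

func exampleDefaultTransportHazard() {
	// Mutating the shared http.DefaultTransport silently changes behavior
	// for every client in the process that relies on it.
	t := http.DefaultTransport.(*http.Transport)
	t.MaxIdleConns = 1
}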
func (c *defaultClient) enableLibp2p() error {
c.defaultTransport()
pinfo, err := peer.AddrInfoFromP2pAddr(c.config.APIAddr)
if err != nil {
return err
}
if len(pinfo.Addrs) == 0 {
return errors.New("APIAddr only includes a Peer ID")
}
if c.config.ProtectorKey != nil && len(c.config.ProtectorKey) > 0 {
if len(c.config.ProtectorKey) != 32 {
return errors.New("length of ProtectorKey should be 32")
}
}
transports := libp2p.DefaultTransports
// Private networks do not work over all default transports, so restrict
// to TCP and WebSocket when a ProtectorKey is set.
if c.config.ProtectorKey != nil {
transports = libp2p.ChainOptions(
libp2p.NoTransports,
libp2p.Transport(tcp.NewTCPTransport),
libp2p.Transport(websocket.New),
)
}
h, err := libp2p.New(
libp2p.PrivateNetwork(c.config.ProtectorKey),
libp2p.Security(noise.ID, noise.New),
libp2p.Security(libp2ptls.ID, libp2ptls.New),
transports,
)
if err != nil {
return err
}
ctx, cancel := context.WithTimeout(c.ctx, ResolveTimeout)
defer cancel()
resolvedAddrs, err := madns.Resolve(ctx, pinfo.Addrs[0])
if err != nil {
return err
}
h.Peerstore().AddAddrs(pinfo.ID, resolvedAddrs, peerstore.PermanentAddrTTL)
c.transport.RegisterProtocol("libp2p", p2phttp.NewTransport(h))
c.net = "libp2p"
c.p2p = h
c.hostname = pinfo.ID.String()
return nil
}
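
A hedged usage sketch of the wiring above: dialing the API over libp2p by passing a /p2p/ multiaddr as APIAddr. The address shape is a placeholder that must carry a real cluster peer ID, and ma refers to the go-multiaddr package (an assumption; it is not imported in this file):

func exampleLibp2pClient(peerAddr string) (Client, error) {
	// peerAddr is expected to look like
	// "/ip4/127.0.0.1/tcp/9096/p2p/<cluster-peer-id>".
	addr, err := ma.NewMultiaddr(peerAddr)
	if err != nil {
		return nil, err
	}
	return NewDefaultClient(&Config{APIAddr: addr})
}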
func (c *defaultClient) enableTLS() error {
c.defaultTransport()
// based on https://github.com/denji/golang-tls
c.transport.TLSClientConfig = &tls.Config{
MinVersion: tls.VersionTLS12,
CurvePreferences: []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256},
PreferServerCipherSuites: true,
CipherSuites: []uint16{
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
},
InsecureSkipVerify: c.config.NoVerifyCert,
}
c.net = "https"
return nil
}
func (c *defaultClient) enableUnix() error {
c.defaultTransport()
unixTransport := &httpunix.Transport{
DialTimeout: time.Second,
}
_, addr, err := manet.DialArgs(c.config.APIAddr)
if err != nil {
return err
}
unixTransport.RegisterLocation("restapi", addr)
c.transport.RegisterProtocol(httpunix.Scheme, unixTransport)
c.net = httpunix.Scheme
c.hostname = "restapi"
return nil
}
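
Similarly hedged, for the unix-socket transport registered above; the socket path is hypothetical and ma is again assumed to be the go-multiaddr package:

func exampleUnixClient() (Client, error) {
	addr, err := ma.NewMultiaddr("/unix/var/run/ipfs-cluster/api.sock")
	if err != nil {
		return nil, err
	}
	return NewDefaultClient(&Config{APIAddr: addr})
}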

View file

@ -1,130 +0,0 @@
package rest
import (
"net/http"
"time"
ma "github.com/multiformats/go-multiaddr"
"github.com/ipfs-cluster/ipfs-cluster/api"
"github.com/ipfs-cluster/ipfs-cluster/api/common"
)
const configKey = "restapi"
const envConfigKey = "cluster_restapi"
const minMaxHeaderBytes = 4096
// Default values for Config.
const (
DefaultReadTimeout = 0
DefaultReadHeaderTimeout = 5 * time.Second
DefaultWriteTimeout = 0
DefaultIdleTimeout = 120 * time.Second
DefaultMaxHeaderBytes = minMaxHeaderBytes
)
// Default values for Config.
var (
// DefaultHTTPListenAddrs contains default listen addresses for the HTTP API.
DefaultHTTPListenAddrs = []string{"/ip4/127.0.0.1/tcp/9094"}
DefaultHeaders = map[string][]string{}
)
// CORS defaults.
var (
DefaultCORSAllowedOrigins = []string{"*"}
DefaultCORSAllowedMethods = []string{
http.MethodGet,
}
// When empty, rs/cors sets sensible defaults:
// {"Origin", "Accept", "Content-Type", "X-Requested-With"}
DefaultCORSAllowedHeaders = []string{}
DefaultCORSExposedHeaders = []string{
"Content-Type",
"X-Stream-Output",
"X-Chunked-Output",
"X-Content-Length",
}
DefaultCORSAllowCredentials = true
DefaultCORSMaxAge time.Duration // 0 means "always"
)
// Config fully implements the config.ComponentConfig interface. Use
// NewConfig() to instantiate. Config embeds a common.Config object.
type Config struct {
common.Config
}
// NewConfig creates a Config object setting the necessary meta-fields in the
// common.Config embedded object.
func NewConfig() *Config {
cfg := Config{}
cfg.Config.ConfigKey = configKey
cfg.EnvConfigKey = envConfigKey
cfg.Logger = logger
cfg.RequestLogger = apiLogger
cfg.DefaultFunc = defaultFunc
cfg.APIErrorFunc = func(err error, status int) error {
return &api.Error{
Code: status,
Message: err.Error(),
}
}
return &cfg
}
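
A hedged usage sketch of the construction pattern described above; the CORS origin value is hypothetical:

func exampleRestConfig() (*Config, error) {
	cfg := NewConfig()
	// Default fills in working values: the 127.0.0.1:9094 listen address,
	// the CORS defaults and the timeouts defined below.
	if err := cfg.Default(); err != nil {
		return nil, err
	}
	cfg.CORSAllowedOrigins = []string{"https://example.net"}
	return cfg, nil
}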
// ConfigKey returns a human-friendly identifier for this type of
// Config.
func (cfg *Config) ConfigKey() string {
return configKey
}
// Default initializes this Config with working values.
func (cfg *Config) Default() error {
return defaultFunc(&cfg.Config)
}
// defaultFunc sets all defaults for this config.
func defaultFunc(cfg *common.Config) error {
// http
addrs := make([]ma.Multiaddr, 0, len(DefaultHTTPListenAddrs))
for _, def := range DefaultHTTPListenAddrs {
httpListen, err := ma.NewMultiaddr(def)
if err != nil {
return err
}
addrs = append(addrs, httpListen)
}
cfg.HTTPListenAddr = addrs
cfg.PathSSLCertFile = ""
cfg.PathSSLKeyFile = ""
cfg.ReadTimeout = DefaultReadTimeout
cfg.ReadHeaderTimeout = DefaultReadHeaderTimeout
cfg.WriteTimeout = DefaultWriteTimeout
cfg.IdleTimeout = DefaultIdleTimeout
cfg.MaxHeaderBytes = DefaultMaxHeaderBytes
// libp2p
cfg.ID = ""
cfg.PrivateKey = nil
cfg.Libp2pListenAddr = nil
// Auth
cfg.BasicAuthCredentials = nil
// Logs
cfg.HTTPLogFile = ""
// Headers
cfg.Headers = DefaultHeaders
cfg.CORSAllowedOrigins = DefaultCORSAllowedOrigins
cfg.CORSAllowedMethods = DefaultCORSAllowedMethods
cfg.CORSAllowedHeaders = DefaultCORSAllowedHeaders
cfg.CORSExposedHeaders = DefaultCORSExposedHeaders
cfg.CORSAllowCredentials = DefaultCORSAllowCredentials
cfg.CORSMaxAge = DefaultCORSMaxAge
return nil
}

View file

@ -1,856 +0,0 @@
// Package rest implements an IPFS Cluster API component. It provides
// a REST-ish API to interact with Cluster.
//
// The implemented API is based on the common.API component (refer to the
// module description there). The only thing this module does is provide route
// handling for the otherwise common API component.
package rest
import (
"context"
"encoding/json"
"errors"
"fmt"
"math/rand"
"net/http"
"strings"
"sync"
"time"
"github.com/ipfs-cluster/ipfs-cluster/adder/adderutils"
types "github.com/ipfs-cluster/ipfs-cluster/api"
"github.com/ipfs-cluster/ipfs-cluster/api/common"
logging "github.com/ipfs/go-log/v2"
rpc "github.com/libp2p/go-libp2p-gorpc"
"github.com/libp2p/go-libp2p/core/host"
peer "github.com/libp2p/go-libp2p/core/peer"
mux "github.com/gorilla/mux"
)
func init() {
rand.Seed(time.Now().UnixNano())
}
var (
logger = logging.Logger("restapi")
apiLogger = logging.Logger("restapilog")
)
type peerAddBody struct {
PeerID string `json:"peer_id"`
}
// API implements the REST API Component.
// It embeds a common.API.
type API struct {
*common.API
rpcClient *rpc.Client
config *Config
}
// NewAPI creates a new REST API component.
func NewAPI(ctx context.Context, cfg *Config) (*API, error) {
return NewAPIWithHost(ctx, cfg, nil)
}
// NewAPIWithHost creates a new REST API component using the given libp2p Host.
func NewAPIWithHost(ctx context.Context, cfg *Config, h host.Host) (*API, error) {
api := API{
config: cfg,
}
capi, err := common.NewAPIWithHost(ctx, &cfg.Config, h, api.routes)
api.API = capi
return &api, err
}
// Routes returns endpoints supported by this API.
func (api *API) routes(c *rpc.Client) []common.Route {
api.rpcClient = c
return []common.Route{
{
Name: "ID",
Method: "GET",
Pattern: "/id",
HandlerFunc: api.idHandler,
},
{
Name: "Version",
Method: "GET",
Pattern: "/version",
HandlerFunc: api.versionHandler,
},
{
Name: "Peers",
Method: "GET",
Pattern: "/peers",
HandlerFunc: api.peerListHandler,
},
{
Name: "PeerAdd",
Method: "POST",
Pattern: "/peers",
HandlerFunc: api.peerAddHandler,
},
{
Name: "PeerRemove",
Method: "DELETE",
Pattern: "/peers/{peer}",
HandlerFunc: api.peerRemoveHandler,
},
{
Name: "Add",
Method: "POST",
Pattern: "/add",
HandlerFunc: api.addHandler,
},
{
Name: "Allocations",
Method: "GET",
Pattern: "/allocations",
HandlerFunc: api.allocationsHandler,
},
{
Name: "Allocation",
Method: "GET",
Pattern: "/allocations/{hash}",
HandlerFunc: api.allocationHandler,
},
{
Name: "StatusAll",
Method: "GET",
Pattern: "/pins",
HandlerFunc: api.statusAllHandler,
},
{
Name: "Recover",
Method: "POST",
Pattern: "/pins/{hash}/recover",
HandlerFunc: api.recoverHandler,
},
{
Name: "RecoverAll",
Method: "POST",
Pattern: "/pins/recover",
HandlerFunc: api.recoverAllHandler,
},
{
Name: "Status",
Method: "GET",
Pattern: "/pins/{hash}",
HandlerFunc: api.statusHandler,
},
{
Name: "Pin",
Method: "POST",
Pattern: "/pins/{hash}",
HandlerFunc: api.pinHandler,
},
{
Name: "PinPath",
Method: "POST",
Pattern: "/pins/{keyType:ipfs|ipns|ipld}/{path:.*}",
HandlerFunc: api.pinPathHandler,
},
{
Name: "Unpin",
Method: "DELETE",
Pattern: "/pins/{hash}",
HandlerFunc: api.unpinHandler,
},
{
Name: "UnpinPath",
Method: "DELETE",
Pattern: "/pins/{keyType:ipfs|ipns|ipld}/{path:.*}",
HandlerFunc: api.unpinPathHandler,
},
{
Name: "RepoGC",
Method: "POST",
Pattern: "/ipfs/gc",
HandlerFunc: api.repoGCHandler,
},
{
Name: "ConnectionGraph",
Method: "GET",
Pattern: "/health/graph",
HandlerFunc: api.graphHandler,
},
{
Name: "Alerts",
Method: "GET",
Pattern: "/health/alerts",
HandlerFunc: api.alertsHandler,
},
{
Name: "Metrics",
Method: "GET",
Pattern: "/monitor/metrics/{name}",
HandlerFunc: api.metricsHandler,
},
{
Name: "MetricNames",
Method: "GET",
Pattern: "/monitor/metrics",
HandlerFunc: api.metricNamesHandler,
},
{
Name: "GetToken",
Method: "POST",
Pattern: "/token",
HandlerFunc: api.GenerateTokenHandler,
},
}
}
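
Since these routes are served as plain HTTP(S) endpoints, requests need no special client. A hedged sketch against the default listen address (127.0.0.1:9094, per the config defaults); the function name is illustrative only:

func exampleIDRequest() (*types.ID, error) {
	resp, err := http.Get("http://127.0.0.1:9094/id")
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	var id types.ID
	if err := json.NewDecoder(resp.Body).Decode(&id); err != nil {
		return nil, err
	}
	return &id, nil
}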
func (api *API) idHandler(w http.ResponseWriter, r *http.Request) {
var id types.ID
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"ID",
struct{}{},
&id,
)
api.SendResponse(w, common.SetStatusAutomatically, err, &id)
}
func (api *API) versionHandler(w http.ResponseWriter, r *http.Request) {
var v types.Version
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"Version",
struct{}{},
&v,
)
api.SendResponse(w, common.SetStatusAutomatically, err, v)
}
func (api *API) graphHandler(w http.ResponseWriter, r *http.Request) {
var graph types.ConnectGraph
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"ConnectGraph",
struct{}{},
&graph,
)
api.SendResponse(w, common.SetStatusAutomatically, err, graph)
}
func (api *API) metricsHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
name := vars["name"]
var metrics []types.Metric
err := api.rpcClient.CallContext(
r.Context(),
"",
"PeerMonitor",
"LatestMetrics",
name,
&metrics,
)
api.SendResponse(w, common.SetStatusAutomatically, err, metrics)
}
func (api *API) metricNamesHandler(w http.ResponseWriter, r *http.Request) {
var metricNames []string
err := api.rpcClient.CallContext(
r.Context(),
"",
"PeerMonitor",
"MetricNames",
struct{}{},
&metricNames,
)
api.SendResponse(w, common.SetStatusAutomatically, err, metricNames)
}
func (api *API) alertsHandler(w http.ResponseWriter, r *http.Request) {
var alerts []types.Alert
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"Alerts",
struct{}{},
&alerts,
)
api.SendResponse(w, common.SetStatusAutomatically, err, alerts)
}
func (api *API) addHandler(w http.ResponseWriter, r *http.Request) {
reader, err := r.MultipartReader()
if err != nil {
api.SendResponse(w, http.StatusBadRequest, err, nil)
return
}
params, err := types.AddParamsFromQuery(r.URL.Query())
if err != nil {
api.SendResponse(w, http.StatusBadRequest, err, nil)
return
}
api.SetHeaders(w)
// any errors sent as trailer
adderutils.AddMultipartHTTPHandler(
r.Context(),
api.rpcClient,
params,
reader,
w,
nil,
)
}
func (api *API) peerListHandler(w http.ResponseWriter, r *http.Request) {
in := make(chan struct{})
close(in)
out := make(chan types.ID, common.StreamChannelSize)
errCh := make(chan error, 1)
go func() {
defer close(errCh)
errCh <- api.rpcClient.Stream(
r.Context(),
"",
"Cluster",
"Peers",
in,
out,
)
}()
iter := func() (interface{}, bool, error) {
p, ok := <-out
return p, ok, nil
}
api.StreamResponse(w, iter, errCh)
}
func (api *API) peerAddHandler(w http.ResponseWriter, r *http.Request) {
dec := json.NewDecoder(r.Body)
defer r.Body.Close()
var addInfo peerAddBody
err := dec.Decode(&addInfo)
if err != nil {
api.SendResponse(w, http.StatusBadRequest, errors.New("error decoding request body"), nil)
return
}
pid, err := peer.Decode(addInfo.PeerID)
if err != nil {
api.SendResponse(w, http.StatusBadRequest, errors.New("error decoding peer_id"), nil)
return
}
var id types.ID
err = api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"PeerAdd",
pid,
&id,
)
api.SendResponse(w, common.SetStatusAutomatically, err, &id)
}
func (api *API) peerRemoveHandler(w http.ResponseWriter, r *http.Request) {
if p := api.ParsePidOrFail(w, r); p != "" {
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"PeerRemove",
p,
&struct{}{},
)
api.SendResponse(w, common.SetStatusAutomatically, err, nil)
}
}
func (api *API) pinHandler(w http.ResponseWriter, r *http.Request) {
if pin := api.ParseCidOrFail(w, r); pin.Defined() {
api.config.Logger.Debugf("rest api pinHandler: %s", pin.Cid)
// span.AddAttributes(trace.StringAttribute("cid", pin.Cid))
var pinObj types.Pin
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"Pin",
pin,
&pinObj,
)
api.SendResponse(w, common.SetStatusAutomatically, err, pinObj)
api.config.Logger.Debug("rest api pinHandler done")
}
}
func (api *API) unpinHandler(w http.ResponseWriter, r *http.Request) {
if pin := api.ParseCidOrFail(w, r); pin.Defined() {
api.config.Logger.Debugf("rest api unpinHandler: %s", pin.Cid)
// span.AddAttributes(trace.StringAttribute("cid", pin.Cid))
var pinObj types.Pin
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"Unpin",
pin,
&pinObj,
)
api.SendResponse(w, common.SetStatusAutomatically, err, pinObj)
api.config.Logger.Debug("rest api unpinHandler done")
}
}
func (api *API) pinPathHandler(w http.ResponseWriter, r *http.Request) {
var pin types.Pin
if pinpath := api.ParsePinPathOrFail(w, r); pinpath.Defined() {
api.config.Logger.Debugf("rest api pinPathHandler: %s", pinpath.Path)
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"PinPath",
pinpath,
&pin,
)
api.SendResponse(w, common.SetStatusAutomatically, err, pin)
api.config.Logger.Debug("rest api pinPathHandler done")
}
}
func (api *API) unpinPathHandler(w http.ResponseWriter, r *http.Request) {
var pin types.Pin
if pinpath := api.ParsePinPathOrFail(w, r); pinpath.Defined() {
api.config.Logger.Debugf("rest api unpinPathHandler: %s", pinpath.Path)
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"UnpinPath",
pinpath,
&pin,
)
api.SendResponse(w, common.SetStatusAutomatically, err, pin)
api.config.Logger.Debug("rest api unpinPathHandler done")
}
}
func (api *API) allocationsHandler(w http.ResponseWriter, r *http.Request) {
queryValues := r.URL.Query()
filterStr := queryValues.Get("filter")
var filter types.PinType
for _, f := range strings.Split(filterStr, ",") {
filter |= types.PinTypeFromString(f)
}
if filter == types.BadType {
api.SendResponse(w, http.StatusBadRequest, errors.New("invalid filter value"), nil)
return
}
in := make(chan struct{})
close(in)
out := make(chan types.Pin, common.StreamChannelSize)
errCh := make(chan error, 1)
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
go func() {
defer close(errCh)
errCh <- api.rpcClient.Stream(
r.Context(),
"",
"Cluster",
"Pins",
in,
out,
)
}()
iter := func() (interface{}, bool, error) {
var p types.Pin
var ok bool
iterloop:
for {
select {
case <-ctx.Done():
break iterloop
case p, ok = <-out:
if !ok {
break iterloop
}
// this means we keep iterating if no filter
// matched
if filter == types.AllType || filter&p.Type > 0 {
break iterloop
}
}
}
return p, ok, ctx.Err()
}
api.StreamResponse(w, iter, errCh)
}
func (api *API) allocationHandler(w http.ResponseWriter, r *http.Request) {
if pin := api.ParseCidOrFail(w, r); pin.Defined() {
var pinResp types.Pin
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"PinGet",
pin.Cid,
&pinResp,
)
api.SendResponse(w, common.SetStatusAutomatically, err, pinResp)
}
}
func (api *API) statusAllHandler(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
queryValues := r.URL.Query()
if queryValues.Get("cids") != "" {
api.statusCidsHandler(w, r)
return
}
local := queryValues.Get("local")
filterStr := queryValues.Get("filter")
filter := types.TrackerStatusFromString(filterStr)
// FIXME: This is a bit lazy, as "invalidxx,pinned" would result in a
// valid "pinned" filter.
if filter == types.TrackerStatusUndefined && filterStr != "" {
api.SendResponse(w, http.StatusBadRequest, errors.New("invalid filter value"), nil)
return
}
var iter common.StreamIterator
in := make(chan types.TrackerStatus, 1)
in <- filter
close(in)
errCh := make(chan error, 1)
if local == "true" {
out := make(chan types.PinInfo, common.StreamChannelSize)
iter = func() (interface{}, bool, error) {
select {
case <-ctx.Done():
return nil, false, ctx.Err()
case p, ok := <-out:
return p.ToGlobal(), ok, nil
}
}
go func() {
defer close(errCh)
errCh <- api.rpcClient.Stream(
r.Context(),
"",
"Cluster",
"StatusAllLocal",
in,
out,
)
}()
} else {
out := make(chan types.GlobalPinInfo, common.StreamChannelSize)
iter = func() (interface{}, bool, error) {
select {
case <-ctx.Done():
return nil, false, ctx.Err()
case p, ok := <-out:
return p, ok, nil
}
}
go func() {
defer close(errCh)
errCh <- api.rpcClient.Stream(
r.Context(),
"",
"Cluster",
"StatusAll",
in,
out,
)
}()
}
api.StreamResponse(w, iter, errCh)
}
// statusCidsHandler requests statuses for multiple CIDs in parallel.
func (api *API) statusCidsHandler(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
queryValues := r.URL.Query()
filterCidsStr := strings.Split(queryValues.Get("cids"), ",")
var cids []types.Cid
for _, cidStr := range filterCidsStr {
c, err := types.DecodeCid(cidStr)
if err != nil {
api.SendResponse(w, http.StatusBadRequest, fmt.Errorf("error decoding Cid: %w", err), nil)
return
}
cids = append(cids, c)
}
local := queryValues.Get("local")
gpiCh := make(chan types.GlobalPinInfo, len(cids))
errCh := make(chan error, len(cids))
var wg sync.WaitGroup
wg.Add(len(cids))
// Close channel when done
go func() {
wg.Wait()
close(errCh)
close(gpiCh)
}()
if local == "true" {
for _, ci := range cids {
go func(c types.Cid) {
defer wg.Done()
var pinInfo types.PinInfo
err := api.rpcClient.CallContext(
ctx,
"",
"Cluster",
"StatusLocal",
c,
&pinInfo,
)
if err != nil {
errCh <- err
return
}
gpiCh <- pinInfo.ToGlobal()
}(ci)
}
} else {
for _, ci := range cids {
go func(c types.Cid) {
defer wg.Done()
var pinInfo types.GlobalPinInfo
err := api.rpcClient.CallContext(
ctx,
"",
"Cluster",
"Status",
c,
&pinInfo,
)
if err != nil {
errCh <- err
return
}
gpiCh <- pinInfo
}(ci)
}
}
iter := func() (interface{}, bool, error) {
gpi, ok := <-gpiCh
return gpi, ok, nil
}
api.StreamResponse(w, iter, errCh)
}
func (api *API) statusHandler(w http.ResponseWriter, r *http.Request) {
queryValues := r.URL.Query()
local := queryValues.Get("local")
if pin := api.ParseCidOrFail(w, r); pin.Defined() {
if local == "true" {
var pinInfo types.PinInfo
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"StatusLocal",
pin.Cid,
&pinInfo,
)
api.SendResponse(w, common.SetStatusAutomatically, err, pinInfo.ToGlobal())
} else {
var pinInfo types.GlobalPinInfo
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"Status",
pin.Cid,
&pinInfo,
)
api.SendResponse(w, common.SetStatusAutomatically, err, pinInfo)
}
}
}
func (api *API) recoverAllHandler(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
queryValues := r.URL.Query()
local := queryValues.Get("local")
var iter common.StreamIterator
in := make(chan struct{})
close(in)
errCh := make(chan error, 1)
if local == "true" {
out := make(chan types.PinInfo, common.StreamChannelSize)
iter = func() (interface{}, bool, error) {
select {
case <-ctx.Done():
return nil, false, ctx.Err()
case p, ok := <-out:
return p.ToGlobal(), ok, nil
}
}
go func() {
defer close(errCh)
errCh <- api.rpcClient.Stream(
r.Context(),
"",
"Cluster",
"RecoverAllLocal",
in,
out,
)
}()
} else {
out := make(chan types.GlobalPinInfo, common.StreamChannelSize)
iter = func() (interface{}, bool, error) {
select {
case <-ctx.Done():
return nil, false, ctx.Err()
case p, ok := <-out:
return p, ok, nil
}
}
go func() {
defer close(errCh)
errCh <- api.rpcClient.Stream(
r.Context(),
"",
"Cluster",
"RecoverAll",
in,
out,
)
}()
}
api.StreamResponse(w, iter, errCh)
}
func (api *API) recoverHandler(w http.ResponseWriter, r *http.Request) {
queryValues := r.URL.Query()
local := queryValues.Get("local")
if pin := api.ParseCidOrFail(w, r); pin.Defined() {
if local == "true" {
var pinInfo types.PinInfo
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"RecoverLocal",
pin.Cid,
&pinInfo,
)
api.SendResponse(w, common.SetStatusAutomatically, err, pinInfo.ToGlobal())
} else {
var pinInfo types.GlobalPinInfo
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"Recover",
pin.Cid,
&pinInfo,
)
api.SendResponse(w, common.SetStatusAutomatically, err, pinInfo)
}
}
}
func (api *API) repoGCHandler(w http.ResponseWriter, r *http.Request) {
queryValues := r.URL.Query()
local := queryValues.Get("local")
if local == "true" {
var localRepoGC types.RepoGC
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"RepoGCLocal",
struct{}{},
&localRepoGC,
)
api.SendResponse(w, common.SetStatusAutomatically, err, repoGCToGlobal(localRepoGC))
return
}
var repoGC types.GlobalRepoGC
err := api.rpcClient.CallContext(
r.Context(),
"",
"Cluster",
"RepoGC",
struct{}{},
&repoGC,
)
api.SendResponse(w, common.SetStatusAutomatically, err, repoGC)
}
func repoGCToGlobal(r types.RepoGC) types.GlobalRepoGC {
return types.GlobalRepoGC{
PeerMap: map[string]types.RepoGC{
r.Peer.String(): r,
},
}
}

View file

@ -1,846 +0,0 @@
package rest
import (
"context"
"fmt"
"io"
"net/http"
"strings"
"testing"
"time"
"github.com/ipfs-cluster/ipfs-cluster/api"
test "github.com/ipfs-cluster/ipfs-cluster/api/common/test"
clustertest "github.com/ipfs-cluster/ipfs-cluster/test"
libp2p "github.com/libp2p/go-libp2p"
peer "github.com/libp2p/go-libp2p/core/peer"
ma "github.com/multiformats/go-multiaddr"
)
const (
SSLCertFile = "test/server.crt"
SSLKeyFile = "test/server.key"
clientOrigin = "myorigin"
validUserName = "validUserName"
validUserPassword = "validUserPassword"
adminUserName = "adminUserName"
adminUserPassword = "adminUserPassword"
invalidUserName = "invalidUserName"
invalidUserPassword = "invalidUserPassword"
)
func testAPIwithConfig(t *testing.T, cfg *Config, name string) *API {
ctx := context.Background()
apiMAddr, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/0")
h, err := libp2p.New(libp2p.ListenAddrs(apiMAddr))
if err != nil {
t.Fatal(err)
}
cfg.HTTPListenAddr = []ma.Multiaddr{apiMAddr}
rest, err := NewAPIWithHost(ctx, cfg, h)
if err != nil {
t.Fatalf("should be able to create a new %s API: %s", name, err)
}
// No keep alive for tests
rest.SetKeepAlivesEnabled(false)
rest.SetClient(clustertest.NewMockRPCClient(t))
return rest
}
func testAPI(t *testing.T) *API {
cfg := NewConfig()
cfg.Default()
cfg.CORSAllowedOrigins = []string{clientOrigin}
cfg.CORSAllowedMethods = []string{"GET", "POST", "DELETE"}
//cfg.CORSAllowedHeaders = []string{"Content-Type"}
cfg.CORSMaxAge = 10 * time.Minute
return testAPIwithConfig(t, cfg, "basic")
}
func TestRestAPIIDEndpoint(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url test.URLFunc) {
id := api.ID{}
test.MakeGet(t, rest, url(rest)+"/id", &id)
if id.ID.Pretty() != clustertest.PeerID1.Pretty() {
t.Error("expected correct id")
}
}
test.BothEndpoints(t, tf)
}
func TestAPIVersionEndpoint(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url test.URLFunc) {
ver := api.Version{}
test.MakeGet(t, rest, url(rest)+"/version", &ver)
if ver.Version != "0.0.mock" {
t.Error("expected correct version")
}
}
test.BothEndpoints(t, tf)
}
func TestAPIPeersEndpoint(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url test.URLFunc) {
var list []api.ID
test.MakeStreamingGet(t, rest, url(rest)+"/peers", &list, false)
if len(list) != 1 {
t.Fatal("expected 1 element")
}
if list[0].ID.Pretty() != clustertest.PeerID1.Pretty() {
t.Error("expected a different peer id list: ", list)
}
}
test.BothEndpoints(t, tf)
}
func TestAPIPeerAddEndpoint(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url test.URLFunc) {
id := api.ID{}
// post with valid body
body := fmt.Sprintf("{\"peer_id\":\"%s\"}", clustertest.PeerID1.Pretty())
t.Log(body)
test.MakePost(t, rest, url(rest)+"/peers", []byte(body), &id)
if id.ID.Pretty() != clustertest.PeerID1.Pretty() {
t.Error("expected correct ID")
}
if id.Error != "" {
t.Error("did not expect an error")
}
// Send invalid body
errResp := api.Error{}
test.MakePost(t, rest, url(rest)+"/peers", []byte("oeoeoeoe"), &errResp)
if errResp.Code != 400 {
t.Error("expected error with bad body")
}
// Send invalid peer id
test.MakePost(t, rest, url(rest)+"/peers", []byte("{\"peer_id\": \"ab\"}"), &errResp)
if errResp.Code != 400 {
t.Error("expected error with bad peer_id")
}
}
test.BothEndpoints(t, tf)
}
func TestAPIAddFileEndpointBadContentType(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url test.URLFunc) {
fmtStr1 := "/add?shard=true&repl_min=-1&repl_max=-1"
localURL := url(rest) + fmtStr1
errResp := api.Error{}
test.MakePost(t, rest, localURL, []byte("test"), &errResp)
if errResp.Code != 400 {
t.Error("expected error with bad content-type")
}
}
test.BothEndpoints(t, tf)
}
func TestAPIAddFileEndpointLocal(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
defer rest.Shutdown(ctx)
sth := clustertest.NewShardingTestHelper()
defer sth.Clean(t)
// This generates the testing files and
// writes them to disk.
// This is necessary here because we run tests
// in parallel, and otherwise a write-race might happen.
_, closer := sth.GetTreeMultiReader(t)
closer.Close()
tf := func(t *testing.T, url test.URLFunc) {
fmtStr1 := "/add?shard=false&repl_min=-1&repl_max=-1&stream-channels=true"
localURL := url(rest) + fmtStr1
body, closer := sth.GetTreeMultiReader(t)
defer closer.Close()
resp := api.AddedOutput{}
mpContentType := "multipart/form-data; boundary=" + body.Boundary()
test.MakeStreamingPost(t, rest, localURL, body, mpContentType, &resp)
// resp will contain the last object from the stream
if resp.Cid.String() != clustertest.ShardingDirBalancedRootCID {
t.Error("Bad Cid after adding: ", resp.Cid)
}
}
test.BothEndpoints(t, tf)
}
func TestAPIAddFileEndpointShard(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
defer rest.Shutdown(ctx)
sth := clustertest.NewShardingTestHelper()
defer sth.Clean(t)
// This generates the testing files and
// writes them to disk.
// This is necessary here because we run tests
// in parallel, and otherwise a write-race might happen.
_, closer := sth.GetTreeMultiReader(t)
closer.Close()
tf := func(t *testing.T, url test.URLFunc) {
body, closer := sth.GetTreeMultiReader(t)
defer closer.Close()
mpContentType := "multipart/form-data; boundary=" + body.Boundary()
resp := api.AddedOutput{}
fmtStr1 := "/add?shard=true&repl_min=-1&repl_max=-1&stream-channels=true&shard-size=1000000"
shardURL := url(rest) + fmtStr1
test.MakeStreamingPost(t, rest, shardURL, body, mpContentType, &resp)
}
test.BothEndpoints(t, tf)
}
func TestAPIAddFileEndpoint_StreamChannelsFalse(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
defer rest.Shutdown(ctx)
sth := clustertest.NewShardingTestHelper()
defer sth.Clean(t)
// This generates the testing files and
// writes them to disk.
// This is necessary here because we run tests
// in parallel, and otherwise a write-race might happen.
_, closer := sth.GetTreeMultiReader(t)
closer.Close()
tf := func(t *testing.T, url test.URLFunc) {
body, closer := sth.GetTreeMultiReader(t)
defer closer.Close()
fullBody, err := io.ReadAll(body)
if err != nil {
t.Fatal(err)
}
mpContentType := "multipart/form-data; boundary=" + body.Boundary()
resp := []api.AddedOutput{}
fmtStr1 := "/add?shard=false&repl_min=-1&repl_max=-1&stream-channels=false"
shardURL := url(rest) + fmtStr1
test.MakePostWithContentType(t, rest, shardURL, fullBody, mpContentType, &resp)
lastHash := resp[len(resp)-1]
if lastHash.Cid.String() != clustertest.ShardingDirBalancedRootCID {
t.Error("Bad Cid after adding: ", lastHash.Cid)
}
}
test.BothEndpoints(t, tf)
}
func TestAPIPeerRemoveEndpoint(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url test.URLFunc) {
test.MakeDelete(t, rest, url(rest)+"/peers/"+clustertest.PeerID1.Pretty(), &struct{}{})
}
test.BothEndpoints(t, tf)
}
func TestConnectGraphEndpoint(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url test.URLFunc) {
var cg api.ConnectGraph
test.MakeGet(t, rest, url(rest)+"/health/graph", &cg)
if cg.ClusterID.Pretty() != clustertest.PeerID1.Pretty() {
t.Error("unexpected cluster id")
}
if len(cg.IPFSLinks) != 3 {
t.Error("unexpected number of ipfs peers")
}
if len(cg.ClusterLinks) != 3 {
t.Error("unexpected number of cluster peers")
}
if len(cg.ClustertoIPFS) != 3 {
t.Error("unexpected number of cluster to ipfs links")
}
// test a few link values
pid1 := clustertest.PeerID1
pid4 := clustertest.PeerID4
if _, ok := cg.ClustertoIPFS[pid1.String()]; !ok {
t.Fatal("missing cluster peer 1 from cluster to peer links map")
}
if cg.ClustertoIPFS[pid1.String()] != pid4 {
t.Error("unexpected ipfs peer mapped to cluster peer 1 in graph")
}
}
test.BothEndpoints(t, tf)
}
func TestAPIPinEndpoint(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url test.URLFunc) {
// test regular post
test.MakePost(t, rest, url(rest)+"/pins/"+clustertest.Cid1.String(), []byte{}, &struct{}{})
errResp := api.Error{}
test.MakePost(t, rest, url(rest)+"/pins/"+clustertest.ErrorCid.String(), []byte{}, &errResp)
if errResp.Message != clustertest.ErrBadCid.Error() {
t.Error("expected different error: ", errResp.Message)
}
test.MakePost(t, rest, url(rest)+"/pins/abcd", []byte{}, &errResp)
if errResp.Code != 400 {
t.Error("should fail with bad Cid")
}
}
test.BothEndpoints(t, tf)
}
type pathCase struct {
path string
opts api.PinOptions
wantErr bool
code int
expectedCid string
}
func (p *pathCase) WithQuery(t *testing.T) string {
query, err := p.opts.ToQuery()
if err != nil {
t.Fatal(err)
}
return p.path + "?" + query
}
var testPinOpts = api.PinOptions{
ReplicationFactorMax: 7,
ReplicationFactorMin: 6,
Name: "hello there",
UserAllocations: []peer.ID{clustertest.PeerID1, clustertest.PeerID2},
ExpireAt: time.Now().Add(30 * time.Second),
}
var pathTestCases = []pathCase{
{
"/ipfs/QmaNJ5acV31sx8jq626qTpAWW4DXKw34aGhx53dECLvXbY",
testPinOpts,
false,
http.StatusOK,
"QmaNJ5acV31sx8jq626qTpAWW4DXKw34aGhx53dECLvXbY",
},
{
"/ipfs/QmbUNM297ZwxB8CfFAznK7H9YMesDoY6Tt5bPgt5MSCB2u/im.gif",
testPinOpts,
false,
http.StatusOK,
clustertest.CidResolved.String(),
},
{
"/ipfs/invalidhash",
testPinOpts,
true,
http.StatusBadRequest,
"",
},
{
"/ipfs/bafyreiay3jpjk74dkckv2r74eyvf3lfnxujefay2rtuluintasq2zlapv4",
testPinOpts,
true,
http.StatusNotFound,
"",
},
// TODO: A case with trailing slash with paths
// clustertest.PathIPNS2, clustertest.PathIPLD2, clustertest.InvalidPath1
}
func TestAPIPinEndpointWithPath(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url test.URLFunc) {
for _, testCase := range pathTestCases[:3] {
c, _ := api.DecodeCid(testCase.expectedCid)
resultantPin := api.PinWithOpts(
c,
testPinOpts,
)
if testCase.wantErr {
errResp := api.Error{}
q := testCase.WithQuery(t)
test.MakePost(t, rest, url(rest)+"/pins"+q, []byte{}, &errResp)
if errResp.Code != testCase.code {
t.Errorf(
"status code: expected: %d, got: %d, path: %s\n",
testCase.code,
errResp.Code,
testCase.path,
)
}
continue
}
pin := api.Pin{}
q := testCase.WithQuery(t)
test.MakePost(t, rest, url(rest)+"/pins"+q, []byte{}, &pin)
if !pin.Equals(resultantPin) {
t.Errorf("pin: expected: %+v", resultantPin)
t.Errorf("pin: got: %+v", pin)
t.Errorf("path: %s", testCase.path)
}
}
}
test.BothEndpoints(t, tf)
}
func TestAPIUnpinEndpoint(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url test.URLFunc) {
// test regular delete
test.MakeDelete(t, rest, url(rest)+"/pins/"+clustertest.Cid1.String(), &struct{}{})
errResp := api.Error{}
test.MakeDelete(t, rest, url(rest)+"/pins/"+clustertest.ErrorCid.String(), &errResp)
if errResp.Message != clustertest.ErrBadCid.Error() {
t.Error("expected different error: ", errResp.Message)
}
test.MakeDelete(t, rest, url(rest)+"/pins/"+clustertest.NotFoundCid.String(), &errResp)
if errResp.Code != http.StatusNotFound {
t.Error("expected different error code: ", errResp.Code)
}
test.MakeDelete(t, rest, url(rest)+"/pins/abcd", &errResp)
if errResp.Code != 400 {
t.Error("expected different error code: ", errResp.Code)
}
}
test.BothEndpoints(t, tf)
}
func TestAPIUnpinEndpointWithPath(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url test.URLFunc) {
for _, testCase := range pathTestCases {
if testCase.wantErr {
errResp := api.Error{}
test.MakeDelete(t, rest, url(rest)+"/pins"+testCase.path, &errResp)
if errResp.Code != testCase.code {
t.Errorf(
"status code: expected: %d, got: %d, path: %s\n",
testCase.code,
errResp.Code,
testCase.path,
)
}
continue
}
pin := api.Pin{}
test.MakeDelete(t, rest, url(rest)+"/pins"+testCase.path, &pin)
if pin.Cid.String() != testCase.expectedCid {
t.Errorf(
"cid: expected: %s, got: %s, path: %s\n",
clustertest.CidResolved,
pin.Cid,
testCase.path,
)
}
}
}
test.BothEndpoints(t, tf)
}
func TestAPIAllocationsEndpoint(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url test.URLFunc) {
var resp []api.Pin
test.MakeStreamingGet(t, rest, url(rest)+"/allocations?filter=pin,meta-pin", &resp, false)
if len(resp) != 3 ||
!resp[0].Cid.Equals(clustertest.Cid1) || !resp[1].Cid.Equals(clustertest.Cid2) ||
!resp[2].Cid.Equals(clustertest.Cid3) {
t.Error("unexpected pin list: ", resp)
}
test.MakeStreamingGet(t, rest, url(rest)+"/allocations", &resp, false)
if len(resp) != 3 ||
!resp[0].Cid.Equals(clustertest.Cid1) || !resp[1].Cid.Equals(clustertest.Cid2) ||
!resp[2].Cid.Equals(clustertest.Cid3) {
t.Error("unexpected pin list: ", resp)
}
errResp := api.Error{}
test.MakeStreamingGet(t, rest, url(rest)+"/allocations?filter=invalid", &errResp, false)
if errResp.Code != http.StatusBadRequest {
t.Error("an invalid filter value should 400")
}
}
test.BothEndpoints(t, tf)
}
func TestAPIAllocationEndpoint(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url test.URLFunc) {
var resp api.Pin
test.MakeGet(t, rest, url(rest)+"/allocations/"+clustertest.Cid1.String(), &resp)
if !resp.Cid.Equals(clustertest.Cid1) {
t.Errorf("cid should be the same: %s %s", resp.Cid, clustertest.Cid1)
}
errResp := api.Error{}
test.MakeGet(t, rest, url(rest)+"/allocations/"+clustertest.Cid4.String(), &errResp)
if errResp.Code != 404 {
t.Error("a non-pinned cid should 404")
}
}
test.BothEndpoints(t, tf)
}
func TestAPIMetricsEndpoint(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url test.URLFunc) {
var resp []api.Metric
test.MakeGet(t, rest, url(rest)+"/monitor/metrics/somemetricstype", &resp)
if len(resp) == 0 {
t.Fatal("No metrics found")
}
for _, m := range resp {
if m.Name != "test" {
t.Error("Unexpected metric name: ", m.Name)
}
if m.Peer.Pretty() != clustertest.PeerID1.Pretty() {
t.Error("Unexpected peer id: ", m.Peer)
}
}
}
test.BothEndpoints(t, tf)
}
func TestAPIMetricNamesEndpoint(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url test.URLFunc) {
var resp []string
test.MakeGet(t, rest, url(rest)+"/monitor/metrics", &resp)
if len(resp) == 0 {
t.Fatal("No metric names found")
}
}
test.BothEndpoints(t, tf)
}
func TestAPIAlertsEndpoint(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url test.URLFunc) {
var resp []api.Alert
test.MakeGet(t, rest, url(rest)+"/health/alerts", &resp)
if len(resp) != 1 {
t.Error("expected one alert")
}
}
test.BothEndpoints(t, tf)
}
func TestAPIStatusAllEndpoint(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url test.URLFunc) {
var resp []api.GlobalPinInfo
test.MakeStreamingGet(t, rest, url(rest)+"/pins", &resp, false)
// mockPinTracker returns 3 items for Cluster.StatusAll
if len(resp) != 3 ||
!resp[0].Cid.Equals(clustertest.Cid1) ||
resp[1].PeerMap[clustertest.PeerID1.String()].Status.String() != "pinning" {
t.Errorf("unexpected statusAll resp")
}
// Test local=true
var resp2 []api.GlobalPinInfo
test.MakeStreamingGet(t, rest, url(rest)+"/pins?local=true", &resp2, false)
// mockPinTracker calls pintracker.StatusAll which returns 2
// items.
if len(resp2) != 2 {
t.Errorf("unexpected statusAll+local resp:\n %+v", resp2)
}
// Test with filter
var resp3 []api.GlobalPinInfo
test.MakeStreamingGet(t, rest, url(rest)+"/pins?filter=queued", &resp3, false)
if len(resp3) != 0 {
t.Errorf("unexpected statusAll+filter=queued resp:\n %+v", resp3)
}
var resp4 []api.GlobalPinInfo
test.MakeStreamingGet(t, rest, url(rest)+"/pins?filter=pinned", &resp4, false)
if len(resp4) != 1 {
t.Errorf("unexpected statusAll+filter=pinned resp:\n %+v", resp4)
}
var resp5 []api.GlobalPinInfo
test.MakeStreamingGet(t, rest, url(rest)+"/pins?filter=pin_error", &resp5, false)
if len(resp5) != 1 {
t.Errorf("unexpected statusAll+filter=pin_error resp:\n %+v", resp5)
}
var resp6 []api.GlobalPinInfo
test.MakeStreamingGet(t, rest, url(rest)+"/pins?filter=error", &resp6, false)
if len(resp6) != 1 {
t.Errorf("unexpected statusAll+filter=error resp:\n %+v", resp6)
}
var resp7 []api.GlobalPinInfo
test.MakeStreamingGet(t, rest, url(rest)+"/pins?filter=error,pinned", &resp7, false)
if len(resp7) != 2 {
t.Errorf("unexpected statusAll+filter=error,pinned resp:\n %+v", resp7)
}
var errorResp api.Error
test.MakeStreamingGet(t, rest, url(rest)+"/pins?filter=invalid", &errorResp, false)
if errorResp.Code != http.StatusBadRequest {
t.Error("an invalid filter value should 400")
}
}
test.BothEndpoints(t, tf)
}
func TestAPIStatusAllWithCidsEndpoint(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url test.URLFunc) {
var resp []api.GlobalPinInfo
cids := []string{
clustertest.Cid1.String(),
clustertest.Cid2.String(),
clustertest.Cid3.String(),
clustertest.Cid4.String(),
}
test.MakeStreamingGet(t, rest, url(rest)+"/pins/?cids="+strings.Join(cids, ","), &resp, false)
if len(resp) != 4 {
t.Error("wrong number of responses")
}
// Test local=true
var resp2 []api.GlobalPinInfo
test.MakeStreamingGet(t, rest, url(rest)+"/pins/?local=true&cids="+strings.Join(cids, ","), &resp2, false)
if len(resp2) != 4 {
t.Error("wrong number of responses")
}
// Test with an error. This should produce a trailer error.
cids = append(cids, clustertest.ErrorCid.String())
var resp3 []api.GlobalPinInfo
test.MakeStreamingGet(t, rest, url(rest)+"/pins/?local=true&cids="+strings.Join(cids, ","), &resp3, true)
if len(resp3) != 4 {
t.Error("wrong number of responses")
}
}
test.BothEndpoints(t, tf)
}
func TestAPIStatusEndpoint(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url test.URLFunc) {
var resp api.GlobalPinInfo
test.MakeGet(t, rest, url(rest)+"/pins/"+clustertest.Cid1.String(), &resp)
if !resp.Cid.Equals(clustertest.Cid1) {
t.Error("expected the same cid")
}
info, ok := resp.PeerMap[clustertest.PeerID1.String()]
if !ok {
t.Fatal("expected info for clustertest.PeerID1")
}
if info.Status.String() != "pinned" {
t.Error("expected different status")
}
// Test local=true
var resp2 api.GlobalPinInfo
test.MakeGet(t, rest, url(rest)+"/pins/"+clustertest.Cid1.String()+"?local=true", &resp2)
if !resp2.Cid.Equals(clustertest.Cid1) {
t.Error("expected the same cid")
}
info, ok = resp2.PeerMap[clustertest.PeerID2.String()]
if !ok {
t.Fatal("expected info for clustertest.PeerID2")
}
if info.Status.String() != "pinned" {
t.Error("expected different status")
}
}
test.BothEndpoints(t, tf)
}
func TestAPIRecoverEndpoint(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url test.URLFunc) {
var resp api.GlobalPinInfo
test.MakePost(t, rest, url(rest)+"/pins/"+clustertest.Cid1.String()+"/recover", []byte{}, &resp)
if !resp.Cid.Equals(clustertest.Cid1) {
t.Error("expected the same cid")
}
info, ok := resp.PeerMap[clustertest.PeerID1.String()]
if !ok {
t.Fatal("expected info for clustertest.PeerID1")
}
if info.Status.String() != "pinned" {
t.Error("expected different status")
}
}
test.BothEndpoints(t, tf)
}
func TestAPIRecoverAllEndpoint(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
defer rest.Shutdown(ctx)
tf := func(t *testing.T, url test.URLFunc) {
var resp []api.GlobalPinInfo
test.MakeStreamingPost(t, rest, url(rest)+"/pins/recover?local=true", nil, "", &resp)
if len(resp) != 0 {
t.Fatal("bad response length")
}
var resp1 []api.GlobalPinInfo
test.MakeStreamingPost(t, rest, url(rest)+"/pins/recover", nil, "", &resp1)
if len(resp1) == 0 {
t.Fatal("bad response length")
}
}
test.BothEndpoints(t, tf)
}
func TestAPIIPFSGCEndpoint(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
defer rest.Shutdown(ctx)
testGlobalRepoGC := func(t *testing.T, gRepoGC api.GlobalRepoGC) {
if gRepoGC.PeerMap == nil {
t.Fatal("expected a non-nil peer map")
}
if len(gRepoGC.PeerMap) != 1 {
t.Error("expected repo gc information for one peer")
}
for _, repoGC := range gRepoGC.PeerMap {
if repoGC.Peer == "" {
t.Error("expected a cluster ID")
}
if repoGC.Error != "" {
t.Error("did not expect any error")
}
if repoGC.Keys == nil {
t.Fatal("expected a non-nil array of IPFSRepoGC")
}
if len(repoGC.Keys) == 0 {
t.Fatal("expected at least one key, but found none")
}
if !repoGC.Keys[0].Key.Equals(clustertest.Cid1) {
t.Errorf("expected a different cid, expected: %s, found: %s", clustertest.Cid1, repoGC.Keys[0].Key)
}
}
}
tf := func(t *testing.T, url test.URLFunc) {
var resp api.GlobalRepoGC
test.MakePost(t, rest, url(rest)+"/ipfs/gc?local=true", []byte{}, &resp)
testGlobalRepoGC(t, resp)
var resp1 api.GlobalRepoGC
test.MakePost(t, rest, url(rest)+"/ipfs/gc", []byte{}, &resp1)
testGlobalRepoGC(t, resp1)
}
test.BothEndpoints(t, tf)
}

File diff suppressed because it is too large


@ -1,283 +0,0 @@
package api
import (
"bytes"
"net/url"
"reflect"
"strings"
"testing"
"time"
peer "github.com/libp2p/go-libp2p/core/peer"
multiaddr "github.com/multiformats/go-multiaddr"
"github.com/ugorji/go/codec"
)
func TestTrackerFromString(t *testing.T) {
testcases := []string{"cluster_error", "pin_error", "unpin_error", "pinned", "pinning", "unpinning", "unpinned", "remote"}
for i, tc := range testcases {
if TrackerStatusFromString(tc).String() != TrackerStatus(1<<uint(i+1)).String() {
t.Errorf("%s does not match TrackerStatus %d", tc, i)
}
}
if TrackerStatusFromString("") != TrackerStatusUndefined ||
TrackerStatusFromString("xyz") != TrackerStatusUndefined {
t.Error("expected tracker status undefined for bad strings")
}
}
func TestIPFSPinStatusFromString(t *testing.T) {
testcases := []string{"direct", "recursive", "indirect"}
for i, tc := range testcases {
if IPFSPinStatusFromString(tc) != IPFSPinStatus(i+2) {
t.Errorf("%s does not match IPFSPinStatus %d", tc, i+2)
}
}
}
func BenchmarkIPFSPinStatusFromString(b *testing.B) {
for i := 0; i < b.N; i++ {
IPFSPinStatusFromString("indirect")
}
}
func TestMetric(t *testing.T) {
m := Metric{
Name: "hello",
Value: "abc",
}
if !m.Expired() {
t.Error("metric should be expired")
}
m.SetTTL(1 * time.Second)
if m.Expired() {
t.Error("metric should not be expired")
}
// let it expire
time.Sleep(1500 * time.Millisecond)
if !m.Expired() {
t.Error("metric should be expired")
}
m.SetTTL(30 * time.Second)
m.Valid = true
if m.Discard() {
t.Error("metric should be valid")
}
m.Valid = false
if !m.Discard() {
t.Error("metric should be invalid")
}
ttl := m.GetTTL()
if ttl > 30*time.Second || ttl < 29*time.Second {
t.Error("looks like a bad ttl")
}
}
func TestConvertPinType(t *testing.T) {
for _, t1 := range []PinType{BadType, ShardType} {
i := convertPinType(t1)
t2 := PinType(1 << uint64(i))
if t2 != t1 {
t.Error("bad conversion")
}
}
}
func checkDupTags(t *testing.T, name string, typ reflect.Type, tags map[string]struct{}) {
if tags == nil {
tags = make(map[string]struct{})
}
for i := 0; i < typ.NumField(); i++ {
f := typ.Field(i)
if f.Type.Kind() == reflect.Struct && f.Anonymous {
checkDupTags(t, name, f.Type, tags)
continue
}
tag := f.Tag.Get(name)
if tag == "" {
continue
}
val := strings.Split(tag, ",")[0]
t.Logf("%s: '%s:%s'", f.Name, name, val)
_, ok := tags[val]
if ok {
t.Errorf("%s: tag %s already used", f.Name, val)
}
tags[val] = struct{}{}
}
}
// TestDupTags checks that we are not re-using the same codec tag for
// different fields of the api types.
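// For example (illustrative only), two fields declared in the same
// struct as A string `codec:"x"` and B string `codec:"x"` would be
// reported by checkDupTags above.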
func TestDupTags(t *testing.T) {
typ := reflect.TypeOf(Pin{})
checkDupTags(t, "codec", typ, nil)
typ = reflect.TypeOf(ID{})
checkDupTags(t, "codec", typ, nil)
typ = reflect.TypeOf(GlobalPinInfo{})
checkDupTags(t, "codec", typ, nil)
typ = reflect.TypeOf(PinInfo{})
checkDupTags(t, "codec", typ, nil)
typ = reflect.TypeOf(ConnectGraph{})
checkDupTags(t, "codec", typ, nil)
typ = reflect.TypeOf(ID{})
checkDupTags(t, "codec", typ, nil)
typ = reflect.TypeOf(NodeWithMeta{})
checkDupTags(t, "codec", typ, nil)
typ = reflect.TypeOf(Metric{})
checkDupTags(t, "codec", typ, nil)
typ = reflect.TypeOf(Error{})
checkDupTags(t, "codec", typ, nil)
typ = reflect.TypeOf(IPFSRepoStat{})
checkDupTags(t, "codec", typ, nil)
typ = reflect.TypeOf(AddedOutput{})
checkDupTags(t, "codec", typ, nil)
}
func TestPinOptionsQuery(t *testing.T) {
testcases := []*PinOptions{
{
ReplicationFactorMax: 3,
ReplicationFactorMin: 2,
Name: "abc",
ShardSize: 33,
UserAllocations: StringsToPeers([]string{
"QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabc",
"QmUZ13osndQ5uL4tPWHXe3iBgBgq9gfewcBMSCAuMBsDJ6",
}),
ExpireAt: time.Now().Add(12 * time.Hour),
Metadata: map[string]string{
"hello": "bye",
"hello2": "bye2",
},
Origins: []Multiaddr{
NewMultiaddrWithValue(multiaddr.StringCast("/ip4/1.2.3.4/tcp/1234/p2p/12D3KooWKewdAMAU3WjYHm8qkAJc5eW6KHbHWNigWraXXtE1UCng")),
NewMultiaddrWithValue(multiaddr.StringCast("/ip4/2.3.3.4/tcp/1234/p2p/12D3KooWF6BgwX966ge5AVFs9Gd2wVTBmypxZVvaBR12eYnUmXkR")),
},
},
{
ReplicationFactorMax: -1,
ReplicationFactorMin: 0,
Name: "",
ShardSize: 0,
UserAllocations: []peer.ID{},
Metadata: nil,
},
{
ReplicationFactorMax: -1,
ReplicationFactorMin: 0,
Name: "",
ShardSize: 0,
UserAllocations: nil,
Metadata: map[string]string{
"": "bye",
},
},
}
for _, tc := range testcases {
queryStr, err := tc.ToQuery()
if err != nil {
t.Fatal("error converting to query", err)
}
q, err := url.ParseQuery(queryStr)
if err != nil {
t.Error("error parsing query", err)
}
po2 := PinOptions{}
err = po2.FromQuery(q)
if err != nil {
t.Fatal("error parsing options", err)
}
if !tc.Equals(po2) {
t.Error("expected equal PinOptions")
t.Error(queryStr)
t.Errorf("%+v\n", tc)
t.Errorf("%+v\n", po2)
}
}
}
func TestIDCodec(t *testing.T) {
TestPeerID1, _ := peer.Decode("QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabc")
TestPeerID2, _ := peer.Decode("QmUZ13osndQ5uL4tPWHXe3iBgBgq9gfewcBMSCAuMBsDJ6")
TestPeerID3, _ := peer.Decode("QmPGDFvBkgWhvzEK9qaTWrWurSwqXNmhnK3hgELPdZZNPa")
addr, _ := NewMultiaddr("/ip4/1.2.3.4")
id := &ID{
ID: TestPeerID1,
Addresses: []Multiaddr{addr},
ClusterPeers: []peer.ID{TestPeerID2},
ClusterPeersAddresses: []Multiaddr{addr},
Version: "2",
Commit: "",
RPCProtocolVersion: "abc",
Error: "",
IPFS: IPFSID{
ID: TestPeerID3,
Addresses: []Multiaddr{addr},
Error: "",
},
Peername: "hi",
}
var buf bytes.Buffer
enc := codec.NewEncoder(&buf, &codec.MsgpackHandle{})
err := enc.Encode(id)
if err != nil {
t.Fatal(err)
}
var buf2 = bytes.NewBuffer(buf.Bytes())
dec := codec.NewDecoder(buf2, &codec.MsgpackHandle{})
var id2 ID
err = dec.Decode(&id2)
if err != nil {
t.Fatal(err)
}
}
func TestPinCodec(t *testing.T) {
ci, _ := DecodeCid("QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabc")
pin := PinCid(ci)
var buf bytes.Buffer
enc := codec.NewEncoder(&buf, &codec.MsgpackHandle{})
err := enc.Encode(pin)
if err != nil {
t.Fatal(err)
}
var buf2 = bytes.NewBuffer(buf.Bytes())
dec := codec.NewDecoder(buf2, &codec.MsgpackHandle{})
var pin2 Pin
err = dec.Decode(&pin2)
if err != nil {
t.Fatal(err)
}
}


@ -1,29 +0,0 @@
package api
import (
peer "github.com/libp2p/go-libp2p/core/peer"
)
// PeersToStrings encodes a list of peer.IDs as strings.
func PeersToStrings(peers []peer.ID) []string {
strs := make([]string, len(peers))
for i, p := range peers {
if p != "" {
strs[i] = p.String()
}
}
return strs
}
// StringsToPeers decodes peer.IDs from strings.
func StringsToPeers(strs []string) []peer.ID {
peers := []peer.ID{}
for _, p := range strs {
pid, err := peer.Decode(p)
if err != nil {
continue
}
peers = append(peers, pid)
}
return peers
}
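A minimal round-trip sketch (not part of the deleted file; the valid peer ID below is one of the fixtures used elsewhere in this PR, and invalid strings are simply skipped by StringsToPeers rather than reported as errors):
```
// Sketch: invalid entries are dropped, so the round-trip keeps one ID.
peers := StringsToPeers([]string{
	"QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD", // decodes fine
	"not-a-peer-id",                                  // peer.Decode fails, skipped
})
strs := PeersToStrings(peers) // ["QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD"]
```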

File diff suppressed because it is too large


@ -1,576 +0,0 @@
package ipfscluster
import (
"crypto/rand"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
"reflect"
"time"
"github.com/ipfs-cluster/ipfs-cluster/config"
pnet "github.com/libp2p/go-libp2p/core/pnet"
ma "github.com/multiformats/go-multiaddr"
"github.com/kelseyhightower/envconfig"
)
const configKey = "cluster"
// DefaultListenAddrs contains TCP and QUIC listen addresses.
var DefaultListenAddrs = []string{
"/ip4/0.0.0.0/tcp/9096",
"/ip4/0.0.0.0/udp/9096/quic",
}
// Configuration defaults
const (
DefaultEnableRelayHop = true
DefaultStateSyncInterval = 5 * time.Minute
DefaultPinRecoverInterval = 12 * time.Minute
DefaultMonitorPingInterval = 15 * time.Second
DefaultPeerWatchInterval = 5 * time.Second
DefaultReplicationFactor = -1
DefaultLeaveOnShutdown = false
DefaultPinOnlyOnTrustedPeers = false
DefaultDisableRepinning = true
DefaultPeerstoreFile = "peerstore"
DefaultConnMgrHighWater = 400
DefaultConnMgrLowWater = 100
DefaultConnMgrGracePeriod = 2 * time.Minute
DefaultDialPeerTimeout = 3 * time.Second
DefaultFollowerMode = false
DefaultMDNSInterval = 10 * time.Second
)
// ConnMgrConfig configures the libp2p host connection manager.
type ConnMgrConfig struct {
HighWater int
LowWater int
GracePeriod time.Duration
}
// Config is the configuration object containing customizable variables to
// initialize the main ipfs-cluster component. It implements the
// config.ComponentConfig interface.
type Config struct {
config.Saver
// User-defined peername for use as human-readable identifier.
Peername string
// Cluster secret for the private network. Peers will be in the same
// cluster if and only if they share the same secret. In its hex-encoded
// form the secret must be exactly 64 characters long (32 bytes) and
// contain only hexadecimal characters (`[0-9a-f]`).
Secret pnet.PSK
// RPCPolicy defines access control to RPC endpoints.
RPCPolicy map[string]RPCEndpointType
// Leave Cluster on shutdown. Politely informs other peers
// of the departure and removes itself from the consensus
// peer set. The Cluster size will be reduced by one.
LeaveOnShutdown bool
// Listen parameters for the Cluster libp2p Host. Used by
// the RPC and Consensus components.
ListenAddr []ma.Multiaddr
// Enables HOP relay for the node. If this is enabled, the node will act as
// an intermediate (Hop Relay) node in relay circuits for connected peers.
EnableRelayHop bool
// ConnMgr holds configuration values for the connection manager for
// the libp2p host.
// FIXME: This only applies to ipfs-cluster-service.
ConnMgr ConnMgrConfig
// Sets the default dial timeout for libp2p connections to other
// peers.
DialPeerTimeout time.Duration
// Time between syncs of the consensus state to the
// tracker state. Normally states are synced anyway, but this helps
// when new nodes are joining the cluster. Reduce for faster
// consistency, increase with larger states.
StateSyncInterval time.Duration
// Time between automatic runs of the "recover" operation
// which will retry to pin/unpin items in error state.
PinRecoverInterval time.Duration
// ReplicationFactorMax indicates the target number of nodes
// that should pin content. For example, a replication_factor of
// 3 will have the cluster allocate each pinned hash to 3 peers if
// possible.
// See also ReplicationFactorMin. A ReplicationFactorMax of -1
// will allocate to every available node.
ReplicationFactorMax int
// ReplicationFactorMin indicates the minimum number of healthy
// nodes pinning content. If the number of nodes available to pin
// is less than this threshold, an error will be returned.
// In the case of peer health issues, content pinned will be
// re-allocated if the threshold is crossed.
// For example, a ReplicationFactorMin of 2 will allocate at least
// two peers to hold content, and return an error if this is not
// possible.
ReplicationFactorMin int
// MonitorPingInterval is the frequency with which a cluster peer
// sends a "ping" metric. The metric has a TTL set to the double of
// this value. This metric sends information about this peer to other
// peers.
MonitorPingInterval time.Duration
// PeerWatchInterval is the frequency that we use to watch for changes
// in the consensus peerset and save new peers to the configuration
// file. This also affects how soon we realize that we have
// been removed from a cluster.
PeerWatchInterval time.Duration
// MDNSInterval controls the time between mDNS broadcasts to the
// network announcing the peer addresses. Set to 0 to disable
// mDNS.
MDNSInterval time.Duration
// PinOnlyOnTrustedPeers limits allocations to trusted peers only.
PinOnlyOnTrustedPeers bool
// If true, DisableRepinning ensures that no repinning happens
// when a node goes down.
// This is useful when doing certain types of maintenance, or simply
// when one does not want to rely on the monitoring system, which needs a revamp.
DisableRepinning bool
// FollowerMode disables broadcast requests from this peer
// (sync, recover, status) and disallows pinset management
// operations (Pin/Unpin).
FollowerMode bool
// Peerstore file specifies the file on which we persist the
// libp2p host peerstore addresses. This file is regularly saved.
PeerstoreFile string
// PeerAddresses stores additional addresses for peers that may or may
// not be in the peerstore file. These are considered high priority
// when bootstrapping the initial cluster connections.
PeerAddresses []ma.Multiaddr
// Tracing flag used to skip tracing specific paths when not enabled.
Tracing bool
}
// configJSON represents a Cluster configuration as it will look when it is
// saved using JSON. Most configuration keys are converted into simple types
// like strings, and key names aim to be self-explanatory for the user.
type configJSON struct {
ID string `json:"id,omitempty"`
Peername string `json:"peername"`
PrivateKey string `json:"private_key,omitempty" hidden:"true"`
Secret string `json:"secret" hidden:"true"`
LeaveOnShutdown bool `json:"leave_on_shutdown"`
ListenMultiaddress config.Strings `json:"listen_multiaddress"`
EnableRelayHop bool `json:"enable_relay_hop"`
ConnectionManager *connMgrConfigJSON `json:"connection_manager"`
DialPeerTimeout string `json:"dial_peer_timeout"`
StateSyncInterval string `json:"state_sync_interval"`
PinRecoverInterval string `json:"pin_recover_interval"`
ReplicationFactorMin int `json:"replication_factor_min"`
ReplicationFactorMax int `json:"replication_factor_max"`
MonitorPingInterval string `json:"monitor_ping_interval"`
PeerWatchInterval string `json:"peer_watch_interval"`
MDNSInterval string `json:"mdns_interval"`
PinOnlyOnTrustedPeers bool `json:"pin_only_on_trusted_peers"`
DisableRepinning bool `json:"disable_repinning"`
FollowerMode bool `json:"follower_mode,omitempty"`
PeerstoreFile string `json:"peerstore_file,omitempty"`
PeerAddresses []string `json:"peer_addresses"`
}
// connMgrConfigJSON configures the libp2p host connection manager.
type connMgrConfigJSON struct {
HighWater int `json:"high_water"`
LowWater int `json:"low_water"`
GracePeriod string `json:"grace_period"`
}
// ConfigKey returns a human-readable string to identify
// a cluster Config.
func (cfg *Config) ConfigKey() string {
return configKey
}
// Default fills in all the Config fields with
// default working values. This means it will
// generate a Secret.
func (cfg *Config) Default() error {
cfg.setDefaults()
clusterSecret := make([]byte, 32)
n, err := rand.Read(clusterSecret)
if err != nil {
return err
}
if n != 32 {
return errors.New("did not generate 32-byte secret")
}
cfg.Secret = clusterSecret
return nil
}
// ApplyEnvVars fills in any Config fields found
// as environment variables.
func (cfg *Config) ApplyEnvVars() error {
jcfg, err := cfg.toConfigJSON()
if err != nil {
return err
}
err = envconfig.Process(cfg.ConfigKey(), jcfg)
if err != nil {
return err
}
return cfg.applyConfigJSON(jcfg)
}
// Validate will check that the values of this config
// seem to be working ones.
func (cfg *Config) Validate() error {
if cfg.ListenAddr == nil {
return errors.New("cluster.listen_multiaddress is undefined")
}
if len(cfg.ListenAddr) == 0 {
return errors.New("cluster.listen_multiaddress is empty")
}
if cfg.ConnMgr.LowWater <= 0 {
return errors.New("cluster.connection_manager.low_water is invalid")
}
if cfg.ConnMgr.HighWater <= 0 {
return errors.New("cluster.connection_manager.high_water is invalid")
}
if cfg.ConnMgr.LowWater > cfg.ConnMgr.HighWater {
return errors.New("cluster.connection_manager.low_water is greater than high_water")
}
if cfg.ConnMgr.GracePeriod == 0 {
return errors.New("cluster.connection_manager.grace_period is invalid")
}
if cfg.DialPeerTimeout <= 0 {
return errors.New("cluster.dial_peer_timeout is invalid")
}
if cfg.StateSyncInterval <= 0 {
return errors.New("cluster.state_sync_interval is invalid")
}
if cfg.PinRecoverInterval <= 0 {
return errors.New("cluster.pin_recover_interval is invalid")
}
if cfg.MonitorPingInterval <= 0 {
return errors.New("cluster.monitoring_interval is invalid")
}
if cfg.PeerWatchInterval <= 0 {
return errors.New("cluster.peer_watch_interval is invalid")
}
rfMax := cfg.ReplicationFactorMax
rfMin := cfg.ReplicationFactorMin
if err := isReplicationFactorValid(rfMin, rfMax); err != nil {
return err
}
return isRPCPolicyValid(cfg.RPCPolicy)
}
func isReplicationFactorValid(rplMin, rplMax int) error {
// check Max and Min are correct
if rplMin == 0 || rplMax == 0 {
return errors.New("cluster.replication_factor_min and max must be set")
}
if rplMin > rplMax {
return errors.New("cluster.replication_factor_min is larger than max")
}
if rplMin < -1 {
return errors.New("cluster.replication_factor_min is wrong")
}
if rplMax < -1 {
return errors.New("cluster.replication_factor_max is wrong")
}
if (rplMin == -1 && rplMax != -1) || (rplMin != -1 && rplMax == -1) {
return errors.New("cluster.replication_factor_min and max must be -1 when one of them is")
}
return nil
}
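// Illustrative outcomes of the rules above (sketch, not in the original
// file): (-1, -1) and (2, 3) pass; (0, 3) fails because both factors
// must be set; (5, 4) fails because min > max; (-1, 3) fails because
// both must be -1 when one of them is.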
func isRPCPolicyValid(p map[string]RPCEndpointType) error {
rpcComponents := []interface{}{
&ClusterRPCAPI{},
&PinTrackerRPCAPI{},
&IPFSConnectorRPCAPI{},
&ConsensusRPCAPI{},
&PeerMonitorRPCAPI{},
}
total := 0
for _, c := range rpcComponents {
t := reflect.TypeOf(c)
for i := 0; i < t.NumMethod(); i++ {
total++
method := t.Method(i)
name := fmt.Sprintf("%s.%s", RPCServiceID(c), method.Name)
_, ok := p[name]
if !ok {
return fmt.Errorf("RPCPolicy is missing the %s method", name)
}
}
}
if len(p) != total {
logger.Warn("defined RPC policy has more entries than needed")
}
return nil
}
// this just sets non-generated defaults
func (cfg *Config) setDefaults() {
hostname, err := os.Hostname()
if err != nil {
hostname = ""
}
cfg.Peername = hostname
listenAddrs := []ma.Multiaddr{}
for _, m := range DefaultListenAddrs {
addr, _ := ma.NewMultiaddr(m)
listenAddrs = append(listenAddrs, addr)
}
cfg.ListenAddr = listenAddrs
cfg.EnableRelayHop = DefaultEnableRelayHop
cfg.ConnMgr = ConnMgrConfig{
HighWater: DefaultConnMgrHighWater,
LowWater: DefaultConnMgrLowWater,
GracePeriod: DefaultConnMgrGracePeriod,
}
cfg.DialPeerTimeout = DefaultDialPeerTimeout
cfg.LeaveOnShutdown = DefaultLeaveOnShutdown
cfg.StateSyncInterval = DefaultStateSyncInterval
cfg.PinRecoverInterval = DefaultPinRecoverInterval
cfg.ReplicationFactorMin = DefaultReplicationFactor
cfg.ReplicationFactorMax = DefaultReplicationFactor
cfg.MonitorPingInterval = DefaultMonitorPingInterval
cfg.PeerWatchInterval = DefaultPeerWatchInterval
cfg.MDNSInterval = DefaultMDNSInterval
cfg.PinOnlyOnTrustedPeers = DefaultPinOnlyOnTrustedPeers
cfg.DisableRepinning = DefaultDisableRepinning
cfg.FollowerMode = DefaultFollowerMode
cfg.PeerstoreFile = "" // empty so it gets omitted.
cfg.PeerAddresses = []ma.Multiaddr{}
cfg.RPCPolicy = DefaultRPCPolicy
}
// LoadJSON receives a raw json-formatted configuration and
// sets the Config fields from it. Note that it should be JSON
// as generated by ToJSON().
func (cfg *Config) LoadJSON(raw []byte) error {
jcfg := &configJSON{}
err := json.Unmarshal(raw, jcfg)
if err != nil {
logger.Error("Error unmarshaling cluster config")
return err
}
cfg.setDefaults()
return cfg.applyConfigJSON(jcfg)
}
func (cfg *Config) applyConfigJSON(jcfg *configJSON) error {
config.SetIfNotDefault(jcfg.PeerstoreFile, &cfg.PeerstoreFile)
config.SetIfNotDefault(jcfg.Peername, &cfg.Peername)
clusterSecret, err := DecodeClusterSecret(jcfg.Secret)
if err != nil {
err = fmt.Errorf("error loading cluster secret from config: %s", err)
return err
}
cfg.Secret = clusterSecret
var listenAddrs []ma.Multiaddr
for _, addr := range jcfg.ListenMultiaddress {
listenAddr, err := ma.NewMultiaddr(addr)
if err != nil {
err = fmt.Errorf("error parsing a listen_multiaddress: %s", err)
return err
}
listenAddrs = append(listenAddrs, listenAddr)
}
cfg.ListenAddr = listenAddrs
cfg.EnableRelayHop = jcfg.EnableRelayHop
if conman := jcfg.ConnectionManager; conman != nil {
cfg.ConnMgr = ConnMgrConfig{
HighWater: jcfg.ConnectionManager.HighWater,
LowWater: jcfg.ConnectionManager.LowWater,
}
err = config.ParseDurations("cluster",
&config.DurationOpt{Duration: jcfg.ConnectionManager.GracePeriod, Dst: &cfg.ConnMgr.GracePeriod, Name: "connection_manager.grace_period"},
)
if err != nil {
return err
}
}
rplMin := jcfg.ReplicationFactorMin
rplMax := jcfg.ReplicationFactorMax
config.SetIfNotDefault(rplMin, &cfg.ReplicationFactorMin)
config.SetIfNotDefault(rplMax, &cfg.ReplicationFactorMax)
err = config.ParseDurations("cluster",
&config.DurationOpt{Duration: jcfg.DialPeerTimeout, Dst: &cfg.DialPeerTimeout, Name: "dial_peer_timeout"},
&config.DurationOpt{Duration: jcfg.StateSyncInterval, Dst: &cfg.StateSyncInterval, Name: "state_sync_interval"},
&config.DurationOpt{Duration: jcfg.PinRecoverInterval, Dst: &cfg.PinRecoverInterval, Name: "pin_recover_interval"},
&config.DurationOpt{Duration: jcfg.MonitorPingInterval, Dst: &cfg.MonitorPingInterval, Name: "monitor_ping_interval"},
&config.DurationOpt{Duration: jcfg.PeerWatchInterval, Dst: &cfg.PeerWatchInterval, Name: "peer_watch_interval"},
&config.DurationOpt{Duration: jcfg.MDNSInterval, Dst: &cfg.MDNSInterval, Name: "mdns_interval"},
)
if err != nil {
return err
}
// PeerAddresses
peerAddrs := []ma.Multiaddr{}
for _, addr := range jcfg.PeerAddresses {
peerAddr, err := ma.NewMultiaddr(addr)
if err != nil {
err = fmt.Errorf("error parsing peer_addresses: %s", err)
return err
}
peerAddrs = append(peerAddrs, peerAddr)
}
cfg.PeerAddresses = peerAddrs
cfg.LeaveOnShutdown = jcfg.LeaveOnShutdown
cfg.PinOnlyOnTrustedPeers = jcfg.PinOnlyOnTrustedPeers
cfg.DisableRepinning = jcfg.DisableRepinning
cfg.FollowerMode = jcfg.FollowerMode
return cfg.Validate()
}
// ToJSON generates a human-friendly version of Config.
func (cfg *Config) ToJSON() (raw []byte, err error) {
jcfg, err := cfg.toConfigJSON()
if err != nil {
return
}
raw, err = json.MarshalIndent(jcfg, "", " ")
return
}
func (cfg *Config) toConfigJSON() (jcfg *configJSON, err error) {
// Multiaddress String() may panic
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("%s", r)
}
}()
jcfg = &configJSON{}
// Set all configuration fields
jcfg.Peername = cfg.Peername
jcfg.Secret = EncodeProtectorKey(cfg.Secret)
jcfg.ReplicationFactorMin = cfg.ReplicationFactorMin
jcfg.ReplicationFactorMax = cfg.ReplicationFactorMax
jcfg.LeaveOnShutdown = cfg.LeaveOnShutdown
var listenAddrs config.Strings
for _, addr := range cfg.ListenAddr {
listenAddrs = append(listenAddrs, addr.String())
}
jcfg.ListenMultiaddress = config.Strings(listenAddrs)
jcfg.EnableRelayHop = cfg.EnableRelayHop
jcfg.ConnectionManager = &connMgrConfigJSON{
HighWater: cfg.ConnMgr.HighWater,
LowWater: cfg.ConnMgr.LowWater,
GracePeriod: cfg.ConnMgr.GracePeriod.String(),
}
jcfg.DialPeerTimeout = cfg.DialPeerTimeout.String()
jcfg.StateSyncInterval = cfg.StateSyncInterval.String()
jcfg.PinRecoverInterval = cfg.PinRecoverInterval.String()
jcfg.MonitorPingInterval = cfg.MonitorPingInterval.String()
jcfg.PeerWatchInterval = cfg.PeerWatchInterval.String()
jcfg.MDNSInterval = cfg.MDNSInterval.String()
jcfg.PinOnlyOnTrustedPeers = cfg.PinOnlyOnTrustedPeers
jcfg.DisableRepinning = cfg.DisableRepinning
jcfg.PeerstoreFile = cfg.PeerstoreFile
jcfg.PeerAddresses = []string{}
for _, addr := range cfg.PeerAddresses {
jcfg.PeerAddresses = append(jcfg.PeerAddresses, addr.String())
}
jcfg.FollowerMode = cfg.FollowerMode
return
}
// GetPeerstorePath returns the full path of the
// PeerstoreFile, obtained by joining the configuration's BaseDir
// with that value (or with the default filename when unset).
// An empty string is returned when BaseDir is not set.
func (cfg *Config) GetPeerstorePath() string {
if cfg.BaseDir == "" {
return ""
}
filename := DefaultPeerstoreFile
if cfg.PeerstoreFile != "" {
filename = cfg.PeerstoreFile
}
return filepath.Join(cfg.BaseDir, filename)
}
// ToDisplayJSON returns a printable JSON representation of the Config.
func (cfg *Config) ToDisplayJSON() ([]byte, error) {
jcfg, err := cfg.toConfigJSON()
if err != nil {
return nil, err
}
return config.DisplayJSON(jcfg)
}
// DecodeClusterSecret parses a hex-encoded string, checks that it decodes
// to exactly 32 bytes (an empty secret is allowed, with a warning) and
// returns its value as a byte slice.
func DecodeClusterSecret(hexSecret string) ([]byte, error) {
secret, err := hex.DecodeString(hexSecret)
if err != nil {
return nil, err
}
switch secretLen := len(secret); secretLen {
case 0:
logger.Warn("Cluster secret is empty, cluster will start on unprotected network.")
return nil, nil
case 32:
return secret, nil
default:
return nil, fmt.Errorf("input secret is %d bytes, cluster secret should be 32", secretLen)
}
}
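For reference, a minimal sketch (not part of the deleted file) of how a freshly generated secret round-trips through the hex form stored under the `secret` key, using EncodeProtectorKey from clusterhost.go further below; it assumes package ipfscluster with `bytes` and `crypto/rand` imported:
```
secret := make([]byte, 32)
if _, err := rand.Read(secret); err != nil {
	panic(err)
}
hexSecret := EncodeProtectorKey(secret) // 64 hex characters
decoded, err := DecodeClusterSecret(hexSecret)
if err != nil || !bytes.Equal(decoded, secret) {
	panic("secret did not round-trip")
}
```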


@ -1,286 +0,0 @@
package ipfscluster
import (
"encoding/json"
"os"
"testing"
"time"
"github.com/ipfs-cluster/ipfs-cluster/config"
)
var ccfgTestJSON = []byte(`
{
"peername": "testpeer",
"secret": "2588b80d5cb05374fa142aed6cbb047d1f4ef8ef15e37eba68c65b9d30df67ed",
"leave_on_shutdown": true,
"connection_manager": {
"high_water": 501,
"low_water": 500,
"grace_period": "100m0s"
},
"listen_multiaddress": [
"/ip4/127.0.0.1/tcp/10000",
"/ip4/127.0.0.1/udp/10000/quic"
],
"state_sync_interval": "1m0s",
"pin_recover_interval": "1m",
"replication_factor_min": 5,
"replication_factor_max": 5,
"monitor_ping_interval": "2s",
"pin_only_on_trusted_peers": true,
"disable_repinning": true,
"peer_addresses": [ "/ip4/127.0.0.1/tcp/1234/p2p/QmXZrtE5jQwXNqCJMfHUTQkvhQ4ZAnqMnmzFMJfLewuabc" ]
}
`)
func TestLoadJSON(t *testing.T) {
loadJSON := func(t *testing.T) *Config {
cfg := &Config{}
err := cfg.LoadJSON(ccfgTestJSON)
if err != nil {
t.Fatal(err)
}
return cfg
}
t.Run("basic", func(t *testing.T) {
cfg := &Config{}
err := cfg.LoadJSON(ccfgTestJSON)
if err != nil {
t.Fatal(err)
}
})
t.Run("peername", func(t *testing.T) {
cfg := loadJSON(t)
if cfg.Peername != "testpeer" {
t.Error("expected peername 'testpeer'")
}
})
t.Run("expected replication factor", func(t *testing.T) {
cfg := loadJSON(t)
if cfg.ReplicationFactorMin != 5 {
t.Error("expected replication factor min == 5")
}
})
t.Run("expected disable_repinning", func(t *testing.T) {
cfg := loadJSON(t)
if !cfg.DisableRepinning {
t.Error("expected disable_repinning to be true")
}
})
t.Run("expected pin_only_on_trusted_peers", func(t *testing.T) {
cfg := loadJSON(t)
if !cfg.PinOnlyOnTrustedPeers {
t.Error("expected pin_only_on_trusted_peers to be true")
}
})
t.Run("expected pin_recover_interval", func(t *testing.T) {
cfg := loadJSON(t)
if cfg.PinRecoverInterval != time.Minute {
t.Error("expected pin_recover_interval of 1m")
}
})
t.Run("expected connection_manager", func(t *testing.T) {
cfg := loadJSON(t)
if cfg.ConnMgr.LowWater != 500 {
t.Error("expected low_water to be 500")
}
if cfg.ConnMgr.HighWater != 501 {
t.Error("expected high_water to be 501")
}
if cfg.ConnMgr.GracePeriod != 100*time.Minute {
t.Error("expected grace_period to be 100m")
}
})
t.Run("expected peer addresses", func(t *testing.T) {
cfg := loadJSON(t)
if len(cfg.PeerAddresses) != 1 {
t.Error("expected 1 peer address")
}
})
loadJSON2 := func(t *testing.T, f func(j *configJSON)) (*Config, error) {
cfg := &Config{}
j := &configJSON{}
json.Unmarshal(ccfgTestJSON, j)
f(j)
tst, err := json.Marshal(j)
if err != nil {
return cfg, err
}
err = cfg.LoadJSON(tst)
if err != nil {
return cfg, err
}
return cfg, nil
}
t.Run("empty default peername", func(t *testing.T) {
cfg, err := loadJSON2(t, func(j *configJSON) { j.Peername = "" })
if err != nil {
t.Error(err)
}
if cfg.Peername == "" {
t.Error("expected default peername")
}
})
t.Run("bad listen multiaddress", func(t *testing.T) {
_, err := loadJSON2(t, func(j *configJSON) { j.ListenMultiaddress = config.Strings{"abc"} })
if err == nil {
t.Error("expected error parsing listen_multiaddress")
}
})
t.Run("bad secret", func(t *testing.T) {
_, err := loadJSON2(t, func(j *configJSON) { j.Secret = "abc" })
if err == nil {
t.Error("expected error decoding secret")
}
})
t.Run("default replication factors", func(t *testing.T) {
cfg, err := loadJSON2(
t,
func(j *configJSON) {
j.ReplicationFactorMin = 0
j.ReplicationFactorMax = 0
},
)
if err != nil {
t.Error(err)
}
if cfg.ReplicationFactorMin != -1 || cfg.ReplicationFactorMax != -1 {
t.Error("expected default replication factor")
}
})
t.Run("only replication factor min set to -1", func(t *testing.T) {
_, err := loadJSON2(t, func(j *configJSON) { j.ReplicationFactorMin = -1 })
if err == nil {
t.Error("expected error when only one replication factor is -1")
}
})
t.Run("replication factor min > max", func(t *testing.T) {
_, err := loadJSON2(
t,
func(j *configJSON) {
j.ReplicationFactorMin = 5
j.ReplicationFactorMax = 4
},
)
if err == nil {
t.Error("expected error when only rplMin > rplMax")
}
})
t.Run("default replication factor", func(t *testing.T) {
cfg, err := loadJSON2(
t,
func(j *configJSON) {
j.ReplicationFactorMin = 0
j.ReplicationFactorMax = 0
},
)
if err != nil {
t.Error(err)
}
if cfg.ReplicationFactorMin != -1 || cfg.ReplicationFactorMax != -1 {
t.Error("expected default replication factors")
}
})
t.Run("conn manager default", func(t *testing.T) {
cfg, err := loadJSON2(
t,
func(j *configJSON) {
j.ConnectionManager = nil
},
)
if err != nil {
t.Fatal(err)
}
if cfg.ConnMgr.LowWater != DefaultConnMgrLowWater {
t.Error("default conn manager values not set")
}
})
}
func TestToJSON(t *testing.T) {
cfg := &Config{}
err := cfg.LoadJSON(ccfgTestJSON)
if err != nil {
t.Fatal(err)
}
newjson, err := cfg.ToJSON()
if err != nil {
t.Fatal(err)
}
cfg = &Config{}
err = cfg.LoadJSON(newjson)
if err != nil {
t.Fatal(err)
}
}
func TestDefault(t *testing.T) {
cfg := &Config{}
cfg.Default()
if err := cfg.Validate(); err != nil {
t.Fatal(err)
}
}
func TestApplyEnvVars(t *testing.T) {
os.Setenv("CLUSTER_PEERNAME", "envsetpeername")
cfg := &Config{}
cfg.Default()
cfg.ApplyEnvVars()
if cfg.Peername != "envsetpeername" {
t.Fatal("failed to override peername with env var")
}
}
func TestValidate(t *testing.T) {
cfg := &Config{}
cfg.Default()
cfg.MonitorPingInterval = 0
if cfg.Validate() == nil {
t.Fatal("expected error validating")
}
cfg.Default()
cfg.ReplicationFactorMin = 10
cfg.ReplicationFactorMax = 5
if cfg.Validate() == nil {
t.Fatal("expected error validating")
}
cfg.Default()
cfg.ReplicationFactorMin = 0
if cfg.Validate() == nil {
t.Fatal("expected error validating")
}
cfg.Default()
cfg.ConnMgr.GracePeriod = 0
if cfg.Validate() == nil {
t.Fatal("expected error validating")
}
cfg.Default()
cfg.PinRecoverInterval = 0
if cfg.Validate() == nil {
t.Fatal("expected error validating")
}
}

File diff suppressed because it is too large


@ -1,169 +0,0 @@
package ipfscluster
import (
"context"
"encoding/hex"
config "github.com/ipfs-cluster/ipfs-cluster/config"
ds "github.com/ipfs/go-datastore"
namespace "github.com/ipfs/go-datastore/namespace"
ipns "github.com/ipfs/go-ipns"
libp2p "github.com/libp2p/go-libp2p"
crypto "github.com/libp2p/go-libp2p/core/crypto"
host "github.com/libp2p/go-libp2p/core/host"
network "github.com/libp2p/go-libp2p/core/network"
corepnet "github.com/libp2p/go-libp2p/core/pnet"
routing "github.com/libp2p/go-libp2p/core/routing"
dht "github.com/libp2p/go-libp2p-kad-dht"
dual "github.com/libp2p/go-libp2p-kad-dht/dual"
pubsub "github.com/libp2p/go-libp2p-pubsub"
record "github.com/libp2p/go-libp2p-record"
connmgr "github.com/libp2p/go-libp2p/p2p/net/connmgr"
identify "github.com/libp2p/go-libp2p/p2p/protocol/identify"
noise "github.com/libp2p/go-libp2p/p2p/security/noise"
libp2ptls "github.com/libp2p/go-libp2p/p2p/security/tls"
libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
tcp "github.com/libp2p/go-libp2p/p2p/transport/tcp"
websocket "github.com/libp2p/go-libp2p/p2p/transport/websocket"
)
const dhtNamespace = "dht"
var _ = libp2pquic.NewTransport // reference keeps the libp2pquic import in use
func init() {
// Cluster peers should advertise their public IPs as soon as they
// learn about them. The default for this is 4, which prevents clusters
// with fewer than 4 peers from advertising an external address they know
// of, so they cannot be remembered by other peers as soon as possible. This
// mostly affects dockerized setups. This may announce non-dialable
// NATed addresses too eagerly, but they should progressively be
// cleaned up.
identify.ActivationThresh = 1
}
// NewClusterHost creates a fully-featured libp2p Host with the options from
// the provided cluster configuration. Using that host, it creates pubsub and
// DHT instances (persisting to the given datastore) for shared use by all
// cluster components. The returned host uses the DHT for routing. Relay and
// NATService are additionally set up for this host.
func NewClusterHost(
ctx context.Context,
ident *config.Identity,
cfg *Config,
ds ds.Datastore,
) (host.Host, *pubsub.PubSub, *dual.DHT, error) {
// Set the default dial timeout for all libp2p connections. It is not
// ideal to touch this global variable here, but the alternative
// is to use a modified context everywhere, even when the user
// supplies one.
network.DialPeerTimeout = cfg.DialPeerTimeout
connman, err := connmgr.NewConnManager(cfg.ConnMgr.LowWater, cfg.ConnMgr.HighWater, connmgr.WithGracePeriod(cfg.ConnMgr.GracePeriod))
if err != nil {
return nil, nil, nil, err
}
var idht *dual.DHT
opts := []libp2p.Option{
libp2p.ListenAddrs(cfg.ListenAddr...),
libp2p.NATPortMap(),
libp2p.ConnectionManager(connman),
libp2p.Routing(func(h host.Host) (routing.PeerRouting, error) {
idht, err = newDHT(ctx, h, ds)
return idht, err
}),
libp2p.EnableNATService(),
libp2p.EnableRelay(),
libp2p.EnableAutoRelay(),
libp2p.EnableHolePunching(),
}
if cfg.EnableRelayHop {
opts = append(opts, libp2p.EnableRelayService())
}
h, err := newHost(
ctx,
cfg.Secret,
ident.PrivateKey,
opts...,
)
if err != nil {
return nil, nil, nil, err
}
psub, err := newPubSub(ctx, h)
if err != nil {
h.Close()
return nil, nil, nil, err
}
return h, psub, idht, nil
}
// newHost creates a base cluster host without DHT, pubsub, relay or NAT.
// It is mostly used for testing.
func newHost(ctx context.Context, psk corepnet.PSK, priv crypto.PrivKey, opts ...libp2p.Option) (host.Host, error) {
finalOpts := []libp2p.Option{
libp2p.Identity(priv),
}
finalOpts = append(finalOpts, baseOpts(psk)...)
finalOpts = append(finalOpts, opts...)
h, err := libp2p.New(
finalOpts...,
)
if err != nil {
return nil, err
}
return h, nil
}
func baseOpts(psk corepnet.PSK) []libp2p.Option {
return []libp2p.Option{
libp2p.PrivateNetwork(psk),
libp2p.EnableNATService(),
libp2p.Security(noise.ID, noise.New),
libp2p.Security(libp2ptls.ID, libp2ptls.New),
// TODO: quic does not support private networks
// libp2p.DefaultTransports,
libp2p.NoTransports,
libp2p.Transport(tcp.NewTCPTransport),
libp2p.Transport(websocket.New),
}
}
func newDHT(ctx context.Context, h host.Host, store ds.Datastore, extraopts ...dual.Option) (*dual.DHT, error) {
opts := []dual.Option{
dual.DHTOption(dht.NamespacedValidator("pk", record.PublicKeyValidator{})),
dual.DHTOption(dht.NamespacedValidator("ipns", ipns.Validator{KeyBook: h.Peerstore()})),
dual.DHTOption(dht.Concurrency(10)),
}
opts = append(opts, extraopts...)
if batchingDs, ok := store.(ds.Batching); ok {
dhtDatastore := namespace.Wrap(batchingDs, ds.NewKey(dhtNamespace))
opts = append(opts, dual.DHTOption(dht.Datastore(dhtDatastore)))
logger.Debug("enabling DHT record persistence to datastore")
}
return dual.New(ctx, h, opts...)
}
func newPubSub(ctx context.Context, h host.Host) (*pubsub.PubSub, error) {
return pubsub.NewGossipSub(
ctx,
h,
pubsub.WithMessageSigning(true),
pubsub.WithStrictSignatureVerification(true),
)
}
// EncodeProtectorKey converts a byte slice to its hex string representation.
func EncodeProtectorKey(secretBytes []byte) string {
return hex.EncodeToString(secretBytes)
}
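A hedged sketch of wiring NewClusterHost into a caller (ctx, ident, cfg and store are placeholders whose construction is elided):
```
// Sketch only: create the shared host, pubsub and DHT for a peer.
h, psub, dht, err := NewClusterHost(ctx, ident, cfg, store)
if err != nil {
	return err
}
defer h.Close()
_ = psub // handed to the pubsub-based monitor
_ = dht  // handed to components that need routing
```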


@ -1,19 +0,0 @@
# go source files
SRC := $(shell find ../.. -type f -name '*.go')
GOPATH := $(shell go env GOPATH)
GOFLAGS := "-trimpath"
all: ipfs-cluster-ctl
ipfs-cluster-ctl: $(SRC)
go build $(GOFLAGS) -mod=readonly
build: ipfs-cluster-ctl
install:
go install $(GOFLAGS)
clean:
rm -f ipfs-cluster-ctl
.PHONY: clean install build


@ -1,5 +0,0 @@
Dual-licensed under MIT and ASLv2, by way of the [Permissive License
Stack](https://protocol.ai/blog/announcing-the-permissive-license-stack/).
Apache-2.0: https://www.apache.org/licenses/license-2.0
MIT: https://www.opensource.org/licenses/mit


@ -1,13 +0,0 @@
Copyright 2020. Protocol Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -1,19 +0,0 @@
Copyright 2020. Protocol Labs, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@ -1,17 +0,0 @@
# `ipfs-cluster-ctl`
> IPFS cluster management tool
`ipfs-cluster-ctl` is the client application to manage the cluster nodes and perform actions. `ipfs-cluster-ctl` uses the HTTP API provided by the nodes and it is completely separate from the cluster service.
### Usage
Usage information can be obtained by running:
```
$ ipfs-cluster-ctl --help
```
You can also obtain command-specific help with `ipfs-cluster-ctl help [cmd]`. The `--host` flag can be used to talk to any remote cluster peer (`localhost` is used by default).
For more information, please check the [Documentation](https://ipfscluster.io/documentation), in particular the [`ipfs-cluster-ctl` section](https://ipfscluster.io/documentation/ipfs-cluster-ctl).


@ -1,329 +0,0 @@
package main
import (
"encoding/json"
"errors"
"fmt"
"reflect"
"sort"
"strings"
"time"
"github.com/ipfs-cluster/ipfs-cluster/api"
humanize "github.com/dustin/go-humanize"
)
type addedOutputQuiet struct {
api.AddedOutput
quiet bool
}
func jsonFormatObject(resp interface{}) {
switch r := resp.(type) {
case nil:
return
case []addedOutputQuiet:
// print the original objects, since in JSON it makes
// no sense to have a human "quiet" output
var actual []api.AddedOutput
for _, s := range r {
actual = append(actual, s.AddedOutput)
}
jsonFormatPrint(actual)
default:
jsonFormatPrint(resp)
}
}
func jsonFormatPrint(obj interface{}) {
print := func(o interface{}) {
j, err := json.MarshalIndent(o, "", " ")
checkErr("generating json output", err)
fmt.Printf("%s\n", j)
}
switch r := obj.(type) {
case chan api.Pin:
for o := range r {
print(o)
}
case chan api.GlobalPinInfo:
for o := range r {
print(o)
}
case chan api.ID:
for o := range r {
print(o)
}
default:
print(obj)
}
}
func textFormatObject(resp interface{}) {
switch r := resp.(type) {
case nil:
return
case string:
fmt.Println(resp)
case api.ID:
textFormatPrintID(r)
case api.GlobalPinInfo:
textFormatPrintGPInfo(r)
case api.Pin:
textFormatPrintPin(r)
case api.AddedOutput:
textFormatPrintAddedOutput(r)
case addedOutputQuiet:
textFormatPrintAddedOutputQuiet(r)
case api.Version:
textFormatPrintVersion(r)
case api.Error:
textFormatPrintError(r)
case api.Metric:
textFormatPrintMetric(r)
case api.Alert:
textFormatPrintAlert(r)
case chan api.ID:
for item := range r {
textFormatObject(item)
}
case chan api.GlobalPinInfo:
for item := range r {
textFormatObject(item)
}
case chan api.Pin:
for item := range r {
textFormatObject(item)
}
case []api.AddedOutput:
for _, item := range r {
textFormatObject(item)
}
case []addedOutputQuiet:
for _, item := range r {
textFormatObject(item)
}
case []api.Metric:
for _, item := range r {
textFormatObject(item)
}
case api.GlobalRepoGC:
textFormatPrintGlobalRepoGC(r)
case []string:
for _, item := range r {
textFormatObject(item)
}
case []api.Alert:
for _, item := range r {
textFormatObject(item)
}
default:
checkErr("", errors.New("unsupported type returned"+reflect.TypeOf(r).String()))
}
}
func textFormatPrintID(obj api.ID) {
if obj.Error != "" {
fmt.Printf("%s | ERROR: %s\n", obj.ID.Pretty(), obj.Error)
return
}
fmt.Printf(
"%s | %s | Sees %d other peers\n",
obj.ID.Pretty(),
obj.Peername,
len(obj.ClusterPeers)-1,
)
addrs := make(sort.StringSlice, 0, len(obj.Addresses))
for _, a := range obj.Addresses {
addrs = append(addrs, a.String())
}
addrs.Sort()
fmt.Println(" > Addresses:")
for _, a := range addrs {
fmt.Printf(" - %s\n", a)
}
if obj.IPFS.Error != "" {
fmt.Printf(" > IPFS ERROR: %s\n", obj.IPFS.Error)
return
}
ipfsAddrs := make(sort.StringSlice, 0, len(obj.IPFS.Addresses))
for _, a := range obj.IPFS.Addresses {
ipfsAddrs = append(ipfsAddrs, a.String())
}
ipfsAddrs.Sort()
fmt.Printf(" > IPFS: %s\n", obj.IPFS.ID.Pretty())
for _, a := range ipfsAddrs {
fmt.Printf(" - %s\n", a)
}
}
func textFormatPrintGPInfo(obj api.GlobalPinInfo) {
var b strings.Builder
peers := make([]string, 0, len(obj.PeerMap))
for k := range obj.PeerMap {
peers = append(peers, k)
}
sort.Strings(peers)
fmt.Fprintf(&b, "%s", obj.Cid)
if obj.Name != "" {
fmt.Fprintf(&b, " | %s", obj.Name)
}
b.WriteString(":\n")
for _, k := range peers {
v := obj.PeerMap[k]
if len(v.PeerName) > 0 {
fmt.Fprintf(&b, " > %-20s : %s", v.PeerName, strings.ToUpper(v.Status.String()))
} else {
fmt.Fprintf(&b, " > %-20s : %s", k, strings.ToUpper(v.Status.String()))
}
if v.Error != "" {
fmt.Fprintf(&b, ": %s", v.Error)
}
txt, _ := v.TS.MarshalText()
fmt.Fprintf(&b, " | %s", txt)
fmt.Fprintf(&b, " | Attempts: %d", v.AttemptCount)
fmt.Fprintf(&b, " | Priority: %t", v.PriorityPin)
fmt.Fprintf(&b, "\n")
}
fmt.Print(b.String())
}
func textFormatPrintVersion(obj api.Version) {
fmt.Println(obj.Version)
}
func textFormatPrintPin(obj api.Pin) {
t := strings.ToUpper(obj.Type.String())
if obj.Mode == api.PinModeDirect {
t = t + "-DIRECT"
}
fmt.Printf("%s | %s | %s | ", obj.Cid, obj.Name, t)
if obj.IsPinEverywhere() {
fmt.Printf("Repl. Factor: -1 | Allocations: [everywhere]")
} else {
sortAlloc := api.PeersToStrings(obj.Allocations)
sort.Strings(sortAlloc)
fmt.Printf("Repl. Factor: %d--%d | Allocations: %s",
obj.ReplicationFactorMin, obj.ReplicationFactorMax,
sortAlloc)
}
var recStr string
switch obj.MaxDepth {
case 0:
recStr = "Direct"
case -1:
recStr = "Recursive"
default:
recStr = fmt.Sprintf("Recursive-%d", obj.MaxDepth)
}
fmt.Printf(" | %s", recStr)
fmt.Printf(" | Metadata:")
if len(obj.Metadata) == 0 {
fmt.Printf(" no")
} else {
fmt.Printf(" yes")
}
expireAt := "∞"
if !obj.ExpireAt.IsZero() {
expireAt = obj.ExpireAt.Format("2006-01-02 15:04:05")
}
fmt.Printf(" | Exp: %s", expireAt)
added := "unknown"
if !obj.Timestamp.IsZero() {
added = obj.Timestamp.Format("2006-01-02 15:04:05")
}
fmt.Printf(" | Added: %s\n", added)
}
func textFormatPrintAddedOutput(obj api.AddedOutput) {
fmt.Printf("added %s %s\n", obj.Cid, obj.Name)
}
func textFormatPrintAddedOutputQuiet(obj addedOutputQuiet) {
if obj.quiet {
fmt.Printf("%s\n", obj.AddedOutput.Cid)
} else {
textFormatPrintAddedOutput(obj.AddedOutput)
}
}
func textFormatPrintMetric(obj api.Metric) {
v := obj.Value
if obj.Name == "freespace" && obj.Weight > 0 {
v = humanize.Bytes(uint64(obj.Weight))
}
fmt.Printf("%s | %s: %s | Expires in: %s\n", obj.Peer, obj.Name, v, humanize.Time(time.Unix(0, obj.Expire)))
}
func textFormatPrintAlert(obj api.Alert) {
fmt.Printf("%s: %s. Expired at: %s. Triggered at: %s\n",
obj.Peer,
obj.Name,
humanize.Time(time.Unix(0, obj.Expire)),
humanize.Time(obj.TriggeredAt),
)
}
func textFormatPrintGlobalRepoGC(obj api.GlobalRepoGC) {
peers := make(sort.StringSlice, 0, len(obj.PeerMap))
for peer := range obj.PeerMap {
peers = append(peers, peer)
}
peers.Sort()
for _, peer := range peers {
item := obj.PeerMap[peer]
// If peer name is set, use it instead of peer ID.
if len(item.Peername) > 0 {
peer = item.Peername
}
if item.Error != "" {
fmt.Printf("%-15s | ERROR: %s\n", peer, item.Error)
} else {
fmt.Printf("%-15s\n", peer)
}
fmt.Printf(" > CIDs:\n")
for _, key := range item.Keys {
if key.Error != "" {
// key.Key will be empty
fmt.Printf(" - ERROR: %s\n", key.Error)
continue
}
fmt.Printf(" - %s\n", key.Key)
}
}
}
func textFormatPrintError(obj api.Error) {
fmt.Printf("An error occurred:\n")
fmt.Printf(" Code: %d\n", obj.Code)
fmt.Printf(" Message: %s\n", obj.Message)
}
func trackerStatusAllString() string {
var strs []string
for _, st := range api.TrackerStatusAll() {
strs = append(strs, " - "+st.String())
}
sort.Strings(strs)
return strings.Join(strs, "\n")
}


@ -1,265 +0,0 @@
package main
import (
"errors"
"fmt"
"io"
"sort"
dot "github.com/kishansagathiya/go-dot"
peer "github.com/libp2p/go-libp2p/core/peer"
"github.com/ipfs-cluster/ipfs-cluster/api"
)
/*
These functions are used to write an IPFS Cluster connectivity graph to a
graphviz-style dot file. Given an api.ConnectGraph object, makeDot
does some preprocessing and then passes all 3 link maps to a
dotWriter, which handles iterating over the link maps and writing
dot-file node and edge statements to make a dot-file graph. Nodes are
labeled with the shortened libp2p peer ID. IPFS nodes are rendered
with turquoise boundaries, Cluster nodes with orange. Currently preprocessing
consists of moving IPFS swarm peers not connected to any cluster peer to
the IPFSLinks map in the event that the function was invoked with the
allIpfs flag. This allows all IPFS peers connected to the cluster to be
rendered as nodes in the final graph.
*/
// nodeType specifies the type of node being represented in the dot file:
// either IPFS or Cluster
type nodeType int
const (
tSelfCluster nodeType = iota // cluster self node
tCluster // cluster node
tTrustedCluster // trusted cluster node
tIPFS // IPFS node
tIPFSMissing // Missing IPFS node
)
var errUnknownNodeType = errors.New("unsupported node type. Expected cluster or ipfs")
func makeDot(cg api.ConnectGraph, w io.Writer, allIpfs bool) error {
ipfsEdges := make(map[string][]peer.ID)
for k, v := range cg.IPFSLinks {
ipfsEdges[k] = make([]peer.ID, 0)
for _, id := range v {
strPid := id.String()
if _, ok := cg.IPFSLinks[strPid]; ok || allIpfs {
ipfsEdges[k] = append(ipfsEdges[k], id)
}
if allIpfs { // include all swarm peers in the graph
if _, ok := ipfsEdges[strPid]; !ok {
// if id in IPFSLinks this will be overwritten
// if id not in IPFSLinks this will stay blank
ipfsEdges[strPid] = make([]peer.ID, 0)
}
}
}
}
dW := dotWriter{
w: w,
dotGraph: dot.NewGraph("cluster"),
self: cg.ClusterID.String(),
trustMap: cg.ClusterTrustLinks,
idToPeername: cg.IDtoPeername,
ipfsEdges: ipfsEdges,
clusterEdges: cg.ClusterLinks,
clusterIpfsEdges: cg.ClustertoIPFS,
clusterNodes: make(map[string]*dot.VertexDescription),
ipfsNodes: make(map[string]*dot.VertexDescription),
}
return dW.print()
}
type dotWriter struct {
clusterNodes map[string]*dot.VertexDescription
ipfsNodes map[string]*dot.VertexDescription
w io.Writer
dotGraph dot.Graph
self string
idToPeername map[string]string
trustMap map[string]bool
ipfsEdges map[string][]peer.ID
clusterEdges map[string][]peer.ID
clusterIpfsEdges map[string]peer.ID
}
func (dW *dotWriter) addSubGraph(sGraph dot.Graph, rank string) {
sGraph.IsSubGraph = true
sGraph.Rank = rank
dW.dotGraph.AddSubGraph(&sGraph)
}
// addNode writes a node to the dot file output and creates and stores an ordering over nodes
func (dW *dotWriter) addNode(graph *dot.Graph, id string, nT nodeType) error {
node := dot.NewVertexDescription("")
node.Group = id
node.ColorScheme = "x11"
node.FontName = "Arial"
node.Style = "filled"
node.FontColor = "black"
switch nT {
case tSelfCluster:
node.ID = fmt.Sprintf("C%d", len(dW.clusterNodes))
node.Shape = "box3d"
node.Label = label(dW.idToPeername[id], shorten(id))
node.Color = "orange"
node.Peripheries = 2
dW.clusterNodes[id] = &node
case tTrustedCluster:
node.ID = fmt.Sprintf("T%d", len(dW.clusterNodes))
node.Shape = "box3d"
node.Label = label(dW.idToPeername[id], shorten(id))
node.Color = "orange"
dW.clusterNodes[id] = &node
case tCluster:
node.Shape = "box3d"
node.Label = label(dW.idToPeername[id], shorten(id))
node.ID = fmt.Sprintf("C%d", len(dW.clusterNodes))
node.Color = "darkorange3"
dW.clusterNodes[id] = &node
case tIPFS:
node.ID = fmt.Sprintf("I%d", len(dW.ipfsNodes))
node.Shape = "cylinder"
node.Label = label("IPFS", shorten(id))
node.Color = "turquoise3"
dW.ipfsNodes[id] = &node
case tIPFSMissing:
node.ID = fmt.Sprintf("I%d", len(dW.ipfsNodes))
node.Shape = "cylinder"
node.Label = label("IPFS", "Errored")
node.Color = "firebrick1"
dW.ipfsNodes[id] = &node
default:
return errUnknownNodeType
}
graph.AddVertex(&node)
return nil
}
func shorten(id string) string {
return id[:2] + "*" + id[len(id)-6:]
}
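// For instance, shorten("QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD")
// returns "Qm*eqhEhD", the label form seen in the test fixtures below.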
func label(peername, id string) string {
return fmt.Sprintf("< <B> %s </B> <BR/> <B> %s </B> >", peername, id)
}
func (dW *dotWriter) print() error {
dW.dotGraph.AddComment("The nodes of the connectivity graph")
dW.dotGraph.AddComment("The cluster-service peers")
// Write cluster nodes, use sorted order for consistent labels
sGraphCluster := dot.NewGraph("")
sGraphCluster.IsSubGraph = true
sortedClusterEdges := sortedKeys(dW.clusterEdges)
for _, k := range sortedClusterEdges {
var err error
if k == dW.self {
err = dW.addNode(&sGraphCluster, k, tSelfCluster)
} else if dW.trustMap[k] {
err = dW.addNode(&sGraphCluster, k, tTrustedCluster)
} else {
err = dW.addNode(&sGraphCluster, k, tCluster)
}
if err != nil {
return err
}
}
dW.addSubGraph(sGraphCluster, "min")
dW.dotGraph.AddNewLine()
dW.dotGraph.AddComment("The ipfs peers")
sGraphIPFS := dot.NewGraph("")
sGraphIPFS.IsSubGraph = true
// Write ipfs nodes, use sorted order for consistent labels
for _, k := range sortedKeys(dW.ipfsEdges) {
err := dW.addNode(&sGraphIPFS, k, tIPFS)
if err != nil {
return err
}
}
for _, k := range sortedClusterEdges {
if _, ok := dW.clusterIpfsEdges[k]; !ok {
err := dW.addNode(&sGraphIPFS, k, tIPFSMissing)
if err != nil {
return err
}
}
}
dW.addSubGraph(sGraphIPFS, "max")
dW.dotGraph.AddNewLine()
dW.dotGraph.AddComment("Edges representing active connections in the cluster")
dW.dotGraph.AddComment("The connections among cluster-service peers")
// Write cluster edges
for _, k := range sortedClusterEdges {
v := dW.clusterEdges[k]
for _, id := range v {
toNode := dW.clusterNodes[k]
fromNode := dW.clusterNodes[id.String()]
dW.dotGraph.AddEdge(toNode, fromNode, true, "")
}
}
dW.dotGraph.AddNewLine()
dW.dotGraph.AddComment("The connections between cluster peers and their ipfs daemons")
// Write cluster to ipfs edges
for _, k := range sortedClusterEdges {
var fromNode *dot.VertexDescription
toNode := dW.clusterNodes[k]
ipfsID, ok := dW.clusterIpfsEdges[k]
if !ok {
fromNode, ok2 := dW.ipfsNodes[k]
if !ok2 {
logger.Error("expected a node at this id")
continue
}
dW.dotGraph.AddEdge(toNode, fromNode, true, "dotted")
continue
}
fromNode, ok = dW.ipfsNodes[ipfsID.String()]
if !ok {
logger.Error("expected a node at this id")
continue
}
dW.dotGraph.AddEdge(toNode, fromNode, true, "")
}
dW.dotGraph.AddNewLine()
dW.dotGraph.AddComment("The swarm peer connections among ipfs daemons in the cluster")
// Write ipfs edges
for _, k := range sortedKeys(dW.ipfsEdges) {
v := dW.ipfsEdges[k]
toNode := dW.ipfsNodes[k]
for _, id := range v {
idStr := id.String()
fromNode, ok := dW.ipfsNodes[idStr]
if !ok {
logger.Error("expected a node here")
continue
}
dW.dotGraph.AddEdge(toNode, fromNode, true, "")
}
}
return dW.dotGraph.Write(dW.w)
}
func sortedKeys(dict map[string][]peer.ID) []string {
keys := make([]string, len(dict))
i := 0
for k := range dict {
keys[i] = k
i++
}
sort.Strings(keys)
return keys
}
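A minimal sketch of driving makeDot (the api.ConnectGraph construction is elided here; the tests below build one explicitly, and `bytes` and `fmt` are assumed imported):
```
// Sketch only: render a connectivity graph in graphviz dot format.
var buf bytes.Buffer
if err := makeDot(cg, &buf, false); err != nil {
	return err
}
fmt.Println(buf.String())
```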


@ -1,233 +0,0 @@
package main
import (
"bytes"
"fmt"
"strings"
"testing"
"github.com/ipfs-cluster/ipfs-cluster/api"
peer "github.com/libp2p/go-libp2p/core/peer"
)
func verifyOutput(t *testing.T, outStr string, trueStr string) {
outLines := strings.Split(outStr, "\n")
trueLines := strings.Split(trueStr, "\n")
if len(outLines) != len(trueLines) {
fmt.Printf("expected:\n-%s-\n\n\nactual:\n-%s-", trueStr, outStr)
t.Fatal("Number of output lines does not match expectation")
}
for i := range outLines {
if outLines[i] != trueLines[i] {
t.Errorf("Difference in sorted outputs (%d): %s vs %s", i, outLines[i], trueLines[i])
}
}
}
var simpleIpfs = `digraph cluster {
/* The nodes of the connectivity graph */
/* The cluster-service peers */
subgraph {
rank="min"
C0 [label=< <B> </B> <BR/> <B> Qm*eqhEhD </B> > group="QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD" color="orange" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="box3d" peripheries="2" ]
C1 [label=< <B> </B> <BR/> <B> Qm*cgHDQJ </B> > group="QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ" color="darkorange3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="box3d" ]
C2 [label=< <B> </B> <BR/> <B> Qm*6MQmJu </B> > group="QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu" color="darkorange3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="box3d" ]
}
/* The ipfs peers */
subgraph {
rank="max"
I0 [label=< <B> IPFS </B> <BR/> <B> Qm*N5LSsq </B> > group="QmPFKAGZbUjdzt8BBx8VTWCe9UeUQVcoqHFehSPzN5LSsq" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="cylinder" ]
I1 [label=< <B> IPFS </B> <BR/> <B> Qm*R3DZDV </B> > group="QmXbiVZd93SLiu9TAm21F2y9JwGiFLydbEVkPBaMR3DZDV" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="cylinder" ]
I2 [label=< <B> IPFS </B> <BR/> <B> Qm*wbBsuL </B> > group="QmbU7273zH6jxwDe2nqRmEm2rp5PpqP2xeQr2xCmwbBsuL" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="cylinder" ]
}
/* Edges representing active connections in the cluster */
/* The connections among cluster-service peers */
C0 -> C1
C0 -> C2
C1 -> C0
C1 -> C2
C2 -> C0
C2 -> C1
/* The connections between cluster peers and their ipfs daemons */
C0 -> I1
C1 -> I0
C2 -> I2
/* The swarm peer connections among ipfs daemons in the cluster */
I0 -> I1
I0 -> I2
I1 -> I0
I1 -> I2
I2 -> I1
I2 -> I0
}`
var (
pid1, _ = peer.Decode("QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD")
pid2, _ = peer.Decode("QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ")
pid3, _ = peer.Decode("QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu")
pid4, _ = peer.Decode("QmXbiVZd93SLiu9TAm21F2y9JwGiFLydbEVkPBaMR3DZDV")
pid5, _ = peer.Decode("QmPFKAGZbUjdzt8BBx8VTWCe9UeUQVcoqHFehSPzN5LSsq")
pid6, _ = peer.Decode("QmbU7273zH6jxwDe2nqRmEm2rp5PpqP2xeQr2xCmwbBsuL")
pid7, _ = peer.Decode("QmQsdAdCHs4PRLi5tcoLfasYppryqQENxgAy4b2aS8xccb")
pid8, _ = peer.Decode("QmVV2enwXqqQf5esx4v36UeaFQvFehSPzNfi8aaaaaanM8")
pid9, _ = peer.Decode("QmfCHNQ2vbUmAuJZhE2hEpgiJq4sL1XScWEKnUrVtWZdeD")
)
func TestSimpleIpfsGraphs(t *testing.T) {
cg := api.ConnectGraph{
ClusterID: pid1,
ClusterLinks: map[string][]peer.ID{
pid1.String(): {
pid2,
pid3,
},
pid2.String(): {
pid1,
pid3,
},
pid3.String(): {
pid1,
pid2,
},
},
IPFSLinks: map[string][]peer.ID{
pid4.String(): {
pid5,
pid6,
},
pid5.String(): {
pid4,
pid6,
},
pid6.String(): {
pid4,
pid5,
},
},
ClustertoIPFS: map[string]peer.ID{
pid1.String(): pid4,
pid2.String(): pid5,
pid3.String(): pid6,
},
}
buf := new(bytes.Buffer)
err := makeDot(cg, buf, false)
if err != nil {
t.Fatal(err)
}
verifyOutput(t, buf.String(), simpleIpfs)
}
var allIpfs = `digraph cluster {
/* The nodes of the connectivity graph */
/* The cluster-service peers */
subgraph {
rank="min"
C0 [label=< <B> </B> <BR/> <B> Qm*eqhEhD </B> > group="QmUBuxVHoNNjfmNpTad36UeaFQv3gXAtCv9r6KhmeqhEhD" color="orange" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="box3d" peripheries="2" ]
C1 [label=< <B> </B> <BR/> <B> Qm*cgHDQJ </B> > group="QmV35LjbEGPfN7KfMAJp43VV2enwXqqQf5esx4vUcgHDQJ" color="darkorange3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="box3d" ]
C2 [label=< <B> </B> <BR/> <B> Qm*6MQmJu </B> > group="QmZ2ckU7G35MYyJgMTwMUnicsGqSy3YUxGBX7qny6MQmJu" color="darkorange3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="box3d" ]
}
/* The ipfs peers */
subgraph {
rank="max"
I0 [label=< <B> IPFS </B> <BR/> <B> Qm*N5LSsq </B> > group="QmPFKAGZbUjdzt8BBx8VTWCe9UeUQVcoqHFehSPzN5LSsq" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="cylinder" ]
I1 [label=< <B> IPFS </B> <BR/> <B> Qm*S8xccb </B> > group="QmQsdAdCHs4PRLi5tcoLfasYppryqQENxgAy4b2aS8xccb" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="cylinder" ]
I2 [label=< <B> IPFS </B> <BR/> <B> Qm*aaanM8 </B> > group="QmVV2enwXqqQf5esx4v36UeaFQvFehSPzNfi8aaaaaanM8" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="cylinder" ]
I3 [label=< <B> IPFS </B> <BR/> <B> Qm*R3DZDV </B> > group="QmXbiVZd93SLiu9TAm21F2y9JwGiFLydbEVkPBaMR3DZDV" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="cylinder" ]
I4 [label=< <B> IPFS </B> <BR/> <B> Qm*wbBsuL </B> > group="QmbU7273zH6jxwDe2nqRmEm2rp5PpqP2xeQr2xCmwbBsuL" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="cylinder" ]
I5 [label=< <B> IPFS </B> <BR/> <B> Qm*tWZdeD </B> > group="QmfCHNQ2vbUmAuJZhE2hEpgiJq4sL1XScWEKnUrVtWZdeD" color="turquoise3" style="filled" colorscheme="x11" fontcolor="black" fontname="Arial" shape="cylinder" ]
}
/* Edges representing active connections in the cluster */
/* The connections among cluster-service peers */
C0 -> C1
C0 -> C2
C1 -> C0
C1 -> C2
C2 -> C0
C2 -> C1
/* The connections between cluster peers and their ipfs daemons */
C0 -> I3
C1 -> I0
C2 -> I4
/* The swarm peer connections among ipfs daemons in the cluster */
I0 -> I3
I0 -> I4
I0 -> I1
I0 -> I2
I0 -> I5
I3 -> I0
I3 -> I4
I3 -> I1
I3 -> I2
I3 -> I5
I4 -> I3
I4 -> I0
I4 -> I1
I4 -> I2
I4 -> I5
}`
func TestIpfsAllGraphs(t *testing.T) {
cg := api.ConnectGraph{
ClusterID: pid1,
ClusterLinks: map[string][]peer.ID{
pid1.String(): {
pid2,
pid3,
},
pid2.String(): {
pid1,
pid3,
},
pid3.String(): {
pid1,
pid2,
},
},
IPFSLinks: map[string][]peer.ID{
pid4.String(): {
pid5,
pid6,
pid7,
pid8,
pid9,
},
pid5.String(): {
pid4,
pid6,
pid7,
pid8,
pid9,
},
pid6.String(): {
pid4,
pid5,
pid7,
pid8,
pid9,
},
},
ClustertoIPFS: map[string]peer.ID{
pid1.String(): pid4,
pid2.String(): pid5,
pid3.String(): pid6,
},
}
buf := new(bytes.Buffer)
err := makeDot(cg, buf, true)
if err != nil {
t.Fatal(err)
}
verifyOutput(t, buf.String(), allIpfs)
}

File diff suppressed because it is too large Load diff

View file

@@ -1,19 +0,0 @@
# go source files
SRC := $(shell find ../.. -type f -name '*.go')
GOPATH := $(shell go env GOPATH)
GOFLAGS := "-trimpath"
all: ipfs-cluster-follow
ipfs-cluster-follow: $(SRC)
go build $(GOFLAGS) -mod=readonly -ldflags "-X main.commit=$(shell git rev-parse HEAD)"
build: ipfs-cluster-follow
install:
go install $(GOFLAGS) -ldflags "-X main.commit=$(shell git rev-parse HEAD)"
clean:
rm -f ipfs-cluster-follow
.PHONY: clean install build
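
The `-X main.commit=$(shell git rev-parse HEAD)` linker flag stamps the unexported `commit` string in package `main` at link time; `init()` in `main.go` (later in this diff) then folds it into the semver build metadata. A minimal standalone sketch of the mechanism, for illustration only:

```
package main

import "fmt"

// commit is empty in the source; a build such as
//   go build -ldflags "-X main.commit=$(git rev-parse HEAD)"
// fills it in at link time, which is exactly what the Makefile does.
var commit string

func main() {
	fmt.Println("built from commit:", commit)
}
```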

View file

@@ -1,555 +0,0 @@
package main
import (
"context"
"fmt"
"os"
"os/signal"
"path/filepath"
"strings"
"time"
ipfscluster "github.com/ipfs-cluster/ipfs-cluster"
"github.com/ipfs-cluster/ipfs-cluster/allocator/balanced"
"github.com/ipfs-cluster/ipfs-cluster/api"
"github.com/ipfs-cluster/ipfs-cluster/api/rest"
"github.com/ipfs-cluster/ipfs-cluster/cmdutils"
"github.com/ipfs-cluster/ipfs-cluster/config"
"github.com/ipfs-cluster/ipfs-cluster/consensus/crdt"
"github.com/ipfs-cluster/ipfs-cluster/datastore/badger"
"github.com/ipfs-cluster/ipfs-cluster/datastore/leveldb"
"github.com/ipfs-cluster/ipfs-cluster/informer/disk"
"github.com/ipfs-cluster/ipfs-cluster/ipfsconn/ipfshttp"
"github.com/ipfs-cluster/ipfs-cluster/monitor/pubsubmon"
"github.com/ipfs-cluster/ipfs-cluster/observations"
"github.com/ipfs-cluster/ipfs-cluster/pintracker/stateless"
"github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
cli "github.com/urfave/cli/v2"
)
func printFirstStart() {
fmt.Printf(`
No clusters configured yet!
If this is the first time you are running %s,
be sure to check out the usage documentation. Here are some
examples to get you going:
$ %s --help - general description and usage help
$ %s <clusterName> --help - Help and subcommands for the <clusterName>'s follower peer
$ %s <clusterName> info --help - Help for the "info" subcommand (same for others).
`, programName, programName, programName, programName)
}
func printNotInitialized(clusterName string) {
fmt.Printf(`
This cluster peer has not been initialized.
Try running "%s %s init <config-url>" first.
`, programName, clusterName)
}
func setLogLevels(lvl string) {
for f := range ipfscluster.LoggingFacilities {
ipfscluster.SetFacilityLogLevel(f, lvl)
}
for f := range ipfscluster.LoggingFacilitiesExtra {
ipfscluster.SetFacilityLogLevel(f, lvl)
}
}
// returns whether the config folder exists
func isInitialized(absPath string) bool {
_, err := os.Stat(absPath)
return err == nil
}
func listClustersCmd(c *cli.Context) error {
absPath, _, _ := buildPaths(c, "")
f, err := os.Open(absPath)
if os.IsNotExist(err) {
printFirstStart()
return nil
}
if err != nil {
return cli.Exit(err, 1)
}
dirs, err := f.Readdir(-1)
if err != nil {
return cli.Exit(errors.Wrapf(err, "reading %s", absPath), 1)
}
var filteredDirs []string
for _, d := range dirs {
if d.IsDir() {
configPath := filepath.Join(absPath, d.Name(), DefaultConfigFile)
if _, err := os.Stat(configPath); err == nil {
filteredDirs = append(filteredDirs, d.Name())
}
}
}
if len(filteredDirs) == 0 {
printFirstStart()
return nil
}
fmt.Printf("Configurations found for %d follower peers. For info and help, try running:\n\n", len(filteredDirs))
for _, d := range filteredDirs {
fmt.Printf("%s \"%s\"\n", programName, d)
}
fmt.Printf("\nTip: \"%s --help\" for help and examples.\n", programName)
return nil
}
func infoCmd(c *cli.Context) error {
clusterName := c.String("clusterName")
// Avoid pollution of the screen
setLogLevels("critical")
absPath, configPath, identityPath := buildPaths(c, clusterName)
if !isInitialized(absPath) {
printNotInitialized(clusterName)
return cli.Exit("", 1)
}
cfgHelper, err := cmdutils.NewLoadedConfigHelper(configPath, identityPath)
var url string
if err != nil {
if config.IsErrFetchingSource(err) {
url = fmt.Sprintf(
"failed retrieving configuration source (%s)",
cfgHelper.Manager().Source,
)
ipfsCfg := ipfshttp.Config{}
ipfsCfg.Default()
cfgHelper.Configs().Ipfshttp = &ipfsCfg
} else {
return cli.Exit(errors.Wrapf(err, "reading the configurations in %s", absPath), 1)
}
} else {
url = fmt.Sprintf("Available (%s)", cfgHelper.Manager().Source)
}
cfgHelper.Manager().Shutdown()
fmt.Printf("Information about follower peer for Cluster \"%s\":\n\n", clusterName)
fmt.Printf("Config folder: %s\n", absPath)
fmt.Printf("Config source URL: %s\n", url)
ctx := context.Background()
client, err := getClient(absPath, clusterName)
if err != nil {
return cli.Exit(errors.Wrap(err, "error creating client"), 1)
}
_, err = client.Version(ctx)
fmt.Printf("Cluster Peer online: %t\n", err == nil)
// Either we loaded a valid config, or we are using a default. Worth
// applying env vars in the second case.
if err := cfgHelper.Configs().Ipfshttp.ApplyEnvVars(); err != nil {
return cli.Exit(errors.Wrap(err, "applying environment variables to ipfshttp config"), 1)
}
cfgHelper.Configs().Ipfshttp.ConnectSwarmsDelay = 0
connector, err := ipfshttp.NewConnector(cfgHelper.Configs().Ipfshttp)
if err == nil {
_, err = connector.ID(ctx)
}
fmt.Printf("IPFS peer online: %t\n", err == nil)
if c.Command.Name == "" {
fmt.Printf("Additional help:\n\n")
fmt.Printf("-------------------------------------------------\n\n")
return cli.ShowAppHelp(c)
}
return nil
}
func initCmd(c *cli.Context) error {
if !c.Args().Present() {
return cli.Exit("configuration URL not provided", 1)
}
cfgURL := c.Args().First()
return initCluster(c, false, cfgURL)
}
func initCluster(c *cli.Context, ignoreReinit bool, cfgURL string) error {
clusterName := c.String(clusterNameFlag)
absPath, configPath, identityPath := buildPaths(c, clusterName)
if isInitialized(absPath) {
if ignoreReinit {
fmt.Println("Configuration for this cluster already exists. Skipping initialization.")
fmt.Printf("If you wish to re-initialize, simply delete %s\n\n", absPath)
return nil
}
cmdutils.ErrorOut("Configuration for this cluster already exists.\n")
cmdutils.ErrorOut("Please delete %s if you wish to re-initialize.", absPath)
return cli.Exit("", 1)
}
gw := c.String("gateway")
if !strings.HasPrefix(cfgURL, "http://") && !strings.HasPrefix(cfgURL, "https://") {
fmt.Printf("%s will be assumed to be an DNSLink-powered address: /ipns/%s.\n", cfgURL, cfgURL)
fmt.Printf("It will be resolved using the local IPFS daemon's gateway (%s).\n", gw)
fmt.Println("If this is not the case, specify the full url starting with http:// or https://.")
fmt.Println("(You can override the gateway URL by setting IPFS_GATEWAY)")
fmt.Println()
cfgURL = fmt.Sprintf("http://%s/ipns/%s", gw, cfgURL)
}
// Setting the datastore here is useless, as we initialize with remote
// config and we will have an empty service.json with the source only.
// That source will decide which datastore is actually used.
cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath, "crdt", "")
cfgHelper.Manager().Shutdown()
cfgHelper.Manager().Source = cfgURL
err := cfgHelper.Manager().Default()
if err != nil {
return cli.Exit(errors.Wrap(err, "error generating default config"), 1)
}
ident := cfgHelper.Identity()
err = ident.Default()
if err != nil {
return cli.Exit(errors.Wrap(err, "error generating identity"), 1)
}
err = ident.ApplyEnvVars()
if err != nil {
return cli.Exit(errors.Wrap(err, "error applying environment variables to the identity"), 1)
}
err = cfgHelper.SaveIdentityToDisk()
if err != nil {
return cli.Exit(errors.Wrapf(err, "error saving %s", identityPath), 1)
}
fmt.Printf("Identity written to %s.\n", identityPath)
err = cfgHelper.SaveConfigToDisk()
if err != nil {
return cli.Exit(errors.Wrapf(err, "saving %s", configPath), 1)
}
fmt.Printf("Configuration written to %s.\n", configPath)
fmt.Printf("Cluster \"%s\" follower peer initialized.\n\n", clusterName)
fmt.Printf(
"You can now use \"%s %s run\" to start a follower peer for this cluster.\n",
programName,
clusterName,
)
fmt.Println("(Remember to start your IPFS daemon before)")
return nil
}
func runCmd(c *cli.Context) error {
clusterName := c.String(clusterNameFlag)
if cfgURL := c.String("init"); cfgURL != "" {
err := initCluster(c, true, cfgURL)
if err != nil {
return err
}
}
absPath, configPath, identityPath := buildPaths(c, clusterName)
if !isInitialized(absPath) {
printNotInitialized(clusterName)
return cli.Exit("", 1)
}
fmt.Printf("Starting the IPFS Cluster follower peer for \"%s\".\nCTRL-C to stop it.\n", clusterName)
fmt.Println("Checking if IPFS is online (will wait for 2 minutes)...")
ctxIpfs, cancelIpfs := context.WithTimeout(context.Background(), 2*time.Minute)
defer cancelIpfs()
err := cmdutils.WaitForIPFS(ctxIpfs)
if err != nil {
return cli.Exit("timed out waiting for IPFS to be available", 1)
}
setLogLevels(logLevel) // set to "info" by default.
// Avoid API logs polluting the screen every time we
// run some "list" command.
ipfscluster.SetFacilityLogLevel("restapilog", "error")
cfgHelper, err := cmdutils.NewLoadedConfigHelper(configPath, identityPath)
if err != nil {
return cli.Exit(errors.Wrapf(err, "reading the configurations in %s", absPath), 1)
}
cfgHelper.Manager().Shutdown()
cfgs := cfgHelper.Configs()
stmgr, err := cmdutils.NewStateManager(cfgHelper.GetConsensus(), cfgHelper.GetDatastore(), cfgHelper.Identity(), cfgs)
if err != nil {
return cli.Exit(errors.Wrap(err, "creating state manager"), 1)
}
store, err := stmgr.GetStore()
if err != nil {
return cli.Exit(errors.Wrap(err, "creating datastore"), 1)
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
host, pubsub, dht, err := ipfscluster.NewClusterHost(ctx, cfgHelper.Identity(), cfgs.Cluster, store)
if err != nil {
return cli.Exit(errors.Wrap(err, "error creating libp2p components"), 1)
}
// Always run followers in follower mode.
cfgs.Cluster.FollowerMode = true
// Do not let trusted peers GC this peer
// Defaults to Trusted otherwise.
cfgs.Cluster.RPCPolicy["Cluster.RepoGCLocal"] = ipfscluster.RPCClosed
// Discard API configurations and create our own
apiCfg := rest.NewConfig()
cfgs.Restapi = apiCfg
_ = apiCfg.Default()
listenSocket, err := socketAddress(absPath, clusterName)
if err != nil {
return cli.Exit(err, 1)
}
apiCfg.HTTPListenAddr = []multiaddr.Multiaddr{listenSocket}
// Allow customization via env vars
err = apiCfg.ApplyEnvVars()
if err != nil {
return cli.Exit(errors.Wrap(err, "error applying environmental variables to restapi configuration"), 1)
}
rest, err := rest.NewAPI(ctx, apiCfg)
if err != nil {
return cli.Exit(errors.Wrap(err, "creating REST API component"), 1)
}
connector, err := ipfshttp.NewConnector(cfgs.Ipfshttp)
if err != nil {
return cli.Exit(errors.Wrap(err, "creating IPFS Connector component"), 1)
}
informer, err := disk.NewInformer(cfgs.DiskInf)
if err != nil {
return cli.Exit(errors.Wrap(err, "creating disk informer"), 1)
}
alloc, err := balanced.New(cfgs.BalancedAlloc)
if err != nil {
return cli.Exit(errors.Wrap(err, "creating metrics allocator"), 1)
}
crdtcons, err := crdt.New(
host,
dht,
pubsub,
cfgs.Crdt,
store,
)
if err != nil {
store.Close()
return cli.Exit(errors.Wrap(err, "creating CRDT component"), 1)
}
tracker := stateless.New(cfgs.Statelesstracker, host.ID(), cfgs.Cluster.Peername, crdtcons.State)
mon, err := pubsubmon.New(ctx, cfgs.Pubsubmon, pubsub, nil)
if err != nil {
store.Close()
return cli.Exit(errors.Wrap(err, "setting up PeerMonitor"), 1)
}
// Hardcode disabled tracing and metrics to avoid mistakenly
// exposing any user data.
tracerCfg := observations.TracingConfig{}
_ = tracerCfg.Default()
tracerCfg.EnableTracing = false
cfgs.Tracing = &tracerCfg
tracer, err := observations.SetupTracing(&tracerCfg)
if err != nil {
return cli.Exit(errors.Wrap(err, "error setting up tracer"), 1)
}
// This does nothing since we are not calling SetupMetrics anyways
// But stays just to be explicit.
metricsCfg := observations.MetricsConfig{}
_ = metricsCfg.Default()
metricsCfg.EnableStats = false
cfgs.Metrics = &metricsCfg
// We are going to run a cluster peer and should do an
// orderly shutdown if we are interrupted: cancel default
// signal handling and leave things to HandleSignals.
signal.Stop(signalChan)
close(signalChan)
cluster, err := ipfscluster.NewCluster(
ctx,
host,
dht,
cfgs.Cluster,
store,
crdtcons,
[]ipfscluster.API{rest},
connector,
tracker,
mon,
alloc,
[]ipfscluster.Informer{informer},
tracer,
)
if err != nil {
store.Close()
return cli.Exit(errors.Wrap(err, "error creating cluster peer"), 1)
}
return cmdutils.HandleSignals(ctx, cancel, cluster, host, dht, store)
}
// List
func listCmd(c *cli.Context) error {
clusterName := c.String("clusterName")
absPath, configPath, identityPath := buildPaths(c, clusterName)
if !isInitialized(absPath) {
printNotInitialized(clusterName)
return cli.Exit("", 1)
}
err := printStatusOnline(absPath, clusterName)
if err == nil {
return nil
}
// There was an error. Try offline status
apiErr, ok := err.(*api.Error)
if ok && apiErr.Code != 0 {
return cli.Exit(
errors.Wrapf(
err,
"The Peer API seems to be running but returned with code %d",
apiErr.Code,
), 1)
}
// We are in offline mode, so we cannot rely on IPFS running,
// and most probably our configuration is remote and must be
// loaded from IPFS. Thus we need to find a different way to
// decide whether to load badger/leveldb, and once we know,
// do it with the default settings.
hasLevelDB := false
lDBCfg := &leveldb.Config{}
lDBCfg.SetBaseDir(absPath)
lDBCfg.Default()
levelDBInfo, err := os.Stat(lDBCfg.GetFolder())
if err == nil && levelDBInfo.IsDir() {
hasLevelDB = true
}
hasBadger := false
badgerCfg := &badger.Config{}
badgerCfg.SetBaseDir(absPath)
badgerCfg.Default()
badgerInfo, err := os.Stat(badgerCfg.GetFolder())
if err == nil && badgerInfo.IsDir() {
hasBadger = true
}
if hasLevelDB && hasBadger {
return cli.Exit(errors.Wrapf(err, "found both leveldb (%s) and badger (%s) folders: cannot determine which to use in offline mode", lDBCfg.GetFolder(), badgerCfg.GetFolder()), 1)
}
// Since things were initialized, assume there is at least one.
dstoreType := "leveldb"
if hasBadger {
dstoreType = "badger"
}
cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath, "crdt", dstoreType)
cfgHelper.Manager().Shutdown() // not needed
cfgHelper.Configs().Badger.SetBaseDir(absPath)
cfgHelper.Configs().LevelDB.SetBaseDir(absPath)
cfgHelper.Manager().Default() // we have a default crdt config with either leveldb or badger registered.
cfgHelper.Manager().ApplyEnvVars()
err = printStatusOffline(cfgHelper)
if err != nil {
return cli.Exit(errors.Wrap(err, "error obtaining the pinset"), 1)
}
return nil
}
func printStatusOnline(absPath, clusterName string) error {
ctx := context.Background()
client, err := getClient(absPath, clusterName)
if err != nil {
return cli.Exit(errors.Wrap(err, "error creating client"), 1)
}
out := make(chan api.GlobalPinInfo, 1024)
errCh := make(chan error, 1)
go func() {
defer close(errCh)
errCh <- client.StatusAll(ctx, 0, true, out)
}()
var pid string
for gpi := range out {
if pid == "" { // do this once
// PeerMap will only have one key
for k := range gpi.PeerMap {
pid = k
break
}
}
pinInfo := gpi.PeerMap[pid]
printPin(gpi.Cid, pinInfo.Status.String(), gpi.Name, pinInfo.Error)
}
err = <-errCh
return err
}
func printStatusOffline(cfgHelper *cmdutils.ConfigHelper) error {
mgr, err := cmdutils.NewStateManagerWithHelper(cfgHelper)
if err != nil {
return err
}
store, err := mgr.GetStore()
if err != nil {
return err
}
defer store.Close()
st, err := mgr.GetOfflineState(store)
if err != nil {
return err
}
out := make(chan api.Pin, 1024)
errCh := make(chan error, 1)
go func() {
defer close(errCh)
errCh <- st.List(context.Background(), out)
}()
for pin := range out {
printPin(pin.Cid, "offline", pin.Name, "")
}
err = <-errCh
return err
}
func printPin(c api.Cid, status, name, err string) {
if err != "" {
name = name + " (" + err + ")"
}
fmt.Printf("%-20s %s %s\n", status, c, name)
}

View file

@@ -1,5 +0,0 @@
Dual-licensed under MIT and ASLv2, by way of the [Permissive License
Stack](https://protocol.ai/blog/announcing-the-permissive-license-stack/).
Apache-2.0: https://www.apache.org/licenses/license-2.0
MIT: https://www.opensource.org/licenses/mit

View file

@@ -1,13 +0,0 @@
Copyright 2020. Protocol Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@@ -1,19 +0,0 @@
Copyright 2020. Protocol Labs, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@@ -1,33 +0,0 @@
# `ipfs-cluster-follow`
> A tool to run IPFS Cluster follower peers
`ipfs-cluster-follow` allows you to set up and run IPFS Cluster follower peers.
Follower peers can join collaborative clusters to track content in the
cluster. Follower peers do not have permissions to modify the cluster pinset
or access endpoints from other follower peers.
`ipfs-cluster-follow` can run several peers at the same time (each
joining a different cluster) and is intended to be a very easy-to-use
application with a minimal feature set. To run a fully-featured peer
(follower or not), use `ipfs-cluster-service`.
### Usage
The `ipfs-cluster-follow` command is always followed by the cluster name
that we wish to work with. Full usage information can be obtained by running:
```
$ ipfs-cluster-follow --help
$ ipfs-cluster-follow <clusterName> --help
$ ipfs-cluster-follow <clusterName> info --help
$ ipfs-cluster-follow <clusterName> init --help
$ ipfs-cluster-follow <clusterName> run --help
$ ipfs-cluster-follow <clusterName> list --help
```
For more information, please check the [Documentation](https://ipfscluster.io/documentation), in particular the [`ipfs-cluster-follow` section](https://ipfscluster.io/documentation/ipfs-cluster-follow).

View file

@@ -1,331 +0,0 @@
// The ipfs-cluster-follow application.
package main
import (
"fmt"
"os"
"os/signal"
"os/user"
"path/filepath"
"syscall"
"github.com/ipfs-cluster/ipfs-cluster/api/rest/client"
"github.com/ipfs-cluster/ipfs-cluster/cmdutils"
"github.com/ipfs-cluster/ipfs-cluster/version"
"github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
semver "github.com/blang/semver"
cli "github.com/urfave/cli/v2"
)
const (
// ProgramName of this application
programName = "ipfs-cluster-follow"
clusterNameFlag = "clusterName"
logLevel = "info"
)
// Default location for the configurations and data
var (
// DefaultFolder is the name of the cluster folder
DefaultFolder = ".ipfs-cluster-follow"
// DefaultPath is set on init() to $HOME/DefaultFolder
// and holds all the ipfs-cluster data
DefaultPath string
// The name of the configuration file inside DefaultPath
DefaultConfigFile = "service.json"
// The name of the identity file inside DefaultPath
DefaultIdentityFile = "identity.json"
DefaultGateway = "127.0.0.1:8080"
)
var (
commit string
configPath string
identityPath string
signalChan = make(chan os.Signal, 20)
)
// Description provides a short summary of the functionality of this tool
var Description = fmt.Sprintf(`
%s helps running IPFS Cluster follower peers.
Follower peers subscribe to a Cluster controlled by a set of "trusted
peers". They collaborate in pinning items as dictated by the trusted peers and
do not have the power to make Cluster-wide modifications to the pinset.
Follower peers cannot access information nor trigger actions in other peers.
%s can be used to follow different clusters by launching it
with different options. Each Cluster has an identity, a configuration
and a datastore associated with it, which are kept under
"~/%s/<cluster_name>".
For feedback, bug reports or any additional information, visit
https://github.com/ipfs-cluster/ipfs-cluster.
EXAMPLES:
List configured follower peers:
$ %s
Display information for a follower peer:
$ %s <clusterName> info
Initialize a follower peer:
$ %s <clusterName> init <example.url>
Launch a follower peer (will stay running):
$ %s <clusterName> run
List items in the pinset for a given cluster:
$ %s <clusterName> list
Getting help and usage info:
$ %s --help
$ %s <clusterName> --help
$ %s <clusterName> info --help
$ %s <clusterName> init --help
$ %s <clusterName> run --help
$ %s <clusterName> list --help
`,
programName,
programName,
DefaultFolder,
programName,
programName,
programName,
programName,
programName,
programName,
programName,
programName,
programName,
programName,
programName,
)
func init() {
// Set build information.
if build, err := semver.NewBuildVersion(commit); err == nil {
version.Version.Build = []string{"git" + build}
}
// We try guessing user's home from the HOME variable. This
// allows HOME hacks for things like Snapcraft builds. HOME
// should be set on all UNIX systems by the OS. Alternatively, we fall back to
// usr.HomeDir (which should work on Windows etc.).
home := os.Getenv("HOME")
if home == "" {
usr, err := user.Current()
if err != nil {
panic(fmt.Sprintf("cannot get current user: %s", err))
}
home = usr.HomeDir
}
DefaultPath = filepath.Join(home, DefaultFolder)
// This will abort the program on signal. We close the signal channel
// when launching the peer so that we can do an orderly shutdown in
// that case though.
go func() {
signal.Notify(
signalChan,
syscall.SIGINT,
syscall.SIGTERM,
syscall.SIGHUP,
)
_, ok := <-signalChan // channel closed.
if !ok {
return
}
os.Exit(1)
}()
}
func main() {
app := cli.NewApp()
app.Name = programName
app.Usage = "IPFS Cluster Follower"
app.UsageText = fmt.Sprintf("%s [global options] <clusterName> [subcommand]...", programName)
app.Description = Description
//app.Copyright = "© Protocol Labs, Inc."
app.Version = version.Version.String()
app.Flags = []cli.Flag{
&cli.StringFlag{
Name: "config, c",
Value: DefaultPath,
Usage: "path to the followers configuration and data `FOLDER`",
EnvVars: []string{"IPFS_CLUSTER_PATH"},
},
}
app.Action = func(c *cli.Context) error {
if !c.Args().Present() {
return listClustersCmd(c)
}
clusterName := c.Args().Get(0)
clusterApp := cli.NewApp()
clusterApp.Name = fmt.Sprintf("%s %s", programName, clusterName)
clusterApp.HelpName = clusterApp.Name
clusterApp.Usage = fmt.Sprintf("Follower peer management for \"%s\"", clusterName)
clusterApp.UsageText = fmt.Sprintf("%s %s [subcommand]", programName, clusterName)
clusterApp.Action = infoCmd
clusterApp.HideVersion = true
clusterApp.Flags = []cli.Flag{
&cli.StringFlag{ // pass clusterName to subcommands
Name: clusterNameFlag,
Value: clusterName,
Hidden: true,
},
}
clusterApp.Commands = []*cli.Command{
{
Name: "info",
Usage: "displays information for this peer",
ArgsUsage: "",
Description: fmt.Sprintf(`
This command displays useful information for "%s"'s follower peer.
`, clusterName),
Action: infoCmd,
},
{
Name: "init",
Usage: "initializes the follower peer",
ArgsUsage: "<template_URL>",
Description: fmt.Sprintf(`
This command initializes a follower peer for the cluster named "%s". You
will need to pass the peer configuration URL. The command will generate a new
peer identity and leave things ready to run "%s %s run".
An error will be returned if a configuration folder for a cluster peer with
this name already exists. If you wish to re-initialize from scratch, delete
this folder first.
`, clusterName, programName, clusterName),
Action: initCmd,
Flags: []cli.Flag{
&cli.StringFlag{
Name: "gateway",
Value: DefaultGateway,
Usage: "gateway URL",
EnvVars: []string{"IPFS_GATEWAY"},
Hidden: true,
},
},
},
{
Name: "run",
Usage: "runs the follower peer",
ArgsUsage: "",
Description: fmt.Sprintf(`
This command runs a "%s" cluster follower peer. The peer should have already
been initialized with "init"; alternatively, the --init flag needs to be
passed.
Before running, ensure that you have connectivity and that the IPFS daemon is
running.
You can obtain more information about this follower peer by running
"%s %s" (without any arguments).
The peer will stay running in the foreground until manually stopped.
`, clusterName, programName, clusterName),
Action: runCmd,
Flags: []cli.Flag{
&cli.StringFlag{
Name: "init",
Usage: "initialize cluster peer with the given URL before running",
},
&cli.StringFlag{
Name: "gateway",
Value: DefaultGateway,
Usage: "gateway URL",
EnvVars: []string{"IPFS_GATEWAY"},
Hidden: true,
},
},
},
{
Name: "list",
Usage: "list items in the peers' pinset",
ArgsUsage: "",
Description: `
This command lists all the items pinned by this follower cluster peer on IPFS.
If the peer is currently running, it will display status information for each
pin (such as PINNING). If not, it will just display the current list of pins
as obtained from the internal state on disk.
`,
Action: listCmd,
},
}
return clusterApp.RunAsSubcommand(c)
}
app.Run(os.Args)
}
// buildPaths returns the paths to the configuration folder,
// the identity.json file and the service.json file.
func buildPaths(c *cli.Context, clusterName string) (string, string, string) {
absPath, err := filepath.Abs(c.String("config"))
if err != nil {
cmdutils.ErrorOut("error getting absolute path for %s: %s", err, clusterName)
os.Exit(1)
}
// ~/.ipfs-cluster-follow/clusterName
absPath = filepath.Join(absPath, clusterName)
// ~/.ipfs-cluster-follow/clusterName/service.json
configPath = filepath.Join(absPath, DefaultConfigFile)
// ~/.ipfs-cluster-follow/clusterName/identity.json
identityPath = filepath.Join(absPath, DefaultIdentityFile)
return absPath, configPath, identityPath
}
func socketAddress(absPath, clusterName string) (multiaddr.Multiaddr, error) {
socket := fmt.Sprintf("/unix/%s", filepath.Join(absPath, "api-socket"))
ma, err := multiaddr.NewMultiaddr(socket)
if err != nil {
return nil, errors.Wrapf(err, "error parsing socket: %s", socket)
}
return ma, nil
}
// returns a REST API client. Points to the socket address unless
// CLUSTER_RESTAPI_HTTPLISTENMULTIADDRESS is set, in which case it uses it.
func getClient(absPath, clusterName string) (client.Client, error) {
var endp multiaddr.Multiaddr
var err error
if endpStr := os.Getenv("CLUSTER_RESTAPI_HTTPLISTENMULTIADDRESS"); endpStr != "" {
endp, err = multiaddr.NewMultiaddr(endpStr)
if err != nil {
return nil, errors.Wrapf(err, "error parsing the value of CLUSTER_RESTAPI_HTTPLISTENMULTIADDRESS: %s", endpStr)
}
} else {
endp, err = socketAddress(absPath, clusterName)
}
if err != nil {
return nil, err
}
cfg := client.Config{
APIAddr: endp,
}
return client.NewDefaultClient(&cfg)
}
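
For illustration, a sketch of how `socketAddress` and `getClient` combine, mirroring the online probe that `infoCmd` performs earlier in this diff: the client talks to the unix socket at `<absPath>/api-socket` unless `CLUSTER_RESTAPI_HTTPLISTENMULTIADDRESS` points it elsewhere. `peerOnline` is a hypothetical helper in the same package, not part of the original file (it additionally assumes "context" is imported):

```
// peerOnline is a hypothetical helper, not part of the original file.
// It reports whether the follower peer's REST API answers, the same
// check infoCmd uses for its "Cluster Peer online" line.
func peerOnline(absPath, clusterName string) bool {
	c, err := getClient(absPath, clusterName)
	if err != nil {
		return false
	}
	_, err = c.Version(context.Background())
	return err == nil // any error means the API was unreachable
}
```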

View file

@@ -1,19 +0,0 @@
# go source files
SRC := $(shell find ../.. -type f -name '*.go')
GOPATH := $(shell go env GOPATH)
GOFLAGS := "-trimpath"
all: ipfs-cluster-service
ipfs-cluster-service: $(SRC)
go build $(GOFLAGS) -mod=readonly -ldflags "-X main.commit=$(shell git rev-parse HEAD)"
build: ipfs-cluster-service
install:
go install $(GOFLAGS) -ldflags "-X main.commit=$(shell git rev-parse HEAD)"
clean:
rm -f ipfs-cluster-service
.PHONY: clean install build

View file

@@ -1,309 +0,0 @@
package main
import (
"context"
"strings"
"time"
ipfscluster "github.com/ipfs-cluster/ipfs-cluster"
"github.com/ipfs-cluster/ipfs-cluster/allocator/balanced"
"github.com/ipfs-cluster/ipfs-cluster/api/ipfsproxy"
"github.com/ipfs-cluster/ipfs-cluster/api/pinsvcapi"
"github.com/ipfs-cluster/ipfs-cluster/api/rest"
"github.com/ipfs-cluster/ipfs-cluster/cmdutils"
"github.com/ipfs-cluster/ipfs-cluster/config"
"github.com/ipfs-cluster/ipfs-cluster/consensus/crdt"
"github.com/ipfs-cluster/ipfs-cluster/consensus/raft"
"github.com/ipfs-cluster/ipfs-cluster/informer/disk"
"github.com/ipfs-cluster/ipfs-cluster/informer/pinqueue"
"github.com/ipfs-cluster/ipfs-cluster/informer/tags"
"github.com/ipfs-cluster/ipfs-cluster/ipfsconn/ipfshttp"
"github.com/ipfs-cluster/ipfs-cluster/monitor/pubsubmon"
"github.com/ipfs-cluster/ipfs-cluster/observations"
"github.com/ipfs-cluster/ipfs-cluster/pintracker/stateless"
"go.opencensus.io/tag"
ds "github.com/ipfs/go-datastore"
host "github.com/libp2p/go-libp2p/core/host"
peer "github.com/libp2p/go-libp2p/core/peer"
dual "github.com/libp2p/go-libp2p-kad-dht/dual"
pubsub "github.com/libp2p/go-libp2p-pubsub"
ma "github.com/multiformats/go-multiaddr"
errors "github.com/pkg/errors"
cli "github.com/urfave/cli"
)
func parseBootstraps(flagVal []string) (bootstraps []ma.Multiaddr) {
for _, a := range flagVal {
bAddr, err := ma.NewMultiaddr(strings.TrimSpace(a))
checkErr("error parsing bootstrap multiaddress (%s)", err, a)
bootstraps = append(bootstraps, bAddr)
}
return
}
// Runs the cluster peer
func daemon(c *cli.Context) error {
logger.Info("Initializing. For verbose output run with \"-l debug\". Please wait...")
ctx, cancel := context.WithCancel(context.Background())
var bootstraps []ma.Multiaddr
if bootStr := c.String("bootstrap"); bootStr != "" {
bootstraps = parseBootstraps(strings.Split(bootStr, ","))
}
// Execution lock
locker.lock()
defer locker.tryUnlock()
// Load all the configurations and identity
cfgHelper, err := cmdutils.NewLoadedConfigHelper(configPath, identityPath)
checkErr("loading configurations", err)
defer cfgHelper.Manager().Shutdown()
cfgs := cfgHelper.Configs()
if c.Bool("stats") {
cfgs.Metrics.EnableStats = true
}
cfgHelper.SetupTracing(c.Bool("tracing"))
// Setup bootstrapping
raftStaging := false
switch cfgHelper.GetConsensus() {
case cfgs.Raft.ConfigKey():
if len(bootstraps) > 0 {
// Cleanup state if bootstrapping
raft.CleanupRaft(cfgs.Raft)
raftStaging = true
}
case cfgs.Crdt.ConfigKey():
if !c.Bool("no-trust") {
crdtCfg := cfgs.Crdt
crdtCfg.TrustedPeers = append(crdtCfg.TrustedPeers, ipfscluster.PeersFromMultiaddrs(bootstraps)...)
}
}
if c.Bool("leave") {
cfgs.Cluster.LeaveOnShutdown = true
}
store := setupDatastore(cfgHelper)
host, pubsub, dht, err := ipfscluster.NewClusterHost(ctx, cfgHelper.Identity(), cfgs.Cluster, store)
checkErr("creating libp2p host", err)
cluster, err := createCluster(ctx, c, cfgHelper, host, pubsub, dht, store, raftStaging)
checkErr("starting cluster", err)
// noop if no bootstraps
// If bootstrapping fails, consensus will never become ready
// and will time out. So this can happen in the background and
// we avoid worrying about error handling here (since Cluster
// will notice).
go bootstrap(ctx, cluster, bootstraps)
return cmdutils.HandleSignals(ctx, cancel, cluster, host, dht, store)
}
// createCluster creates all the necessary things to produce the cluster
// object and returns it along the datastore so the lifecycle can be handled
// (the datastore needs to be Closed after shutting down the Cluster).
func createCluster(
ctx context.Context,
c *cli.Context,
cfgHelper *cmdutils.ConfigHelper,
host host.Host,
pubsub *pubsub.PubSub,
dht *dual.DHT,
store ds.Datastore,
raftStaging bool,
) (*ipfscluster.Cluster, error) {
cfgs := cfgHelper.Configs()
cfgMgr := cfgHelper.Manager()
cfgBytes, err := cfgMgr.ToDisplayJSON()
checkErr("getting configuration string", err)
logger.Debugf("Configuration:\n%s\n", cfgBytes)
ctx, err = tag.New(ctx, tag.Upsert(observations.HostKey, host.ID().Pretty()))
checkErr("tag context with host id", err)
err = observations.SetupMetrics(cfgs.Metrics)
checkErr("setting up Metrics", err)
tracer, err := observations.SetupTracing(cfgs.Tracing)
checkErr("setting up Tracing", err)
var apis []ipfscluster.API
if cfgMgr.IsLoadedFromJSON(config.API, cfgs.Restapi.ConfigKey()) {
var api *rest.API
// Do NOT enable default Libp2p API endpoint on CRDT
// clusters. Collaborative clusters are likely to share the
// secret with untrusted peers, thus the API would be open to
// anyone.
if cfgHelper.GetConsensus() == cfgs.Raft.ConfigKey() {
api, err = rest.NewAPIWithHost(ctx, cfgs.Restapi, host)
} else {
api, err = rest.NewAPI(ctx, cfgs.Restapi)
}
checkErr("creating REST API component", err)
apis = append(apis, api)
}
if cfgMgr.IsLoadedFromJSON(config.API, cfgs.Pinsvcapi.ConfigKey()) {
pinsvcapi, err := pinsvcapi.NewAPI(ctx, cfgs.Pinsvcapi)
checkErr("creating Pinning Service API component", err)
apis = append(apis, pinsvcapi)
}
if cfgMgr.IsLoadedFromJSON(config.API, cfgs.Ipfsproxy.ConfigKey()) {
proxy, err := ipfsproxy.New(cfgs.Ipfsproxy)
checkErr("creating IPFS Proxy component", err)
apis = append(apis, proxy)
}
connector, err := ipfshttp.NewConnector(cfgs.Ipfshttp)
checkErr("creating IPFS Connector component", err)
var informers []ipfscluster.Informer
if cfgMgr.IsLoadedFromJSON(config.Informer, cfgs.DiskInf.ConfigKey()) {
diskInf, err := disk.NewInformer(cfgs.DiskInf)
checkErr("creating disk informer", err)
informers = append(informers, diskInf)
}
if cfgMgr.IsLoadedFromJSON(config.Informer, cfgs.TagsInf.ConfigKey()) {
tagsInf, err := tags.New(cfgs.TagsInf)
checkErr("creating numpin informer", err)
informers = append(informers, tagsInf)
}
if cfgMgr.IsLoadedFromJSON(config.Informer, cfgs.PinQueueInf.ConfigKey()) {
pinQueueInf, err := pinqueue.New(cfgs.PinQueueInf)
checkErr("creating pinqueue informer", err)
informers = append(informers, pinQueueInf)
}
// For legacy compatibility we need to make the allocator
// automatically compatible with informers that have been loaded. For
// simplicity we assume that anyone who does not specify an allocator
// configuration (legacy configs) will be using "freespace".
if !cfgMgr.IsLoadedFromJSON(config.Allocator, cfgs.BalancedAlloc.ConfigKey()) {
cfgs.BalancedAlloc.AllocateBy = []string{"freespace"}
}
alloc, err := balanced.New(cfgs.BalancedAlloc)
checkErr("creating allocator", err)
ipfscluster.ReadyTimeout = cfgs.Raft.WaitForLeaderTimeout + 5*time.Second
cons, err := setupConsensus(
cfgHelper,
host,
dht,
pubsub,
store,
raftStaging,
)
if err != nil {
store.Close()
checkErr("setting up Consensus", err)
}
var peersF func(context.Context) ([]peer.ID, error)
if cfgHelper.GetConsensus() == cfgs.Raft.ConfigKey() {
peersF = cons.Peers
}
tracker := stateless.New(cfgs.Statelesstracker, host.ID(), cfgs.Cluster.Peername, cons.State)
logger.Debug("stateless pintracker loaded")
mon, err := pubsubmon.New(ctx, cfgs.Pubsubmon, pubsub, peersF)
if err != nil {
store.Close()
checkErr("setting up PeerMonitor", err)
}
return ipfscluster.NewCluster(
ctx,
host,
dht,
cfgs.Cluster,
store,
cons,
apis,
connector,
tracker,
mon,
alloc,
informers,
tracer,
)
}
// bootstrap will bootstrap this peer to one of the bootstrap addresses
// if there are any.
func bootstrap(ctx context.Context, cluster *ipfscluster.Cluster, bootstraps []ma.Multiaddr) {
for _, bstrap := range bootstraps {
logger.Infof("Bootstrapping to %s", bstrap)
err := cluster.Join(ctx, bstrap)
if err != nil {
logger.Errorf("bootstrap to %s failed: %s", bstrap, err)
}
}
}
func setupDatastore(cfgHelper *cmdutils.ConfigHelper) ds.Datastore {
dsName := cfgHelper.GetDatastore()
stmgr, err := cmdutils.NewStateManager(cfgHelper.GetConsensus(), dsName, cfgHelper.Identity(), cfgHelper.Configs())
checkErr("creating state manager", err)
store, err := stmgr.GetStore()
checkErr("creating datastore", err)
if dsName != "" {
logger.Infof("Datastore backend: %s", dsName)
}
return store
}
func setupConsensus(
cfgHelper *cmdutils.ConfigHelper,
h host.Host,
dht *dual.DHT,
pubsub *pubsub.PubSub,
store ds.Datastore,
raftStaging bool,
) (ipfscluster.Consensus, error) {
cfgs := cfgHelper.Configs()
switch cfgHelper.GetConsensus() {
case cfgs.Raft.ConfigKey():
rft, err := raft.NewConsensus(
h,
cfgHelper.Configs().Raft,
store,
raftStaging,
)
if err != nil {
return nil, errors.Wrap(err, "creating Raft component")
}
return rft, nil
case cfgs.Crdt.ConfigKey():
convrdt, err := crdt.New(
h,
dht,
pubsub,
cfgHelper.Configs().Crdt,
store,
)
if err != nil {
return nil, errors.Wrap(err, "creating CRDT component")
}
return convrdt, nil
default:
return nil, errors.New("unknown consensus component")
}
}

View file

@@ -1,5 +0,0 @@
Dual-licensed under MIT and ASLv2, by way of the [Permissive License
Stack](https://protocol.ai/blog/announcing-the-permissive-license-stack/).
Apache-2.0: https://www.apache.org/licenses/license-2.0
MIT: https://www.opensource.org/licenses/mit

View file

@@ -1,13 +0,0 @@
Copyright 2020. Protocol Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@@ -1,19 +0,0 @@
Copyright 2020. Protocol Labs, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@@ -1,15 +0,0 @@
# `ipfs-cluster-service`
> The IPFS cluster peer daemon
`ipfs-cluster-service` runs a full IPFS Cluster peer.
### Usage
Usage information can be obtained with:
```
$ ipfs-cluster-service --help
```
For more information, please check the [Documentation](https://ipfscluster.io/documentation), in particular the [`ipfs-cluster-service` section](https://ipfscluster.io/documentation/ipfs-cluster-service).

View file

@@ -1 +0,0 @@
{"replication_factor_min":-1,"replication_factor_max":-1,"name":"","mode":"direct","shard_size":0,"user_allocations":null,"expire_at":"0001-01-01T00:00:00Z","metadata":null,"pin_update":null,"cid":{"/":"QmUaFyXjZUNaUwYF8rBtbJc7fEJ46aJXvgV8z2HHs6jvmJ"},"type":2,"allocations":[],"max_depth":0,"reference":null}

View file

@@ -1,71 +0,0 @@
package main
import (
"errors"
"fmt"
"io"
"path"
fslock "github.com/ipfs/go-fs-lock"
"github.com/ipfs-cluster/ipfs-cluster/cmdutils"
)
// lock logic heavily inspired by go-ipfs/repo/fsrepo/lock/lock.go
// The name of the file used for locking
const lockFileName = "cluster.lock"
var locker *lock
// lock helps to coordinate processes via a lock file
type lock struct {
lockCloser io.Closer
path string
}
func (l *lock) lock() {
if l.lockCloser != nil {
checkErr("", errors.New("cannot acquire lock twice"))
}
// we should have a config folder whenever we try to lock
cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath, "", "")
cfgHelper.MakeConfigFolder()
// set the lock file within this function
logger.Debug("checking lock")
lk, err := fslock.Lock(l.path, lockFileName)
if err != nil {
logger.Debug(err)
l.lockCloser = nil
errStr := "%s. If no other "
errStr += "%s process is running, remove %s, or make sure "
errStr += "that the config folder is writable for the user "
errStr += "running %s."
errStr = fmt.Sprintf(
errStr,
err,
programName,
path.Join(l.path, lockFileName),
programName,
)
checkErr("obtaining execution lock", errors.New(errStr))
}
logger.Debugf("%s execution lock acquired", programName)
l.lockCloser = lk
}
func (l *lock) tryUnlock() error {
// Noop in the uninitialized case
if l.lockCloser == nil {
logger.Debug("locking not initialized, unlock is noop")
return nil
}
err := l.lockCloser.Close()
if err != nil {
return err
}
logger.Debug("successfully released execution lock")
l.lockCloser = nil
return nil
}
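
A minimal standalone sketch of the `fslock` primitive the type above builds on, assuming only that `fslock.Lock` behaves as used in this file (the folder path is illustrative):

```
package main

import (
	"fmt"

	fslock "github.com/ipfs/go-fs-lock"
)

func main() {
	// fslock.Lock creates cluster.lock inside the folder and returns an
	// io.Closer that releases it; a second caller gets an error, which
	// is how concurrent daemons on the same config folder are prevented.
	closer, err := fslock.Lock("/tmp/cluster-example", "cluster.lock")
	if err != nil {
		fmt.Println("already locked:", err)
		return
	}
	defer closer.Close() // equivalent to tryUnlock above
	fmt.Println("lock acquired")
}
```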

View file

@@ -1,749 +0,0 @@
// The ipfs-cluster-service application.
package main
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"os"
"os/user"
"path/filepath"
"strings"
ipfscluster "github.com/ipfs-cluster/ipfs-cluster"
"github.com/ipfs-cluster/ipfs-cluster/api"
"github.com/ipfs-cluster/ipfs-cluster/cmdutils"
"github.com/ipfs-cluster/ipfs-cluster/pstoremgr"
"github.com/ipfs-cluster/ipfs-cluster/version"
peer "github.com/libp2p/go-libp2p/core/peer"
ma "github.com/multiformats/go-multiaddr"
semver "github.com/blang/semver"
logging "github.com/ipfs/go-log/v2"
cli "github.com/urfave/cli"
)
// ProgramName of this application
const programName = "ipfs-cluster-service"
// flag defaults
const (
defaultLogLevel = "info"
defaultConsensus = "crdt"
defaultDatastore = "badger"
)
const (
stateCleanupPrompt = "The peer state will be removed. Existing pins may be lost."
configurationOverwritePrompt = "The configuration file will be overwritten."
)
// We store a commit id here
var commit string
// Description provides a short summary of the functionality of this tool
var Description = fmt.Sprintf(`
%s runs an IPFS Cluster peer.
A peer participates in the cluster consensus, follows a distributed log
of pinning and unpinning requests and manages pinning operations to a
configured IPFS daemon.
This peer also provides an API for cluster management, an IPFS Proxy API which
forwards requests to IPFS and a number of components for internal communication
using LibP2P. This is a simplified view of the components:
+------------------+
| ipfs-cluster-ctl |
+---------+--------+
|
| HTTP(s)
ipfs-cluster-service | HTTP
+----------+--------+--v--+----------------------+ +-------------+
| RPC | Peer 1 | API | IPFS Connector/Proxy +------> IPFS daemon |
+----^-----+--------+-----+----------------------+ +-------------+
| libp2p
|
+----v-----+--------+-----+----------------------+ +-------------+
| RPC | Peer 2 | API | IPFS Connector/Proxy +------> IPFS daemon |
+----^-----+--------+-----+----------------------+ +-------------+
|
|
+----v-----+--------+-----+----------------------+ +-------------+
| RPC | Peer 3 | API | IPFS Connector/Proxy +------> IPFS daemon |
+----------+--------+-----+----------------------+ +-------------+
%s needs valid configuration and identity files to run.
These are independent from IPFS. The identity includes its own
libp2p key-pair. They can be initialized with "init" and their
default locations are ~/%s/%s
and ~/%s/%s.
For feedback, bug reports or any additional information, visit
https://github.com/ipfs-cluster/ipfs-cluster.
EXAMPLES:
Initial configuration:
$ ipfs-cluster-service init
Launch a cluster:
$ ipfs-cluster-service daemon
Launch a peer and join existing cluster:
$ ipfs-cluster-service daemon --bootstrap /ip4/192.168.1.2/tcp/9096/p2p/QmPSoSaPXpyunaBwHs1rZBKYSqRV4bLRk32VGYLuvdrypL
Customize logs using the --loglevel flag. To customize component-level
logging, pass a comma-separated list of component-identifier:log-level
pairs, or a bare log level to set the overall default. Valid log levels
are critical, error, warning, notice, info and debug.
$ ipfs-cluster-service --loglevel info,cluster:debug,pintracker:debug daemon
`,
programName,
programName,
DefaultFolder,
DefaultConfigFile,
DefaultFolder,
DefaultIdentityFile,
)
var logger = logging.Logger("service")
// Default location for the configurations and data
var (
// DefaultFolder is the name of the cluster folder
DefaultFolder = ".ipfs-cluster"
// DefaultPath is set on init() to $HOME/DefaultFolder
// and holds all the ipfs-cluster data
DefaultPath string
// The name of the configuration file inside DefaultPath
DefaultConfigFile = "service.json"
// The name of the identity file inside DefaultPath
DefaultIdentityFile = "identity.json"
)
var (
configPath string
identityPath string
)
func init() {
// Set build information.
if build, err := semver.NewBuildVersion(commit); err == nil {
version.Version.Build = []string{"git" + build}
}
// We try guessing user's home from the HOME variable. This
// allows HOME hacks for things like Snapcraft builds. HOME
// should be set on all UNIX systems by the OS. Alternatively, we fall back to
// usr.HomeDir (which should work on Windows etc.).
home := os.Getenv("HOME")
if home == "" {
usr, err := user.Current()
if err != nil {
panic(fmt.Sprintf("cannot get current user: %s", err))
}
home = usr.HomeDir
}
DefaultPath = filepath.Join(home, DefaultFolder)
}
func out(m string, a ...interface{}) {
fmt.Fprintf(os.Stderr, m, a...)
}
func checkErr(doing string, err error, args ...interface{}) {
if err != nil {
if len(args) > 0 {
doing = fmt.Sprintf(doing, args...)
}
out("error %s: %s\n", doing, err)
err = locker.tryUnlock()
if err != nil {
out("error releasing execution lock: %s\n", err)
}
os.Exit(1)
}
}
func main() {
app := cli.NewApp()
app.Name = programName
app.Usage = "IPFS Cluster peer"
app.Description = Description
//app.Copyright = "© Protocol Labs, Inc."
app.Version = version.Version.String()
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "config, c",
Value: DefaultPath,
Usage: "path to the configuration and data `FOLDER`",
EnvVar: "IPFS_CLUSTER_PATH",
},
cli.BoolFlag{
Name: "force, f",
Usage: "forcefully proceed with some actions. i.e. overwriting configuration",
},
cli.BoolFlag{
Name: "debug, d",
Usage: "enable full debug logging (very verbose)",
},
cli.StringFlag{
Name: "loglevel, l",
EnvVar: "IPFS_CLUSTER_LOG_LEVEL",
Usage: "set overall and component-wise log levels",
},
}
app.Before = func(c *cli.Context) error {
absPath, err := filepath.Abs(c.String("config"))
if err != nil {
return err
}
configPath = filepath.Join(absPath, DefaultConfigFile)
identityPath = filepath.Join(absPath, DefaultIdentityFile)
err = setupLogLevel(c.Bool("debug"), c.String("loglevel"))
if err != nil {
return err
}
locker = &lock{path: absPath}
return nil
}
app.Commands = []cli.Command{
{
Name: "init",
Usage: "Creates a configuration and generates an identity",
Description: fmt.Sprintf(`
This command will initialize a new %s configuration file and, if it
does not already exist, generate a new %s for %s.
If the optional [source-url] is given, the generated configuration file
will refer to it. The source configuration will be fetched from its source
URL during the launch of the daemon. If not, a default standard configuration
file will be created.
In the latter case, a cluster secret will be generated as required
by %s. Alternatively, this secret can be manually
provided with --custom-secret (in which case you will be prompted for it), or
by setting the CLUSTER_SECRET environment variable.
The --consensus flag allows selecting an alternative consensus component
in the newly-generated configuration.
Note that the --force flag allows overwriting an existing
configuration with default values. To generate a new identity, please
remove the %s file first and clean any Raft state.
By default, an empty peerstore file will be created too. Initial contents can
be provided with the --peers flag. Depending on the chosen consensus, the
"trusted_peers" list in the "crdt" configuration section and the
"init_peerset" list in the "raft" configuration section will be prefilled to
the peer IDs in the given multiaddresses.
`,
DefaultConfigFile,
DefaultIdentityFile,
programName,
programName,
DefaultIdentityFile,
),
ArgsUsage: "[http-source-url]",
Flags: []cli.Flag{
cli.StringFlag{
Name: "consensus",
Usage: "select consensus component: 'crdt' or 'raft'",
Value: defaultConsensus,
},
cli.StringFlag{
Name: "datastore",
Usage: "select datastore component: 'badger' or 'leveldb'",
Value: defaultDatastore,
},
cli.BoolFlag{
Name: "custom-secret, s",
Usage: "prompt for the cluster secret (when no source specified)",
},
cli.StringFlag{
Name: "peers",
Usage: "comma-separated list of multiaddresses to init with (see help)",
},
cli.BoolFlag{
Name: "force, f",
Usage: "overwrite configuration without prompting",
},
cli.BoolFlag{
Name: "randomports",
Usage: "configure random ports to listen on instead of defaults",
},
},
Action: func(c *cli.Context) error {
consensus := c.String("consensus")
switch consensus {
case "raft", "crdt":
default:
checkErr("choosing consensus", errors.New("flag value must be set to 'raft' or 'crdt'"))
}
datastore := c.String("datastore")
switch datastore {
case "leveldb", "badger":
default:
checkErr("choosing datastore", errors.New("flag value must be set to 'leveldb' or 'badger'"))
}
cfgHelper := cmdutils.NewConfigHelper(configPath, identityPath, consensus, datastore)
defer cfgHelper.Manager().Shutdown() // wait for saves
configExists := false
if _, err := os.Stat(configPath); !os.IsNotExist(err) {
configExists = true
}
identityExists := false
if _, err := os.Stat(identityPath); !os.IsNotExist(err) {
identityExists = true
}
if configExists || identityExists {
// cluster might be running
// acquire lock for config folder
locker.lock()
defer locker.tryUnlock()
}
if configExists {
confirm := fmt.Sprintf(
"%s Continue? [y/n]:",
configurationOverwritePrompt,
)
// --force allows override of the prompt
if !c.Bool("force") {
if !yesNoPrompt(confirm) {
return nil
}
}
}
// Set url. If exists, it will be the only thing saved.
cfgHelper.Manager().Source = c.Args().First()
// Generate defaults for all registered components
err := cfgHelper.Manager().Default()
checkErr("generating default configuration", err)
if c.Bool("randomports") {
cfgs := cfgHelper.Configs()
cfgs.Cluster.ListenAddr, err = cmdutils.RandomizePorts(cfgs.Cluster.ListenAddr)
checkErr("randomizing ports", err)
cfgs.Restapi.HTTPListenAddr, err = cmdutils.RandomizePorts(cfgs.Restapi.HTTPListenAddr)
checkErr("randomizing ports", err)
cfgs.Ipfsproxy.ListenAddr, err = cmdutils.RandomizePorts(cfgs.Ipfsproxy.ListenAddr)
checkErr("randomizing ports", err)
cfgs.Pinsvcapi.HTTPListenAddr, err = cmdutils.RandomizePorts(cfgs.Pinsvcapi.HTTPListenAddr)
checkErr("randomizing ports", err)
}
err = cfgHelper.Manager().ApplyEnvVars()
checkErr("applying environment variables to configuration", err)
userSecret, userSecretDefined := userProvidedSecret(c.Bool("custom-secret") && !c.Args().Present())
// Set user secret
if userSecretDefined {
cfgHelper.Configs().Cluster.Secret = userSecret
}
peersOpt := c.String("peers")
var multiAddrs []ma.Multiaddr
if peersOpt != "" {
addrs := strings.Split(peersOpt, ",")
for _, addr := range addrs {
addr = strings.TrimSpace(addr)
multiAddr, err := ma.NewMultiaddr(addr)
checkErr("parsing peer multiaddress: "+addr, err)
multiAddrs = append(multiAddrs, multiAddr)
}
peers := ipfscluster.PeersFromMultiaddrs(multiAddrs)
cfgHelper.Configs().Crdt.TrustAll = false
cfgHelper.Configs().Crdt.TrustedPeers = peers
cfgHelper.Configs().Raft.InitPeerset = peers
}
// Save config. Creates the folder.
// Sets BaseDir in components.
checkErr("saving default configuration", cfgHelper.SaveConfigToDisk())
out("configuration written to %s.\n", configPath)
if !identityExists {
ident := cfgHelper.Identity()
err := ident.Default()
checkErr("generating an identity", err)
err = ident.ApplyEnvVars()
checkErr("applying environment variables to the identity", err)
err = cfgHelper.SaveIdentityToDisk()
checkErr("saving "+DefaultIdentityFile, err)
out("new identity written to %s\n", identityPath)
}
// Initialize peerstore file - even if empty
peerstorePath := cfgHelper.Configs().Cluster.GetPeerstorePath()
peerManager := pstoremgr.New(context.Background(), nil, peerstorePath)
addrInfos, err := peer.AddrInfosFromP2pAddrs(multiAddrs...)
checkErr("getting AddrInfos from peer multiaddresses", err)
err = peerManager.SavePeerstore(addrInfos)
checkErr("saving peers to peerstore", err)
if l := len(multiAddrs); l > 0 {
out("peerstore written to %s with %d entries.\n", peerstorePath, len(multiAddrs))
} else {
out("new empty peerstore written to %s.\n", peerstorePath)
}
return nil
},
},
{
Name: "daemon",
Usage: "Runs the IPFS Cluster peer (default)",
Flags: []cli.Flag{
cli.BoolFlag{
Name: "upgrade, u",
Usage: "run state migrations before starting (deprecated/unused)",
},
cli.StringFlag{
Name: "bootstrap, j",
Usage: "join a cluster providing a comma-separated list of existing peers multiaddress(es)",
},
cli.BoolFlag{
Name: "leave, x",
Usage: "remove peer from cluster on exit. Overrides \"leave_on_shutdown\"",
Hidden: true,
},
cli.BoolFlag{
Name: "stats",
Usage: "enable stats collection",
},
cli.BoolFlag{
Name: "tracing",
Usage: "enable tracing collection",
},
cli.BoolFlag{
Name: "no-trust",
Usage: "do not trust bootstrap peers (only for \"crdt\" consensus)",
},
},
Action: daemon,
},
{
Name: "state",
Usage: "Manages the peer's consensus state (pinset)",
Subcommands: []cli.Command{
{
Name: "export",
Usage: "save the state to a JSON file",
Description: `
This command dumps the current cluster pinset (state) as JSON. The
resulting file can be used to migrate, restore or back up a Cluster peer.
By default, the state will be printed to stdout.
`,
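// Usage sketch (file name is illustrative; the -f flag is declared below):
//
//   ipfs-cluster-service state export                  # JSON to stdout
//   ipfs-cluster-service state export -f pinset.json   # JSON to a file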
Flags: []cli.Flag{
cli.StringFlag{
Name: "file, f",
Value: "",
Usage: "writes to an output file",
},
},
Action: func(c *cli.Context) error {
locker.lock()
defer locker.tryUnlock()
mgr := getStateManager()
var w io.WriteCloser
var err error
outputPath := c.String("file")
if outputPath == "" {
// Output to stdout
w = os.Stdout
} else {
// Create the export file
w, err = os.Create(outputPath)
checkErr("creating output file", err)
}
buf := bufio.NewWriter(w)
defer func() {
buf.Flush()
w.Close()
}()
checkErr("exporting state", mgr.ExportState(buf))
logger.Info("state successfully exported")
return nil
},
},
{
Name: "import",
Usage: "load the state from a file produced by 'export'",
Description: `
This command reads in an exported pinset (state) file and replaces the
existing one. This can be used, for example, to restore a Cluster peer from a
backup.
If an argument is provided, it will be treated as the path of the file
to import. If no argument is provided, stdin will be used.
`,
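// Usage sketch (file name is illustrative; flags as declared below):
//
//   ipfs-cluster-service state import pinset.json
//   ipfs-cluster-service state import --rmin 2 --rmax 3 pinset.json
//   cat pinset.json | ipfs-cluster-service state import -f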
Flags: []cli.Flag{
cli.BoolFlag{
Name: "force, f",
Usage: "skips confirmation prompt",
},
cli.IntFlag{
Name: "replication-min, rmin",
Value: 0,
Usage: "Overwrite replication-factor-min for all pins on import",
},
cli.IntFlag{
Name: "replication-max, rmax",
Value: 0,
Usage: "Overwrite replication-factor-max for all pins on import",
},
cli.StringFlag{
Name: "allocations, allocs",
Usage: "Overwrite allocations for all pins on import. Comma-separated list of peer IDs",
},
},
Action: func(c *cli.Context) error {
locker.lock()
defer locker.tryUnlock()
confirm := "The pinset (state) of this peer "
confirm += "will be replaced. Continue? [y/n]:"
if !c.Bool("force") && !yesNoPrompt(confirm) {
return nil
}
// importState allows overwriting of some options on import
opts := api.PinOptions{
ReplicationFactorMin: c.Int("replication-min"),
ReplicationFactorMax: c.Int("replication-max"),
UserAllocations: api.StringsToPeers(strings.Split(c.String("allocations"), ",")),
}
mgr := getStateManager()
// Get the importing file path
importFile := c.Args().First()
var r io.ReadCloser
var err error
if importFile == "" {
r = os.Stdin
fmt.Println("reading from stdin, Ctrl-D to finish")
} else {
r, err = os.Open(importFile)
checkErr("reading import file", err)
}
defer r.Close()
buf := bufio.NewReader(r)
checkErr("importing state", mgr.ImportState(buf, opts))
logger.Info("state successfully imported. Make sure all peers have consistent states")
return nil
},
},
{
Name: "cleanup",
Usage: "remove persistent data",
Description: `
This command removes any persisted consensus data in this peer, including the
current pinset (state). The next start of the peer will be, for all practical
purposes, like a first start. Peers may need to bootstrap and sync from scratch
after this.
`,
Flags: []cli.Flag{
cli.BoolFlag{
Name: "force, f",
Usage: "skip confirmation prompt",
},
},
Action: func(c *cli.Context) error {
locker.lock()
defer locker.tryUnlock()
confirm := fmt.Sprintf(
"%s Continue? [y/n]:",
stateCleanupPrompt,
)
if !c.Bool("force") && !yesNoPrompt(confirm) {
return nil
}
mgr := getStateManager()
checkErr("cleaning state", mgr.Clean())
logger.Info("data correctly cleaned up")
return nil
},
},
},
},
{
Name: "version",
Usage: "Prints the ipfs-cluster version",
Action: func(c *cli.Context) error {
fmt.Printf("%s\n", version.Version)
return nil
},
},
}
app.Action = run
app.Run(os.Args)
}
// run is the default action: it prints the help and exits with an error.
func run(c *cli.Context) error {
cli.ShowAppHelp(c)
os.Exit(1)
return nil
}
func setupLogLevel(debug bool, l string) error {
// if debug is set to true, log everything in debug level
if debug {
ipfscluster.SetFacilityLogLevel("*", "DEBUG")
return nil
}
compLogLevel := strings.Split(l, ",")
var logLevel string
compLogFacs := make(map[string]string)
// get overall log level and component-wise log levels from arguments
for _, cll := range compLogLevel {
if cll == "" {
continue
}
identifierToLevel := strings.Split(cll, ":")
var lvl string
var comp string
switch len(identifierToLevel) {
case 1:
lvl = identifierToLevel[0]
comp = "all"
case 2:
lvl = identifierToLevel[1]
comp = identifierToLevel[0]
default:
return errors.New("log level not in expected format \"identifier:loglevel\" or \"loglevel\"")
}
_, ok := compLogFacs[comp]
if ok {
fmt.Printf("overwriting existing %s log level\n", comp)
}
compLogFacs[comp] = lvl
}
logLevel, ok := compLogFacs["all"]
if !ok {
logLevel = defaultLogLevel
} else {
delete(compLogFacs, "all")
}
// log service with logLevel
ipfscluster.SetFacilityLogLevel("service", logLevel)
logfacs := make(map[string]string)
// fill component-wise log levels
for identifier, level := range compLogFacs {
logfacs[identifier] = level
}
// Set the values for things not set by the user or for
// things set by "all".
for key := range ipfscluster.LoggingFacilities {
if _, ok := logfacs[key]; !ok {
logfacs[key] = logLevel
}
}
// For Extra facilities, set the defaults per logging.go unless
// manually set
for key, defaultLvl := range ipfscluster.LoggingFacilitiesExtra {
if _, ok := logfacs[key]; !ok {
logfacs[key] = defaultLvl
}
}
for identifier, level := range logfacs {
ipfscluster.SetFacilityLogLevel(identifier, level)
}
return nil
}
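// A hedged sketch of the spec strings setupLogLevel accepts (the facility
// names below are illustrative; any key of LoggingFacilities works):
//
//   "debug"                -> DEBUG for all registered facilities
//   "warn,cluster:debug"   -> WARN overall, DEBUG for the "cluster" facility
//   "restapi:error"        -> default level overall, ERROR for "restapi"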
func userProvidedSecret(enterSecret bool) ([]byte, bool) {
if enterSecret {
secret := promptUser("Enter cluster secret (32-byte hex string): ")
decodedSecret, err := ipfscluster.DecodeClusterSecret(secret)
checkErr("parsing user-provided secret", err)
return decodedSecret, true
}
return nil, false
}
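// Note: DecodeClusterSecret expects a 32-byte value encoded as 64 hex
// characters. One illustrative way to generate such a secret outside this
// tool (an assumption, not part of this codebase):
//
//   openssl rand -hex 32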
func promptUser(msg string) string {
scanner := bufio.NewScanner(os.Stdin)
fmt.Print(msg)
scanner.Scan()
return scanner.Text()
}
// Lifted from go-ipfs/cmd/ipfs/daemon.go
func yesNoPrompt(prompt string) bool {
var s string
for i := 0; i < 3; i++ {
fmt.Printf("%s ", prompt)
fmt.Scanf("%s", &s)
switch s {
case "y", "Y":
return true
case "n", "N":
return false
case "":
return false
}
fmt.Println("Please press either 'y' or 'n'")
}
return false
}
func getStateManager() cmdutils.StateManager {
cfgHelper, err := cmdutils.NewLoadedConfigHelper(
configPath,
identityPath,
)
checkErr("loading configurations", err)
cfgHelper.Manager().Shutdown()
mgr, err := cmdutils.NewStateManagerWithHelper(cfgHelper)
checkErr("creating state manager", err)
return mgr
}

View file

@ -1,38 +0,0 @@
package main
import (
"testing"
"github.com/ipfs-cluster/ipfs-cluster/cmdutils"
ma "github.com/multiformats/go-multiaddr"
)
func TestRandomPorts(t *testing.T) {
port := "9096"
m1, _ := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/9096")
m2, _ := ma.NewMultiaddr("/ip6/::/udp/9096")
addresses, err := cmdutils.RandomizePorts([]ma.Multiaddr{m1, m2})
if err != nil {
t.Fatal(err)
}
v1, err := addresses[0].ValueForProtocol(ma.P_TCP)
if err != nil {
t.Fatal(err)
}
v2, err := addresses[1].ValueForProtocol(ma.P_UDP)
if err != nil {
t.Fatal(err)
}
if v1 == port {
t.Error("expected different ipv4 ports")
}
if v2 == port {
t.Error("expected different ipv6 ports")
}
}

View file

@ -1,207 +0,0 @@
// Package cmdutils contains utilities to facilitate building of command line
// applications launching cluster peers.
package cmdutils
import (
"context"
"fmt"
"io"
"net"
"os"
"os/signal"
"strings"
"syscall"
"time"
"github.com/ipfs/go-datastore"
ipfscluster "github.com/ipfs-cluster/ipfs-cluster"
ipfshttp "github.com/ipfs-cluster/ipfs-cluster/ipfsconn/ipfshttp"
host "github.com/libp2p/go-libp2p/core/host"
dual "github.com/libp2p/go-libp2p-kad-dht/dual"
ma "github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
"go.uber.org/multierr"
)
// RandomizePorts replaces TCP and UDP ports with random (but valid) port
// values on the given multiaddresses.
func RandomizePorts(addrs []ma.Multiaddr) ([]ma.Multiaddr, error) {
results := make([]ma.Multiaddr, 0, len(addrs))
for _, m := range addrs {
var prev string
var err error
components := []ma.Multiaddr{}
ma.ForEach(m, func(c ma.Component) bool {
code := c.Protocol().Code
if code != ma.P_TCP && code != ma.P_UDP {
components = append(components, &c)
prev = c.Value()
return true
}
var ln io.Closer
var port int
ip := prev
if strings.Contains(ip, ":") { // ipv6 needs bracketing
ip = "[" + ip + "]"
}
if c.Protocol().Code == ma.P_UDP {
ln, port, err = listenUDP(c.Protocol().Name, ip)
} else {
ln, port, err = listenTCP(c.Protocol().Name, ip)
}
if err != nil {
return false
}
defer ln.Close()
var c1 *ma.Component
c1, err = ma.NewComponent(c.Protocol().Name, fmt.Sprintf("%d", port))
if err != nil {
return false
}
components = append(components, c1)
prev = c.Value()
return true
})
if err != nil {
return results, err
}
results = append(results, ma.Join(components...))
}
return results, nil
}
// returns the listener (so it can be closed later) and the bound port
func listenTCP(name, ip string) (io.Closer, int, error) {
ln, err := net.Listen(name, ip+":0")
if err != nil {
return nil, 0, err
}
return ln, ln.Addr().(*net.TCPAddr).Port, nil
}
// returns the listener (so it can be closed later) and the bound port
func listenUDP(name, ip string) (io.Closer, int, error) {
ln, err := net.ListenPacket(name, ip+":0")
if err != nil {
return nil, 0, err
}
return ln, ln.LocalAddr().(*net.UDPAddr).Port, nil
}
// HandleSignals performs an orderly shutdown of an IPFS Cluster peer on
// SIGINT, SIGTERM or SIGHUP. It forces command termination on the third
// signal.
func HandleSignals(
ctx context.Context,
cancel context.CancelFunc,
cluster *ipfscluster.Cluster,
host host.Host,
dht *dual.DHT,
store datastore.Datastore,
) error {
signalChan := make(chan os.Signal, 20)
signal.Notify(
signalChan,
syscall.SIGINT,
syscall.SIGTERM,
syscall.SIGHUP,
)
var ctrlcCount int
for {
select {
case <-signalChan:
ctrlcCount++
handleCtrlC(ctx, cluster, ctrlcCount)
case <-cluster.Done():
cancel()
return multierr.Combine(
dht.Close(),
host.Close(),
store.Close(),
)
}
}
}
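// Wiring sketch (hypothetical caller; variable names are illustrative).
// HandleSignals blocks until the cluster is done, so it is normally the
// last call in a daemon's run function:
//
//   ctx, cancel := context.WithCancel(context.Background())
//   // ... build host, dht, store and start the cluster ...
//   return cmdutils.HandleSignals(ctx, cancel, cluster, host, dht, store)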
func handleCtrlC(ctx context.Context, cluster *ipfscluster.Cluster, ctrlcCount int) {
switch ctrlcCount {
case 1:
go func() {
if err := cluster.Shutdown(ctx); err != nil {
ErrorOut("error shutting down cluster: %s", err)
os.Exit(1)
}
}()
case 2:
ErrorOut(`
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
Shutdown is taking too long! Press Ctrl-c again to manually kill cluster.
Note that this may corrupt the local cluster state.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
`)
case 3:
ErrorOut("exiting cluster NOW")
os.Exit(1)
}
}
// ErrorOut formats a message and prints it to stderr.
func ErrorOut(m string, a ...interface{}) {
fmt.Fprintf(os.Stderr, m, a...)
}
// WaitForIPFS hangs until the IPFS API becomes available or the given context
// is canceled. The IPFS API location is determined by the default ipfshttp
// component configuration and can be overridden using environment variables
// that affect that configuration. Note that we have to do this blindly, since
// we want to wait for IPFS before we even fetch the IPFS component
// configuration (the configuration might be hosted on IPFS itself).
func WaitForIPFS(ctx context.Context) error {
ipfshttpCfg := ipfshttp.Config{}
ipfshttpCfg.Default()
ipfshttpCfg.ApplyEnvVars()
ipfshttpCfg.ConnectSwarmsDelay = 0
ipfshttpCfg.Tracing = false
ipfscluster.SetFacilityLogLevel("ipfshttp", "critical")
defer ipfscluster.SetFacilityLogLevel("ipfshttp", "info")
ipfs, err := ipfshttp.NewConnector(&ipfshttpCfg)
if err != nil {
return errors.Wrap(err, "error creating an ipfshttp instance to wait for IPFS")
}
i := 0
for {
select {
case <-ctx.Done():
return ctx.Err()
default:
if i%10 == 0 {
fmt.Printf("waiting for IPFS to become available on %s...\n", ipfshttpCfg.NodeAddr)
}
i++
time.Sleep(time.Second)
_, err := ipfs.ID(ctx)
if err == nil {
// sleep an extra second and quit
time.Sleep(time.Second)
return nil
}
}
}
}
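// Usage sketch (hypothetical caller; the timeout value is illustrative):
//
//   ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
//   defer cancel()
//   if err := cmdutils.WaitForIPFS(ctx); err != nil {
//       return err // IPFS did not come up in time
//   }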

Some files were not shown because too many files have changed in this diff.