builder.go
    package core
    
    import (
    	"context"
    	"crypto/rand"
    	"encoding/base64"
    	"errors"
    	"os"
    	"syscall"
    	"time"
    
    	filestore "github.com/ipfs/go-ipfs/filestore"
    	namesys "github.com/ipfs/go-ipfs/namesys"
    	pin "github.com/ipfs/go-ipfs/pin"
    	repo "github.com/ipfs/go-ipfs/repo"
    	cidv0v1 "github.com/ipfs/go-ipfs/thirdparty/cidv0v1"
    	"github.com/ipfs/go-ipfs/thirdparty/verifbs"
    
    	ci "gx/ipfs/QmNiJiXwWE3kRhZrC5ej3kSjWHm337pYfhjLGSCDNKJP2s/go-libp2p-crypto"
    	pstore "gx/ipfs/QmPiemjiKBC9VA7vZF82m4x1oygtg2c2YVqag8PX7dN1BD/go-libp2p-peerstore"
    	pstoremem "gx/ipfs/QmPiemjiKBC9VA7vZF82m4x1oygtg2c2YVqag8PX7dN1BD/go-libp2p-peerstore/pstoremem"
    	uio "gx/ipfs/QmQ1JnYpnzkaurjW1yxkQxC2w3K1PorNE1nv1vaP5Le7sq/go-unixfs/io"
    	bstore "gx/ipfs/QmS2aqUZLJp8kF1ihE5rvDGE5LvmKDPnx32w9Z1BW9xLV5/go-ipfs-blockstore"
    	goprocessctx "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context"
    	bserv "gx/ipfs/QmVKQHuzni68SWByzJgBUCwHvvr4TWiXfutNWWwpZpp4rE/go-blockservice"
    	offroute "gx/ipfs/QmVZ6cQXHoTQja4oo9GhhHZi7dThi4x98mRKgGtKnTy37u/go-ipfs-routing/offline"
    	ipns "gx/ipfs/QmWPFehHmySCdaGttQ48iwF7M6mBRrGE5GSPWKCuMWqJDR/go-ipns"
    	resolver "gx/ipfs/QmWqh9oob7ZHQRwU5CdTqpnC8ip8BEkFNrwXRxeNo5Y7vA/go-path/resolver"
    	peer "gx/ipfs/QmY5Grm8pJdiSSVsYxx4uNRgweY72EmYwuSDbRnbFok3iY/go-libp2p-peer"
    	offline "gx/ipfs/QmYZwey1thDTynSrvd6qQkX24UpTka6TFhQ2v569UpoqxD/go-ipfs-exchange-offline"
    	libp2p "gx/ipfs/QmYxivS34F2M2n44WQQnRHGAKS8aoRUxwGpi9wk4Cdn4Jf/go-libp2p"
    	p2phost "gx/ipfs/QmaoXrM4Z41PD48JY36YqQGKQpLGjyLA2cKcLsES7YddAq/go-libp2p-host"
    	dag "gx/ipfs/Qmb2UEG2TAeVrEJSjqsZF7Y2he7wRDkrdt6c3bECxwZf4k/go-merkledag"
    	cfg "gx/ipfs/QmcRKBUqc2p3L1ZraoJjbXfs9E6xzvEuyK9iypb5RGwfsr/go-ipfs-config"
    	metrics "gx/ipfs/QmekzFM3hPZjTjUFGTABdQkEnQ3PTiMstY198PwSFr5w1Q/go-metrics-interface"
    	ds "gx/ipfs/Qmf4xQhNomPNhrtZc67qSnfJSjxjXs9LWvknJtSXwimPrM/go-datastore"
    	retry "gx/ipfs/Qmf4xQhNomPNhrtZc67qSnfJSjxjXs9LWvknJtSXwimPrM/go-datastore/retrystore"
    	dsync "gx/ipfs/Qmf4xQhNomPNhrtZc67qSnfJSjxjXs9LWvknJtSXwimPrM/go-datastore/sync"
    	record "gx/ipfs/QmfARXVCzpwFXQdepAJZuqyNDgV9doEsMnVCo1ssmuSe1U/go-libp2p-record"
    )
    
    type BuildCfg struct {
    	// If Online is set, the node will have networking enabled
    	Online bool
    
    	// ExtraOpts is a map of extra options used to configure the ipfs node's creation
    	ExtraOpts map[string]bool
    
    	// If Permanent is set, the node should run more expensive processes
    	// that will improve performance in the long run
    	Permanent bool
    
    	// DisableEncryptedConnections disables connection encryption *entirely*.
    	// DO NOT SET THIS UNLESS YOU'RE TESTING.
    	DisableEncryptedConnections bool
    
    	// If NilRepo is set, a repo backed by a null datastore will be constructed
    	NilRepo bool
    
    	Routing RoutingOption
    	Host    HostOption
    	Repo    repo.Repo
    }
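
    // Illustrative sketch (not part of the original file): a BuildCfg for an
    // in-memory, offline test node. The ExtraOpts keys shown ("pubsub",
    // "ipnsps" and "mplex") are the ones setupNode consults via getOpt.
    var exampleTestCfg = &BuildCfg{
    	Online:    false,
    	NilRepo:   true,
    	ExtraOpts: map[string]bool{"pubsub": false, "ipnsps": false, "mplex": false},
    }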
    
    func (cfg *BuildCfg) getOpt(key string) bool {
    	if cfg.ExtraOpts == nil {
    		return false
    	}
    
    	return cfg.ExtraOpts[key]
    }
    
    func (cfg *BuildCfg) fillDefaults() error {
    	if cfg.Repo != nil && cfg.NilRepo {
    		return errors.New("cannot set a repo and specify nilrepo at the same time")
    	}
    
    	if cfg.Repo == nil {
    		var d ds.Datastore = ds.NewMapDatastore()
    
    		if cfg.NilRepo {
    			d = ds.NewNullDatastore()
    		}
    		r, err := defaultRepo(dsync.MutexWrap(d))
    		if err != nil {
    			return err
    		}
    		cfg.Repo = r
    	}
    
    	if cfg.Routing == nil {
    		cfg.Routing = DHTOption
    	}
    
    	if cfg.Host == nil {
    		cfg.Host = DefaultHostOption
    	}
    
    	return nil
    }
    
    func defaultRepo(dstore repo.Datastore) (repo.Repo, error) {
    	c := cfg.Config{}
    	priv, pub, err := ci.GenerateKeyPairWithReader(ci.RSA, 1024, rand.Reader)
    	if err != nil {
    		return nil, err
    	}
    
    	pid, err := peer.IDFromPublicKey(pub)
    	if err != nil {
    		return nil, err
    	}
    
    	privkeyb, err := priv.Bytes()
    	if err != nil {
    		return nil, err
    	}
    
    	c.Bootstrap = cfg.DefaultBootstrapAddresses
    	c.Addresses.Swarm = []string{"/ip4/0.0.0.0/tcp/4001"}
    	c.Identity.PeerID = pid.Pretty()
    	c.Identity.PrivKey = base64.StdEncoding.EncodeToString(privkeyb)
    
    	return &repo.Mock{
    		D: dstore,
    		C: c,
    	}, nil
    }
    
    // NewNode constructs and returns an IpfsNode using the given cfg.
    func NewNode(ctx context.Context, cfg *BuildCfg) (*IpfsNode, error) {
    	if cfg == nil {
    		cfg = new(BuildCfg)
    	}
    
    	err := cfg.fillDefaults()
    	if err != nil {
    		return nil, err
    	}
    
    	ctx = metrics.CtxScope(ctx, "ipfs")
    
    	n := &IpfsNode{
    		mode:      offlineMode,
    		Repo:      cfg.Repo,
    		ctx:       ctx,
    		Peerstore: pstoremem.NewPeerstore(),
    	}
    
    	n.RecordValidator = record.NamespacedValidator{
    		"pk":   record.PublicKeyValidator{},
    		"ipns": ipns.Validator{KeyBook: n.Peerstore},
    	}
    
    	if cfg.Online {
    		n.mode = onlineMode
    	}
    
    	// TODO: this is a weird circular-ish dependency, rework it
    	n.proc = goprocessctx.WithContextAndTeardown(ctx, n.teardown)
    
    	if err := setupNode(ctx, n, cfg); err != nil {
    		n.Close()
    		return nil, err
    	}
    
    	return n, nil
    }
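
    // exampleOfflineNode is an illustrative sketch (not part of the original
    // file): it builds an offline node whose repo is backed by the null
    // datastore, which is what fillDefaults constructs when NilRepo is set
    // and no Repo is supplied.
    func exampleOfflineNode(ctx context.Context) (*IpfsNode, error) {
    	return NewNode(ctx, &BuildCfg{
    		Online:  false,
    		NilRepo: true,
    	})
    }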
    
    func isTooManyFDError(err error) bool {
    	perr, ok := err.(*os.PathError)
    	if ok && perr.Err == syscall.EMFILE {
    		return true
    	}
    
    	return false
    }
    
    func setupNode(ctx context.Context, n *IpfsNode, cfg *BuildCfg) error {
    	// setup local identity
    	if err := n.loadID(); err != nil {
    		return err
    	}
    
    	// load the private key (if present)
    	if err := n.loadPrivateKey(); err != nil {
    		return err
    	}
    
    	rds := &retry.Datastore{
    		Batching:    n.Repo.Datastore(),
    		Delay:       time.Millisecond * 200,
    		Retries:     6,
    		TempErrFunc: isTooManyFDError,
    	}
    
    	// hash security: wrap the blockstore so CIDs are validated before blocks are read or written
    	bs := bstore.NewBlockstore(rds)
    	bs = &verifbs.VerifBS{Blockstore: bs}
    
    	opts := bstore.DefaultCacheOpts()
    	conf, err := n.Repo.Config()
    	if err != nil {
    		return err
    	}
    
    	// TEMP: setting global sharding switch here
    	uio.UseHAMTSharding = conf.Experimental.ShardingEnabled
    
    	opts.HasBloomFilterSize = conf.Datastore.BloomFilterSize
    	if !cfg.Permanent {
    		opts.HasBloomFilterSize = 0
    	}
    
    	if !cfg.NilRepo {
    		bs, err = bstore.CachedBlockstore(ctx, bs, opts)
    		if err != nil {
    			return err
    		}
    	}
    
    	bs = bstore.NewIdStore(bs)
    
    	bs = cidv0v1.NewBlockstore(bs)
    
    	n.BaseBlocks = bs
    	n.GCLocker = bstore.NewGCLocker()
    	n.Blockstore = bstore.NewGCBlockstore(bs, n.GCLocker)
    
    	if conf.Experimental.FilestoreEnabled || conf.Experimental.UrlstoreEnabled {
    		// hash security: apply the same CID validation to the filestore-backed blockstore
    		n.Filestore = filestore.NewFilestore(bs, n.Repo.FileManager())
    		n.Blockstore = bstore.NewGCBlockstore(n.Filestore, n.GCLocker)
    		n.Blockstore = &verifbs.VerifBSGC{GCBlockstore: n.Blockstore}
    	}
    
    	rcfg, err := n.Repo.Config()
    	if err != nil {
    		return err
    	}
    
    	if rcfg.Datastore.HashOnRead {
    		bs.HashOnRead(true)
    	}
    
    	hostOption := cfg.Host
    	if cfg.DisableEncryptedConnections {
    		innerHostOption := hostOption
    		hostOption = func(ctx context.Context, id peer.ID, ps pstore.Peerstore, options ...libp2p.Option) (p2phost.Host, error) {
    			return innerHostOption(ctx, id, ps, append(options, libp2p.NoSecurity)...)
    		}
    		log.Warningf(`Your IPFS node has been configured to run WITHOUT ENCRYPTED CONNECTIONS.
    		You will not be able to connect to any nodes configured to use encrypted connections.`)
    	}
    
    	if cfg.Online {
    		do := setupDiscoveryOption(rcfg.Discovery)
    		if err := n.startOnlineServices(ctx, cfg.Routing, hostOption, do, cfg.getOpt("pubsub"), cfg.getOpt("ipnsps"), cfg.getOpt("mplex")); err != nil {
    			return err
    		}
    	} else {
    		n.Exchange = offline.Exchange(n.Blockstore)
    		n.Routing = offroute.NewOfflineRouter(n.Repo.Datastore(), n.RecordValidator)
    		n.Namesys = namesys.NewNameSystem(n.Routing, n.Repo.Datastore(), 0)
    	}
    
    	n.Blocks = bserv.New(n.Blockstore, n.Exchange)
    	n.DAG = dag.NewDAGService(n.Blocks)
    
    	internalDag := dag.NewDAGService(bserv.New(n.Blockstore, offline.Exchange(n.Blockstore)))
    	n.Pinning, err = pin.LoadPinner(n.Repo.Datastore(), n.DAG, internalDag)
    	if err != nil {
    		// TODO: we should move towards only running 'NewPinner' explicitly on
    		// node init instead of implicitly here as a result of the pinner keys
    		// not being found in the datastore.
    		// this is kinda sketchy and could cause data loss
    		n.Pinning = pin.NewPinner(n.Repo.Datastore(), n.DAG, internalDag)
    	}
    	n.Resolver = resolver.NewBasicResolver(n.DAG)
    
    	if cfg.Online {
    		if err := n.startLateOnlineServices(ctx); err != nil {
    			return err
    		}
    	}
    
    	return n.loadFilesRoot()
    }