// Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"context"
	"crypto/ecdsa"
	"flag"
	"fmt"
	"io/ioutil"
	"net"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"sync"
	"syscall"
	"testing"
	"time"

	"github.com/docker/docker/pkg/reexec"
	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/accounts/keystore"
	"github.com/ethereum/go-ethereum/internal/cmdtest"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/swarm"
	"github.com/ethereum/go-ethereum/swarm/api"
	swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
)

var loglevel = flag.Int("loglevel", 3, "verbosity of logs")

func init() {
	// Run the app if we've been exec'd as "swarm-test" in runSwarm.
	reexec.Register("swarm-test", func() {
		if err := app.Run(os.Args); err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		os.Exit(0)
	})
}

const clusterSize = 3

func serverFunc(api *api.API) swarmhttp.TestServer {
	return swarmhttp.NewServer(api, "")
}

func TestMain(m *testing.M) {
	// check if we have been reexec'd
	if reexec.Init() {
		return
	}
	os.Exit(m.Run())
}

func runSwarm(t *testing.T, args ...string) *cmdtest.TestCmd {
	tt := cmdtest.NewTestCmd(t, nil)

	found := false
	for _, v := range args {
		if v == "--bootnodes" {
			found = true
			break
		}
	}
	if !found {
		args = append([]string{"--bootnodes", ""}, args...)
	}

	// Boot "swarm". This actually runs the test binary, but the TestMain
	// function will prevent any tests from running.
	tt.Run("swarm-test", args...)

	return tt
}
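
// runSwarmVersionExample is a minimal usage sketch (hypothetical, not part of
// the original suite): runSwarm returns the spawned command so the caller
// drives its lifecycle, e.g. booting the re-exec'd binary with a single
// subcommand and waiting for it to exit via the cmdtest helpers used
// elsewhere in this file.
func runSwarmVersionExample(t *testing.T) {
	cmd := runSwarm(t, "version")
	// block until the child process exits on its own
	cmd.ExpectExit()
}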

type testCluster struct {
	Nodes  []*testNode
	TmpDir string
}

// newTestCluster starts a test swarm cluster of the given size.
//
// A temporary directory is created and each node gets a data directory inside
// it.
//
// Each node listens on 127.0.0.1 with random ports for both the HTTP and p2p
// ports (assigned by first listening on 127.0.0.1:0 and then passing the ports
// as flags).
//
// When starting more than one node, they are connected together using the
// admin_addPeer RPC method.
func newTestCluster(t *testing.T, size int) *testCluster {
	cluster := &testCluster{}
	defer func() {
		if t.Failed() {
			cluster.Shutdown()
		}
	}()

	tmpdir, err := ioutil.TempDir("", "swarm-test")
	if err != nil {
		t.Fatal(err)
	}
	cluster.TmpDir = tmpdir

	// start the nodes
	cluster.StartNewNodes(t, size)

	if size == 1 {
		return cluster
	}

	// connect the nodes together
	for _, node := range cluster.Nodes {
		if err := node.Client.Call(nil, "admin_addPeer", cluster.Nodes[0].Enode); err != nil {
			t.Fatal(err)
		}
	}

	// wait until all nodes have the correct number of peers
outer:
	for _, node := range cluster.Nodes {
		var peers []*p2p.PeerInfo
		for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(50 * time.Millisecond) {
			if err := node.Client.Call(&peers, "admin_peers"); err != nil {
				t.Fatal(err)
			}
			if len(peers) == len(cluster.Nodes)-1 {
				continue outer
			}
		}
		t.Fatalf("%s only has %d / %d peers", node.Name, len(peers), len(cluster.Nodes)-1)
	}

	return cluster
}
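
// exampleClusterUsage is a minimal sketch (hypothetical, not part of the
// original suite) of how a test would typically use the cluster: start it,
// defer teardown, and talk to any node over its RPC client.
func exampleClusterUsage(t *testing.T) {
	cluster := newTestCluster(t, clusterSize)
	defer cluster.Shutdown()

	// every node exposes the swarm RPC API over its IPC endpoint
	var info swarm.Info
	if err := cluster.Nodes[0].Client.Call(&info, "bzz_info"); err != nil {
		t.Fatal(err)
	}
}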

func (c *testCluster) Shutdown() {
	c.Stop()
	c.Cleanup()
}

func (c *testCluster) Stop() {
	for _, node := range c.Nodes {
		node.Shutdown()
	}
}

func (c *testCluster) StartNewNodes(t *testing.T, size int) {
	c.Nodes = make([]*testNode, 0, size)

	errors := make(chan error, size)
	nodes := make(chan *testNode, size)
	for i := 0; i < size; i++ {
		go func(nodeIndex int) {
			dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", nodeIndex))
			if err := os.Mkdir(dir, 0700); err != nil {
				errors <- err
				return
			}

			node := newTestNode(t, dir)
			node.Name = fmt.Sprintf("swarm%02d", nodeIndex)
			nodes <- node
		}(i)
	}

	for i := 0; i < size; i++ {
		select {
		case node := <-nodes:
			c.Nodes = append(c.Nodes, node)
		case err := <-errors:
			t.Error(err)
		}
	}

	if t.Failed() {
		c.Shutdown()
		t.FailNow()
	}
}

func (c *testCluster) StartExistingNodes(t *testing.T, size int, bzzaccount string) {
	c.Nodes = make([]*testNode, 0, size)
	for i := 0; i < size; i++ {
		dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i))
		node := existingTestNode(t, dir, bzzaccount)
		node.Name = fmt.Sprintf("swarm%02d", i)
		c.Nodes = append(c.Nodes, node)
	}
}

func (c *testCluster) Cleanup() {
	os.RemoveAll(c.TmpDir)
}
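
// restartClusterExample is a hedged sketch (hypothetical) of the restart flow
// that Stop and StartExistingNodes enable together: stop all running nodes,
// then bring the same data directories back up under an account that already
// exists in one of the node keystores.
func restartClusterExample(t *testing.T, cluster *testCluster, bzzaccount string) {
	cluster.Stop()
	cluster.StartExistingNodes(t, clusterSize, bzzaccount)
}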

type testNode struct {
	Name       string
	Addr       string
	URL        string
	Enode      string
	Dir        string
	IpcPath    string
	PrivateKey *ecdsa.PrivateKey
	Client     *rpc.Client
	Cmd        *cmdtest.TestCmd
}

const testPassphrase = "swarm-test-passphrase"

func getTestAccount(t *testing.T, dir string) (conf *node.Config, account accounts.Account) {
	// create key
	conf = &node.Config{
		DataDir: dir,
		IPCPath: "bzzd.ipc",
		NoUSB:   true,
	}
	n, err := node.New(conf)
	if err != nil {
		t.Fatal(err)
	}
	account, err = n.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore).NewAccount(testPassphrase)
	if err != nil {
		t.Fatal(err)
	}

	// use a unique IPCPath when running tests on Windows
	if runtime.GOOS == "windows" {
		conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", account.Address.String())
	}

	return conf, account
}

func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode {
	conf, _ := getTestAccount(t, dir)
	node := &testNode{Dir: dir}

	// use a unique IPCPath when running tests on Windows
	if runtime.GOOS == "windows" {
		conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", bzzaccount)
	}

	// assign ports
	ports, err := getAvailableTCPPorts(2)
	if err != nil {
		t.Fatal(err)
	}
	p2pPort := ports[0]
	httpPort := ports[1]

	// start the node
	node.Cmd = runSwarm(t,
		"--bootnodes", "",
		"--port", p2pPort,
		"--nat", "extip:127.0.0.1",
		"--datadir", dir,
		"--ipcpath", conf.IPCPath,
		"--ens-api", "",
		"--bzzaccount", bzzaccount,
		"--bzznetworkid", "321",
		"--bzzport", httpPort,
		"--verbosity", fmt.Sprint(*loglevel),
	)
	node.Cmd.InputLine(testPassphrase)
	defer func() {
		if t.Failed() {
			node.Shutdown()
		}
	}()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// ensure that all ports have active listeners
	// so that the next node will not get the same
	// ports when calling getAvailableTCPPorts
	err = waitTCPPorts(ctx, ports...)
	if err != nil {
		t.Fatal(err)
	}

	// wait for the node to start
	for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
		node.Client, err = rpc.Dial(conf.IPCEndpoint())
		if err == nil {
			break
		}
	}
	if node.Client == nil {
		t.Fatal(err)
	}

	// load info
	var info swarm.Info
	if err := node.Client.Call(&info, "bzz_info"); err != nil {
		t.Fatal(err)
	}
	node.Addr = net.JoinHostPort("127.0.0.1", info.Port)
	node.URL = "http://" + node.Addr

	var nodeInfo p2p.NodeInfo
	if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil {
		t.Fatal(err)
	}
	node.Enode = nodeInfo.Enode
	node.IpcPath = conf.IPCPath
	return node
}

func newTestNode(t *testing.T, dir string) *testNode {
	conf, account := getTestAccount(t, dir)
	ks := keystore.NewKeyStore(path.Join(dir, "keystore"), 1<<18, 1)

	pk := decryptStoreAccount(ks, account.Address.Hex(), []string{testPassphrase})

	node := &testNode{Dir: dir, PrivateKey: pk}

	// assign ports
	ports, err := getAvailableTCPPorts(2)
	if err != nil {
		t.Fatal(err)
	}
	p2pPort := ports[0]
	httpPort := ports[1]

	// start the node
	node.Cmd = runSwarm(t,
		"--bootnodes", "",
		"--port", p2pPort,
		"--nat", "extip:127.0.0.1",
		"--datadir", dir,
		"--ipcpath", conf.IPCPath,
		"--ens-api", "",
		"--bzzaccount", account.Address.String(),
		"--bzznetworkid", "321",
		"--bzzport", httpPort,
		"--verbosity", fmt.Sprint(*loglevel),
	)
	node.Cmd.InputLine(testPassphrase)
	defer func() {
		if t.Failed() {
			node.Shutdown()
		}
	}()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// ensure that all ports have active listeners
	// so that the next node will not get the same
	// ports when calling getAvailableTCPPorts
	err = waitTCPPorts(ctx, ports...)
	if err != nil {
		t.Fatal(err)
	}

	// wait for the node to start
	for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
		node.Client, err = rpc.Dial(conf.IPCEndpoint())
		if err == nil {
			break
		}
	}
	if node.Client == nil {
		t.Fatal(err)
	}

	// load info
	var info swarm.Info
	if err := node.Client.Call(&info, "bzz_info"); err != nil {
		t.Fatal(err)
	}
	node.Addr = net.JoinHostPort("127.0.0.1", info.Port)
	node.URL = "http://" + node.Addr

	var nodeInfo p2p.NodeInfo
	if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil {
		t.Fatal(err)
	}
	node.Enode = nodeInfo.Enode
	node.IpcPath = conf.IPCPath
	return node
}

func (n *testNode) Shutdown() {
	if n.Cmd != nil {
		n.Cmd.Kill()
	}
}

// getAvailableTCPPorts returns a set of ports that
// nothing is listening on at the time.
//
// Note that getAvailableTCPPorts cannot guarantee that
// separate calls return distinct ports: each listener is
// closed when the function returns, rather than after all
// listeners are started and unique available ports are
// selected, so a later call may pick the same port again.
func getAvailableTCPPorts(count int) (ports []string, err error) {
	for i := 0; i < count; i++ {
		l, err := net.Listen("tcp", "127.0.0.1:0")
		if err != nil {
			return nil, err
		}
		// defer close in the loop to be sure the same port will not
		// be selected in the next iteration
		defer l.Close()

		_, port, err := net.SplitHostPort(l.Addr().String())
		if err != nil {
			return nil, err
		}
		ports = append(ports, port)
	}
	return ports, nil
}
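
// reserveTCPListeners is a hypothetical alternative sketch addressing the
// caveat above: it hands the open listeners back to the caller, who keeps the
// ports occupied until the real servers are about to bind them, so separate
// reservations cannot collide. It illustrates the trade-off rather than being
// a drop-in replacement, since the swarm process itself could then fail to
// bind a port that is still held.
func reserveTCPListeners(count int) (listeners []net.Listener, ports []string, err error) {
	// close any listeners opened so far before bailing out on error
	closeAll := func() {
		for _, l := range listeners {
			l.Close()
		}
	}
	for i := 0; i < count; i++ {
		l, err := net.Listen("tcp", "127.0.0.1:0")
		if err != nil {
			closeAll()
			return nil, nil, err
		}
		listeners = append(listeners, l)

		_, port, err := net.SplitHostPort(l.Addr().String())
		if err != nil {
			closeAll()
			return nil, nil, err
		}
		ports = append(ports, port)
	}
	return listeners, ports, nil
}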

// waitTCPPorts blocks until tcp connections can be
// established on all provided ports. It runs the
// dialers for all ports in parallel, and returns
// the first encountered error.
// See waitTCPPort also.
func waitTCPPorts(ctx context.Context, ports ...string) error {
	var err error
	// mu guards the err variable, which is assigned
	// from other goroutines
	var mu sync.Mutex

	// cancel cancels all remaining goroutines
	// when the first error is returned,
	// to prevent unnecessary waiting
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	var wg sync.WaitGroup
	for _, port := range ports {
		wg.Add(1)
		go func(port string) {
			defer wg.Done()

			e := waitTCPPort(ctx, port)

			mu.Lock()
			defer mu.Unlock()
			if e != nil && err == nil {
				err = e
				cancel()
			}
		}(port)
	}
	wg.Wait()

	return err
}

// waitTCPPort blocks until a tcp connection can be established
// on the provided port. It waits at most 3 minutes to prevent
// indefinite blocking, but the wait can be shortened by passing
// a context with an earlier deadline. The dialer has a 10 second
// timeout on every attempt, and "connection refused" errors are
// retried every 100 milliseconds.
func waitTCPPort(ctx context.Context, port string) error {
	ctx, cancel := context.WithTimeout(ctx, 3*time.Minute)
	defer cancel()

	for {
		c, err := (&net.Dialer{Timeout: 10 * time.Second}).DialContext(ctx, "tcp", "127.0.0.1:"+port)
		if err != nil {
			if operr, ok := err.(*net.OpError); ok {
				if syserr, ok := operr.Err.(*os.SyscallError); ok && syserr.Err == syscall.ECONNREFUSED {
					time.Sleep(100 * time.Millisecond)
					continue
				}
			}
			return err
		}
		return c.Close()
	}
}
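
// waitTCPPortBriefly is a small sketch (hypothetical) of shortening
// waitTCPPort's 3 minute ceiling: a caller-supplied context with an earlier
// deadline wins, because the derived context inside waitTCPPort inherits the
// caller's cancellation.
func waitTCPPortBriefly(port string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	return waitTCPPort(ctx, port)
}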