run_test.go

// Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"context"
	"crypto/ecdsa"
	"flag"
	"fmt"
	"io/ioutil"
	"net"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"sync"
	"syscall"
	"testing"
	"time"

	"github.com/docker/docker/pkg/reexec"
	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/accounts/keystore"
	"github.com/ethereum/go-ethereum/internal/cmdtest"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/swarm"
	"github.com/ethereum/go-ethereum/swarm/api"
	swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
)

var loglevel = flag.Int("loglevel", 3, "verbosity of logs")

func init() {
	// Run the app if we've been exec'd as "swarm-test" in runSwarm.
	reexec.Register("swarm-test", func() {
		if err := app.Run(os.Args); err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		os.Exit(0)
	})
}

const clusterSize = 3

func serverFunc(api *api.API) swarmhttp.TestServer {
	return swarmhttp.NewServer(api, "")
}

func TestMain(m *testing.M) {
	// check if we have been reexec'd
	if reexec.Init() {
		return
	}
	os.Exit(m.Run())
}

func runSwarm(t *testing.T, args ...string) *cmdtest.TestCmd {
	tt := cmdtest.NewTestCmd(t, nil)

	found := false
	for _, v := range args {
		if v == "--bootnodes" {
			found = true
			break
		}
	}

	if !found {
		args = append([]string{"--bootnodes", ""}, args...)
	}

	// Boot "swarm". This actually runs the test binary but the TestMain
	// function will prevent any tests from running.
	tt.Run("swarm-test", args...)

	return tt
}
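
// A minimal usage sketch for runSwarm (TestSwarmHelpExample is a
// hypothetical test, not part of the real suite): spawn the re-exec'd
// "swarm" binary and wait for it to exit via cmdtest's ExpectExit.
func TestSwarmHelpExample(t *testing.T) {
	t.Skip("illustrative sketch only")
	cmd := runSwarm(t, "--help")
	cmd.ExpectExit()
}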

type testCluster struct {
	Nodes  []*testNode
	TmpDir string
}

// newTestCluster starts a test swarm cluster of the given size.
//
// A temporary directory is created and each node gets a data directory inside
// it.
//
// Each node listens on 127.0.0.1 with random ports for both the HTTP and p2p
// ports (assigned by first listening on 127.0.0.1:0 and then passing the ports
// as flags).
//
// When starting more than one node, they are connected together using the
// admin_addPeer RPC method.
func newTestCluster(t *testing.T, size int) *testCluster {
	cluster := &testCluster{}
	defer func() {
		if t.Failed() {
			cluster.Shutdown()
		}
	}()

	tmpdir, err := ioutil.TempDir("", "swarm-test")
	if err != nil {
		t.Fatal(err)
	}
	cluster.TmpDir = tmpdir

	// start the nodes
	cluster.StartNewNodes(t, size)

	if size == 1 {
		return cluster
	}

	// connect the nodes together
	for _, node := range cluster.Nodes {
		if err := node.Client.Call(nil, "admin_addPeer", cluster.Nodes[0].Enode); err != nil {
			t.Fatal(err)
		}
	}

	// wait until all nodes have the correct number of peers
outer:
	for _, node := range cluster.Nodes {
		var peers []*p2p.PeerInfo
		for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(50 * time.Millisecond) {
			if err := node.Client.Call(&peers, "admin_peers"); err != nil {
				t.Fatal(err)
			}
			if len(peers) == len(cluster.Nodes)-1 {
				continue outer
			}
		}
		t.Fatalf("%s only has %d / %d peers", node.Name, len(peers), len(cluster.Nodes)-1)
	}

	return cluster
}
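
// A minimal usage sketch for newTestCluster (TestClusterExample is a
// hypothetical test, not part of the real suite): start a cluster, make
// sure it is torn down, and query one node over its IPC RPC client. Each
// node also serves the HTTP gateway at node.URL.
func TestClusterExample(t *testing.T) {
	t.Skip("illustrative sketch only")
	cluster := newTestCluster(t, clusterSize)
	defer cluster.Shutdown()

	var info swarm.Info
	if err := cluster.Nodes[0].Client.Call(&info, "bzz_info"); err != nil {
		t.Fatal(err)
	}
}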

// Shutdown stops all nodes and removes the cluster's temporary directory.
func (c *testCluster) Shutdown() {
	c.Stop()
	c.Cleanup()
}

// Stop terminates all node processes, keeping their data directories intact.
func (c *testCluster) Stop() {
	for _, node := range c.Nodes {
		node.Shutdown()
	}
}

// StartNewNodes starts size new nodes in parallel, each with a fresh data
// directory under the cluster's temporary directory.
func (c *testCluster) StartNewNodes(t *testing.T, size int) {
	c.Nodes = make([]*testNode, 0, size)

	errors := make(chan error, size)
	nodes := make(chan *testNode, size)
	for i := 0; i < size; i++ {
		go func(nodeIndex int) {
			dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", nodeIndex))
			if err := os.Mkdir(dir, 0700); err != nil {
				errors <- err
				return
			}

			node := newTestNode(t, dir)
			node.Name = fmt.Sprintf("swarm%02d", nodeIndex)

			nodes <- node
		}(i)
	}

	for i := 0; i < size; i++ {
		select {
		case node := <-nodes:
			c.Nodes = append(c.Nodes, node)
		case err := <-errors:
			t.Error(err)
		}
	}

	if t.Failed() {
		c.Shutdown()
		t.FailNow()
	}
}

// StartExistingNodes restarts nodes from the data directories created by a
// previous StartNewNodes call, authenticating with the given bzz account.
func (c *testCluster) StartExistingNodes(t *testing.T, size int, bzzaccount string) {
	c.Nodes = make([]*testNode, 0, size)
	for i := 0; i < size; i++ {
		dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i))
		node := existingTestNode(t, dir, bzzaccount)
		node.Name = fmt.Sprintf("swarm%02d", i)
		c.Nodes = append(c.Nodes, node)
	}
}
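
// A minimal restart sketch (TestClusterRestartExample and the zero account
// placeholder are hypothetical; a real test would take the hex address from
// the node's keystore): stop a running cluster and bring the same nodes back
// up from their existing data directories with StartExistingNodes.
func TestClusterRestartExample(t *testing.T) {
	t.Skip("illustrative sketch only")
	cluster := newTestCluster(t, 1)
	defer cluster.Shutdown()

	cluster.Stop()
	cluster.StartExistingNodes(t, 1, "0000000000000000000000000000000000000000")
}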

// Cleanup removes the cluster's temporary directory.
func (c *testCluster) Cleanup() {
	os.RemoveAll(c.TmpDir)
}

type testNode struct {
	Name       string
	Addr       string
	URL        string
	Enode      string
	Dir        string
	IpcPath    string
	PrivateKey *ecdsa.PrivateKey
	Client     *rpc.Client
	Cmd        *cmdtest.TestCmd
}

const testPassphrase = "swarm-test-passphrase"

func getTestAccount(t *testing.T, dir string) (conf *node.Config, account accounts.Account) {
	// create key
	conf = &node.Config{
		DataDir: dir,
		IPCPath: "bzzd.ipc",
		NoUSB:   true,
	}
	n, err := node.New(conf)
	if err != nil {
		t.Fatal(err)
	}
	account, err = n.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore).NewAccount(testPassphrase)
	if err != nil {
		t.Fatal(err)
	}

	// use a unique IPCPath when running tests on Windows
	if runtime.GOOS == "windows" {
		conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", account.Address.String())
	}

	return conf, account
}

// existingTestNode starts a swarm node from an existing data directory,
// unlocking the given bzz account with the test passphrase.
func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode {
	conf, _ := getTestAccount(t, dir)
	node := &testNode{Dir: dir}

	// use a unique IPCPath when running tests on Windows
	if runtime.GOOS == "windows" {
		conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", bzzaccount)
	}

	// assign ports
	ports, err := getAvailableTCPPorts(2)
	if err != nil {
		t.Fatal(err)
	}
	p2pPort := ports[0]
	httpPort := ports[1]

	// start the node
	node.Cmd = runSwarm(t,
		"--bootnodes", "",
		"--port", p2pPort,
		"--nat", "extip:127.0.0.1",
		"--datadir", dir,
		"--ipcpath", conf.IPCPath,
		"--ens-api", "",
		"--bzzaccount", bzzaccount,
		"--bzznetworkid", "321",
		"--bzzport", httpPort,
		"--verbosity", fmt.Sprint(*loglevel),
	)
	node.Cmd.InputLine(testPassphrase)
	defer func() {
		if t.Failed() {
			node.Shutdown()
		}
	}()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// ensure that all ports have active listeners
	// so that the next node will not get the same ports
	// when calling getAvailableTCPPorts
	err = waitTCPPorts(ctx, ports...)
	if err != nil {
		t.Fatal(err)
	}

	// wait for the node to start
	for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
		node.Client, err = rpc.Dial(conf.IPCEndpoint())
		if err == nil {
			break
		}
	}
	if node.Client == nil {
		t.Fatal(err)
	}

	// load info
	var info swarm.Info
	if err := node.Client.Call(&info, "bzz_info"); err != nil {
		t.Fatal(err)
	}
	node.Addr = net.JoinHostPort("127.0.0.1", info.Port)
	node.URL = "http://" + node.Addr

	var nodeInfo p2p.NodeInfo
	if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil {
		t.Fatal(err)
	}
	node.Enode = nodeInfo.Enode
	node.IpcPath = conf.IPCPath
	return node
}

// newTestNode creates a fresh account in dir and starts a swarm node with it.
func newTestNode(t *testing.T, dir string) *testNode {
	conf, account := getTestAccount(t, dir)
	ks := keystore.NewKeyStore(path.Join(dir, "keystore"), 1<<18, 1)

	pk := decryptStoreAccount(ks, account.Address.Hex(), []string{testPassphrase})

	node := &testNode{Dir: dir, PrivateKey: pk}

	// assign ports
	ports, err := getAvailableTCPPorts(2)
	if err != nil {
		t.Fatal(err)
	}
	p2pPort := ports[0]
	httpPort := ports[1]

	// start the node
	node.Cmd = runSwarm(t,
		"--bootnodes", "",
		"--port", p2pPort,
		"--nat", "extip:127.0.0.1",
		"--datadir", dir,
		"--ipcpath", conf.IPCPath,
		"--ens-api", "",
		"--bzzaccount", account.Address.String(),
		"--bzznetworkid", "321",
		"--bzzport", httpPort,
		"--verbosity", fmt.Sprint(*loglevel),
	)
	node.Cmd.InputLine(testPassphrase)
	defer func() {
		if t.Failed() {
			node.Shutdown()
		}
	}()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// ensure that all ports have active listeners
	// so that the next node will not get the same ports
	// when calling getAvailableTCPPorts
	err = waitTCPPorts(ctx, ports...)
	if err != nil {
		t.Fatal(err)
	}

	// wait for the node to start
	for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
		node.Client, err = rpc.Dial(conf.IPCEndpoint())
		if err == nil {
			break
		}
	}
	if node.Client == nil {
		t.Fatal(err)
	}

	// load info
	var info swarm.Info
	if err := node.Client.Call(&info, "bzz_info"); err != nil {
		t.Fatal(err)
	}
	node.Addr = net.JoinHostPort("127.0.0.1", info.Port)
	node.URL = "http://" + node.Addr

	var nodeInfo p2p.NodeInfo
	if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil {
		t.Fatal(err)
	}
	node.Enode = nodeInfo.Enode
	node.IpcPath = conf.IPCPath
	return node
}

// Shutdown kills the node's running swarm process, if any.
func (n *testNode) Shutdown() {
	if n.Cmd != nil {
		n.Cmd.Kill()
	}
}

// getAvailableTCPPorts returns a set of ports that
// nothing is listening on at the time.
//
// Calling this function in sequence cannot guarantee that the same port
// will not be returned by different calls, as each listener is closed
// within the function rather than after all listeners have been started
// and unique available ports selected.
func getAvailableTCPPorts(count int) (ports []string, err error) {
	for i := 0; i < count; i++ {
		l, err := net.Listen("tcp", "127.0.0.1:0")
		if err != nil {
			return nil, err
		}
		// defer close in the loop to be sure the same port will not
		// be selected in the next iteration
		defer l.Close()

		_, port, err := net.SplitHostPort(l.Addr().String())
		if err != nil {
			return nil, err
		}
		ports = append(ports, port)
	}
	return ports, nil
}

// waitTCPPorts blocks until tcp connections can be
// established on all provided ports. It runs all
// port dialers in parallel, and returns the first
// encountered error.
// See waitTCPPort also.
func waitTCPPorts(ctx context.Context, ports ...string) error {
	var err error
	// mu locks the err variable that is assigned in
	// other goroutines
	var mu sync.Mutex

	// cancel stops all goroutines
	// when the first error is returned
	// to prevent unnecessary waiting
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	var wg sync.WaitGroup
	for _, port := range ports {
		wg.Add(1)
		go func(port string) {
			defer wg.Done()

			e := waitTCPPort(ctx, port)

			mu.Lock()
			defer mu.Unlock()
			if e != nil && err == nil {
				err = e
				cancel()
			}
		}(port)
	}
	wg.Wait()

	return err
}

// waitTCPPort blocks until a tcp connection can be established
// on the provided port. It waits at most 3 minutes
// to prevent indefinite blocking, which can be shortened
// with the provided context instance. The dialer has a 10 second
// timeout in every iteration, and a connection refused error is
// retried at 100 millisecond intervals.
func waitTCPPort(ctx context.Context, port string) error {
	ctx, cancel := context.WithTimeout(ctx, 3*time.Minute)
	defer cancel()

	for {
		c, err := (&net.Dialer{Timeout: 10 * time.Second}).DialContext(ctx, "tcp", "127.0.0.1:"+port)
		if err != nil {
			if operr, ok := err.(*net.OpError); ok {
				if syserr, ok := operr.Err.(*os.SyscallError); ok && syserr.Err == syscall.ECONNREFUSED {
					time.Sleep(100 * time.Millisecond)
					continue
				}
			}
			return err
		}
		return c.Close()
	}
}
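
// Example_reserveThenWait sketches the reserve-then-wait pattern the node
// helpers above rely on: pick currently-free ports, have the process under
// test bind them (here a plain net.Listen stands in for the spawned swarm
// node), then block in waitTCPPorts until the listeners are live so that a
// later getAvailableTCPPorts call cannot hand out the same ports again.
// The example has no Output comment, so it is compiled but never run.
func Example_reserveThenWait() {
	ports, err := getAvailableTCPPorts(2)
	if err != nil {
		fmt.Println(err)
		return
	}

	// stand-in for the spawned node: bind the reserved ports ourselves
	for _, port := range ports {
		l, err := net.Listen("tcp", net.JoinHostPort("127.0.0.1", port))
		if err != nil {
			fmt.Println(err)
			return
		}
		defer l.Close()
	}

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := waitTCPPorts(ctx, ports...); err != nil {
		fmt.Println(err)
	}
}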