upload_and_sync.go

// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"bytes"
	"context"
	"fmt"
	"io/ioutil"
	"math/rand"
	"os"
	"strings"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/swarm/api"
	"github.com/ethereum/go-ethereum/swarm/storage"
	"github.com/ethereum/go-ethereum/swarm/testutil"
	"github.com/pborman/uuid"

	cli "gopkg.in/urfave/cli.v1"
)
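
// uploadAndSyncCmd generates a pseudo-random payload (filesize*1000 bytes,
// derived from the package-level seed, both presumably set via CLI flags
// elsewhere in this package) and runs uploadAndSync in a goroutine. It bumps
// the fail or timeout counter accordingly and, on timeout, dumps per-host
// chunk availability via trackChunks.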
func uploadAndSyncCmd(ctx *cli.Context, tuid string) error {
	// generate a deterministic, pseudo-random payload for this run
	randomBytes := testutil.RandomBytes(seed, filesize*1000)

	errc := make(chan error)

	go func() {
		errc <- uploadAndSync(ctx, randomBytes, tuid)
	}()

	select {
	case err := <-errc:
		if err != nil {
			metrics.GetOrRegisterCounter(fmt.Sprintf("%s.fail", commandName), nil).Inc(1)
		}
		return err
	case <-time.After(time.Duration(timeout) * time.Second):
		metrics.GetOrRegisterCounter(fmt.Sprintf("%s.timeout", commandName), nil).Inc(1)

		e := fmt.Errorf("timeout after %v sec", timeout)
		// trigger debug functionality on randomBytes
		err := trackChunks(randomBytes[:])
		if err != nil {
			e = fmt.Errorf("%v; trackChunks failed: %v", e, err)
		}

		return e
	}
}
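
// trackChunks is the debug path taken on timeout: it recomputes all chunk
// references for the test data locally, then asks each host over RPC
// (bzz_has, dialed via websocket on port 8546) which of those chunks it
// holds, logging a per-host bitmap of present (1) and missing (0) chunks.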
func trackChunks(testData []byte) error {
	log.Warn("Test timed out, running chunk debug sequence")

	addrs, err := getAllRefs(testData)
	if err != nil {
		return err
	}

	for i, ref := range addrs {
		log.Trace(fmt.Sprintf("ref %d", i), "ref", ref)
	}

	for _, host := range hosts {
		httpHost := fmt.Sprintf("ws://%s:%d", host, 8546)

		hostChunks := []string{}

		rpcClient, err := rpc.Dial(httpHost)
		if err != nil {
			log.Error("Error dialing host", "err", err)
			return err
		}

		var hasInfo []api.HasInfo
		err = rpcClient.Call(&hasInfo, "bzz_has", addrs)
		if err != nil {
			log.Error("Error calling host", "err", err)
			return err
		}

		count := 0 // number of chunks this host is missing
		for _, info := range hasInfo {
			if info.Has {
				hostChunks = append(hostChunks, "1")
			} else {
				hostChunks = append(hostChunks, "0")
				count++
			}
		}

		if count == 0 {
			log.Info("host reported to have all chunks", "host", host)
		}

		log.Trace("chunks", "chunks", strings.Join(hostChunks, ""), "host", host)
	}
	return nil
}
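
// getAllRefs chunks testData through a throwaway local file store in a
// temporary directory and returns the collection of all resulting chunk
// addresses, bounded by the trackTimeout deadline.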
func getAllRefs(testData []byte) (storage.AddressCollection, error) {
	log.Trace("Getting all references for given root hash")
	datadir, err := ioutil.TempDir("", "chunk-debug")
	if err != nil {
		return nil, fmt.Errorf("unable to create temp dir: %v", err)
	}
	defer os.RemoveAll(datadir)

	fileStore, err := storage.NewLocalFileStore(datadir, make([]byte, 32))
	if err != nil {
		return nil, err
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(trackTimeout)*time.Second)
	defer cancel()

	reader := bytes.NewReader(testData)
	return fileStore.GetAllReferences(ctx, reader, false)
}
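
// uploadAndSync uploads the payload to the first host, computes its digest,
// waits syncDelay seconds for the cluster to sync, and then fetches the
// content back from either one random host (single mode) or every other
// host, recording fetch timings as metrics.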
func uploadAndSync(c *cli.Context, randomBytes []byte, tuid string) error {
	log.Info("uploading to "+httpEndpoint(hosts[0])+" and syncing", "tuid", tuid, "seed", seed)

	t1 := time.Now()
	hash, err := upload(randomBytes, httpEndpoint(hosts[0]))
	if err != nil {
		log.Error(err.Error())
		return err
	}
	t2 := time.Since(t1)
	metrics.GetOrRegisterResettingTimer("upload-and-sync.upload-time", nil).Update(t2)

	fhash, err := digest(bytes.NewReader(randomBytes))
	if err != nil {
		log.Error(err.Error())
		return err
	}

	log.Info("uploaded successfully", "tuid", tuid, "hash", hash, "took", t2, "digest", fmt.Sprintf("%x", fhash))
	time.Sleep(time.Duration(syncDelay) * time.Second)

	wg := sync.WaitGroup{}
	if single {
		// fetch from a single randomly chosen host (never the upload host at index 0)
		randIndex := 1 + rand.Intn(len(hosts)-1)
		ruid := uuid.New()[:8]
		wg.Add(1)
		go func(endpoint string, ruid string) {
			for {
				start := time.Now()
				err := fetch(hash, endpoint, fhash, ruid, tuid)
				if err != nil {
					// retry until the fetch succeeds
					continue
				}
				ended := time.Since(start)

				metrics.GetOrRegisterResettingTimer("upload-and-sync.single.fetch-time", nil).Update(ended)
				log.Info("fetch successful", "tuid", tuid, "ruid", ruid, "took", ended, "endpoint", endpoint)
				wg.Done()
				return
			}
		}(httpEndpoint(hosts[randIndex]), ruid)
	} else {
		// fetch from every host except the upload host at index 0
		for _, endpoint := range hosts[1:] {
			ruid := uuid.New()[:8]
			wg.Add(1)
			go func(endpoint string, ruid string) {
				for {
					start := time.Now()
					err := fetch(hash, endpoint, fhash, ruid, tuid)
					if err != nil {
						// retry until the fetch succeeds
						continue
					}
					ended := time.Since(start)

					metrics.GetOrRegisterResettingTimer("upload-and-sync.each.fetch-time", nil).Update(ended)
					log.Info("fetch successful", "tuid", tuid, "ruid", ruid, "took", ended, "endpoint", endpoint)
					wg.Done()
					return
				}
			}(httpEndpoint(endpoint), ruid)
		}
	}
	wg.Wait()
	log.Info("all hosts synced random file successfully")

	return nil
}