syncer_test.go 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402
  1. // Copyright 2018 The go-ethereum Authors
  2. // This file is part of the go-ethereum library.
  3. //
  4. // The go-ethereum library is free software: you can redistribute it and/or modify
  5. // it under the terms of the GNU Lesser General Public License as published by
  6. // the Free Software Foundation, either version 3 of the License, or
  7. // (at your option) any later version.
  8. //
  9. // The go-ethereum library is distributed in the hope that it will be useful,
  10. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. // GNU Lesser General Public License for more details.
  13. //
  14. // You should have received a copy of the GNU Lesser General Public License
  15. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
  16. package stream
  17. import (
  18. "context"
  19. "fmt"
  20. "io/ioutil"
  21. "math"
  22. "os"
  23. "sync"
  24. "testing"
  25. "time"
  26. "github.com/ethereum/go-ethereum/common"
  27. "github.com/ethereum/go-ethereum/node"
  28. "github.com/ethereum/go-ethereum/p2p/enode"
  29. "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
  30. "github.com/ethereum/go-ethereum/swarm/log"
  31. "github.com/ethereum/go-ethereum/swarm/network"
  32. "github.com/ethereum/go-ethereum/swarm/network/simulation"
  33. "github.com/ethereum/go-ethereum/swarm/state"
  34. "github.com/ethereum/go-ethereum/swarm/storage"
  35. "github.com/ethereum/go-ethereum/swarm/storage/mock"
  36. mockmem "github.com/ethereum/go-ethereum/swarm/storage/mock/mem"
  37. "github.com/ethereum/go-ethereum/swarm/testutil"
  38. )
// dataChunkCount is the number of chunks of random data each uploading
// node stores per run in TestSyncerSimulation.
const dataChunkCount = 200
  40. func TestSyncerSimulation(t *testing.T) {
  41. testSyncBetweenNodes(t, 2, dataChunkCount, true, 1)
  42. testSyncBetweenNodes(t, 4, dataChunkCount, true, 1)
  43. testSyncBetweenNodes(t, 8, dataChunkCount, true, 1)
  44. testSyncBetweenNodes(t, 16, dataChunkCount, true, 1)
  45. }
  46. func createMockStore(globalStore mock.GlobalStorer, id enode.ID, addr *network.BzzAddr) (lstore storage.ChunkStore, datadir string, err error) {
  47. address := common.BytesToAddress(id.Bytes())
  48. mockStore := globalStore.NewNodeStore(address)
  49. params := storage.NewDefaultLocalStoreParams()
  50. datadir, err = ioutil.TempDir("", "localMockStore-"+id.TerminalString())
  51. if err != nil {
  52. return nil, "", err
  53. }
  54. params.Init(datadir)
  55. params.BaseKey = addr.Over()
  56. lstore, err = storage.NewLocalStore(params, mockStore)
  57. if err != nil {
  58. return nil, "", err
  59. }
  60. return lstore, datadir, nil
  61. }
  62. func testSyncBetweenNodes(t *testing.T, nodes, chunkCount int, skipCheck bool, po uint8) {
  63. sim := simulation.New(map[string]simulation.ServiceFunc{
  64. "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
  65. var store storage.ChunkStore
  66. var datadir string
  67. node := ctx.Config.Node()
  68. addr := network.NewAddr(node)
  69. //hack to put addresses in same space
  70. addr.OAddr[0] = byte(0)
  71. if *useMockStore {
  72. store, datadir, err = createMockStore(mockmem.NewGlobalStore(), node.ID(), addr)
  73. } else {
  74. store, datadir, err = createTestLocalStorageForID(node.ID(), addr)
  75. }
  76. if err != nil {
  77. return nil, nil, err
  78. }
  79. bucket.Store(bucketKeyStore, store)
  80. cleanup = func() {
  81. store.Close()
  82. os.RemoveAll(datadir)
  83. }
  84. localStore := store.(*storage.LocalStore)
  85. netStore, err := storage.NewNetStore(localStore, nil)
  86. if err != nil {
  87. return nil, nil, err
  88. }
  89. bucket.Store(bucketKeyDB, netStore)
  90. kad := network.NewKademlia(addr.Over(), network.NewKadParams())
  91. delivery := NewDelivery(kad, netStore)
  92. netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
  93. bucket.Store(bucketKeyDelivery, delivery)
  94. r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
  95. Retrieval: RetrievalDisabled,
  96. Syncing: SyncingAutoSubscribe,
  97. SkipCheck: skipCheck,
  98. }, nil)
  99. fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
  100. bucket.Store(bucketKeyFileStore, fileStore)
  101. return r, cleanup, nil
  102. },
  103. })
  104. defer sim.Close()
  105. // create context for simulation run
  106. timeout := 30 * time.Second
  107. ctx, cancel := context.WithTimeout(context.Background(), timeout)
  108. // defer cancel should come before defer simulation teardown
  109. defer cancel()
  110. _, err := sim.AddNodesAndConnectChain(nodes)
  111. if err != nil {
  112. t.Fatal(err)
  113. }
  114. result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
  115. nodeIDs := sim.UpNodeIDs()
  116. nodeIndex := make(map[enode.ID]int)
  117. for i, id := range nodeIDs {
  118. nodeIndex[id] = i
  119. }
  120. disconnections := sim.PeerEvents(
  121. context.Background(),
  122. sim.NodeIDs(),
  123. simulation.NewPeerEventsFilter().Drop(),
  124. )
  125. go func() {
  126. for d := range disconnections {
  127. if d.Error != nil {
  128. log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
  129. t.Fatal(d.Error)
  130. }
  131. }
  132. }()
  133. // each node Subscribes to each other's swarmChunkServerStreamName
  134. for j := 0; j < nodes-1; j++ {
  135. id := nodeIDs[j]
  136. client, err := sim.Net.GetNode(id).Client()
  137. if err != nil {
  138. t.Fatal(err)
  139. }
  140. sid := nodeIDs[j+1]
  141. client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream("SYNC", FormatSyncBinKey(1), false), NewRange(0, 0), Top)
  142. if err != nil {
  143. return err
  144. }
  145. if j > 0 || nodes == 2 {
  146. item, ok := sim.NodeItem(nodeIDs[j], bucketKeyFileStore)
  147. if !ok {
  148. return fmt.Errorf("No filestore")
  149. }
  150. fileStore := item.(*storage.FileStore)
  151. size := chunkCount * chunkSize
  152. _, wait, err := fileStore.Store(ctx, testutil.RandomReader(j, size), int64(size), false)
  153. if err != nil {
  154. t.Fatal(err.Error())
  155. }
  156. wait(ctx)
  157. }
  158. }
  159. // here we distribute chunks of a random file into stores 1...nodes
  160. if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
  161. return err
  162. }
  163. // collect hashes in po 1 bin for each node
  164. hashes := make([][]storage.Address, nodes)
  165. totalHashes := 0
  166. hashCounts := make([]int, nodes)
  167. for i := nodes - 1; i >= 0; i-- {
  168. if i < nodes-1 {
  169. hashCounts[i] = hashCounts[i+1]
  170. }
  171. item, ok := sim.NodeItem(nodeIDs[i], bucketKeyDB)
  172. if !ok {
  173. return fmt.Errorf("No DB")
  174. }
  175. netStore := item.(*storage.NetStore)
  176. netStore.Iterator(0, math.MaxUint64, po, func(addr storage.Address, index uint64) bool {
  177. hashes[i] = append(hashes[i], addr)
  178. totalHashes++
  179. hashCounts[i]++
  180. return true
  181. })
  182. }
  183. var total, found int
  184. for _, node := range nodeIDs {
  185. i := nodeIndex[node]
  186. for j := i; j < nodes; j++ {
  187. total += len(hashes[j])
  188. for _, key := range hashes[j] {
  189. item, ok := sim.NodeItem(nodeIDs[j], bucketKeyDB)
  190. if !ok {
  191. return fmt.Errorf("No DB")
  192. }
  193. db := item.(*storage.NetStore)
  194. _, err := db.Get(ctx, key)
  195. if err == nil {
  196. found++
  197. }
  198. }
  199. }
  200. log.Debug("sync check", "node", node, "index", i, "bin", po, "found", found, "total", total)
  201. }
  202. if total == found && total > 0 {
  203. return nil
  204. }
  205. return fmt.Errorf("Total not equallying found: total is %d", total)
  206. })
  207. if result.Error != nil {
  208. t.Fatal(result.Error)
  209. }
  210. }
  211. //TestSameVersionID just checks that if the version is not changed,
  212. //then streamer peers see each other
  213. func TestSameVersionID(t *testing.T) {
  214. //test version ID
  215. v := uint(1)
  216. sim := simulation.New(map[string]simulation.ServiceFunc{
  217. "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
  218. var store storage.ChunkStore
  219. var datadir string
  220. node := ctx.Config.Node()
  221. addr := network.NewAddr(node)
  222. store, datadir, err = createTestLocalStorageForID(node.ID(), addr)
  223. if err != nil {
  224. return nil, nil, err
  225. }
  226. bucket.Store(bucketKeyStore, store)
  227. cleanup = func() {
  228. store.Close()
  229. os.RemoveAll(datadir)
  230. }
  231. localStore := store.(*storage.LocalStore)
  232. netStore, err := storage.NewNetStore(localStore, nil)
  233. if err != nil {
  234. return nil, nil, err
  235. }
  236. bucket.Store(bucketKeyDB, netStore)
  237. kad := network.NewKademlia(addr.Over(), network.NewKadParams())
  238. delivery := NewDelivery(kad, netStore)
  239. netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
  240. bucket.Store(bucketKeyDelivery, delivery)
  241. r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
  242. Retrieval: RetrievalDisabled,
  243. Syncing: SyncingAutoSubscribe,
  244. }, nil)
  245. //assign to each node the same version ID
  246. r.spec.Version = v
  247. bucket.Store(bucketKeyRegistry, r)
  248. return r, cleanup, nil
  249. },
  250. })
  251. defer sim.Close()
  252. //connect just two nodes
  253. log.Info("Adding nodes to simulation")
  254. _, err := sim.AddNodesAndConnectChain(2)
  255. if err != nil {
  256. t.Fatal(err)
  257. }
  258. log.Info("Starting simulation")
  259. ctx := context.Background()
  260. //make sure they have time to connect
  261. time.Sleep(200 * time.Millisecond)
  262. result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
  263. //get the pivot node's filestore
  264. nodes := sim.UpNodeIDs()
  265. item, ok := sim.NodeItem(nodes[0], bucketKeyRegistry)
  266. if !ok {
  267. return fmt.Errorf("No filestore")
  268. }
  269. registry := item.(*Registry)
  270. //the peers should connect, thus getting the peer should not return nil
  271. if registry.getPeer(nodes[1]) == nil {
  272. t.Fatal("Expected the peer to not be nil, but it is")
  273. }
  274. return nil
  275. })
  276. if result.Error != nil {
  277. t.Fatal(result.Error)
  278. }
  279. log.Info("Simulation ended")
  280. }
  281. //TestDifferentVersionID proves that if the streamer protocol version doesn't match,
  282. //then the peers are not connected at streamer level
  283. func TestDifferentVersionID(t *testing.T) {
  284. //create a variable to hold the version ID
  285. v := uint(0)
  286. sim := simulation.New(map[string]simulation.ServiceFunc{
  287. "streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
  288. var store storage.ChunkStore
  289. var datadir string
  290. node := ctx.Config.Node()
  291. addr := network.NewAddr(node)
  292. store, datadir, err = createTestLocalStorageForID(node.ID(), addr)
  293. if err != nil {
  294. return nil, nil, err
  295. }
  296. bucket.Store(bucketKeyStore, store)
  297. cleanup = func() {
  298. store.Close()
  299. os.RemoveAll(datadir)
  300. }
  301. localStore := store.(*storage.LocalStore)
  302. netStore, err := storage.NewNetStore(localStore, nil)
  303. if err != nil {
  304. return nil, nil, err
  305. }
  306. bucket.Store(bucketKeyDB, netStore)
  307. kad := network.NewKademlia(addr.Over(), network.NewKadParams())
  308. delivery := NewDelivery(kad, netStore)
  309. netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
  310. bucket.Store(bucketKeyDelivery, delivery)
  311. r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
  312. Retrieval: RetrievalDisabled,
  313. Syncing: SyncingAutoSubscribe,
  314. }, nil)
  315. //increase the version ID for each node
  316. v++
  317. r.spec.Version = v
  318. bucket.Store(bucketKeyRegistry, r)
  319. return r, cleanup, nil
  320. },
  321. })
  322. defer sim.Close()
  323. //connect the nodes
  324. log.Info("Adding nodes to simulation")
  325. _, err := sim.AddNodesAndConnectChain(2)
  326. if err != nil {
  327. t.Fatal(err)
  328. }
  329. log.Info("Starting simulation")
  330. ctx := context.Background()
  331. //make sure they have time to connect
  332. time.Sleep(200 * time.Millisecond)
  333. result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
  334. //get the pivot node's filestore
  335. nodes := sim.UpNodeIDs()
  336. item, ok := sim.NodeItem(nodes[0], bucketKeyRegistry)
  337. if !ok {
  338. return fmt.Errorf("No filestore")
  339. }
  340. registry := item.(*Registry)
  341. //getting the other peer should fail due to the different version numbers
  342. if registry.getPeer(nodes[1]) != nil {
  343. t.Fatal("Expected the peer to be nil, but it is not")
  344. }
  345. return nil
  346. })
  347. if result.Error != nil {
  348. t.Fatal(result.Error)
  349. }
  350. log.Info("Simulation ended")
  351. }